repo_name
string
path
string
copies
string
size
string
content
string
license
string
Vegaviet-DevTeam/Kernel_N4_N910SLK
sound/pci/ctxfi/ctpcm.c
2959
12277
/** * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved. * * This source file is released under GPL v2 license (no other versions). * See the COPYING file included in the main directory of this source * distribution for the license terms and conditions. * * @File ctpcm.c * * @Brief * This file contains the definition of the pcm device functions. * * @Author Liu Chun * @Date Apr 2 2008 * */ #include "ctpcm.h" #include "cttimer.h" #include <linux/slab.h> #include <sound/pcm.h> /* Hardware descriptions for playback */ static struct snd_pcm_hardware ct_pcm_playback_hw = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE), .formats = (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_FLOAT_LE), .rates = (SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_192000), .rate_min = 8000, .rate_max = 192000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = (64), .period_bytes_max = (128*1024), .periods_min = 2, .periods_max = 1024, .fifo_size = 0, }; static struct snd_pcm_hardware ct_spdif_passthru_playback_hw = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE), .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = (SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_32000), .rate_min = 32000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = (64), .period_bytes_max = (128*1024), .periods_min = 2, .periods_max = 1024, .fifo_size = 0, }; /* Hardware descriptions for capture */ static struct snd_pcm_hardware ct_pcm_capture_hw = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP_VALID), .formats = (SNDRV_PCM_FMTBIT_U8 | 
SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_FLOAT_LE), .rates = (SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_96000), .rate_min = 8000, .rate_max = 96000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = (384), .period_bytes_max = (64*1024), .periods_min = 2, .periods_max = 1024, .fifo_size = 0, }; static void ct_atc_pcm_interrupt(struct ct_atc_pcm *atc_pcm) { struct ct_atc_pcm *apcm = atc_pcm; if (!apcm->substream) return; snd_pcm_period_elapsed(apcm->substream); } static void ct_atc_pcm_free_substream(struct snd_pcm_runtime *runtime) { struct ct_atc_pcm *apcm = runtime->private_data; struct ct_atc *atc = snd_pcm_substream_chip(apcm->substream); atc->pcm_release_resources(atc, apcm); ct_timer_instance_free(apcm->timer); kfree(apcm); runtime->private_data = NULL; } /* pcm playback operations */ static int ct_pcm_playback_open(struct snd_pcm_substream *substream) { struct ct_atc *atc = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct ct_atc_pcm *apcm; int err; apcm = kzalloc(sizeof(*apcm), GFP_KERNEL); if (!apcm) return -ENOMEM; apcm->substream = substream; apcm->interrupt = ct_atc_pcm_interrupt; if (IEC958 == substream->pcm->device) { runtime->hw = ct_spdif_passthru_playback_hw; atc->spdif_out_passthru(atc, 1); } else { runtime->hw = ct_pcm_playback_hw; if (FRONT == substream->pcm->device) runtime->hw.channels_max = 8; } err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (err < 0) { kfree(apcm); return err; } err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 1024, UINT_MAX); if (err < 0) { kfree(apcm); return err; } apcm->timer = ct_timer_instance_new(atc->timer, apcm); if (!apcm->timer) { kfree(apcm); return -ENOMEM; } runtime->private_data = apcm; runtime->private_free = ct_atc_pcm_free_substream; return 0; } static int ct_pcm_playback_close(struct snd_pcm_substream 
*substream) { struct ct_atc *atc = snd_pcm_substream_chip(substream); /* TODO: Notify mixer inactive. */ if (IEC958 == substream->pcm->device) atc->spdif_out_passthru(atc, 0); /* The ct_atc_pcm object will be freed by runtime->private_free */ return 0; } static int ct_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct ct_atc *atc = snd_pcm_substream_chip(substream); struct ct_atc_pcm *apcm = substream->runtime->private_data; int err; err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); if (err < 0) return err; /* clear previous resources */ atc->pcm_release_resources(atc, apcm); return err; } static int ct_pcm_hw_free(struct snd_pcm_substream *substream) { struct ct_atc *atc = snd_pcm_substream_chip(substream); struct ct_atc_pcm *apcm = substream->runtime->private_data; /* clear previous resources */ atc->pcm_release_resources(atc, apcm); /* Free snd-allocated pages */ return snd_pcm_lib_free_pages(substream); } static int ct_pcm_playback_prepare(struct snd_pcm_substream *substream) { int err; struct ct_atc *atc = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct ct_atc_pcm *apcm = runtime->private_data; if (IEC958 == substream->pcm->device) err = atc->spdif_passthru_playback_prepare(atc, apcm); else err = atc->pcm_playback_prepare(atc, apcm); if (err < 0) { printk(KERN_ERR "ctxfi: Preparing pcm playback failed!!!\n"); return err; } return 0; } static int ct_pcm_playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct ct_atc *atc = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct ct_atc_pcm *apcm = runtime->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: atc->pcm_playback_start(atc, apcm); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 
atc->pcm_playback_stop(atc, apcm); break; default: break; } return 0; } static snd_pcm_uframes_t ct_pcm_playback_pointer(struct snd_pcm_substream *substream) { unsigned long position; struct ct_atc *atc = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct ct_atc_pcm *apcm = runtime->private_data; /* Read out playback position */ position = atc->pcm_playback_position(atc, apcm); position = bytes_to_frames(runtime, position); if (position >= runtime->buffer_size) position = 0; return position; } /* pcm capture operations */ static int ct_pcm_capture_open(struct snd_pcm_substream *substream) { struct ct_atc *atc = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct ct_atc_pcm *apcm; int err; apcm = kzalloc(sizeof(*apcm), GFP_KERNEL); if (!apcm) return -ENOMEM; apcm->started = 0; apcm->substream = substream; apcm->interrupt = ct_atc_pcm_interrupt; runtime->hw = ct_pcm_capture_hw; runtime->hw.rate_max = atc->rsr * atc->msr; err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (err < 0) { kfree(apcm); return err; } err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 1024, UINT_MAX); if (err < 0) { kfree(apcm); return err; } apcm->timer = ct_timer_instance_new(atc->timer, apcm); if (!apcm->timer) { kfree(apcm); return -ENOMEM; } runtime->private_data = apcm; runtime->private_free = ct_atc_pcm_free_substream; return 0; } static int ct_pcm_capture_close(struct snd_pcm_substream *substream) { /* The ct_atc_pcm object will be freed by runtime->private_free */ /* TODO: Notify mixer inactive. 
*/ return 0; } static int ct_pcm_capture_prepare(struct snd_pcm_substream *substream) { int err; struct ct_atc *atc = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct ct_atc_pcm *apcm = runtime->private_data; err = atc->pcm_capture_prepare(atc, apcm); if (err < 0) { printk(KERN_ERR "ctxfi: Preparing pcm capture failed!!!\n"); return err; } return 0; } static int ct_pcm_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct ct_atc *atc = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct ct_atc_pcm *apcm = runtime->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: atc->pcm_capture_start(atc, apcm); break; case SNDRV_PCM_TRIGGER_STOP: atc->pcm_capture_stop(atc, apcm); break; default: atc->pcm_capture_stop(atc, apcm); break; } return 0; } static snd_pcm_uframes_t ct_pcm_capture_pointer(struct snd_pcm_substream *substream) { unsigned long position; struct ct_atc *atc = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct ct_atc_pcm *apcm = runtime->private_data; /* Read out playback position */ position = atc->pcm_capture_position(atc, apcm); position = bytes_to_frames(runtime, position); if (position >= runtime->buffer_size) position = 0; return position; } /* PCM operators for playback */ static struct snd_pcm_ops ct_pcm_playback_ops = { .open = ct_pcm_playback_open, .close = ct_pcm_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = ct_pcm_hw_params, .hw_free = ct_pcm_hw_free, .prepare = ct_pcm_playback_prepare, .trigger = ct_pcm_playback_trigger, .pointer = ct_pcm_playback_pointer, .page = snd_pcm_sgbuf_ops_page, }; /* PCM operators for capture */ static struct snd_pcm_ops ct_pcm_capture_ops = { .open = ct_pcm_capture_open, .close = ct_pcm_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = ct_pcm_hw_params, .hw_free = ct_pcm_hw_free, .prepare = ct_pcm_capture_prepare, .trigger = 
ct_pcm_capture_trigger, .pointer = ct_pcm_capture_pointer, .page = snd_pcm_sgbuf_ops_page, }; static const struct snd_pcm_chmap_elem surround_map[] = { { .channels = 1, .map = { SNDRV_CHMAP_MONO } }, { .channels = 2, .map = { SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } }, { } }; static const struct snd_pcm_chmap_elem clfe_map[] = { { .channels = 1, .map = { SNDRV_CHMAP_MONO } }, { .channels = 2, .map = { SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } }, { } }; static const struct snd_pcm_chmap_elem side_map[] = { { .channels = 1, .map = { SNDRV_CHMAP_MONO } }, { .channels = 2, .map = { SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } }, { } }; /* Create ALSA pcm device */ int ct_alsa_pcm_create(struct ct_atc *atc, enum CTALSADEVS device, const char *device_name) { struct snd_pcm *pcm; const struct snd_pcm_chmap_elem *map; int chs; int err; int playback_count, capture_count; playback_count = (IEC958 == device) ? 1 : 256; capture_count = (FRONT == device) ? 1 : 0; err = snd_pcm_new(atc->card, "ctxfi", device, playback_count, capture_count, &pcm); if (err < 0) { printk(KERN_ERR "ctxfi: snd_pcm_new failed!! Err=%d\n", err); return err; } pcm->private_data = atc; pcm->info_flags = 0; pcm->dev_subclass = SNDRV_PCM_SUBCLASS_GENERIC_MIX; strlcpy(pcm->name, device_name, sizeof(pcm->name)); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &ct_pcm_playback_ops); if (FRONT == device) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &ct_pcm_capture_ops); snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG, snd_dma_pci_data(atc->pci), 128*1024, 128*1024); chs = 2; switch (device) { case FRONT: chs = 8; map = snd_pcm_std_chmaps; break; case SURROUND: map = surround_map; break; case CLFE: map = clfe_map; break; case SIDE: map = side_map; break; default: map = snd_pcm_std_chmaps; break; } err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK, map, chs, 0, NULL); if (err < 0) return err; #ifdef CONFIG_PM_SLEEP atc->pcms[device] = pcm; #endif return 0; }
gpl-2.0
ssuthiku/linux
drivers/video/fbdev/aty/mach64_accel.c
4495
11909
/* * ATI Mach64 Hardware Acceleration */ #include <linux/delay.h> #include <asm/unaligned.h> #include <linux/fb.h> #include <video/mach64.h> #include "atyfb.h" /* * Generic Mach64 routines */ /* this is for DMA GUI engine! work in progress */ typedef struct { u32 frame_buf_offset; u32 system_mem_addr; u32 command; u32 reserved; } BM_DESCRIPTOR_ENTRY; #define LAST_DESCRIPTOR (1 << 31) #define SYSTEM_TO_FRAME_BUFFER 0 static u32 rotation24bpp(u32 dx, u32 direction) { u32 rotation; if (direction & DST_X_LEFT_TO_RIGHT) { rotation = (dx / 4) % 6; } else { rotation = ((dx + 2) / 4) % 6; } return ((rotation << 8) | DST_24_ROTATION_ENABLE); } void aty_reset_engine(const struct atyfb_par *par) { /* reset engine */ aty_st_le32(GEN_TEST_CNTL, aty_ld_le32(GEN_TEST_CNTL, par) & ~(GUI_ENGINE_ENABLE | HWCURSOR_ENABLE), par); /* enable engine */ aty_st_le32(GEN_TEST_CNTL, aty_ld_le32(GEN_TEST_CNTL, par) | GUI_ENGINE_ENABLE, par); /* ensure engine is not locked up by clearing any FIFO or */ /* HOST errors */ aty_st_le32(BUS_CNTL, aty_ld_le32(BUS_CNTL, par) | BUS_HOST_ERR_ACK | BUS_FIFO_ERR_ACK, par); } static void reset_GTC_3D_engine(const struct atyfb_par *par) { aty_st_le32(SCALE_3D_CNTL, 0xc0, par); mdelay(GTC_3D_RESET_DELAY); aty_st_le32(SETUP_CNTL, 0x00, par); mdelay(GTC_3D_RESET_DELAY); aty_st_le32(SCALE_3D_CNTL, 0x00, par); mdelay(GTC_3D_RESET_DELAY); } void aty_init_engine(struct atyfb_par *par, struct fb_info *info) { u32 pitch_value; u32 vxres; /* determine modal information from global mode structure */ pitch_value = info->fix.line_length / (info->var.bits_per_pixel / 8); vxres = info->var.xres_virtual; if (info->var.bits_per_pixel == 24) { /* In 24 bpp, the engine is in 8 bpp - this requires that all */ /* horizontal coordinates and widths must be adjusted */ pitch_value *= 3; vxres *= 3; } /* On GTC (RagePro), we need to reset the 3D engine before */ if (M64_HAS(RESET_3D)) reset_GTC_3D_engine(par); /* Reset engine, enable, and clear any engine errors */ 
aty_reset_engine(par); /* Ensure that vga page pointers are set to zero - the upper */ /* page pointers are set to 1 to handle overflows in the */ /* lower page */ aty_st_le32(MEM_VGA_WP_SEL, 0x00010000, par); aty_st_le32(MEM_VGA_RP_SEL, 0x00010000, par); /* ---- Setup standard engine context ---- */ /* All GUI registers here are FIFOed - therefore, wait for */ /* the appropriate number of empty FIFO entries */ wait_for_fifo(14, par); /* enable all registers to be loaded for context loads */ aty_st_le32(CONTEXT_MASK, 0xFFFFFFFF, par); /* set destination pitch to modal pitch, set offset to zero */ aty_st_le32(DST_OFF_PITCH, (pitch_value / 8) << 22, par); /* zero these registers (set them to a known state) */ aty_st_le32(DST_Y_X, 0, par); aty_st_le32(DST_HEIGHT, 0, par); aty_st_le32(DST_BRES_ERR, 0, par); aty_st_le32(DST_BRES_INC, 0, par); aty_st_le32(DST_BRES_DEC, 0, par); /* set destination drawing attributes */ aty_st_le32(DST_CNTL, DST_LAST_PEL | DST_Y_TOP_TO_BOTTOM | DST_X_LEFT_TO_RIGHT, par); /* set source pitch to modal pitch, set offset to zero */ aty_st_le32(SRC_OFF_PITCH, (pitch_value / 8) << 22, par); /* set these registers to a known state */ aty_st_le32(SRC_Y_X, 0, par); aty_st_le32(SRC_HEIGHT1_WIDTH1, 1, par); aty_st_le32(SRC_Y_X_START, 0, par); aty_st_le32(SRC_HEIGHT2_WIDTH2, 1, par); /* set source pixel retrieving attributes */ aty_st_le32(SRC_CNTL, SRC_LINE_X_LEFT_TO_RIGHT, par); /* set host attributes */ wait_for_fifo(13, par); aty_st_le32(HOST_CNTL, 0, par); /* set pattern attributes */ aty_st_le32(PAT_REG0, 0, par); aty_st_le32(PAT_REG1, 0, par); aty_st_le32(PAT_CNTL, 0, par); /* set scissors to modal size */ aty_st_le32(SC_LEFT, 0, par); aty_st_le32(SC_TOP, 0, par); aty_st_le32(SC_BOTTOM, par->crtc.vyres - 1, par); aty_st_le32(SC_RIGHT, vxres - 1, par); /* set background color to minimum value (usually BLACK) */ aty_st_le32(DP_BKGD_CLR, 0, par); /* set foreground color to maximum value (usually WHITE) */ aty_st_le32(DP_FRGD_CLR, 0xFFFFFFFF, par); 
/* set write mask to effect all pixel bits */ aty_st_le32(DP_WRITE_MASK, 0xFFFFFFFF, par); /* set foreground mix to overpaint and background mix to */ /* no-effect */ aty_st_le32(DP_MIX, FRGD_MIX_S | BKGD_MIX_D, par); /* set primary source pixel channel to foreground color */ /* register */ aty_st_le32(DP_SRC, FRGD_SRC_FRGD_CLR, par); /* set compare functionality to false (no-effect on */ /* destination) */ wait_for_fifo(3, par); aty_st_le32(CLR_CMP_CLR, 0, par); aty_st_le32(CLR_CMP_MASK, 0xFFFFFFFF, par); aty_st_le32(CLR_CMP_CNTL, 0, par); /* set pixel depth */ wait_for_fifo(2, par); aty_st_le32(DP_PIX_WIDTH, par->crtc.dp_pix_width, par); aty_st_le32(DP_CHAIN_MASK, par->crtc.dp_chain_mask, par); wait_for_fifo(5, par); aty_st_le32(SCALE_3D_CNTL, 0, par); aty_st_le32(Z_CNTL, 0, par); aty_st_le32(CRTC_INT_CNTL, aty_ld_le32(CRTC_INT_CNTL, par) & ~0x20, par); aty_st_le32(GUI_TRAJ_CNTL, 0x100023, par); /* insure engine is idle before leaving */ wait_for_idle(par); } /* * Accelerated functions */ static inline void draw_rect(s16 x, s16 y, u16 width, u16 height, struct atyfb_par *par) { /* perform rectangle fill */ wait_for_fifo(2, par); aty_st_le32(DST_Y_X, (x << 16) | y, par); aty_st_le32(DST_HEIGHT_WIDTH, (width << 16) | height, par); par->blitter_may_be_busy = 1; } void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct atyfb_par *par = (struct atyfb_par *) info->par; u32 dy = area->dy, sy = area->sy, direction = DST_LAST_PEL; u32 sx = area->sx, dx = area->dx, width = area->width, rotation = 0; if (par->asleep) return; if (!area->width || !area->height) return; if (!par->accel_flags) { cfb_copyarea(info, area); return; } if (info->var.bits_per_pixel == 24) { /* In 24 bpp, the engine is in 8 bpp - this requires that all */ /* horizontal coordinates and widths must be adjusted */ sx *= 3; dx *= 3; width *= 3; } if (area->sy < area->dy) { dy += area->height - 1; sy += area->height - 1; } else direction |= DST_Y_TOP_TO_BOTTOM; if (sx < dx) { dx 
+= width - 1; sx += width - 1; } else direction |= DST_X_LEFT_TO_RIGHT; if (info->var.bits_per_pixel == 24) { rotation = rotation24bpp(dx, direction); } wait_for_fifo(4, par); aty_st_le32(DP_SRC, FRGD_SRC_BLIT, par); aty_st_le32(SRC_Y_X, (sx << 16) | sy, par); aty_st_le32(SRC_HEIGHT1_WIDTH1, (width << 16) | area->height, par); aty_st_le32(DST_CNTL, direction | rotation, par); draw_rect(dx, dy, width, area->height, par); } void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct atyfb_par *par = (struct atyfb_par *) info->par; u32 color, dx = rect->dx, width = rect->width, rotation = 0; if (par->asleep) return; if (!rect->width || !rect->height) return; if (!par->accel_flags) { cfb_fillrect(info, rect); return; } if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) color = ((u32 *)(info->pseudo_palette))[rect->color]; else color = rect->color; if (info->var.bits_per_pixel == 24) { /* In 24 bpp, the engine is in 8 bpp - this requires that all */ /* horizontal coordinates and widths must be adjusted */ dx *= 3; width *= 3; rotation = rotation24bpp(dx, DST_X_LEFT_TO_RIGHT); } wait_for_fifo(3, par); aty_st_le32(DP_FRGD_CLR, color, par); aty_st_le32(DP_SRC, BKGD_SRC_BKGD_CLR | FRGD_SRC_FRGD_CLR | MONO_SRC_ONE, par); aty_st_le32(DST_CNTL, DST_LAST_PEL | DST_Y_TOP_TO_BOTTOM | DST_X_LEFT_TO_RIGHT | rotation, par); draw_rect(dx, rect->dy, width, rect->height, par); } void atyfb_imageblit(struct fb_info *info, const struct fb_image *image) { struct atyfb_par *par = (struct atyfb_par *) info->par; u32 src_bytes, dx = image->dx, dy = image->dy, width = image->width; u32 pix_width_save, pix_width, host_cntl, rotation = 0, src, mix; if (par->asleep) return; if (!image->width || !image->height) return; if (!par->accel_flags || (image->depth != 1 && info->var.bits_per_pixel != image->depth)) { cfb_imageblit(info, image); return; } pix_width = pix_width_save = aty_ld_le32(DP_PIX_WIDTH, par); host_cntl = 
aty_ld_le32(HOST_CNTL, par) | HOST_BYTE_ALIGN; switch (image->depth) { case 1: pix_width &= ~(BYTE_ORDER_MASK | HOST_MASK); pix_width |= (BYTE_ORDER_MSB_TO_LSB | HOST_1BPP); break; case 4: pix_width &= ~(BYTE_ORDER_MASK | HOST_MASK); pix_width |= (BYTE_ORDER_MSB_TO_LSB | HOST_4BPP); break; case 8: pix_width &= ~HOST_MASK; pix_width |= HOST_8BPP; break; case 15: pix_width &= ~HOST_MASK; pix_width |= HOST_15BPP; break; case 16: pix_width &= ~HOST_MASK; pix_width |= HOST_16BPP; break; case 24: pix_width &= ~HOST_MASK; pix_width |= HOST_24BPP; break; case 32: pix_width &= ~HOST_MASK; pix_width |= HOST_32BPP; break; } if (info->var.bits_per_pixel == 24) { /* In 24 bpp, the engine is in 8 bpp - this requires that all */ /* horizontal coordinates and widths must be adjusted */ dx *= 3; width *= 3; rotation = rotation24bpp(dx, DST_X_LEFT_TO_RIGHT); pix_width &= ~DST_MASK; pix_width |= DST_8BPP; /* * since Rage 3D IIc we have DP_HOST_TRIPLE_EN bit * this hwaccelerated triple has an issue with not aligned data */ if (M64_HAS(HW_TRIPLE) && image->width % 8 == 0) pix_width |= DP_HOST_TRIPLE_EN; } if (image->depth == 1) { u32 fg, bg; if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { fg = ((u32*)(info->pseudo_palette))[image->fg_color]; bg = ((u32*)(info->pseudo_palette))[image->bg_color]; } else { fg = image->fg_color; bg = image->bg_color; } wait_for_fifo(2, par); aty_st_le32(DP_BKGD_CLR, bg, par); aty_st_le32(DP_FRGD_CLR, fg, par); src = MONO_SRC_HOST | FRGD_SRC_FRGD_CLR | BKGD_SRC_BKGD_CLR; mix = FRGD_MIX_S | BKGD_MIX_S; } else { src = MONO_SRC_ONE | FRGD_SRC_HOST; mix = FRGD_MIX_D_XOR_S | BKGD_MIX_D; } wait_for_fifo(6, par); aty_st_le32(DP_WRITE_MASK, 0xFFFFFFFF, par); aty_st_le32(DP_PIX_WIDTH, pix_width, par); aty_st_le32(DP_MIX, mix, par); aty_st_le32(DP_SRC, src, par); aty_st_le32(HOST_CNTL, host_cntl, par); aty_st_le32(DST_CNTL, DST_Y_TOP_TO_BOTTOM | DST_X_LEFT_TO_RIGHT | rotation, par); draw_rect(dx, dy, width, image->height, 
par); src_bytes = (((image->width * image->depth) + 7) / 8) * image->height; /* manual triple each pixel */ if (info->var.bits_per_pixel == 24 && !(pix_width & DP_HOST_TRIPLE_EN)) { int inbit, outbit, mult24, byte_id_in_dword, width; u8 *pbitmapin = (u8*)image->data, *pbitmapout; u32 hostdword; for (width = image->width, inbit = 7, mult24 = 0; src_bytes; ) { for (hostdword = 0, pbitmapout = (u8*)&hostdword, byte_id_in_dword = 0; byte_id_in_dword < 4 && src_bytes; byte_id_in_dword++, pbitmapout++) { for (outbit = 7; outbit >= 0; outbit--) { *pbitmapout |= (((*pbitmapin >> inbit) & 1) << outbit); mult24++; /* next bit */ if (mult24 == 3) { mult24 = 0; inbit--; width--; } /* next byte */ if (inbit < 0 || width == 0) { src_bytes--; pbitmapin++; inbit = 7; if (width == 0) { width = image->width; outbit = 0; } } } } wait_for_fifo(1, par); aty_st_le32(HOST_DATA0, hostdword, par); } } else { u32 *pbitmap, dwords = (src_bytes + 3) / 4; for (pbitmap = (u32*)(image->data); dwords; dwords--, pbitmap++) { wait_for_fifo(1, par); aty_st_le32(HOST_DATA0, get_unaligned_le32(pbitmap), par); } } /* restore pix_width */ wait_for_fifo(1, par); aty_st_le32(DP_PIX_WIDTH, pix_width_save, par); }
gpl-2.0
RepoB/android_kernel_sony_msm8974-GreatDevs
drivers/gpu/drm/gma500/cdv_intel_hdmi.c
5263
11139
/* * Copyright © 2006-2011 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * jim liu <jim.liu@intel.com> * * FIXME: * We should probably make this generic and share it with Medfield */ #include <drm/drmP.h> #include <drm/drm.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include "psb_intel_drv.h" #include "psb_drv.h" #include "psb_intel_reg.h" #include "cdv_device.h" #include <linux/pm_runtime.h> /* hdmi control bits */ #define HDMI_NULL_PACKETS_DURING_VSYNC (1 << 9) #define HDMI_BORDER_ENABLE (1 << 7) #define HDMI_AUDIO_ENABLE (1 << 6) #define HDMI_VSYNC_ACTIVE_HIGH (1 << 4) #define HDMI_HSYNC_ACTIVE_HIGH (1 << 3) /* hdmi-b control bits */ #define HDMIB_PIPE_B_SELECT (1 << 30) struct mid_intel_hdmi_priv { u32 hdmi_reg; u32 save_HDMIB; bool has_hdmi_sink; bool has_hdmi_audio; /* Should set this when detect hotplug */ bool hdmi_device_connected; struct mdfld_hdmi_i2c *i2c_bus; struct i2c_adapter *hdmi_i2c_adapter; /* for control functions */ struct drm_device *dev; }; static void cdv_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder); struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv; u32 hdmib; struct drm_crtc *crtc = encoder->crtc; struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); hdmib = (2 << 10); if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) hdmib |= HDMI_VSYNC_ACTIVE_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) hdmib |= HDMI_HSYNC_ACTIVE_HIGH; if (intel_crtc->pipe == 1) hdmib |= HDMIB_PIPE_B_SELECT; if (hdmi_priv->has_hdmi_audio) { hdmib |= HDMI_AUDIO_ENABLE; hdmib |= HDMI_NULL_PACKETS_DURING_VSYNC; } REG_WRITE(hdmi_priv->hdmi_reg, hdmib); REG_READ(hdmi_priv->hdmi_reg); } static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; } static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode) { struct 
drm_device *dev = encoder->dev; struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder); struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv; u32 hdmib; hdmib = REG_READ(hdmi_priv->hdmi_reg); if (mode != DRM_MODE_DPMS_ON) REG_WRITE(hdmi_priv->hdmi_reg, hdmib & ~HDMIB_PORT_EN); else REG_WRITE(hdmi_priv->hdmi_reg, hdmib | HDMIB_PORT_EN); REG_READ(hdmi_priv->hdmi_reg); } static void cdv_hdmi_save(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct psb_intel_encoder *psb_intel_encoder = psb_intel_attached_encoder(connector); struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv; hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg); } static void cdv_hdmi_restore(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct psb_intel_encoder *psb_intel_encoder = psb_intel_attached_encoder(connector); struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv; REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB); REG_READ(hdmi_priv->hdmi_reg); } static enum drm_connector_status cdv_hdmi_detect( struct drm_connector *connector, bool force) { struct psb_intel_encoder *psb_intel_encoder = psb_intel_attached_encoder(connector); struct psb_intel_connector *psb_intel_connector = to_psb_intel_connector(connector); struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv; struct edid *edid = NULL; enum drm_connector_status status = connector_status_disconnected; edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter); hdmi_priv->has_hdmi_sink = false; hdmi_priv->has_hdmi_audio = false; if (edid) { if (edid->input & DRM_EDID_INPUT_DIGITAL) { status = connector_status_connected; hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); hdmi_priv->has_hdmi_audio = drm_detect_monitor_audio(edid); } psb_intel_connector->base.display_info.raw_edid = NULL; kfree(edid); } return status; } static int cdv_hdmi_set_property(struct drm_connector 
*connector, struct drm_property *property, uint64_t value) { struct drm_encoder *encoder = connector->encoder; if (!strcmp(property->name, "scaling mode") && encoder) { struct psb_intel_crtc *crtc = to_psb_intel_crtc(encoder->crtc); bool centre; uint64_t curValue; if (!crtc) return -1; switch (value) { case DRM_MODE_SCALE_FULLSCREEN: break; case DRM_MODE_SCALE_NO_SCALE: break; case DRM_MODE_SCALE_ASPECT: break; default: return -1; } if (drm_connector_property_get_value(connector, property, &curValue)) return -1; if (curValue == value) return 0; if (drm_connector_property_set_value(connector, property, value)) return -1; centre = (curValue == DRM_MODE_SCALE_NO_SCALE) || (value == DRM_MODE_SCALE_NO_SCALE); if (crtc->saved_mode.hdisplay != 0 && crtc->saved_mode.vdisplay != 0) { if (centre) { if (!drm_crtc_helper_set_mode(encoder->crtc, &crtc->saved_mode, encoder->crtc->x, encoder->crtc->y, encoder->crtc->fb)) return -1; } else { struct drm_encoder_helper_funcs *helpers = encoder->helper_private; helpers->mode_set(encoder, &crtc->saved_mode, &crtc->saved_adjusted_mode); } } } return 0; } /* * Return the list of HDMI DDC modes if available. 
*/ static int cdv_hdmi_get_modes(struct drm_connector *connector) { struct psb_intel_encoder *psb_intel_encoder = psb_intel_attached_encoder(connector); struct edid *edid = NULL; int ret = 0; edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter); if (edid) { drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); kfree(edid); } return ret; } static int cdv_hdmi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_psb_private *dev_priv = connector->dev->dev_private; if (mode->clock > 165000) return MODE_CLOCK_HIGH; if (mode->clock < 20000) return MODE_CLOCK_HIGH; /* just in case */ if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; /* just in case */ if (mode->flags & DRM_MODE_FLAG_INTERLACE) return MODE_NO_INTERLACE; /* We assume worst case scenario of 32 bpp here, since we don't know */ if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) > dev_priv->vram_stolen_size) return MODE_MEM; return MODE_OK; } static void cdv_hdmi_destroy(struct drm_connector *connector) { struct psb_intel_encoder *psb_intel_encoder = psb_intel_attached_encoder(connector); if (psb_intel_encoder->i2c_bus) psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(connector); } static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = { .dpms = cdv_hdmi_dpms, .mode_fixup = cdv_hdmi_mode_fixup, .prepare = psb_intel_encoder_prepare, .mode_set = cdv_hdmi_mode_set, .commit = psb_intel_encoder_commit, }; static const struct drm_connector_helper_funcs cdv_hdmi_connector_helper_funcs = { .get_modes = cdv_hdmi_get_modes, .mode_valid = cdv_hdmi_mode_valid, .best_encoder = psb_intel_best_encoder, }; static const struct drm_connector_funcs cdv_hdmi_connector_funcs = { .dpms = drm_helper_connector_dpms, .save = cdv_hdmi_save, .restore = cdv_hdmi_restore, .detect = cdv_hdmi_detect, .fill_modes = 
drm_helper_probe_single_connector_modes, .set_property = cdv_hdmi_set_property, .destroy = cdv_hdmi_destroy, }; void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int reg) { struct psb_intel_encoder *psb_intel_encoder; struct psb_intel_connector *psb_intel_connector; struct drm_connector *connector; struct drm_encoder *encoder; struct mid_intel_hdmi_priv *hdmi_priv; int ddc_bus; psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); if (!psb_intel_encoder) return; psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); if (!psb_intel_connector) goto err_connector; hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL); if (!hdmi_priv) goto err_priv; connector = &psb_intel_connector->base; encoder = &psb_intel_encoder->base; drm_connector_init(dev, connector, &cdv_hdmi_connector_funcs, DRM_MODE_CONNECTOR_DVID); drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, DRM_MODE_ENCODER_TMDS); psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); psb_intel_encoder->type = INTEL_OUTPUT_HDMI; hdmi_priv->hdmi_reg = reg; hdmi_priv->has_hdmi_sink = false; psb_intel_encoder->dev_priv = hdmi_priv; drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs); drm_connector_helper_add(connector, &cdv_hdmi_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); switch (reg) { case SDVOB: ddc_bus = GPIOE; break; case SDVOC: ddc_bus = GPIOD; break; default: DRM_ERROR("unknown reg 0x%x for HDMI\n", reg); goto failed_ddc; break; } psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev, ddc_bus, (reg == SDVOB) ? 
"HDMIB" : "HDMIC"); if (!psb_intel_encoder->i2c_bus) { dev_err(dev->dev, "No ddc adapter available!\n"); goto failed_ddc; } hdmi_priv->hdmi_i2c_adapter = &(psb_intel_encoder->i2c_bus->adapter); hdmi_priv->dev = dev; drm_sysfs_connector_add(connector); return; failed_ddc: drm_encoder_cleanup(encoder); drm_connector_cleanup(connector); err_priv: kfree(psb_intel_connector); err_connector: kfree(psb_intel_encoder); }
gpl-2.0
AOKP/kernel_samsung_smdk4412
drivers/gpu/drm/nouveau/nv50_vm.c
5263
4827
/* * Copyright 2010 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_vm.h"

/*
 * Write one page-directory entry: point PDE @pde at the page table in
 * pgt[0] (4KiB pages) or pgt[1] (large pages), or mark it invalid with
 * a poison address if neither exists.
 */
void
nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
		struct nouveau_gpuobj *pgt[2])
{
	u64 phys = 0xdeadcafe00000000ULL; /* poison: recognisable in dumps */
	u32 coverage = 0;

	if (pgt[0]) {
		phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */
		coverage = (pgt[0]->size >> 3) << 12;
	} else
	if (pgt[1]) {
		phys = 0x00000001 | pgt[1]->vinst; /* present */
		coverage = (pgt[1]->size >> 3) << 16;
	}

	if (phys & 1) {
		/* encode the table's coverage size into the PDE
		 * (hardware-defined size field) */
		if (coverage <= 32 * 1024 * 1024)
			phys |= 0x60;
		else if (coverage <= 64 * 1024 * 1024)
			phys |= 0x40;
		else if (coverage <= 128 * 1024 * 1024)
			phys |= 0x20;
	}

	nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
	nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
}

/*
 * Build a PTE value from a physical address: set the present bit, the
 * memory type, the target aperture, and access bits derived from the
 * VMA (system-coherent, read-only).
 */
static inline u64
vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
{
	phys |= 1; /* present */
	phys |= (u64)memtype << 40;
	phys |= target << 4;
	if (vma->access & NV_MEM_ACCESS_SYS)
		phys |= (1 << 6);
	if (!(vma->access & NV_MEM_ACCESS_WO))
		phys |= (1 << 3);
	return phys;
}

/*
 * Map @cnt contiguous pages starting at PTE index @pte to physical
 * address @phys.  PTEs are written in naturally-aligned power-of-two
 * groups so the hardware contiguity hint (bits 7..9 of the low word)
 * can be encoded; compression tags are attached when the memory type
 * requests them.
 */
void
nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
	struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
	u32 comp = (mem->memtype & 0x180) >> 7;
	u32 block, target;
	int i;

	/* IGPs don't have real VRAM, re-target to stolen system memory */
	target = 0;
	if (dev_priv->vram_sys_base) {
		phys += dev_priv->vram_sys_base;
		target = 3;
	}

	phys = vm_addr(vma, phys, mem->memtype, target);
	pte <<= 3;	/* from here on, pte/cnt are byte offsets/counts */
	cnt <<= 3;	/* (8 bytes per PTE) */

	while (cnt) {
		u32 offset_h = upper_32_bits(phys);
		u32 offset_l = lower_32_bits(phys);

		/* largest aligned power-of-two run that fits */
		for (i = 7; i >= 0; i--) {
			block = 1 << (i + 3);
			if (cnt >= block && !(pte & (block - 1)))
				break;
		}
		offset_l |= (i << 7);

		phys += block << (vma->node->type - 3);
		cnt -= block;
		if (comp) {
			u32 tag = mem->tag->start + ((delta >> 16) * comp);
			offset_h |= (tag << 17);
			delta += block << (vma->node->type - 3);
		}

		while (block) {
			nv_wo32(pgt, pte + 0, offset_l);
			nv_wo32(pgt, pte + 4, offset_h);
			pte += 8;
			block -= 8;
		}
	}
}

/*
 * Map a scatter/gather list: one PTE per dma address, no contiguity
 * encoding.  Target aperture depends on whether snooping is allowed.
 */
void
nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
	pte <<= 3;
	while (cnt--) {
		u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target);
		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
		pte += 8;
	}
}

/* Invalidate @cnt PTEs starting at index @pte. */
void
nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
{
	pte <<= 3;
	while (cnt--) {
		nv_wo32(pgt, pte + 0, 0x00000000);
		nv_wo32(pgt, pte + 4, 0x00000000);
		pte += 8;
	}
}

/*
 * Flush TLBs after page-table changes.  BAR VMs only need the VM
 * engine flushed; other VMs flush the fifo and every engine with an
 * active reference on this VM.
 */
void
nv50_vm_flush(struct nouveau_vm *vm)
{
	struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	int i;

	pinstmem->flush(vm->dev);

	/* BAR */
	if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) {
		nv50_vm_flush_engine(vm->dev, 6);
		return;
	}

	pfifo->tlb_flush(vm->dev);
	for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
		if (atomic_read(&vm->engref[i]))
			dev_priv->eng[i]->tlb_flush(vm->dev, i);
	}
}

/*
 * Trigger a TLB flush for one engine via register 0x100c80 and wait
 * for completion, serialised under the VM lock.
 */
void
nv50_vm_flush_engine(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->vm_lock, flags);
	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
gpl-2.0
SlimRoms/kernel_samsung_crespo
crypto/chainiv.c
9615
8763
/* * chainiv: Chain IV Generator * * Generate IVs simply be using the last block of the previous encryption. * This is mainly useful for CBC with a synchronous algorithm. * * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/internal/skcipher.h> #include <crypto/rng.h> #include <crypto/crypto_wq.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/workqueue.h> enum { CHAINIV_STATE_INUSE = 0, }; struct chainiv_ctx { spinlock_t lock; char iv[]; }; struct async_chainiv_ctx { unsigned long state; spinlock_t lock; int err; struct crypto_queue queue; struct work_struct postponed; char iv[]; }; static int chainiv_givencrypt(struct skcipher_givcrypt_request *req) { struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); unsigned int ivsize; int err; ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); ablkcipher_request_set_callback(subreq, req->creq.base.flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, req->creq.base.complete, req->creq.base.data); ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, req->creq.nbytes, req->creq.info); spin_lock_bh(&ctx->lock); ivsize = crypto_ablkcipher_ivsize(geniv); memcpy(req->giv, ctx->iv, ivsize); memcpy(subreq->info, ctx->iv, ivsize); err = crypto_ablkcipher_encrypt(subreq); if (err) goto unlock; memcpy(ctx->iv, subreq->info, ivsize); unlock: spin_unlock_bh(&ctx->lock); return err; } static int chainiv_givencrypt_first(struct skcipher_givcrypt_request *req) { struct crypto_ablkcipher *geniv = 
skcipher_givcrypt_reqtfm(req); struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); int err = 0; spin_lock_bh(&ctx->lock); if (crypto_ablkcipher_crt(geniv)->givencrypt != chainiv_givencrypt_first) goto unlock; crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt; err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv, crypto_ablkcipher_ivsize(geniv)); unlock: spin_unlock_bh(&ctx->lock); if (err) return err; return chainiv_givencrypt(req); } static int chainiv_init_common(struct crypto_tfm *tfm) { tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request); return skcipher_geniv_init(tfm); } static int chainiv_init(struct crypto_tfm *tfm) { struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm); spin_lock_init(&ctx->lock); return chainiv_init_common(tfm); } static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx) { int queued; int err = ctx->err; if (!ctx->queue.qlen) { smp_mb__before_clear_bit(); clear_bit(CHAINIV_STATE_INUSE, &ctx->state); if (!ctx->queue.qlen || test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) goto out; } queued = queue_work(kcrypto_wq, &ctx->postponed); BUG_ON(!queued); out: return err; } static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req) { struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); int err; spin_lock_bh(&ctx->lock); err = skcipher_enqueue_givcrypt(&ctx->queue, req); spin_unlock_bh(&ctx->lock); if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) return err; ctx->err = err; return async_chainiv_schedule_work(ctx); } static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req) { struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); unsigned int ivsize = crypto_ablkcipher_ivsize(geniv); memcpy(req->giv, ctx->iv, ivsize); memcpy(subreq->info, ctx->iv, 
ivsize); ctx->err = crypto_ablkcipher_encrypt(subreq); if (ctx->err) goto out; memcpy(ctx->iv, subreq->info, ivsize); out: return async_chainiv_schedule_work(ctx); } static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req) { struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); ablkcipher_request_set_callback(subreq, req->creq.base.flags, req->creq.base.complete, req->creq.base.data); ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, req->creq.nbytes, req->creq.info); if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) goto postpone; if (ctx->queue.qlen) { clear_bit(CHAINIV_STATE_INUSE, &ctx->state); goto postpone; } return async_chainiv_givencrypt_tail(req); postpone: return async_chainiv_postpone_request(req); } static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req) { struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); int err = 0; if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) goto out; if (crypto_ablkcipher_crt(geniv)->givencrypt != async_chainiv_givencrypt_first) goto unlock; crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt; err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv, crypto_ablkcipher_ivsize(geniv)); unlock: clear_bit(CHAINIV_STATE_INUSE, &ctx->state); if (err) return err; out: return async_chainiv_givencrypt(req); } static void async_chainiv_do_postponed(struct work_struct *work) { struct async_chainiv_ctx *ctx = container_of(work, struct async_chainiv_ctx, postponed); struct skcipher_givcrypt_request *req; struct ablkcipher_request *subreq; int err; /* Only handle one request at a time to avoid hogging keventd. 
 */
	spin_lock_bh(&ctx->lock);
	req = skcipher_dequeue_givcrypt(&ctx->queue);
	spin_unlock_bh(&ctx->lock);

	if (!req) {
		/* queue drained while we were scheduled; release INUSE */
		async_chainiv_schedule_work(ctx);
		return;
	}

	subreq = skcipher_givcrypt_reqctx(req);
	/* running from a workqueue now, so sleeping is allowed */
	subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;

	err = async_chainiv_givencrypt_tail(req);

	local_bh_disable();
	skcipher_givcrypt_complete(req, err);
	local_bh_enable();
}

/* Instance init for the async variant: lock, request queue and the
 * postponed-work handler. */
static int async_chainiv_init(struct crypto_tfm *tfm)
{
	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);
	crypto_init_queue(&ctx->queue, 100);
	INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);

	return chainiv_init_common(tfm);
}

/* Instance teardown; the BUG_ON catches requests still in flight. */
static void async_chainiv_exit(struct crypto_tfm *tfm)
{
	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen);

	skcipher_geniv_exit(tfm);
}

static struct crypto_template chainiv_tmpl;

/*
 * Template constructor: wrap a skcipher in a chain-IV generator.
 * Picks the sync implementation, or the async one unless the user
 * requested a synchronous algorithm.  Holds a default-RNG reference
 * for the lifetime of the instance (released in chainiv_free).
 */
static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	int err;

	algt = crypto_get_attr_type(tb);
	err = PTR_ERR(algt);
	if (IS_ERR(algt))
		return ERR_PTR(err);

	err = crypto_get_default_rng();
	if (err)
		return ERR_PTR(err);

	inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0);
	if (IS_ERR(inst))
		goto put_rng;

	inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt_first;

	inst->alg.cra_init = chainiv_init;
	inst->alg.cra_exit = skcipher_geniv_exit;

	inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx);

	if (!crypto_requires_sync(algt->type, algt->mask)) {
		inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;

		inst->alg.cra_ablkcipher.givencrypt =
			async_chainiv_givencrypt_first;

		inst->alg.cra_init = async_chainiv_init;
		inst->alg.cra_exit = async_chainiv_exit;

		inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx);
	}

	/* the IV itself lives in the flexible array member of the ctx */
	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;

out:
	return inst;

put_rng:
	crypto_put_default_rng();
	goto out;
}

/* Drop the RNG reference taken in chainiv_alloc, then free. */
static void chainiv_free(struct crypto_instance *inst)
{
	skcipher_geniv_free(inst);
	crypto_put_default_rng();
}

static struct crypto_template chainiv_tmpl = {
	.name = "chainiv",
	.alloc = chainiv_alloc,
	.free = chainiv_free,
	.module = THIS_MODULE,
};

static int __init chainiv_module_init(void)
{
	return crypto_register_template(&chainiv_tmpl);
}

static void chainiv_module_exit(void)
{
	crypto_unregister_template(&chainiv_tmpl);
}

module_init(chainiv_module_init);
module_exit(chainiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Chain IV Generator");
gpl-2.0
LG-V10/LGV10__pplus_msm8992_kernel
lib/mpi/generic_mpih-add1.c
9871
2027
/* mpihelp-add_1.c - MPI helper functions
 * Copyright (C) 1994, 1996, 1997, 1998,
 *               2000 Free Software Foundation, Inc.
 *
 * This file is part of GnuPG.
 *
 * GnuPG is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * GnuPG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 *
 * Note: This code is heavily based on the GNU MP Library.
 *	 Actually it's the same code with only minor changes in the
 *	 way the data is stored; this is to support the abstraction
 *	 of an optional secure memory allocation which may be used
 *	 to avoid revealing of sensitive data due to paging etc.
 *	 The GNU MP Library itself is published under the LGPL;
 *	 however I decided to publish this code under the plain GPL.
 */

#include "mpi-internal.h"
#include "longlong.h"

/*
 * Limb-wise addition of two equally-sized multi-precision operands:
 * res_ptr[0..size-1] = s1_ptr[] + s2_ptr[], propagating carry between
 * limbs.  Returns the final carry-out (0 or 1).  Any of the pointers
 * may alias.
 */
mpi_limb_t
mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
	      mpi_ptr_t s2_ptr, mpi_size_t size)
{
	mpi_limb_t a, b, carry;
	mpi_size_t i;

	carry = 0;
	for (i = 0; i < size; i++) {
		b = s2_ptr[i];
		a = s1_ptr[i];
		b += carry;		/* fold in carry from previous limb */
		carry = b < carry;	/* unsigned wrap => carry out */
		b += a;			/* add the other addend */
		carry += b < a;		/* combine second carry; sum <= 1 */
		res_ptr[i] = b;
	}
	return carry;
}
gpl-2.0
Entropy512/kernel_yuga_reference
lib/mpi/generic_mpih-mul3.c
9871
1977
/* mpihelp-mul_3.c - MPI helper functions
 * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
 *
 * This file is part of GnuPG.
 *
 * GnuPG is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * GnuPG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 *
 * Note: This code is heavily based on the GNU MP Library.
 *	 Actually it's the same code with only minor changes in the
 *	 way the data is stored; this is to support the abstraction
 *	 of an optional secure memory allocation which may be used
 *	 to avoid revealing of sensitive data due to paging etc.
 *	 The GNU MP Library itself is published under the LGPL;
 *	 however I decided to publish this code under the plain GPL.
 */

#include "mpi-internal.h"
#include "longlong.h"

/*
 * Multiply-and-subtract: res_ptr[0..s1_size-1] -= s1_ptr[] * s2_limb,
 * limb by limb with borrow propagation.  Returns the final borrow-out
 * limb (the amount by which the subtraction underflowed).
 */
mpi_limb_t
mpihelp_submul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
		 mpi_size_t s1_size, mpi_limb_t s2_limb)
{
	mpi_limb_t cy_limb;
	mpi_size_t j;
	mpi_limb_t prod_high, prod_low;
	mpi_limb_t x;

	/* The loop counter and index J goes from -SIZE to -1.  This way
	 * the loop becomes faster.  */
	j = -s1_size;
	res_ptr -= j;
	s1_ptr -= j;

	cy_limb = 0;
	do {
		/* double-limb product of one source limb and s2_limb */
		umul_ppmm(prod_high, prod_low, s1_ptr[j], s2_limb);

		/* add previous borrow into the low product limb; an
		 * unsigned wrap means a carry into the high limb */
		prod_low += cy_limb;
		cy_limb = (prod_low < cy_limb ? 1 : 0) + prod_high;

		/* subtract the product limb from the result limb; an
		 * unsigned wrap (result grew) means borrow-out */
		x = res_ptr[j];
		prod_low = x - prod_low;
		cy_limb += prod_low > x ? 1 : 0;
		res_ptr[j] = prod_low;
	} while (++j);

	return cy_limb;
}
gpl-2.0
BigBrother1984/android_kernel_lge_mako
drivers/uwb/drp.c
10383
24709
/* * Ultra Wide Band * Dynamic Reservation Protocol handling * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * Copyright (C) 2008 Cambridge Silicon Radio Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/slab.h> #include <linux/delay.h> #include "uwb-internal.h" /* DRP Conflict Actions ([ECMA-368 2nd Edition] 17.4.6) */ enum uwb_drp_conflict_action { /* Reservation is maintained, no action needed */ UWB_DRP_CONFLICT_MANTAIN = 0, /* the device shall not transmit frames in conflicting MASs in * the following superframe. If the device is the reservation * target, it shall also set the Reason Code in its DRP IE to * Conflict in its beacon in the following superframe. */ UWB_DRP_CONFLICT_ACT1, /* the device shall not set the Reservation Status bit to ONE * and shall not transmit frames in conflicting MASs. If the * device is the reservation target, it shall also set the * Reason Code in its DRP IE to Conflict. */ UWB_DRP_CONFLICT_ACT2, /* the device shall not transmit frames in conflicting MASs in * the following superframe. It shall remove the conflicting * MASs from the reservation or set the Reservation Status to * ZERO in its beacon in the following superframe. If the * device is the reservation target, it shall also set the * Reason Code in its DRP IE to Conflict. 
*/ UWB_DRP_CONFLICT_ACT3, }; static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg, struct uwb_rceb *reply, ssize_t reply_size) { struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply; if (r != NULL) { if (r->bResultCode != UWB_RC_RES_SUCCESS) dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n", uwb_rc_strerror(r->bResultCode), r->bResultCode); } else dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n"); spin_lock_bh(&rc->rsvs_lock); if (rc->set_drp_ie_pending > 1) { rc->set_drp_ie_pending = 0; uwb_rsv_queue_update(rc); } else { rc->set_drp_ie_pending = 0; } spin_unlock_bh(&rc->rsvs_lock); } /** * Construct and send the SET DRP IE * * @rc: UWB Host controller * @returns: >= 0 number of bytes still available in the beacon * < 0 errno code on error. * * See WUSB[8.6.2.7]: The host must set all the DRP IEs that it wants the * device to include in its beacon at the same time. We thus have to * traverse all reservations and include the DRP IEs of all PENDING * and NEGOTIATED reservations in a SET DRP command for transmission. * * A DRP Availability IE is appended. * * rc->rsvs_mutex is held * * FIXME We currently ignore the returned value indicating the remaining space * in beacon. This could be used to deny reservation requests earlier if * determined that they would cause the beacon space to be exceeded. */ int uwb_rc_send_all_drp_ie(struct uwb_rc *rc) { int result; struct uwb_rc_cmd_set_drp_ie *cmd; struct uwb_rsv *rsv; struct uwb_rsv_move *mv; int num_bytes = 0; u8 *IEDataptr; result = -ENOMEM; /* First traverse all reservations to determine memory needed. 
*/ list_for_each_entry(rsv, &rc->reservations, rc_node) { if (rsv->drp_ie != NULL) { num_bytes += rsv->drp_ie->hdr.length + 2; if (uwb_rsv_has_two_drp_ies(rsv) && (rsv->mv.companion_drp_ie != NULL)) { mv = &rsv->mv; num_bytes += mv->companion_drp_ie->hdr.length + 2; } } } num_bytes += sizeof(rc->drp_avail.ie); cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL); if (cmd == NULL) goto error; cmd->rccb.bCommandType = UWB_RC_CET_GENERAL; cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE); cmd->wIELength = num_bytes; IEDataptr = (u8 *)&cmd->IEData[0]; /* FIXME: DRV avail IE is not always needed */ /* put DRP avail IE first */ memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie)); IEDataptr += sizeof(struct uwb_ie_drp_avail); /* Next traverse all reservations to place IEs in allocated memory. */ list_for_each_entry(rsv, &rc->reservations, rc_node) { if (rsv->drp_ie != NULL) { memcpy(IEDataptr, rsv->drp_ie, rsv->drp_ie->hdr.length + 2); IEDataptr += rsv->drp_ie->hdr.length + 2; if (uwb_rsv_has_two_drp_ies(rsv) && (rsv->mv.companion_drp_ie != NULL)) { mv = &rsv->mv; memcpy(IEDataptr, mv->companion_drp_ie, mv->companion_drp_ie->hdr.length + 2); IEDataptr += mv->companion_drp_ie->hdr.length + 2; } } } result = uwb_rc_cmd_async(rc, "SET-DRP-IE", &cmd->rccb, sizeof(*cmd) + num_bytes, UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE, uwb_rc_set_drp_cmd_done, NULL); rc->set_drp_ie_pending = 1; kfree(cmd); error: return result; } /* * Evaluate the action to perform using conflict resolution rules * * Return a uwb_drp_conflict_action. 
*/ static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot, struct uwb_rsv *rsv, int our_status) { int our_tie_breaker = rsv->tiebreaker; int our_type = rsv->type; int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot; int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie); int ext_status = uwb_ie_drp_status(ext_drp_ie); int ext_type = uwb_ie_drp_type(ext_drp_ie); /* [ECMA-368 2nd Edition] 17.4.6 */ if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) { return UWB_DRP_CONFLICT_MANTAIN; } /* [ECMA-368 2nd Edition] 17.4.6-1 */ if (our_type == UWB_DRP_TYPE_ALIEN_BP) { return UWB_DRP_CONFLICT_MANTAIN; } /* [ECMA-368 2nd Edition] 17.4.6-2 */ if (ext_type == UWB_DRP_TYPE_ALIEN_BP) { /* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */ return UWB_DRP_CONFLICT_ACT1; } /* [ECMA-368 2nd Edition] 17.4.6-3 */ if (our_status == 0 && ext_status == 1) { return UWB_DRP_CONFLICT_ACT2; } /* [ECMA-368 2nd Edition] 17.4.6-4 */ if (our_status == 1 && ext_status == 0) { return UWB_DRP_CONFLICT_MANTAIN; } /* [ECMA-368 2nd Edition] 17.4.6-5a */ if (our_tie_breaker == ext_tie_breaker && our_beacon_slot < ext_beacon_slot) { return UWB_DRP_CONFLICT_MANTAIN; } /* [ECMA-368 2nd Edition] 17.4.6-5b */ if (our_tie_breaker != ext_tie_breaker && our_beacon_slot > ext_beacon_slot) { return UWB_DRP_CONFLICT_MANTAIN; } if (our_status == 0) { if (our_tie_breaker == ext_tie_breaker) { /* [ECMA-368 2nd Edition] 17.4.6-6a */ if (our_beacon_slot > ext_beacon_slot) { return UWB_DRP_CONFLICT_ACT2; } } else { /* [ECMA-368 2nd Edition] 17.4.6-6b */ if (our_beacon_slot < ext_beacon_slot) { return UWB_DRP_CONFLICT_ACT2; } } } else { if (our_tie_breaker == ext_tie_breaker) { /* [ECMA-368 2nd Edition] 17.4.6-7a */ if (our_beacon_slot > ext_beacon_slot) { return UWB_DRP_CONFLICT_ACT3; } } else { /* [ECMA-368 2nd Edition] 17.4.6-7b */ if (our_beacon_slot < ext_beacon_slot) { return UWB_DRP_CONFLICT_ACT3; } } } return UWB_DRP_CONFLICT_MANTAIN; } static void 
handle_conflict_normal(struct uwb_ie_drp *drp_ie, int ext_beacon_slot, struct uwb_rsv *rsv, struct uwb_mas_bm *conflicting_mas) { struct uwb_rc *rc = rsv->rc; struct uwb_rsv_move *mv = &rsv->mv; struct uwb_drp_backoff_win *bow = &rc->bow; int action; action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv)); if (uwb_rsv_is_owner(rsv)) { switch(action) { case UWB_DRP_CONFLICT_ACT2: /* try move */ uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED); if (bow->can_reserve_extra_mases == false) uwb_rsv_backoff_win_increment(rc); break; case UWB_DRP_CONFLICT_ACT3: uwb_rsv_backoff_win_increment(rc); /* drop some mases with reason modified */ /* put in the companion the mases to be dropped */ bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS); uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); default: break; } } else { switch(action) { case UWB_DRP_CONFLICT_ACT2: case UWB_DRP_CONFLICT_ACT3: uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); default: break; } } } static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot, struct uwb_rsv *rsv, bool companion_only, struct uwb_mas_bm *conflicting_mas) { struct uwb_rc *rc = rsv->rc; struct uwb_drp_backoff_win *bow = &rc->bow; struct uwb_rsv_move *mv = &rsv->mv; int action; if (companion_only) { /* status of companion is 0 at this point */ action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0); if (uwb_rsv_is_owner(rsv)) { switch(action) { case UWB_DRP_CONFLICT_ACT2: case UWB_DRP_CONFLICT_ACT3: uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); rsv->needs_release_companion_mas = false; if (bow->can_reserve_extra_mases == false) uwb_rsv_backoff_win_increment(rc); uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); } } else { /* rsv is target */ switch(action) { case UWB_DRP_CONFLICT_ACT2: case UWB_DRP_CONFLICT_ACT3: uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_CONFLICT); /* send_drp_avail_ie = true; */ } } } else { /* also base 
part of the reservation is conflicting */ if (uwb_rsv_is_owner(rsv)) { uwb_rsv_backoff_win_increment(rc); /* remove companion part */ uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); /* drop some mases with reason modified */ /* put in the companion the mases to be dropped */ bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS); uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); } else { /* it is a target rsv */ uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); /* send_drp_avail_ie = true; */ } } } static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv, struct uwb_rc_evt_drp *drp_evt, struct uwb_ie_drp *drp_ie, struct uwb_mas_bm *conflicting_mas) { struct uwb_rsv_move *mv; /* check if the conflicting reservation has two drp_ies */ if (uwb_rsv_has_two_drp_ies(rsv)) { mv = &rsv->mv; if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number, rsv, false, conflicting_mas); } else { if (bitmap_intersects(mv->companion_mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number, rsv, true, conflicting_mas); } } } else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number, rsv, conflicting_mas); } } static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, struct uwb_ie_drp *drp_ie, struct uwb_mas_bm *conflicting_mas) { struct uwb_rsv *rsv; list_for_each_entry(rsv, &rc->reservations, rc_node) { uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, conflicting_mas); } } /* * Based on the DRP IE, transition a target reservation to a new * state. 
*/ static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv, struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt) { struct device *dev = &rc->uwb_dev.dev; struct uwb_rsv_move *mv = &rsv->mv; int status; enum uwb_drp_reason reason_code; struct uwb_mas_bm mas; status = uwb_ie_drp_status(drp_ie); reason_code = uwb_ie_drp_reason_code(drp_ie); uwb_drp_ie_to_bm(&mas, drp_ie); switch (reason_code) { case UWB_DRP_REASON_ACCEPTED: if (rsv->state == UWB_RSV_STATE_T_CONFLICT) { uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); break; } if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) { /* drp_ie is companion */ if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) /* stroke companion */ uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); } else { if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) { if (uwb_drp_avail_reserve_pending(rc, &mas) == -EBUSY) { /* FIXME: there is a conflict, find * the conflicting reservations and * take a sensible action. Consider * that in drp_ie there is the * "neighbour" */ uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas); } else { /* accept the extra reservation */ bitmap_copy(mv->companion_mas.bm, mas.bm, UWB_NUM_MAS); uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); } } else { if (status) { uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); } } } break; case UWB_DRP_REASON_MODIFIED: /* check to see if we have already modified the reservation */ if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) { uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); break; } /* find if the owner wants to expand or reduce */ if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { /* owner is reducing */ bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm, UWB_NUM_MAS); uwb_drp_avail_release(rsv->rc, &mv->companion_mas); } bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS); uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED); break; default: dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", reason_code, 
status); } } /* * Based on the DRP IE, transition an owner reservation to a new * state. */ static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv, struct uwb_dev *src, struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt) { struct device *dev = &rc->uwb_dev.dev; struct uwb_rsv_move *mv = &rsv->mv; int status; enum uwb_drp_reason reason_code; struct uwb_mas_bm mas; status = uwb_ie_drp_status(drp_ie); reason_code = uwb_ie_drp_reason_code(drp_ie); uwb_drp_ie_to_bm(&mas, drp_ie); if (status) { switch (reason_code) { case UWB_DRP_REASON_ACCEPTED: switch (rsv->state) { case UWB_RSV_STATE_O_PENDING: case UWB_RSV_STATE_O_INITIATED: case UWB_RSV_STATE_O_ESTABLISHED: uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); break; case UWB_RSV_STATE_O_MODIFIED: if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); } else { uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); } break; case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn' t be a problem */ if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); } else { uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); } break; case UWB_RSV_STATE_O_MOVE_EXPANDING: if (bitmap_equal(mas.bm, mv->companion_mas.bm, UWB_NUM_MAS)) { /* Companion reservation accepted */ uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); } else { uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); } break; case UWB_RSV_STATE_O_MOVE_COMBINING: if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); else uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); break; default: break; } break; default: dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", reason_code, status); } } else { switch (reason_code) { case UWB_DRP_REASON_PENDING: uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING); break; case UWB_DRP_REASON_DENIED: uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); break; case 
UWB_DRP_REASON_CONFLICT: /* resolve the conflict */ bitmap_complement(mas.bm, src->last_availability_bm, UWB_NUM_MAS); uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas); break; default: dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", reason_code, status); } } } static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt) { unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US; mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us)); } static void uwb_cnflt_update_work(struct work_struct *work) { struct uwb_cnflt_alien *cnflt = container_of(work, struct uwb_cnflt_alien, cnflt_update_work); struct uwb_cnflt_alien *c; struct uwb_rc *rc = cnflt->rc; unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; mutex_lock(&rc->rsvs_mutex); list_del(&cnflt->rc_node); /* update rc global conflicting alien bitmap */ bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS); list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) { bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, c->mas.bm, UWB_NUM_MAS); } queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us)); kfree(cnflt); mutex_unlock(&rc->rsvs_mutex); } static void uwb_cnflt_timer(unsigned long arg) { struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg; queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work); } /* * We have received an DRP_IE of type Alien BP and we need to make * sure we do not transmit in conflicting MASs. 
*/ static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie) { struct device *dev = &rc->uwb_dev.dev; struct uwb_mas_bm mas; struct uwb_cnflt_alien *cnflt; char buf[72]; unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; uwb_drp_ie_to_bm(&mas, drp_ie); bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS); list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) { if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) { /* Existing alien BP reservation conflicting * bitmap, just reset the timer */ uwb_cnflt_alien_stroke_timer(cnflt); return; } } /* New alien BP reservation conflicting bitmap */ /* alloc and initialize new uwb_cnflt_alien */ cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL); if (!cnflt) dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n"); INIT_LIST_HEAD(&cnflt->rc_node); init_timer(&cnflt->timer); cnflt->timer.function = uwb_cnflt_timer; cnflt->timer.data = (unsigned long)cnflt; cnflt->rc = rc; INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work); bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS); list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list); /* update rc global conflicting alien bitmap */ bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS); queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us)); /* start the timer */ uwb_cnflt_alien_stroke_timer(cnflt); } static void uwb_drp_process_not_involved(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, struct uwb_ie_drp *drp_ie) { struct uwb_mas_bm mas; uwb_drp_ie_to_bm(&mas, drp_ie); uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas); } static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src, struct uwb_rc_evt_drp *drp_evt, struct uwb_ie_drp *drp_ie) { struct uwb_rsv *rsv; rsv = uwb_rsv_find(rc, src, drp_ie); if (!rsv) { /* * No reservation? 
It's either for a recently * terminated reservation; or the DRP IE couldn't be * processed (e.g., an invalid IE or out of memory). */ return; } /* * Do nothing with DRP IEs for reservations that have been * terminated. */ if (rsv->state == UWB_RSV_STATE_NONE) { uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); return; } if (uwb_ie_drp_owner(drp_ie)) uwb_drp_process_target(rc, rsv, drp_ie, drp_evt); else uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt); } static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie) { return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0; } /* * Process a received DRP IE. */ static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, struct uwb_dev *src, struct uwb_ie_drp *drp_ie) { if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP) uwb_drp_handle_alien_drp(rc, drp_ie); else if (uwb_drp_involves_us(rc, drp_ie)) uwb_drp_process_involved(rc, src, drp_evt, drp_ie); else uwb_drp_process_not_involved(rc, drp_evt, drp_ie); } /* * Process a received DRP Availability IE */ static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src, struct uwb_ie_drp_avail *drp_availability_ie) { bitmap_copy(src->last_availability_bm, drp_availability_ie->bmp, UWB_NUM_MAS); } /* * Process all the DRP IEs (both DRP IEs and the DRP Availability IE) * from a device. 
*/ static void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, size_t ielen, struct uwb_dev *src_dev) { struct device *dev = &rc->uwb_dev.dev; struct uwb_ie_hdr *ie_hdr; void *ptr; ptr = drp_evt->ie_data; for (;;) { ie_hdr = uwb_ie_next(&ptr, &ielen); if (!ie_hdr) break; switch (ie_hdr->element_id) { case UWB_IE_DRP_AVAILABILITY: uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr); break; case UWB_IE_DRP: uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr); break; default: dev_warn(dev, "unexpected IE in DRP notification\n"); break; } } if (ielen > 0) dev_warn(dev, "%d octets remaining in DRP notification\n", (int)ielen); } /** * uwbd_evt_handle_rc_drp - handle a DRP_IE event * @evt: the DRP_IE event from the radio controller * * This processes DRP notifications from the radio controller, either * initiating a new reservation or transitioning an existing * reservation into a different state. * * DRP notifications can occur for three different reasons: * * - UWB_DRP_NOTIF_DRP_IE_RECVD: one or more DRP IEs with the RC as * the target or source have been received. * * These DRP IEs could be new or for an existing reservation. * * If the DRP IE for an existing reservation ceases to be to * received for at least mMaxLostBeacons, the reservation should be * considered to be terminated. Note that the TERMINATE reason (see * below) may not always be signalled (e.g., the remote device has * two or more reservations established with the RC). * * - UWB_DRP_NOTIF_CONFLICT: DRP IEs from any device in the beacon * group conflict with the RC's reservations. * * - UWB_DRP_NOTIF_TERMINATE: DRP IEs are no longer being received * from a device (i.e., it's terminated all reservations). * * Only the software state of the reservations is changed; the setting * of the radio controller's DRP IEs is done after all the events in * an event buffer are processed. 
This saves waiting multiple times * for the SET_DRP_IE command to complete. */ int uwbd_evt_handle_rc_drp(struct uwb_event *evt) { struct device *dev = &evt->rc->uwb_dev.dev; struct uwb_rc *rc = evt->rc; struct uwb_rc_evt_drp *drp_evt; size_t ielength, bytes_left; struct uwb_dev_addr src_addr; struct uwb_dev *src_dev; /* Is there enough data to decode the event (and any IEs in its payload)? */ if (evt->notif.size < sizeof(*drp_evt)) { dev_err(dev, "DRP event: Not enough data to decode event " "[%zu bytes left, %zu needed]\n", evt->notif.size, sizeof(*drp_evt)); return 0; } bytes_left = evt->notif.size - sizeof(*drp_evt); drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb); ielength = le16_to_cpu(drp_evt->ie_length); if (bytes_left != ielength) { dev_err(dev, "DRP event: Not enough data in payload [%zu" "bytes left, %zu declared in the event]\n", bytes_left, ielength); return 0; } memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr)); src_dev = uwb_dev_get_by_devaddr(rc, &src_addr); if (!src_dev) { /* * A DRP notification from an unrecognized device. * * This is probably from a WUSB device that doesn't * have an EUI-48 and therefore doesn't show up in the * UWB device database. It's safe to simply ignore * these. */ return 0; } mutex_lock(&rc->rsvs_mutex); /* We do not distinguish from the reason */ uwb_drp_process_all(rc, drp_evt, ielength, src_dev); mutex_unlock(&rc->rsvs_mutex); uwb_dev_put(src_dev); return 0; }
gpl-2.0
xiaokang1986/Kylin-kernel-3.4.10
drivers/net/fddi/skfp/smt.c
10639
52521
/****************************************************************************** * * (C)Copyright 1998,1999 SysKonnect, * a business unit of Schneider & Koch & Co. Datensysteme GmbH. * * See the file "skfddi.c" for further information. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * The information in this file is provided "AS IS" without warranty. * ******************************************************************************/ #include "h/types.h" #include "h/fddi.h" #include "h/smc.h" #include "h/smt_p.h" #include <linux/bitrev.h> #include <linux/kernel.h> #define KERNEL #include "h/smtstate.h" #ifndef lint static const char ID_sccs[] = "@(#)smt.c 2.43 98/11/23 (C) SK " ; #endif /* * FC in SMbuf */ #define m_fc(mb) ((mb)->sm_data[0]) #define SMT_TID_MAGIC 0x1f0a7b3c #ifdef DEBUG static const char *const smt_type_name[] = { "SMT_00??", "SMT_INFO", "SMT_02??", "SMT_03??", "SMT_04??", "SMT_05??", "SMT_06??", "SMT_07??", "SMT_08??", "SMT_09??", "SMT_0A??", "SMT_0B??", "SMT_0C??", "SMT_0D??", "SMT_0E??", "SMT_NSA" } ; static const char *const smt_class_name[] = { "UNKNOWN","NIF","SIF_CONFIG","SIF_OPER","ECF","RAF","RDF", "SRF","PMF_GET","PMF_SET","ESF" } ; #endif #define LAST_CLASS (SMT_PMF_SET) static const struct fddi_addr SMT_Unknown = { { 0,0,0x1f,0,0,0 } } ; /* * function prototypes */ #ifdef LITTLE_ENDIAN static int smt_swap_short(u_short s); #endif static int mac_index(struct s_smc *smc, int mac); static int phy_index(struct s_smc *smc, int phy); static int mac_con_resource_index(struct s_smc *smc, int mac); static int phy_con_resource_index(struct s_smc *smc, int phy); static void smt_send_rdf(struct s_smc *smc, SMbuf *rej, int fc, int reason, int local); static void smt_send_nif(struct s_smc *smc, const struct fddi_addr *dest, int fc, u_long tid, int type, 
int local); static void smt_send_ecf(struct s_smc *smc, struct fddi_addr *dest, int fc, u_long tid, int type, int len); static void smt_echo_test(struct s_smc *smc, int dna); static void smt_send_sif_config(struct s_smc *smc, struct fddi_addr *dest, u_long tid, int local); static void smt_send_sif_operation(struct s_smc *smc, struct fddi_addr *dest, u_long tid, int local); #ifdef LITTLE_ENDIAN static void smt_string_swap(char *data, const char *format, int len); #endif static void smt_add_frame_len(SMbuf *mb, int len); static void smt_fill_una(struct s_smc *smc, struct smt_p_una *una); static void smt_fill_sde(struct s_smc *smc, struct smt_p_sde *sde); static void smt_fill_state(struct s_smc *smc, struct smt_p_state *state); static void smt_fill_timestamp(struct s_smc *smc, struct smt_p_timestamp *ts); static void smt_fill_policy(struct s_smc *smc, struct smt_p_policy *policy); static void smt_fill_latency(struct s_smc *smc, struct smt_p_latency *latency); static void smt_fill_neighbor(struct s_smc *smc, struct smt_p_neighbor *neighbor); static int smt_fill_path(struct s_smc *smc, struct smt_p_path *path); static void smt_fill_mac_status(struct s_smc *smc, struct smt_p_mac_status *st); static void smt_fill_lem(struct s_smc *smc, struct smt_p_lem *lem, int phy); static void smt_fill_version(struct s_smc *smc, struct smt_p_version *vers); static void smt_fill_fsc(struct s_smc *smc, struct smt_p_fsc *fsc); static void smt_fill_mac_counter(struct s_smc *smc, struct smt_p_mac_counter *mc); static void smt_fill_mac_fnc(struct s_smc *smc, struct smt_p_mac_fnc *fnc); static void smt_fill_manufacturer(struct s_smc *smc, struct smp_p_manufacturer *man); static void smt_fill_user(struct s_smc *smc, struct smp_p_user *user); static void smt_fill_setcount(struct s_smc *smc, struct smt_p_setcount *setcount); static void smt_fill_echo(struct s_smc *smc, struct smt_p_echo *echo, u_long seed, int len); static void smt_clear_una_dna(struct s_smc *smc); static void 
smt_clear_old_una_dna(struct s_smc *smc); #ifdef CONCENTRATOR static int entity_to_index(void); #endif static void update_dac(struct s_smc *smc, int report); static int div_ratio(u_long upper, u_long lower); #ifdef USE_CAN_ADDR static void hwm_conv_can(struct s_smc *smc, char *data, int len); #else #define hwm_conv_can(smc,data,len) #endif static inline int is_my_addr(const struct s_smc *smc, const struct fddi_addr *addr) { return(*(short *)(&addr->a[0]) == *(short *)(&smc->mib.m[MAC0].fddiMACSMTAddress.a[0]) && *(short *)(&addr->a[2]) == *(short *)(&smc->mib.m[MAC0].fddiMACSMTAddress.a[2]) && *(short *)(&addr->a[4]) == *(short *)(&smc->mib.m[MAC0].fddiMACSMTAddress.a[4])) ; } static inline int is_broadcast(const struct fddi_addr *addr) { return *(u_short *)(&addr->a[0]) == 0xffff && *(u_short *)(&addr->a[2]) == 0xffff && *(u_short *)(&addr->a[4]) == 0xffff; } static inline int is_individual(const struct fddi_addr *addr) { return !(addr->a[0] & GROUP_ADDR); } static inline int is_equal(const struct fddi_addr *addr1, const struct fddi_addr *addr2) { return *(u_short *)(&addr1->a[0]) == *(u_short *)(&addr2->a[0]) && *(u_short *)(&addr1->a[2]) == *(u_short *)(&addr2->a[2]) && *(u_short *)(&addr1->a[4]) == *(u_short *)(&addr2->a[4]); } /* * list of mandatory paras in frames */ static const u_short plist_nif[] = { SMT_P_UNA,SMT_P_SDE,SMT_P_STATE,0 } ; /* * init SMT agent */ void smt_agent_init(struct s_smc *smc) { int i ; /* * get MAC address */ smc->mib.m[MAC0].fddiMACSMTAddress = smc->hw.fddi_home_addr ; /* * get OUI address from driver (bia == built-in-address) */ smc->mib.fddiSMTStationId.sid_oem[0] = 0 ; smc->mib.fddiSMTStationId.sid_oem[1] = 0 ; driver_get_bia(smc,&smc->mib.fddiSMTStationId.sid_node) ; for (i = 0 ; i < 6 ; i ++) { smc->mib.fddiSMTStationId.sid_node.a[i] = bitrev8(smc->mib.fddiSMTStationId.sid_node.a[i]); } smc->mib.fddiSMTManufacturerData[0] = smc->mib.fddiSMTStationId.sid_node.a[0] ; smc->mib.fddiSMTManufacturerData[1] = 
smc->mib.fddiSMTStationId.sid_node.a[1] ; smc->mib.fddiSMTManufacturerData[2] = smc->mib.fddiSMTStationId.sid_node.a[2] ; smc->sm.smt_tid = 0 ; smc->mib.m[MAC0].fddiMACDupAddressTest = DA_NONE ; smc->mib.m[MAC0].fddiMACUNDA_Flag = FALSE ; #ifndef SLIM_SMT smt_clear_una_dna(smc) ; smt_clear_old_una_dna(smc) ; #endif for (i = 0 ; i < SMT_MAX_TEST ; i++) smc->sm.pend[i] = 0 ; smc->sm.please_reconnect = 0 ; smc->sm.uniq_ticks = 0 ; } /* * SMT task * forever * delay 30 seconds * send NIF * check tvu & tvd * end */ void smt_agent_task(struct s_smc *smc) { smt_timer_start(smc,&smc->sm.smt_timer, (u_long)1000000L, EV_TOKEN(EVENT_SMT,SM_TIMER)) ; DB_SMT("SMT agent task\n",0,0) ; } #ifndef SMT_REAL_TOKEN_CT void smt_emulate_token_ct(struct s_smc *smc, int mac_index) { u_long count; u_long time; time = smt_get_time(); count = ((time - smc->sm.last_tok_time[mac_index]) * 100)/TICKS_PER_SECOND; /* * Only when ring is up we will have a token count. The * flag is unfortunately a single instance value. This * doesn't matter now, because we currently have only * one MAC instance. */ if (smc->hw.mac_ring_is_up){ smc->mib.m[mac_index].fddiMACToken_Ct += count; } /* Remember current time */ smc->sm.last_tok_time[mac_index] = time; } #endif /*ARGSUSED1*/ void smt_event(struct s_smc *smc, int event) { u_long time ; #ifndef SMT_REAL_TOKEN_CT int i ; #endif if (smc->sm.please_reconnect) { smc->sm.please_reconnect -- ; if (smc->sm.please_reconnect == 0) { /* Counted down */ queue_event(smc,EVENT_ECM,EC_CONNECT) ; } } if (event == SM_FAST) return ; /* * timer for periodic cleanup in driver * reset and start the watchdog (FM2) * ESS timer * SBA timer */ smt_timer_poll(smc) ; smt_start_watchdog(smc) ; #ifndef SLIM_SMT #ifndef BOOT #ifdef ESS ess_timer_poll(smc) ; #endif #endif #ifdef SBA sba_timer_poll(smc) ; #endif smt_srf_event(smc,0,0,0) ; #endif /* no SLIM_SMT */ time = smt_get_time() ; if (time - smc->sm.smt_last_lem >= TICKS_PER_SECOND*8) { /* * Use 8 sec. 
for the time intervall, it simplifies the * LER estimation. */ struct fddi_mib_m *mib ; u_long upper ; u_long lower ; int cond ; int port; struct s_phy *phy ; /* * calculate LEM bit error rate */ sm_lem_evaluate(smc) ; smc->sm.smt_last_lem = time ; /* * check conditions */ #ifndef SLIM_SMT mac_update_counter(smc) ; mib = smc->mib.m ; upper = (mib->fddiMACLost_Ct - mib->fddiMACOld_Lost_Ct) + (mib->fddiMACError_Ct - mib->fddiMACOld_Error_Ct) ; lower = (mib->fddiMACFrame_Ct - mib->fddiMACOld_Frame_Ct) + (mib->fddiMACLost_Ct - mib->fddiMACOld_Lost_Ct) ; mib->fddiMACFrameErrorRatio = div_ratio(upper,lower) ; cond = ((!mib->fddiMACFrameErrorThreshold && mib->fddiMACError_Ct != mib->fddiMACOld_Error_Ct) || (mib->fddiMACFrameErrorRatio > mib->fddiMACFrameErrorThreshold)) ; if (cond != mib->fddiMACFrameErrorFlag) smt_srf_event(smc,SMT_COND_MAC_FRAME_ERROR, INDEX_MAC,cond) ; upper = (mib->fddiMACNotCopied_Ct - mib->fddiMACOld_NotCopied_Ct) ; lower = upper + (mib->fddiMACCopied_Ct - mib->fddiMACOld_Copied_Ct) ; mib->fddiMACNotCopiedRatio = div_ratio(upper,lower) ; cond = ((!mib->fddiMACNotCopiedThreshold && mib->fddiMACNotCopied_Ct != mib->fddiMACOld_NotCopied_Ct)|| (mib->fddiMACNotCopiedRatio > mib->fddiMACNotCopiedThreshold)) ; if (cond != mib->fddiMACNotCopiedFlag) smt_srf_event(smc,SMT_COND_MAC_NOT_COPIED, INDEX_MAC,cond) ; /* * set old values */ mib->fddiMACOld_Frame_Ct = mib->fddiMACFrame_Ct ; mib->fddiMACOld_Copied_Ct = mib->fddiMACCopied_Ct ; mib->fddiMACOld_Error_Ct = mib->fddiMACError_Ct ; mib->fddiMACOld_Lost_Ct = mib->fddiMACLost_Ct ; mib->fddiMACOld_NotCopied_Ct = mib->fddiMACNotCopied_Ct ; /* * Check port EBError Condition */ for (port = 0; port < NUMPHYS; port ++) { phy = &smc->y[port] ; if (!phy->mib->fddiPORTHardwarePresent) { continue; } cond = (phy->mib->fddiPORTEBError_Ct - phy->mib->fddiPORTOldEBError_Ct > 5) ; /* If ratio is more than 5 in 8 seconds * Set the condition. 
*/ smt_srf_event(smc,SMT_COND_PORT_EB_ERROR, (int) (INDEX_PORT+ phy->np) ,cond) ; /* * set old values */ phy->mib->fddiPORTOldEBError_Ct = phy->mib->fddiPORTEBError_Ct ; } #endif /* no SLIM_SMT */ } #ifndef SLIM_SMT if (time - smc->sm.smt_last_notify >= (u_long) (smc->mib.fddiSMTTT_Notify * TICKS_PER_SECOND) ) { /* * we can either send an announcement or a request * a request will trigger a reply so that we can update * our dna * note: same tid must be used until reply is received */ if (!smc->sm.pend[SMT_TID_NIF]) smc->sm.pend[SMT_TID_NIF] = smt_get_tid(smc) ; smt_send_nif(smc,&fddi_broadcast, FC_SMT_NSA, smc->sm.pend[SMT_TID_NIF], SMT_REQUEST,0) ; smc->sm.smt_last_notify = time ; } /* * check timer */ if (smc->sm.smt_tvu && time - smc->sm.smt_tvu > 228*TICKS_PER_SECOND) { DB_SMT("SMT : UNA expired\n",0,0) ; smc->sm.smt_tvu = 0 ; if (!is_equal(&smc->mib.m[MAC0].fddiMACUpstreamNbr, &SMT_Unknown)){ /* Do not update unknown address */ smc->mib.m[MAC0].fddiMACOldUpstreamNbr= smc->mib.m[MAC0].fddiMACUpstreamNbr ; } smc->mib.m[MAC0].fddiMACUpstreamNbr = SMT_Unknown ; smc->mib.m[MAC0].fddiMACUNDA_Flag = FALSE ; /* * Make sure the fddiMACUNDA_Flag = FALSE is * included in the SRF so we don't generate * a separate SRF for the deassertion of this * condition */ update_dac(smc,0) ; smt_srf_event(smc, SMT_EVENT_MAC_NEIGHBOR_CHANGE, INDEX_MAC,0) ; } if (smc->sm.smt_tvd && time - smc->sm.smt_tvd > 228*TICKS_PER_SECOND) { DB_SMT("SMT : DNA expired\n",0,0) ; smc->sm.smt_tvd = 0 ; if (!is_equal(&smc->mib.m[MAC0].fddiMACDownstreamNbr, &SMT_Unknown)){ /* Do not update unknown address */ smc->mib.m[MAC0].fddiMACOldDownstreamNbr= smc->mib.m[MAC0].fddiMACDownstreamNbr ; } smc->mib.m[MAC0].fddiMACDownstreamNbr = SMT_Unknown ; smt_srf_event(smc, SMT_EVENT_MAC_NEIGHBOR_CHANGE, INDEX_MAC,0) ; } #endif /* no SLIM_SMT */ #ifndef SMT_REAL_TOKEN_CT /* * Token counter emulation section. If hardware supports the token * count, the token counter will be updated in mac_update_counter. 
*/ for (i = MAC0; i < NUMMACS; i++ ){ if (time - smc->sm.last_tok_time[i] > 2*TICKS_PER_SECOND ){ smt_emulate_token_ct( smc, i ); } } #endif smt_timer_start(smc,&smc->sm.smt_timer, (u_long)1000000L, EV_TOKEN(EVENT_SMT,SM_TIMER)) ; } static int div_ratio(u_long upper, u_long lower) { if ((upper<<16L) < upper) upper = 0xffff0000L ; else upper <<= 16L ; if (!lower) return 0; return (int)(upper/lower) ; } #ifndef SLIM_SMT /* * receive packet handler */ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) /* int fs; frame status */ { struct smt_header *sm ; int local ; int illegal = 0 ; switch (m_fc(mb)) { case FC_SMT_INFO : case FC_SMT_LAN_LOC : case FC_SMT_LOC : case FC_SMT_NSA : break ; default : smt_free_mbuf(smc,mb) ; return ; } smc->mib.m[MAC0].fddiMACSMTCopied_Ct++ ; sm = smtod(mb,struct smt_header *) ; local = ((fs & L_INDICATOR) != 0) ; hwm_conv_can(smc,(char *)sm,12) ; /* check destination address */ if (is_individual(&sm->smt_dest) && !is_my_addr(smc,&sm->smt_dest)) { smt_free_mbuf(smc,mb) ; return ; } #if 0 /* for DUP recognition, do NOT filter them */ /* ignore loop back packets */ if (is_my_addr(smc,&sm->smt_source) && !local) { smt_free_mbuf(smc,mb) ; return ; } #endif smt_swap_para(sm,(int) mb->sm_len,1) ; DB_SMT("SMT : received packet [%s] at 0x%x\n", smt_type_name[m_fc(mb) & 0xf],sm) ; DB_SMT("SMT : version %d, class %s\n",sm->smt_version, smt_class_name[(sm->smt_class>LAST_CLASS)?0 : sm->smt_class]) ; #ifdef SBA /* * check if NSA frame */ if (m_fc(mb) == FC_SMT_NSA && sm->smt_class == SMT_NIF && (sm->smt_type == SMT_ANNOUNCE || sm->smt_type == SMT_REQUEST)) { smc->sba.sm = sm ; sba(smc,NIF) ; } #endif /* * ignore any packet with NSA and A-indicator set */ if ( (fs & A_INDICATOR) && m_fc(mb) == FC_SMT_NSA) { DB_SMT("SMT : ignoring NSA with A-indicator set from %s\n", addr_to_string(&sm->smt_source),0) ; smt_free_mbuf(smc,mb) ; return ; } /* * ignore frames with illegal length */ if (((sm->smt_class == SMT_ECF) && (sm->smt_len > 
SMT_MAX_ECHO_LEN)) || ((sm->smt_class != SMT_ECF) && (sm->smt_len > SMT_MAX_INFO_LEN))) { smt_free_mbuf(smc,mb) ; return ; } /* * check SMT version */ switch (sm->smt_class) { case SMT_NIF : case SMT_SIF_CONFIG : case SMT_SIF_OPER : case SMT_ECF : if (sm->smt_version != SMT_VID) illegal = 1; break ; default : if (sm->smt_version != SMT_VID_2) illegal = 1; break ; } if (illegal) { DB_SMT("SMT : version = %d, dest = %s\n", sm->smt_version,addr_to_string(&sm->smt_source)) ; smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_VERSION,local) ; smt_free_mbuf(smc,mb) ; return ; } if ((sm->smt_len > mb->sm_len - sizeof(struct smt_header)) || ((sm->smt_len & 3) && (sm->smt_class != SMT_ECF))) { DB_SMT("SMT: info length error, len = %d\n",sm->smt_len,0) ; smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_LENGTH,local) ; smt_free_mbuf(smc,mb) ; return ; } switch (sm->smt_class) { case SMT_NIF : if (smt_check_para(smc,sm,plist_nif)) { DB_SMT("SMT: NIF with para problem, ignoring\n",0,0) ; break ; } switch (sm->smt_type) { case SMT_ANNOUNCE : case SMT_REQUEST : if (!(fs & C_INDICATOR) && m_fc(mb) == FC_SMT_NSA && is_broadcast(&sm->smt_dest)) { struct smt_p_state *st ; /* set my UNA */ if (!is_equal( &smc->mib.m[MAC0].fddiMACUpstreamNbr, &sm->smt_source)) { DB_SMT("SMT : updated my UNA = %s\n", addr_to_string(&sm->smt_source),0) ; if (!is_equal(&smc->mib.m[MAC0]. fddiMACUpstreamNbr,&SMT_Unknown)){ /* Do not update unknown address */ smc->mib.m[MAC0].fddiMACOldUpstreamNbr= smc->mib.m[MAC0].fddiMACUpstreamNbr ; } smc->mib.m[MAC0].fddiMACUpstreamNbr = sm->smt_source ; smt_srf_event(smc, SMT_EVENT_MAC_NEIGHBOR_CHANGE, INDEX_MAC,0) ; smt_echo_test(smc,0) ; } smc->sm.smt_tvu = smt_get_time() ; st = (struct smt_p_state *) sm_to_para(smc,sm,SMT_P_STATE) ; if (st) { smc->mib.m[MAC0].fddiMACUNDA_Flag = (st->st_dupl_addr & SMT_ST_MY_DUPA) ? 
TRUE : FALSE ; update_dac(smc,1) ; } } if ((sm->smt_type == SMT_REQUEST) && is_individual(&sm->smt_source) && ((!(fs & A_INDICATOR) && m_fc(mb) == FC_SMT_NSA) || (m_fc(mb) != FC_SMT_NSA))) { DB_SMT("SMT : replying to NIF request %s\n", addr_to_string(&sm->smt_source),0) ; smt_send_nif(smc,&sm->smt_source, FC_SMT_INFO, sm->smt_tid, SMT_REPLY,local) ; } break ; case SMT_REPLY : DB_SMT("SMT : received NIF response from %s\n", addr_to_string(&sm->smt_source),0) ; if (fs & A_INDICATOR) { smc->sm.pend[SMT_TID_NIF] = 0 ; DB_SMT("SMT : duplicate address\n",0,0) ; smc->mib.m[MAC0].fddiMACDupAddressTest = DA_FAILED ; smc->r.dup_addr_test = DA_FAILED ; queue_event(smc,EVENT_RMT,RM_DUP_ADDR) ; smc->mib.m[MAC0].fddiMACDA_Flag = TRUE ; update_dac(smc,1) ; break ; } if (sm->smt_tid == smc->sm.pend[SMT_TID_NIF]) { smc->sm.pend[SMT_TID_NIF] = 0 ; /* set my DNA */ if (!is_equal( &smc->mib.m[MAC0].fddiMACDownstreamNbr, &sm->smt_source)) { DB_SMT("SMT : updated my DNA\n",0,0) ; if (!is_equal(&smc->mib.m[MAC0]. 
fddiMACDownstreamNbr, &SMT_Unknown)){ /* Do not update unknown address */ smc->mib.m[MAC0].fddiMACOldDownstreamNbr = smc->mib.m[MAC0].fddiMACDownstreamNbr ; } smc->mib.m[MAC0].fddiMACDownstreamNbr = sm->smt_source ; smt_srf_event(smc, SMT_EVENT_MAC_NEIGHBOR_CHANGE, INDEX_MAC,0) ; smt_echo_test(smc,1) ; } smc->mib.m[MAC0].fddiMACDA_Flag = FALSE ; update_dac(smc,1) ; smc->sm.smt_tvd = smt_get_time() ; smc->mib.m[MAC0].fddiMACDupAddressTest = DA_PASSED ; if (smc->r.dup_addr_test != DA_PASSED) { smc->r.dup_addr_test = DA_PASSED ; queue_event(smc,EVENT_RMT,RM_DUP_ADDR) ; } } else if (sm->smt_tid == smc->sm.pend[SMT_TID_NIF_TEST]) { DB_SMT("SMT : NIF test TID ok\n",0,0) ; } else { DB_SMT("SMT : expected TID %lx, got %lx\n", smc->sm.pend[SMT_TID_NIF],sm->smt_tid) ; } break ; default : illegal = 2 ; break ; } break ; case SMT_SIF_CONFIG : /* station information */ if (sm->smt_type != SMT_REQUEST) break ; DB_SMT("SMT : replying to SIF Config request from %s\n", addr_to_string(&sm->smt_source),0) ; smt_send_sif_config(smc,&sm->smt_source,sm->smt_tid,local) ; break ; case SMT_SIF_OPER : /* station information */ if (sm->smt_type != SMT_REQUEST) break ; DB_SMT("SMT : replying to SIF Operation request from %s\n", addr_to_string(&sm->smt_source),0) ; smt_send_sif_operation(smc,&sm->smt_source,sm->smt_tid,local) ; break ; case SMT_ECF : /* echo frame */ switch (sm->smt_type) { case SMT_REPLY : smc->mib.priv.fddiPRIVECF_Reply_Rx++ ; DB_SMT("SMT: received ECF reply from %s\n", addr_to_string(&sm->smt_source),0) ; if (sm_to_para(smc,sm,SMT_P_ECHODATA) == NULL) { DB_SMT("SMT: ECHODATA missing\n",0,0) ; break ; } if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF]) { DB_SMT("SMT : ECF test TID ok\n",0,0) ; } else if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF_UNA]) { DB_SMT("SMT : ECF test UNA ok\n",0,0) ; } else if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF_DNA]) { DB_SMT("SMT : ECF test DNA ok\n",0,0) ; } else { DB_SMT("SMT : expected TID %lx, got %lx\n", smc->sm.pend[SMT_TID_ECF], sm->smt_tid) 
; } break ; case SMT_REQUEST : smc->mib.priv.fddiPRIVECF_Req_Rx++ ; { if (sm->smt_len && !sm_to_para(smc,sm,SMT_P_ECHODATA)) { DB_SMT("SMT: ECF with para problem,sending RDF\n",0,0) ; smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_LENGTH, local) ; break ; } DB_SMT("SMT - sending ECF reply to %s\n", addr_to_string(&sm->smt_source),0) ; /* set destination addr. & reply */ sm->smt_dest = sm->smt_source ; sm->smt_type = SMT_REPLY ; dump_smt(smc,sm,"ECF REPLY") ; smc->mib.priv.fddiPRIVECF_Reply_Tx++ ; smt_send_frame(smc,mb,FC_SMT_INFO,local) ; return ; /* DON'T free mbuf */ } default : illegal = 1 ; break ; } break ; #ifndef BOOT case SMT_RAF : /* resource allocation */ #ifdef ESS DB_ESSN(2,"ESS: RAF frame received\n",0,0) ; fs = ess_raf_received_pack(smc,mb,sm,fs) ; #endif #ifdef SBA DB_SBAN(2,"SBA: RAF frame received\n",0,0) ; sba_raf_received_pack(smc,sm,fs) ; #endif break ; case SMT_RDF : /* request denied */ smc->mib.priv.fddiPRIVRDF_Rx++ ; break ; case SMT_ESF : /* extended service - not supported */ if (sm->smt_type == SMT_REQUEST) { DB_SMT("SMT - received ESF, sending RDF\n",0,0) ; smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_CLASS,local) ; } break ; case SMT_PMF_GET : case SMT_PMF_SET : if (sm->smt_type != SMT_REQUEST) break ; /* update statistics */ if (sm->smt_class == SMT_PMF_GET) smc->mib.priv.fddiPRIVPMF_Get_Rx++ ; else smc->mib.priv.fddiPRIVPMF_Set_Rx++ ; /* * ignore PMF SET with I/G set */ if ((sm->smt_class == SMT_PMF_SET) && !is_individual(&sm->smt_dest)) { DB_SMT("SMT: ignoring PMF-SET with I/G set\n",0,0) ; break ; } smt_pmf_received_pack(smc,mb, local) ; break ; case SMT_SRF : dump_smt(smc,sm,"SRF received") ; break ; default : if (sm->smt_type != SMT_REQUEST) break ; /* * For frames with unknown class: * we need to send a RDF frame according to 8.1.3.1.1, * only if it is a REQUEST. 
*/ DB_SMT("SMT : class = %d, send RDF to %s\n", sm->smt_class, addr_to_string(&sm->smt_source)) ; smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_CLASS,local) ; break ; #endif } if (illegal) { DB_SMT("SMT: discarding invalid frame, reason = %d\n", illegal,0) ; } smt_free_mbuf(smc,mb) ; } static void update_dac(struct s_smc *smc, int report) { int cond ; cond = ( smc->mib.m[MAC0].fddiMACUNDA_Flag | smc->mib.m[MAC0].fddiMACDA_Flag) != 0 ; if (report && (cond != smc->mib.m[MAC0].fddiMACDuplicateAddressCond)) smt_srf_event(smc, SMT_COND_MAC_DUP_ADDR,INDEX_MAC,cond) ; else smc->mib.m[MAC0].fddiMACDuplicateAddressCond = cond ; } /* * send SMT frame * set source address * set station ID * send frame */ void smt_send_frame(struct s_smc *smc, SMbuf *mb, int fc, int local) /* SMbuf *mb; buffer to send */ /* int fc; FC value */ { struct smt_header *sm ; if (!smc->r.sm_ma_avail && !local) { smt_free_mbuf(smc,mb) ; return ; } sm = smtod(mb,struct smt_header *) ; sm->smt_source = smc->mib.m[MAC0].fddiMACSMTAddress ; sm->smt_sid = smc->mib.fddiSMTStationId ; smt_swap_para(sm,(int) mb->sm_len,0) ; /* swap para & header */ hwm_conv_can(smc,(char *)sm,12) ; /* convert SA and DA */ smc->mib.m[MAC0].fddiMACSMTTransmit_Ct++ ; smt_send_mbuf(smc,mb,local ? 
FC_SMT_LOC : fc) ; } /* * generate and send RDF */ static void smt_send_rdf(struct s_smc *smc, SMbuf *rej, int fc, int reason, int local) /* SMbuf *rej; mbuf of offending frame */ /* int fc; FC of denied frame */ /* int reason; reason code */ { SMbuf *mb ; struct smt_header *sm ; /* header of offending frame */ struct smt_rdf *rdf ; int len ; int frame_len ; sm = smtod(rej,struct smt_header *) ; if (sm->smt_type != SMT_REQUEST) return ; DB_SMT("SMT: sending RDF to %s,reason = 0x%x\n", addr_to_string(&sm->smt_source),reason) ; /* * note: get framelength from MAC length, NOT from SMT header * smt header length is included in sm_len */ frame_len = rej->sm_len ; if (!(mb=smt_build_frame(smc,SMT_RDF,SMT_REPLY,sizeof(struct smt_rdf)))) return ; rdf = smtod(mb,struct smt_rdf *) ; rdf->smt.smt_tid = sm->smt_tid ; /* use TID from sm */ rdf->smt.smt_dest = sm->smt_source ; /* set dest = source */ /* set P12 */ rdf->reason.para.p_type = SMT_P_REASON ; rdf->reason.para.p_len = sizeof(struct smt_p_reason) - PARA_LEN ; rdf->reason.rdf_reason = reason ; /* set P14 */ rdf->version.para.p_type = SMT_P_VERSION ; rdf->version.para.p_len = sizeof(struct smt_p_version) - PARA_LEN ; rdf->version.v_pad = 0 ; rdf->version.v_n = 1 ; rdf->version.v_index = 1 ; rdf->version.v_version[0] = SMT_VID_2 ; rdf->version.v_pad2 = 0 ; /* set P13 */ if ((unsigned) frame_len <= SMT_MAX_INFO_LEN - sizeof(*rdf) + 2*sizeof(struct smt_header)) len = frame_len ; else len = SMT_MAX_INFO_LEN - sizeof(*rdf) + 2*sizeof(struct smt_header) ; /* make length multiple of 4 */ len &= ~3 ; rdf->refused.para.p_type = SMT_P_REFUSED ; /* length of para is smt_frame + ref_fc */ rdf->refused.para.p_len = len + 4 ; rdf->refused.ref_fc = fc ; /* swap it back */ smt_swap_para(sm,frame_len,0) ; memcpy((char *) &rdf->refused.ref_header,(char *) sm,len) ; len -= sizeof(struct smt_header) ; mb->sm_len += len ; rdf->smt.smt_len += len ; dump_smt(smc,(struct smt_header *)rdf,"RDF") ; smc->mib.priv.fddiPRIVRDF_Tx++ ; 
smt_send_frame(smc,mb,FC_SMT_INFO,local) ; } /* * generate and send NIF */ static void smt_send_nif(struct s_smc *smc, const struct fddi_addr *dest, int fc, u_long tid, int type, int local) /* struct fddi_addr *dest; dest address */ /* int fc; frame control */ /* u_long tid; transaction id */ /* int type; frame type */ { struct smt_nif *nif ; SMbuf *mb ; if (!(mb = smt_build_frame(smc,SMT_NIF,type,sizeof(struct smt_nif)))) return ; nif = smtod(mb, struct smt_nif *) ; smt_fill_una(smc,&nif->una) ; /* set UNA */ smt_fill_sde(smc,&nif->sde) ; /* set station descriptor */ smt_fill_state(smc,&nif->state) ; /* set state information */ #ifdef SMT6_10 smt_fill_fsc(smc,&nif->fsc) ; /* set frame status cap. */ #endif nif->smt.smt_dest = *dest ; /* destination address */ nif->smt.smt_tid = tid ; /* transaction ID */ dump_smt(smc,(struct smt_header *)nif,"NIF") ; smt_send_frame(smc,mb,fc,local) ; } #ifdef DEBUG /* * send NIF request (test purpose) */ static void smt_send_nif_request(struct s_smc *smc, struct fddi_addr *dest) { smc->sm.pend[SMT_TID_NIF_TEST] = smt_get_tid(smc) ; smt_send_nif(smc,dest, FC_SMT_INFO, smc->sm.pend[SMT_TID_NIF_TEST], SMT_REQUEST,0) ; } /* * send ECF request (test purpose) */ static void smt_send_ecf_request(struct s_smc *smc, struct fddi_addr *dest, int len) { smc->sm.pend[SMT_TID_ECF] = smt_get_tid(smc) ; smt_send_ecf(smc,dest, FC_SMT_INFO, smc->sm.pend[SMT_TID_ECF], SMT_REQUEST,len) ; } #endif /* * echo test */ static void smt_echo_test(struct s_smc *smc, int dna) { u_long tid ; smc->sm.pend[dna ? SMT_TID_ECF_DNA : SMT_TID_ECF_UNA] = tid = smt_get_tid(smc) ; smt_send_ecf(smc, dna ? 
&smc->mib.m[MAC0].fddiMACDownstreamNbr : &smc->mib.m[MAC0].fddiMACUpstreamNbr, FC_SMT_INFO,tid, SMT_REQUEST, (SMT_TEST_ECHO_LEN & ~3)-8) ; } /* * generate and send ECF */ static void smt_send_ecf(struct s_smc *smc, struct fddi_addr *dest, int fc, u_long tid, int type, int len) /* struct fddi_addr *dest; dest address */ /* int fc; frame control */ /* u_long tid; transaction id */ /* int type; frame type */ /* int len; frame length */ { struct smt_ecf *ecf ; SMbuf *mb ; if (!(mb = smt_build_frame(smc,SMT_ECF,type,SMT_ECF_LEN + len))) return ; ecf = smtod(mb, struct smt_ecf *) ; smt_fill_echo(smc,&ecf->ec_echo,tid,len) ; /* set ECHO */ ecf->smt.smt_dest = *dest ; /* destination address */ ecf->smt.smt_tid = tid ; /* transaction ID */ smc->mib.priv.fddiPRIVECF_Req_Tx++ ; smt_send_frame(smc,mb,fc,0) ; } /* * generate and send SIF config response */ static void smt_send_sif_config(struct s_smc *smc, struct fddi_addr *dest, u_long tid, int local) /* struct fddi_addr *dest; dest address */ /* u_long tid; transaction id */ { struct smt_sif_config *sif ; SMbuf *mb ; int len ; if (!(mb = smt_build_frame(smc,SMT_SIF_CONFIG,SMT_REPLY, SIZEOF_SMT_SIF_CONFIG))) return ; sif = smtod(mb, struct smt_sif_config *) ; smt_fill_timestamp(smc,&sif->ts) ; /* set time stamp */ smt_fill_sde(smc,&sif->sde) ; /* set station descriptor */ smt_fill_version(smc,&sif->version) ; /* set version information */ smt_fill_state(smc,&sif->state) ; /* set state information */ smt_fill_policy(smc,&sif->policy) ; /* set station policy */ smt_fill_latency(smc,&sif->latency); /* set station latency */ smt_fill_neighbor(smc,&sif->neighbor); /* set station neighbor */ smt_fill_setcount(smc,&sif->setcount) ; /* set count */ len = smt_fill_path(smc,&sif->path); /* set station path descriptor*/ sif->smt.smt_dest = *dest ; /* destination address */ sif->smt.smt_tid = tid ; /* transaction ID */ smt_add_frame_len(mb,len) ; /* adjust length fields */ dump_smt(smc,(struct smt_header *)sif,"SIF Configuration Reply") ; 
smt_send_frame(smc,mb,FC_SMT_INFO,local) ; } /* * generate and send SIF operation response */ static void smt_send_sif_operation(struct s_smc *smc, struct fddi_addr *dest, u_long tid, int local) /* struct fddi_addr *dest; dest address */ /* u_long tid; transaction id */ { struct smt_sif_operation *sif ; SMbuf *mb ; int ports ; int i ; ports = NUMPHYS ; #ifndef CONCENTRATOR if (smc->s.sas == SMT_SAS) ports = 1 ; #endif if (!(mb = smt_build_frame(smc,SMT_SIF_OPER,SMT_REPLY, SIZEOF_SMT_SIF_OPERATION+ports*sizeof(struct smt_p_lem)))) return ; sif = smtod(mb, struct smt_sif_operation *) ; smt_fill_timestamp(smc,&sif->ts) ; /* set time stamp */ smt_fill_mac_status(smc,&sif->status) ; /* set mac status */ smt_fill_mac_counter(smc,&sif->mc) ; /* set mac counter field */ smt_fill_mac_fnc(smc,&sif->fnc) ; /* set frame not copied counter */ smt_fill_manufacturer(smc,&sif->man) ; /* set manufacturer field */ smt_fill_user(smc,&sif->user) ; /* set user field */ smt_fill_setcount(smc,&sif->setcount) ; /* set count */ /* * set link error mon information */ if (ports == 1) { smt_fill_lem(smc,sif->lem,PS) ; } else { for (i = 0 ; i < ports ; i++) { smt_fill_lem(smc,&sif->lem[i],i) ; } } sif->smt.smt_dest = *dest ; /* destination address */ sif->smt.smt_tid = tid ; /* transaction ID */ dump_smt(smc,(struct smt_header *)sif,"SIF Operation Reply") ; smt_send_frame(smc,mb,FC_SMT_INFO,local) ; } /* * get and initialize SMT frame */ SMbuf *smt_build_frame(struct s_smc *smc, int class, int type, int length) { SMbuf *mb ; struct smt_header *smt ; #if 0 if (!smc->r.sm_ma_avail) { return 0; } #endif if (!(mb = smt_get_mbuf(smc))) return mb; mb->sm_len = length ; smt = smtod(mb, struct smt_header *) ; smt->smt_dest = fddi_broadcast ; /* set dest = broadcast */ smt->smt_class = class ; smt->smt_type = type ; switch (class) { case SMT_NIF : case SMT_SIF_CONFIG : case SMT_SIF_OPER : case SMT_ECF : smt->smt_version = SMT_VID ; break ; default : smt->smt_version = SMT_VID_2 ; break ; } smt->smt_tid 
= smt_get_tid(smc) ; /* set transaction ID */ smt->smt_pad = 0 ; smt->smt_len = length - sizeof(struct smt_header) ; return mb; } static void smt_add_frame_len(SMbuf *mb, int len) { struct smt_header *smt ; smt = smtod(mb, struct smt_header *) ; smt->smt_len += len ; mb->sm_len += len ; } /* * fill values in UNA parameter */ static void smt_fill_una(struct s_smc *smc, struct smt_p_una *una) { SMTSETPARA(una,SMT_P_UNA) ; una->una_pad = 0 ; una->una_node = smc->mib.m[MAC0].fddiMACUpstreamNbr ; } /* * fill values in SDE parameter */ static void smt_fill_sde(struct s_smc *smc, struct smt_p_sde *sde) { SMTSETPARA(sde,SMT_P_SDE) ; sde->sde_non_master = smc->mib.fddiSMTNonMaster_Ct ; sde->sde_master = smc->mib.fddiSMTMaster_Ct ; sde->sde_mac_count = NUMMACS ; /* only 1 MAC */ #ifdef CONCENTRATOR sde->sde_type = SMT_SDE_CONCENTRATOR ; #else sde->sde_type = SMT_SDE_STATION ; #endif } /* * fill in values in station state parameter */ static void smt_fill_state(struct s_smc *smc, struct smt_p_state *state) { int top ; int twist ; SMTSETPARA(state,SMT_P_STATE) ; state->st_pad = 0 ; /* determine topology */ top = 0 ; if (smc->mib.fddiSMTPeerWrapFlag) { top |= SMT_ST_WRAPPED ; /* state wrapped */ } #ifdef CONCENTRATOR if (cfm_status_unattached(smc)) { top |= SMT_ST_UNATTACHED ; /* unattached concentrator */ } #endif if ((twist = pcm_status_twisted(smc)) & 1) { top |= SMT_ST_TWISTED_A ; /* twisted cable */ } if (twist & 2) { top |= SMT_ST_TWISTED_B ; /* twisted cable */ } #ifdef OPT_SRF top |= SMT_ST_SRF ; #endif if (pcm_rooted_station(smc)) top |= SMT_ST_ROOTED_S ; if (smc->mib.a[0].fddiPATHSbaPayload != 0) top |= SMT_ST_SYNC_SERVICE ; state->st_topology = top ; state->st_dupl_addr = ((smc->mib.m[MAC0].fddiMACDA_Flag ? SMT_ST_MY_DUPA : 0 ) | (smc->mib.m[MAC0].fddiMACUNDA_Flag ? 
SMT_ST_UNA_DUPA : 0)) ; } /* * fill values in timestamp parameter */ static void smt_fill_timestamp(struct s_smc *smc, struct smt_p_timestamp *ts) { SMTSETPARA(ts,SMT_P_TIMESTAMP) ; smt_set_timestamp(smc,ts->ts_time) ; } void smt_set_timestamp(struct s_smc *smc, u_char *p) { u_long time ; u_long utime ; /* * timestamp is 64 bits long ; resolution is 80 nS * our clock resolution is 10mS * 10mS/80ns = 125000 ~ 2^17 = 131072 */ utime = smt_get_time() ; time = utime * 100 ; time /= TICKS_PER_SECOND ; p[0] = 0 ; p[1] = (u_char)((time>>(8+8+8+8-1)) & 1) ; p[2] = (u_char)(time>>(8+8+8-1)) ; p[3] = (u_char)(time>>(8+8-1)) ; p[4] = (u_char)(time>>(8-1)) ; p[5] = (u_char)(time<<1) ; p[6] = (u_char)(smc->sm.uniq_ticks>>8) ; p[7] = (u_char)smc->sm.uniq_ticks ; /* * make sure we don't wrap: restart whenever the upper digits change */ if (utime != smc->sm.uniq_time) { smc->sm.uniq_ticks = 0 ; } smc->sm.uniq_ticks++ ; smc->sm.uniq_time = utime ; } /* * fill values in station policy parameter */ static void smt_fill_policy(struct s_smc *smc, struct smt_p_policy *policy) { int i ; const u_char *map ; u_short in ; u_short out ; /* * MIB para 101b (fddiSMTConnectionPolicy) coding * is different from 0005 coding */ static const u_char ansi_weirdness[16] = { 0,7,5,3,8,1,6,4,9,10,2,11,12,13,14,15 } ; SMTSETPARA(policy,SMT_P_POLICY) ; out = 0 ; in = smc->mib.fddiSMTConnectionPolicy ; for (i = 0, map = ansi_weirdness ; i < 16 ; i++) { if (in & 1) out |= (1<<*map) ; in >>= 1 ; map++ ; } policy->pl_config = smc->mib.fddiSMTConfigPolicy ; policy->pl_connect = out ; } /* * fill values in latency equivalent parameter */ static void smt_fill_latency(struct s_smc *smc, struct smt_p_latency *latency) { SMTSETPARA(latency,SMT_P_LATENCY) ; latency->lt_phyout_idx1 = phy_index(smc,0) ; latency->lt_latency1 = 10 ; /* in octets (byte clock) */ /* * note: latency has two phy entries by definition * for a SAS, the 2nd one is null */ if (smc->s.sas == SMT_DAS) { latency->lt_phyout_idx2 = phy_index(smc,1) 
; latency->lt_latency2 = 10 ; /* in octets (byte clock) */ } else { latency->lt_phyout_idx2 = 0 ; latency->lt_latency2 = 0 ; } } /* * fill values in MAC neighbors parameter */ static void smt_fill_neighbor(struct s_smc *smc, struct smt_p_neighbor *neighbor) { SMTSETPARA(neighbor,SMT_P_NEIGHBORS) ; neighbor->nb_mib_index = INDEX_MAC ; neighbor->nb_mac_index = mac_index(smc,1) ; neighbor->nb_una = smc->mib.m[MAC0].fddiMACUpstreamNbr ; neighbor->nb_dna = smc->mib.m[MAC0].fddiMACDownstreamNbr ; } /* * fill values in path descriptor */ #ifdef CONCENTRATOR #define ALLPHYS NUMPHYS #else #define ALLPHYS ((smc->s.sas == SMT_SAS) ? 1 : 2) #endif static int smt_fill_path(struct s_smc *smc, struct smt_p_path *path) { SK_LOC_DECL(int,type) ; SK_LOC_DECL(int,state) ; SK_LOC_DECL(int,remote) ; SK_LOC_DECL(int,mac) ; int len ; int p ; int physp ; struct smt_phy_rec *phy ; struct smt_mac_rec *pd_mac ; len = PARA_LEN + sizeof(struct smt_mac_rec) * NUMMACS + sizeof(struct smt_phy_rec) * ALLPHYS ; path->para.p_type = SMT_P_PATH ; path->para.p_len = len - PARA_LEN ; /* PHYs */ for (p = 0,phy = path->pd_phy ; p < ALLPHYS ; p++, phy++) { physp = p ; #ifndef CONCENTRATOR if (smc->s.sas == SMT_SAS) physp = PS ; #endif pcm_status_state(smc,physp,&type,&state,&remote,&mac) ; #ifdef LITTLE_ENDIAN phy->phy_mib_index = smt_swap_short((u_short)p+INDEX_PORT) ; #else phy->phy_mib_index = p+INDEX_PORT ; #endif phy->phy_type = type ; phy->phy_connect_state = state ; phy->phy_remote_type = remote ; phy->phy_remote_mac = mac ; phy->phy_resource_idx = phy_con_resource_index(smc,p) ; } /* MAC */ pd_mac = (struct smt_mac_rec *) phy ; pd_mac->mac_addr = smc->mib.m[MAC0].fddiMACSMTAddress ; pd_mac->mac_resource_idx = mac_con_resource_index(smc,1) ; return len; } /* * fill values in mac status */ static void smt_fill_mac_status(struct s_smc *smc, struct smt_p_mac_status *st) { SMTSETPARA(st,SMT_P_MAC_STATUS) ; st->st_mib_index = INDEX_MAC ; st->st_mac_index = mac_index(smc,1) ; mac_update_counter(smc) ; /* 
* timer values are represented in SMT as 2's complement numbers * units : internal : 2's complement BCLK */ st->st_t_req = smc->mib.m[MAC0].fddiMACT_Req ; st->st_t_neg = smc->mib.m[MAC0].fddiMACT_Neg ; st->st_t_max = smc->mib.m[MAC0].fddiMACT_Max ; st->st_tvx_value = smc->mib.m[MAC0].fddiMACTvxValue ; st->st_t_min = smc->mib.m[MAC0].fddiMACT_Min ; st->st_sba = smc->mib.a[PATH0].fddiPATHSbaPayload ; st->st_frame_ct = smc->mib.m[MAC0].fddiMACFrame_Ct ; st->st_error_ct = smc->mib.m[MAC0].fddiMACError_Ct ; st->st_lost_ct = smc->mib.m[MAC0].fddiMACLost_Ct ; } /* * fill values in LEM status */ static void smt_fill_lem(struct s_smc *smc, struct smt_p_lem *lem, int phy) { struct fddi_mib_p *mib ; mib = smc->y[phy].mib ; SMTSETPARA(lem,SMT_P_LEM) ; lem->lem_mib_index = phy+INDEX_PORT ; lem->lem_phy_index = phy_index(smc,phy) ; lem->lem_pad2 = 0 ; lem->lem_cutoff = mib->fddiPORTLer_Cutoff ; lem->lem_alarm = mib->fddiPORTLer_Alarm ; /* long term bit error rate */ lem->lem_estimate = mib->fddiPORTLer_Estimate ; /* # of rejected connections */ lem->lem_reject_ct = mib->fddiPORTLem_Reject_Ct ; lem->lem_ct = mib->fddiPORTLem_Ct ; /* total number of errors */ } /* * fill version parameter */ static void smt_fill_version(struct s_smc *smc, struct smt_p_version *vers) { SK_UNUSED(smc) ; SMTSETPARA(vers,SMT_P_VERSION) ; vers->v_pad = 0 ; vers->v_n = 1 ; /* one version is enough .. */ vers->v_index = 1 ; vers->v_version[0] = SMT_VID_2 ; vers->v_pad2 = 0 ; } #ifdef SMT6_10 /* * fill frame status capabilities */ /* * note: this para 200B is NOT in swap table, because it's also set in * PMF add_para */ static void smt_fill_fsc(struct s_smc *smc, struct smt_p_fsc *fsc) { SK_UNUSED(smc) ; SMTSETPARA(fsc,SMT_P_FSC) ; fsc->fsc_pad0 = 0 ; fsc->fsc_mac_index = INDEX_MAC ; /* this is MIB ; MIB is NOT * mac_index ()i ! 
*/ fsc->fsc_pad1 = 0 ; fsc->fsc_value = FSC_TYPE0 ; /* "normal" node */ #ifdef LITTLE_ENDIAN fsc->fsc_mac_index = smt_swap_short(INDEX_MAC) ; fsc->fsc_value = smt_swap_short(FSC_TYPE0) ; #endif } #endif /* * fill mac counter field */ static void smt_fill_mac_counter(struct s_smc *smc, struct smt_p_mac_counter *mc) { SMTSETPARA(mc,SMT_P_MAC_COUNTER) ; mc->mc_mib_index = INDEX_MAC ; mc->mc_index = mac_index(smc,1) ; mc->mc_receive_ct = smc->mib.m[MAC0].fddiMACCopied_Ct ; mc->mc_transmit_ct = smc->mib.m[MAC0].fddiMACTransmit_Ct ; } /* * fill mac frame not copied counter */ static void smt_fill_mac_fnc(struct s_smc *smc, struct smt_p_mac_fnc *fnc) { SMTSETPARA(fnc,SMT_P_MAC_FNC) ; fnc->nc_mib_index = INDEX_MAC ; fnc->nc_index = mac_index(smc,1) ; fnc->nc_counter = smc->mib.m[MAC0].fddiMACNotCopied_Ct ; } /* * fill manufacturer field */ static void smt_fill_manufacturer(struct s_smc *smc, struct smp_p_manufacturer *man) { SMTSETPARA(man,SMT_P_MANUFACTURER) ; memcpy((char *) man->mf_data, (char *) smc->mib.fddiSMTManufacturerData, sizeof(man->mf_data)) ; } /* * fill user field */ static void smt_fill_user(struct s_smc *smc, struct smp_p_user *user) { SMTSETPARA(user,SMT_P_USER) ; memcpy((char *) user->us_data, (char *) smc->mib.fddiSMTUserData, sizeof(user->us_data)) ; } /* * fill set count */ static void smt_fill_setcount(struct s_smc *smc, struct smt_p_setcount *setcount) { SK_UNUSED(smc) ; SMTSETPARA(setcount,SMT_P_SETCOUNT) ; setcount->count = smc->mib.fddiSMTSetCount.count ; memcpy((char *)setcount->timestamp, (char *)smc->mib.fddiSMTSetCount.timestamp,8) ; } /* * fill echo data */ static void smt_fill_echo(struct s_smc *smc, struct smt_p_echo *echo, u_long seed, int len) { u_char *p ; SK_UNUSED(smc) ; SMTSETPARA(echo,SMT_P_ECHODATA) ; echo->para.p_len = len ; for (p = echo->ec_data ; len ; len--) { *p++ = (u_char) seed ; seed += 13 ; } } /* * clear DNA and UNA * called from CFM if configuration changes */ static void smt_clear_una_dna(struct s_smc *smc) { 
smc->mib.m[MAC0].fddiMACUpstreamNbr = SMT_Unknown ; smc->mib.m[MAC0].fddiMACDownstreamNbr = SMT_Unknown ; } static void smt_clear_old_una_dna(struct s_smc *smc) { smc->mib.m[MAC0].fddiMACOldUpstreamNbr = SMT_Unknown ; smc->mib.m[MAC0].fddiMACOldDownstreamNbr = SMT_Unknown ; } u_long smt_get_tid(struct s_smc *smc) { u_long tid ; while ((tid = ++(smc->sm.smt_tid) ^ SMT_TID_MAGIC) == 0) ; return tid & 0x3fffffffL; } /* * table of parameter lengths */ static const struct smt_pdef { int ptype ; int plen ; const char *pswap ; } smt_pdef[] = { { SMT_P_UNA, sizeof(struct smt_p_una) , SWAP_SMT_P_UNA } , { SMT_P_SDE, sizeof(struct smt_p_sde) , SWAP_SMT_P_SDE } , { SMT_P_STATE, sizeof(struct smt_p_state) , SWAP_SMT_P_STATE } , { SMT_P_TIMESTAMP,sizeof(struct smt_p_timestamp) , SWAP_SMT_P_TIMESTAMP } , { SMT_P_POLICY, sizeof(struct smt_p_policy) , SWAP_SMT_P_POLICY } , { SMT_P_LATENCY, sizeof(struct smt_p_latency) , SWAP_SMT_P_LATENCY } , { SMT_P_NEIGHBORS,sizeof(struct smt_p_neighbor) , SWAP_SMT_P_NEIGHBORS } , { SMT_P_PATH, sizeof(struct smt_p_path) , SWAP_SMT_P_PATH } , { SMT_P_MAC_STATUS,sizeof(struct smt_p_mac_status) , SWAP_SMT_P_MAC_STATUS } , { SMT_P_LEM, sizeof(struct smt_p_lem) , SWAP_SMT_P_LEM } , { SMT_P_MAC_COUNTER,sizeof(struct smt_p_mac_counter) , SWAP_SMT_P_MAC_COUNTER } , { SMT_P_MAC_FNC,sizeof(struct smt_p_mac_fnc) , SWAP_SMT_P_MAC_FNC } , { SMT_P_PRIORITY,sizeof(struct smt_p_priority) , SWAP_SMT_P_PRIORITY } , { SMT_P_EB,sizeof(struct smt_p_eb) , SWAP_SMT_P_EB } , { SMT_P_MANUFACTURER,sizeof(struct smp_p_manufacturer) , SWAP_SMT_P_MANUFACTURER } , { SMT_P_REASON, sizeof(struct smt_p_reason) , SWAP_SMT_P_REASON } , { SMT_P_REFUSED, sizeof(struct smt_p_refused) , SWAP_SMT_P_REFUSED } , { SMT_P_VERSION, sizeof(struct smt_p_version) , SWAP_SMT_P_VERSION } , #ifdef ESS { SMT_P0015, sizeof(struct smt_p_0015) , SWAP_SMT_P0015 } , { SMT_P0016, sizeof(struct smt_p_0016) , SWAP_SMT_P0016 } , { SMT_P0017, sizeof(struct smt_p_0017) , SWAP_SMT_P0017 } , { SMT_P0018, 
sizeof(struct smt_p_0018) , SWAP_SMT_P0018 } , { SMT_P0019, sizeof(struct smt_p_0019) , SWAP_SMT_P0019 } , { SMT_P001A, sizeof(struct smt_p_001a) , SWAP_SMT_P001A } , { SMT_P001B, sizeof(struct smt_p_001b) , SWAP_SMT_P001B } , { SMT_P001C, sizeof(struct smt_p_001c) , SWAP_SMT_P001C } , { SMT_P001D, sizeof(struct smt_p_001d) , SWAP_SMT_P001D } , #endif #if 0 { SMT_P_FSC, sizeof(struct smt_p_fsc) , SWAP_SMT_P_FSC } , #endif { SMT_P_SETCOUNT,0, SWAP_SMT_P_SETCOUNT } , { SMT_P1048, 0, SWAP_SMT_P1048 } , { SMT_P208C, 0, SWAP_SMT_P208C } , { SMT_P208D, 0, SWAP_SMT_P208D } , { SMT_P208E, 0, SWAP_SMT_P208E } , { SMT_P208F, 0, SWAP_SMT_P208F } , { SMT_P2090, 0, SWAP_SMT_P2090 } , #ifdef ESS { SMT_P320B, sizeof(struct smt_p_320b) , SWAP_SMT_P320B } , { SMT_P320F, sizeof(struct smt_p_320f) , SWAP_SMT_P320F } , { SMT_P3210, sizeof(struct smt_p_3210) , SWAP_SMT_P3210 } , #endif { SMT_P4050, 0, SWAP_SMT_P4050 } , { SMT_P4051, 0, SWAP_SMT_P4051 } , { SMT_P4052, 0, SWAP_SMT_P4052 } , { SMT_P4053, 0, SWAP_SMT_P4053 } , } ; #define N_SMT_PLEN ARRAY_SIZE(smt_pdef) int smt_check_para(struct s_smc *smc, struct smt_header *sm, const u_short list[]) { const u_short *p = list ; while (*p) { if (!sm_to_para(smc,sm,(int) *p)) { DB_SMT("SMT: smt_check_para - missing para %x\n",*p,0); return -1; } p++ ; } return 0; } void *sm_to_para(struct s_smc *smc, struct smt_header *sm, int para) { char *p ; int len ; int plen ; void *found = NULL; SK_UNUSED(smc) ; len = sm->smt_len ; p = (char *)(sm+1) ; /* pointer to info */ while (len > 0 ) { if (((struct smt_para *)p)->p_type == para) found = (void *) p ; plen = ((struct smt_para *)p)->p_len + PARA_LEN ; p += plen ; len -= plen ; if (len < 0) { DB_SMT("SMT : sm_to_para - length error %d\n",plen,0) ; return NULL; } if ((plen & 3) && (para != SMT_P_ECHODATA)) { DB_SMT("SMT : sm_to_para - odd length %d\n",plen,0) ; return NULL; } if (found) return found; } return NULL; } #if 0 /* * send ANTC data test frame */ void fddi_send_antc(struct s_smc *smc, 
struct fddi_addr *dest) { SK_UNUSED(smc) ; SK_UNUSED(dest) ; #if 0 SMbuf *mb ; struct smt_header *smt ; int i ; char *p ; mb = smt_get_mbuf() ; mb->sm_len = 3000+12 ; p = smtod(mb, char *) + 12 ; for (i = 0 ; i < 3000 ; i++) *p++ = 1 << (i&7) ; smt = smtod(mb, struct smt_header *) ; smt->smt_dest = *dest ; smt->smt_source = smc->mib.m[MAC0].fddiMACSMTAddress ; smt_send_mbuf(smc,mb,FC_ASYNC_LLC) ; #endif } #endif #ifdef DEBUG char *addr_to_string(struct fddi_addr *addr) { int i ; static char string[6*3] = "****" ; for (i = 0 ; i < 6 ; i++) { string[i * 3] = hex_asc_hi(addr->a[i]); string[i * 3 + 1] = hex_asc_lo(addr->a[i]); string[i * 3 + 2] = ':'; } string[5 * 3 + 2] = 0; return string; } #endif #ifdef AM29K int smt_ifconfig(int argc, char *argv[]) { if (argc >= 2 && !strcmp(argv[0],"opt_bypass") && !strcmp(argv[1],"yes")) { smc->mib.fddiSMTBypassPresent = 1 ; return 0; } return amdfddi_config(0, argc, argv); } #endif /* * return static mac index */ static int mac_index(struct s_smc *smc, int mac) { SK_UNUSED(mac) ; #ifdef CONCENTRATOR SK_UNUSED(smc) ; return NUMPHYS + 1; #else return (smc->s.sas == SMT_SAS) ? 2 : 3; #endif } /* * return static phy index */ static int phy_index(struct s_smc *smc, int phy) { SK_UNUSED(smc) ; return phy + 1; } /* * return dynamic mac connection resource index */ static int mac_con_resource_index(struct s_smc *smc, int mac) { #ifdef CONCENTRATOR SK_UNUSED(smc) ; SK_UNUSED(mac) ; return entity_to_index(smc, cem_get_downstream(smc, ENTITY_MAC)); #else SK_UNUSED(mac) ; switch (smc->mib.fddiSMTCF_State) { case SC9_C_WRAP_A : case SC5_THRU_B : case SC11_C_WRAP_S : return 1; case SC10_C_WRAP_B : case SC4_THRU_A : return 2; } return smc->s.sas == SMT_SAS ? 
2 : 3; #endif } /* * return dynamic phy connection resource index */ static int phy_con_resource_index(struct s_smc *smc, int phy) { #ifdef CONCENTRATOR return entity_to_index(smc, cem_get_downstream(smc, ENTITY_PHY(phy))) ; #else switch (smc->mib.fddiSMTCF_State) { case SC9_C_WRAP_A : return phy == PA ? 3 : 2; case SC10_C_WRAP_B : return phy == PA ? 1 : 3; case SC4_THRU_A : return phy == PA ? 3 : 1; case SC5_THRU_B : return phy == PA ? 2 : 3; case SC11_C_WRAP_S : return 2; } return phy; #endif } #ifdef CONCENTRATOR static int entity_to_index(struct s_smc *smc, int e) { if (e == ENTITY_MAC) return mac_index(smc, 1); else return phy_index(smc, e - ENTITY_PHY(0)); } #endif #ifdef LITTLE_ENDIAN static int smt_swap_short(u_short s) { return ((s>>8)&0xff) | ((s&0xff)<<8); } void smt_swap_para(struct smt_header *sm, int len, int direction) /* int direction; 0 encode 1 decode */ { struct smt_para *pa ; const struct smt_pdef *pd ; char *p ; int plen ; int type ; int i ; /* printf("smt_swap_para sm %x len %d dir %d\n", sm,len,direction) ; */ smt_string_swap((char *)sm,SWAP_SMTHEADER,len) ; /* swap args */ len -= sizeof(struct smt_header) ; p = (char *) (sm + 1) ; while (len > 0) { pa = (struct smt_para *) p ; plen = pa->p_len ; type = pa->p_type ; pa->p_type = smt_swap_short(pa->p_type) ; pa->p_len = smt_swap_short(pa->p_len) ; if (direction) { plen = pa->p_len ; type = pa->p_type ; } /* * note: paras can have 0 length ! 
*/ if (plen < 0) break ; plen += PARA_LEN ; for (i = N_SMT_PLEN, pd = smt_pdef; i ; i--,pd++) { if (pd->ptype == type) break ; } if (i && pd->pswap) { smt_string_swap(p+PARA_LEN,pd->pswap,len) ; } len -= plen ; p += plen ; } } static void smt_string_swap(char *data, const char *format, int len) { const char *open_paren = NULL ; int x ; while (len > 0 && *format) { switch (*format) { case '[' : open_paren = format ; break ; case ']' : format = open_paren ; break ; case '1' : case '2' : case '3' : case '4' : case '5' : case '6' : case '7' : case '8' : case '9' : data += *format - '0' ; len -= *format - '0' ; break ; case 'c': data++ ; len-- ; break ; case 's' : x = data[0] ; data[0] = data[1] ; data[1] = x ; data += 2 ; len -= 2 ; break ; case 'l' : x = data[0] ; data[0] = data[3] ; data[3] = x ; x = data[1] ; data[1] = data[2] ; data[2] = x ; data += 4 ; len -= 4 ; break ; } format++ ; } } #else void smt_swap_para(struct smt_header *sm, int len, int direction) /* int direction; 0 encode 1 decode */ { SK_UNUSED(sm) ; SK_UNUSED(len) ; SK_UNUSED(direction) ; } #endif /* * PMF actions */ int smt_action(struct s_smc *smc, int class, int code, int index) { int event ; int port ; DB_SMT("SMT: action %d code %d\n",class,code) ; switch(class) { case SMT_STATION_ACTION : switch(code) { case SMT_STATION_ACTION_CONNECT : smc->mib.fddiSMTRemoteDisconnectFlag = FALSE ; queue_event(smc,EVENT_ECM,EC_CONNECT) ; break ; case SMT_STATION_ACTION_DISCONNECT : queue_event(smc,EVENT_ECM,EC_DISCONNECT) ; smc->mib.fddiSMTRemoteDisconnectFlag = TRUE ; RS_SET(smc,RS_DISCONNECT) ; AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long) FDDI_SMT_EVENT, (u_long) FDDI_REMOTE_DISCONNECT, smt_get_event_word(smc)); break ; case SMT_STATION_ACTION_PATHTEST : AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long) FDDI_SMT_EVENT, (u_long) FDDI_PATH_TEST, smt_get_event_word(smc)); break ; case SMT_STATION_ACTION_SELFTEST : AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long) FDDI_SMT_EVENT, (u_long) 
FDDI_REMOTE_SELF_TEST, smt_get_event_word(smc)); break ; case SMT_STATION_ACTION_DISABLE_A : if (smc->y[PA].pc_mode == PM_PEER) { RS_SET(smc,RS_EVENT) ; queue_event(smc,EVENT_PCM+PA,PC_DISABLE) ; } break ; case SMT_STATION_ACTION_DISABLE_B : if (smc->y[PB].pc_mode == PM_PEER) { RS_SET(smc,RS_EVENT) ; queue_event(smc,EVENT_PCM+PB,PC_DISABLE) ; } break ; case SMT_STATION_ACTION_DISABLE_M : for (port = 0 ; port < NUMPHYS ; port++) { if (smc->mib.p[port].fddiPORTMy_Type != TM) continue ; RS_SET(smc,RS_EVENT) ; queue_event(smc,EVENT_PCM+port,PC_DISABLE) ; } break ; default : return 1; } break ; case SMT_PORT_ACTION : switch(code) { case SMT_PORT_ACTION_ENABLE : event = PC_ENABLE ; break ; case SMT_PORT_ACTION_DISABLE : event = PC_DISABLE ; break ; case SMT_PORT_ACTION_MAINT : event = PC_MAINT ; break ; case SMT_PORT_ACTION_START : event = PC_START ; break ; case SMT_PORT_ACTION_STOP : event = PC_STOP ; break ; default : return 1; } queue_event(smc,EVENT_PCM+index,event) ; break ; default : return 1; } return 0; } /* * canonical conversion of <len> bytes beginning form *data */ #ifdef USE_CAN_ADDR static void hwm_conv_can(struct s_smc *smc, char *data, int len) { int i ; SK_UNUSED(smc) ; for (i = len; i ; i--, data++) *data = bitrev8(*data); } #endif #endif /* no SLIM_SMT */
gpl-2.0
GuoqingJiang/linux
drivers/staging/vt6656/int.c
144
4717
/*
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * File: int.c
 *
 * Purpose: Handle USB interrupt endpoint
 *
 * Author: Jerry Chen
 *
 * Date: Apr. 2, 2004
 */

#include "int.h"
#include "mac.h"
#include "power.h"
#include "usbpipe.h"

/*
 * Automatic-fallback rate tables, indexed as
 * [current rate - RATE_18M][number of retries, clamped to 4].
 * Table 0 falls back less aggressively than table 1.
 */
static const u8 fallback_rate0[5][5] = {
	{RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M},
	{RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M},
	{RATE_36M, RATE_36M, RATE_24M, RATE_18M, RATE_18M},
	{RATE_48M, RATE_48M, RATE_36M, RATE_24M, RATE_24M},
	{RATE_54M, RATE_54M, RATE_48M, RATE_36M, RATE_36M}
};

static const u8 fallback_rate1[5][5] = {
	{RATE_18M, RATE_18M, RATE_12M, RATE_6M,  RATE_6M},
	{RATE_24M, RATE_24M, RATE_18M, RATE_6M,  RATE_6M},
	{RATE_36M, RATE_36M, RATE_24M, RATE_12M, RATE_12M},
	{RATE_48M, RATE_48M, RATE_24M, RATE_12M, RATE_12M},
	{RATE_54M, RATE_54M, RATE_36M, RATE_18M, RATE_18M}
};

/*
 * vnt_int_start_interrupt - (re)submit the interrupt-in URB
 * @priv: device private data
 *
 * Submission is done under priv->lock.  Errors are not propagated here;
 * they surface through the URB completion path.
 *
 * Fix vs. previous revision: the old code stored the return value of
 * vnt_start_interrupt_urb() in a local "status" that was never read
 * (dead store, -Wunused-but-set-variable); the local is gone, the call
 * and locking are unchanged.
 */
void vnt_int_start_interrupt(struct vnt_private *priv)
{
	unsigned long flags;

	dev_dbg(&priv->usb->dev, "---->Interrupt Polling Thread\n");

	spin_lock_irqsave(&priv->lock, flags);

	vnt_start_interrupt_urb(priv);

	spin_unlock_irqrestore(&priv->lock, flags);
}

/*
 * vnt_int_report_rate - report TX status for one completed context
 * @priv:   device private data
 * @pkt_no: index into priv->tx_context[]
 * @tsr:    transmit status register byte for that packet
 *
 * Translates the hardware TSR into mac80211 TX status (retry count,
 * final rate index after automatic fallback, ACK flag), hands the skb
 * back via ieee80211_tx_status_irqsafe() and releases the context.
 *
 * Return: 0 on success, -EINVAL for an out-of-range index or a context
 * with no skb attached.
 */
static int vnt_int_report_rate(struct vnt_private *priv, u8 pkt_no, u8 tsr)
{
	struct vnt_usb_send_context *context;
	struct ieee80211_tx_info *info;
	struct ieee80211_rate *rate;
	u8 tx_retry = (tsr & 0xf0) >> 4;	/* retry count in TSR bits 7:4 */
	s8 idx;

	if (pkt_no >= priv->num_tx_context)
		return -EINVAL;

	context = priv->tx_context[pkt_no];

	if (!context->skb)
		return -EINVAL;

	info = IEEE80211_SKB_CB(context->skb);
	idx = info->control.rates[0].idx;

	/* Apply the fallback table only when the frame did not time out. */
	if (context->fb_option && !(tsr & (TSR_TMO | TSR_RETRYTMO))) {
		u8 tx_rate;
		u8 retry = tx_retry;

		rate = ieee80211_get_tx_rate(priv->hw, info);
		tx_rate = rate->hw_value - RATE_18M;

		if (retry > 4)
			retry = 4;	/* tables only cover 0..4 retries */

		if (context->fb_option == AUTO_FB_0)
			tx_rate = fallback_rate0[tx_rate][retry];
		else if (context->fb_option == AUTO_FB_1)
			tx_rate = fallback_rate1[tx_rate][retry];

		/* 5 GHz rate indexing starts at 6M; 2.4 GHz at index 0. */
		if (info->band == IEEE80211_BAND_5GHZ)
			idx = tx_rate - RATE_6M;
		else
			idx = tx_rate;
	}

	ieee80211_tx_info_clear_status(info);

	info->status.rates[0].count = tx_retry;

	if (!(tsr & (TSR_TMO | TSR_RETRYTMO))) {
		info->status.rates[0].idx = idx;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	ieee80211_tx_status_irqsafe(priv->hw, context->skb);

	context->in_use = false;

	return 0;
}

/*
 * vnt_int_process_data - decode one interrupt-endpoint payload
 * @priv: device private data
 *
 * Reports TX status for up to four packets (TSR0..TSR3), and on ISR0
 * events: schedules beacon transmit in AP mode, drives the power-save
 * wake-up countdown on TBTT, updates the TSF timestamp and the
 * low-level RTS/ACK/FCS statistics.  Finally marks the interrupt
 * buffer free for resubmission.
 */
void vnt_int_process_data(struct vnt_private *priv)
{
	struct vnt_interrupt_data *int_data;
	struct ieee80211_low_level_stats *low_stats = &priv->low_stats;

	dev_dbg(&priv->usb->dev, "---->s_nsInterruptProcessData\n");

	int_data = (struct vnt_interrupt_data *)priv->int_buf.data_buf;

	if (int_data->tsr0 & TSR_VALID)
		vnt_int_report_rate(priv, int_data->pkt0, int_data->tsr0);

	if (int_data->tsr1 & TSR_VALID)
		vnt_int_report_rate(priv, int_data->pkt1, int_data->tsr1);

	if (int_data->tsr2 & TSR_VALID)
		vnt_int_report_rate(priv, int_data->pkt2, int_data->tsr2);

	if (int_data->tsr3 & TSR_VALID)
		vnt_int_report_rate(priv, int_data->pkt3, int_data->tsr3);

	if (int_data->isr0 != 0) {
		if (int_data->isr0 & ISR_BNTX &&
		    priv->op_mode == NL80211_IFTYPE_AP)
			vnt_schedule_command(priv, WLAN_CMD_BECON_SEND);

		if (int_data->isr0 & ISR_TBTT &&
		    priv->hw->conf.flags & IEEE80211_CONF_PS) {
			if (!priv->wake_up_count)
				priv->wake_up_count =
					priv->hw->conf.listen_interval;

			--priv->wake_up_count;

			/* Turn on wake up to listen next beacon */
			if (priv->wake_up_count == 1)
				vnt_schedule_command(priv,
						     WLAN_CMD_TBTT_WAKEUP);
		}

		priv->current_tsf = le64_to_cpu(int_data->tsf);

		low_stats->dot11RTSSuccessCount += int_data->rts_success;
		low_stats->dot11RTSFailureCount += int_data->rts_fail;
		low_stats->dot11ACKFailureCount += int_data->ack_fail;
		low_stats->dot11FCSErrorCount += int_data->fcs_err;
	}

	priv->int_buf.in_use = false;
}
gpl-2.0
vfalico/popcorn
arch/sh/kernel/io_trapped.c
656
6568
/*
 * Trapped io support
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * Intercept io operations by trapping.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/init.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/io_trapped.h>

/* Upper bound on the number of pages one trapped region may cover. */
#define TRAPPED_PAGES_MAX 16

#ifdef CONFIG_HAS_IOPORT
LIST_HEAD(trapped_io);		/* registered regions backed by port I/O */
EXPORT_SYMBOL_GPL(trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
LIST_HEAD(trapped_mem);		/* registered regions backed by memory-mapped I/O */
EXPORT_SYMBOL_GPL(trapped_mem);
#endif
/* Protects additions to the two lists above. */
static DEFINE_SPINLOCK(trapped_lock);

static int trapped_io_disable __read_mostly;

/* "noiotrap" on the kernel command line disables the whole facility. */
static int __init trapped_io_setup(char *__unused)
{
	trapped_io_disable = 1;
	return 1;
}
__setup("noiotrap", trapped_io_setup);

/*
 * register_trapped_io - install a trapped-io filter for a device.
 * @tiop: page-aligned descriptor listing the resources to intercept.
 *
 * Builds a vmapped, non-present (PAGE_NONE) virtual alias covering the
 * total (page-rounded) size of all resources, so that any access through
 * tiop->virt_base faults and is emulated by handle_trapped_io().
 *
 * Returns 0 on success (also when the facility is disabled), -1 on error.
 */
int register_trapped_io(struct trapped_io *tiop)
{
	struct resource *res;
	unsigned long len = 0, flags = 0;
	struct page *pages[TRAPPED_PAGES_MAX];
	int k, n;

	if (unlikely(trapped_io_disable))
		return 0;

	/* structure must be page aligned */
	if ((unsigned long)tiop & (PAGE_SIZE - 1))
		goto bad;

	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		len += roundup(resource_size(res), PAGE_SIZE);
		flags |= res->flags;
	}

	/* support IORESOURCE_IO _or_ MEM, not both */
	if (hweight_long(flags) != 1)
		goto bad;

	n = len >> PAGE_SHIFT;
	if (n >= TRAPPED_PAGES_MAX)
		goto bad;

	/*
	 * Every slot aliases the page holding the descriptor itself; the
	 * mapping is PAGE_NONE so the backing page is never actually read,
	 * only used to make vmap() produce a faulting virtual range whose
	 * PTEs let lookup_tiop() recover the descriptor from the pfn.
	 */
	for (k = 0; k < n; k++)
		pages[k] = virt_to_page(tiop);

	tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);
	if (!tiop->virt_base)
		goto bad;

	len = 0;
	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n",
			(unsigned long)(tiop->virt_base + len),
			res->flags & IORESOURCE_IO ? "io" : "mmio",
			(unsigned long)res->start);
		len += roundup(resource_size(res), PAGE_SIZE);
	}

	tiop->magic = IO_TRAPPED_MAGIC;
	INIT_LIST_HEAD(&tiop->list);
	spin_lock_irq(&trapped_lock);
#ifdef CONFIG_HAS_IOPORT
	if (flags & IORESOURCE_IO)
		list_add(&tiop->list, &trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
	if (flags & IORESOURCE_MEM)
		list_add(&tiop->list, &trapped_mem);
#endif
	spin_unlock_irq(&trapped_lock);
	return 0;
 bad:
	pr_warning("unable to install trapped io filter\n");
	return -1;
}
EXPORT_SYMBOL_GPL(register_trapped_io);

/*
 * match_trapped_io_handler - find the trapped virtual alias for a resource.
 * @list: &trapped_io or &trapped_mem.
 * @offset: physical/port start address to match (must equal res->start).
 * @size: unused by the match itself (kept for the caller's contract).
 *
 * Returns the virtual address inside a registered region whose resource
 * starts exactly at @offset, or NULL when no region matches.
 */
void __iomem *match_trapped_io_handler(struct list_head *list,
				       unsigned long offset,
				       unsigned long size)
{
	unsigned long voffs;
	struct trapped_io *tiop;
	struct resource *res;
	int k, len;
	unsigned long flags;

	spin_lock_irqsave(&trapped_lock, flags);
	list_for_each_entry(tiop, list, list) {
		/* walk the resources, accumulating the page-rounded offset */
		voffs = 0;
		for (k = 0; k < tiop->num_resources; k++) {
			res = tiop->resource + k;
			if (res->start == offset) {
				spin_unlock_irqrestore(&trapped_lock, flags);
				return tiop->virt_base + voffs;
			}

			len = resource_size(res);
			voffs += roundup(len, PAGE_SIZE);
		}
	}
	spin_unlock_irqrestore(&trapped_lock, flags);
	return NULL;
}
EXPORT_SYMBOL_GPL(match_trapped_io_handler);

/*
 * lookup_tiop - recover the trapped_io descriptor for a faulting address.
 *
 * Walks swapper_pg_dir down to the PTE for @address; since every page of
 * a trapped region was vmapped onto the descriptor's own page, the pfn in
 * that PTE converts straight back to the descriptor's kernel address.
 * Returns NULL when any level of the page table is not present.
 */
static struct trapped_io *lookup_tiop(unsigned long address)
{
	pgd_t *pgd_k;
	pud_t *pud_k;
	pmd_t *pmd_k;
	pte_t *pte_k;
	pte_t entry;

	pgd_k = swapper_pg_dir + pgd_index(address);
	if (!pgd_present(*pgd_k))
		return NULL;

	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	pte_k = pte_offset_kernel(pmd_k, address);
	entry = *pte_k;

	return pfn_to_kaddr(pte_pfn(entry));
}

/*
 * lookup_address - translate a trapped virtual address to the real
 * resource address it shadows, by walking the page-rounded resource
 * layout that register_trapped_io() printed. Returns 0 when @address
 * falls past the last resource.
 */
static unsigned long lookup_address(struct trapped_io *tiop,
				    unsigned long address)
{
	struct resource *res;
	unsigned long vaddr = (unsigned long)tiop->virt_base;
	unsigned long len;
	int k;

	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		len = roundup(resource_size(res), PAGE_SIZE);
		if (address < (vaddr + len))
			return res->start + (address - vaddr);
		vaddr += len;
	}
	return 0;
}

/*
 * copy_word - one device access of src_len bytes re-emitted as a dst_len
 * write, using the raw MMIO accessors so the bus sees exactly one
 * transaction of each width. Unsupported widths silently transfer 0.
 */
static unsigned long long copy_word(unsigned long src_addr, int src_len,
				    unsigned long dst_addr, int dst_len)
{
	unsigned long long tmp = 0;

	switch (src_len) {
	case 1:
		tmp = __raw_readb(src_addr);
		break;
	case 2:
		tmp = __raw_readw(src_addr);
		break;
	case 4:
		tmp = __raw_readl(src_addr);
		break;
	case 8:
		tmp = __raw_readq(src_addr);
		break;
	}

	switch (dst_len) {
	case 1:
		__raw_writeb(tmp, dst_addr);
		break;
	case 2:
		__raw_writew(tmp, dst_addr);
		break;
	case 4:
		__raw_writel(tmp, dst_addr);
		break;
	case 8:
		__raw_writeq(tmp, dst_addr);
		break;
	}

	return tmp;
}

/*
 * from_device - mem_access read callback: emulate a @cnt-byte read from a
 * trapped device address into @dst. The device-side access is widened to
 * the device's minimum bus width when @cnt is narrower.
 * Returns 0 on success, @cnt (bytes not copied) when the address does not
 * map into any resource.
 */
static unsigned long from_device(void *dst, const void *src, unsigned long cnt)
{
	struct trapped_io *tiop;
	unsigned long src_addr = (unsigned long)src;
	unsigned long long tmp;

	pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt);
	tiop = lookup_tiop(src_addr);
	WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));

	src_addr = lookup_address(tiop, src_addr);
	if (!src_addr)
		return cnt;

	tmp = copy_word(src_addr,
			max_t(unsigned long, cnt,
			      (tiop->minimum_bus_width / 8)),
			(unsigned long)dst, cnt);

	pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp);
	return 0;
}

/*
 * to_device - mem_access write callback: emulate a @cnt-byte write of
 * @src to a trapped device address, widening the device-side access to
 * the minimum bus width. Same return contract as from_device().
 */
static unsigned long to_device(void *dst, const void *src, unsigned long cnt)
{
	struct trapped_io *tiop;
	unsigned long dst_addr = (unsigned long)dst;
	unsigned long long tmp;

	pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr, cnt);
	tiop = lookup_tiop(dst_addr);
	WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));

	dst_addr = lookup_address(tiop, dst_addr);
	if (!dst_addr)
		return cnt;

	tmp = copy_word((unsigned long)src, cnt,
			dst_addr,
			max_t(unsigned long, cnt,
			      (tiop->minimum_bus_width / 8)));

	pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp);
	return 0;
}

static struct mem_access trapped_io_access = {
	from_device,
	to_device,
};

/*
 * handle_trapped_io - fault-path entry point: emulate the instruction at
 * regs->pc that faulted on a trapped address.
 *
 * Fetches the kernel-mode instruction (under KERNEL_DS, since pc is a
 * kernel address) and hands it to the unaligned-access emulator with the
 * trapped_io_access callbacks, which perform the real device access.
 * Returns 1 when the access was emulated, 0 when the fault is not ours.
 */
int handle_trapped_io(struct pt_regs *regs, unsigned long address)
{
	mm_segment_t oldfs;
	insn_size_t instruction;
	int tmp;

	if (trapped_io_disable)
		return 0;
	if (!lookup_tiop(address))
		return 0;

	WARN_ON(user_mode(regs));

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	if (copy_from_user(&instruction, (void *)(regs->pc),
			   sizeof(instruction))) {
		set_fs(oldfs);
		return 0;
	}

	tmp = handle_unaligned_access(instruction, regs,
				      &trapped_io_access, 1, address);
	set_fs(oldfs);
	return tmp == 0;
}
gpl-2.0
mfornero/linux
sound/pci/vx222/vx222_ops.c
1168
35778
/*
 * Driver for Digigram VX222 V2/Mic soundcards
 *
 * VX222-specific low-level routines
 *
 * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include "vx222.h"

/* Per-register byte offset inside the chosen I/O port region. */
static int vx2_reg_offset[VX_REG_MAX] = {
	[VX_ICR]    = 0x00,
	[VX_CVR]    = 0x04,
	[VX_ISR]    = 0x08,
	[VX_IVR]    = 0x0c,
	[VX_RXH]    = 0x14,
	[VX_RXM]    = 0x18,
	[VX_RXL]    = 0x1c,
	[VX_DMA]    = 0x10,
	[VX_CDSP]   = 0x20,
	[VX_CFG]    = 0x24,
	[VX_RUER]   = 0x28,
	[VX_DATA]   = 0x2c,
	[VX_STATUS] = 0x30,
	[VX_LOFREQ] = 0x34,
	[VX_HIFREQ] = 0x38,
	[VX_CSUER]  = 0x3c,
	[VX_SELMIC] = 0x40,
	[VX_COMPOT] = 0x44, /* Write: POTENTIOMETER; Read: COMPRESSION LEVEL activate */
	[VX_SCOMPR] = 0x48, /* Read: COMPRESSION THRESHOLD activate */
	[VX_GLIMIT] = 0x4c, /* Read: LEVEL LIMITATION activate */
	[VX_INTCSR] = 0x4c, /* VX_INTCSR_REGISTER_OFFSET */
	[VX_CNTRL]  = 0x50, /* VX_CNTRL_REGISTER_OFFSET */
	[VX_GPIOC]  = 0x54, /* VX_GPIOC (new with PLX9030) */
};

/* Which BAR (chip->port[] index) each register lives in: 1 = DSP, 0 = PLX. */
static int vx2_reg_index[VX_REG_MAX] = {
	[VX_ICR]    = 1,
	[VX_CVR]    = 1,
	[VX_ISR]    = 1,
	[VX_IVR]    = 1,
	[VX_RXH]    = 1,
	[VX_RXM]    = 1,
	[VX_RXL]    = 1,
	[VX_DMA]    = 1,
	[VX_CDSP]   = 1,
	[VX_CFG]    = 1,
	[VX_RUER]   = 1,
	[VX_DATA]   = 1,
	[VX_STATUS] = 1,
	[VX_LOFREQ] = 1,
	[VX_HIFREQ] = 1,
	[VX_CSUER]  = 1,
	[VX_SELMIC] = 1,
	[VX_COMPOT] = 1,
	[VX_SCOMPR] = 1,
	[VX_GLIMIT] = 1,
	[VX_INTCSR] = 0, /* on the PLX */
	[VX_CNTRL]  = 0, /* on the PLX */
	[VX_GPIOC]  = 0, /* on the PLX */
};

/* Resolve a register enum to its absolute I/O port address. */
static inline unsigned long vx2_reg_addr(struct vx_core *_chip, int reg)
{
	struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
	return chip->port[vx2_reg_index[reg]] + vx2_reg_offset[reg];
}

/**
 * vx2_inb - read a byte from the register
 * @chip: VX core instance
 * @offset: register enum
 */
static unsigned char vx2_inb(struct vx_core *chip, int offset)
{
	return inb(vx2_reg_addr(chip, offset));
}

/**
 * vx2_outb - write a byte on the register
 * @chip: VX core instance
 * @offset: the register offset
 * @val: the value to write
 */
static void vx2_outb(struct vx_core *chip, int offset, unsigned char val)
{
	outb(val, vx2_reg_addr(chip, offset));
	/*
	dev_dbg(chip->card->dev, "outb: %x -> %x\n", val, vx2_reg_addr(chip, offset));
	*/
}

/**
 * vx2_inl - read a 32bit word from the register
 * @chip: VX core instance
 * @offset: register enum
 */
static unsigned int vx2_inl(struct vx_core *chip, int offset)
{
	return inl(vx2_reg_addr(chip, offset));
}

/**
 * vx2_outl - write a 32bit word on the register
 * @chip: VX core instance
 * @offset: the register enum
 * @val: the value to write
 */
static void vx2_outl(struct vx_core *chip, int offset, unsigned int val)
{
	/*
	dev_dbg(chip->card->dev, "outl: %x -> %x\n", val, vx2_reg_addr(chip, offset));
	*/
	outl(val, vx2_reg_addr(chip, offset));
}

/*
 * Redefine the generic vx_* register macros so this file calls the local
 * accessors directly, bypassing the indirect ops table.
 */
#undef vx_inb
#define vx_inb(chip,reg)	vx2_inb((struct vx_core*)(chip), VX_##reg)
#undef vx_outb
#define vx_outb(chip,reg,val)	vx2_outb((struct vx_core*)(chip), VX_##reg, val)
#undef vx_inl
#define vx_inl(chip,reg)	vx2_inl((struct vx_core*)(chip), VX_##reg)
#undef vx_outl
#define vx_outl(chip,reg,val)	vx2_outl((struct vx_core*)(chip), VX_##reg, val)

/*
 * vx_reset_dsp - reset the DSP
 */

#define XX_DSP_RESET_WAIT_TIME		2	/* ms */

/* Pulse the DSP reset bit low for XX_DSP_RESET_WAIT_TIME, then raise it. */
static void vx2_reset_dsp(struct vx_core *_chip)
{
	struct snd_vx222 *chip = (struct snd_vx222 *)_chip;

	/* set the reset dsp bit to 0 */
	vx_outl(chip, CDSP, chip->regCDSP & ~VX_CDSP_DSP_RESET_MASK);

	mdelay(XX_DSP_RESET_WAIT_TIME);

	chip->regCDSP |= VX_CDSP_DSP_RESET_MASK;
	/* set the reset dsp bit to 1 */
	vx_outl(chip, CDSP, chip->regCDSP);
}

/*
 * Verify that the xilinx image was loaded correctly by toggling the
 * CDSP TEST0/TEST1 bits and checking that STATUS reflects them inverted.
 * TEST1 is only implemented on the old VX_TYPE_BOARD.
 * Returns 0 when the xilinx responds, -ENODEV otherwise.
 */
static int vx2_test_xilinx(struct vx_core *_chip)
{
	struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
	unsigned int data;

	dev_dbg(_chip->card->dev, "testing xilinx...\n");
	/* This test uses several write/read sequences on TEST0 and TEST1 bits
	 * to figure out whever or not the xilinx was correctly loaded
	 */

	/* We write 1 on CDSP.TEST0. We should get 0 on STATUS.TEST0. */
	vx_outl(chip, CDSP, chip->regCDSP | VX_CDSP_TEST0_MASK);
	vx_inl(chip, ISR);	/* dummy read to let the write settle */
	data = vx_inl(chip, STATUS);
	if ((data & VX_STATUS_VAL_TEST0_MASK) == VX_STATUS_VAL_TEST0_MASK) {
		dev_dbg(_chip->card->dev, "bad!\n");
		return -ENODEV;
	}

	/* We write 0 on CDSP.TEST0. We should get 1 on STATUS.TEST0. */
	vx_outl(chip, CDSP, chip->regCDSP & ~VX_CDSP_TEST0_MASK);
	vx_inl(chip, ISR);
	data = vx_inl(chip, STATUS);
	if (! (data & VX_STATUS_VAL_TEST0_MASK)) {
		dev_dbg(_chip->card->dev, "bad! #2\n");
		return -ENODEV;
	}

	if (_chip->type == VX_TYPE_BOARD) {
		/* not implemented on VX_2_BOARDS */
		/* We write 1 on CDSP.TEST1. We should get 0 on STATUS.TEST1. */
		vx_outl(chip, CDSP, chip->regCDSP | VX_CDSP_TEST1_MASK);
		vx_inl(chip, ISR);
		data = vx_inl(chip, STATUS);
		if ((data & VX_STATUS_VAL_TEST1_MASK) == VX_STATUS_VAL_TEST1_MASK) {
			dev_dbg(_chip->card->dev, "bad! #3\n");
			return -ENODEV;
		}

		/* We write 0 on CDSP.TEST1. We should get 1 on STATUS.TEST1. */
		vx_outl(chip, CDSP, chip->regCDSP & ~VX_CDSP_TEST1_MASK);
		vx_inl(chip, ISR);
		data = vx_inl(chip, STATUS);
		if (! (data & VX_STATUS_VAL_TEST1_MASK)) {
			dev_dbg(_chip->card->dev, "bad! #4\n");
			return -ENODEV;
		}
	}
	dev_dbg(_chip->card->dev, "ok, xilinx fine.\n");
	return 0;
}

/**
 * vx2_setup_pseudo_dma - set up the pseudo dma read/write mode.
 * @chip: VX core instance
 * @do_write: 0 = read, 1 = set up for DMA write
 */
static void vx2_setup_pseudo_dma(struct vx_core *chip, int do_write)
{
	/* Interrupt mode and HREQ pin enabled for host transmit data transfers
	 * (in case of the use of the pseudo-dma facility).
	 */
	vx_outl(chip, ICR, do_write ? ICR_TREQ : ICR_RREQ);

	/* Reset the pseudo-dma register (in case of the use of the
	 * pseudo-dma facility).
	 */
	vx_outl(chip, RESET_DMA, 0);
}

/*
 * vx_release_pseudo_dma - disable the pseudo-DMA mode
 */
static inline void vx2_release_pseudo_dma(struct vx_core *chip)
{
	/* HREQ pin disabled. */
	vx_outl(chip, ICR, 0);
}

/*
 * Pseudo-dma write: push @count bytes (multiple of 4) of the pipe's ring
 * buffer to the DMA port with outl, handling one wrap at the buffer end.
 */
static void vx2_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime,
			  struct vx_pipe *pipe, int count)
{
	unsigned long port = vx2_reg_addr(chip, VX_DMA);
	int offset = pipe->hw_ptr;
	u32 *addr = (u32 *)(runtime->dma_area + offset);

	if (snd_BUG_ON(count % 4))
		return;

	vx2_setup_pseudo_dma(chip, 1);

	/* Transfer using pseudo-dma. */
	if (offset + count > pipe->buffer_bytes) {
		/* wrap: first flush the tail of the ring buffer */
		int length = pipe->buffer_bytes - offset;
		count -= length;
		length >>= 2; /* in 32bit words */
		/* Transfer using pseudo-dma. */
		while (length-- > 0) {
			outl(cpu_to_le32(*addr), port);
			addr++;
		}
		addr = (u32 *)runtime->dma_area;
		pipe->hw_ptr = 0;
	}
	pipe->hw_ptr += count;
	count >>= 2; /* in 32bit words */
	/* Transfer using pseudo-dma. */
	while (count-- > 0) {
		outl(cpu_to_le32(*addr), port);
		addr++;
	}

	vx2_release_pseudo_dma(chip);
}

/*
 * Pseudo-dma read: pull @count bytes (multiple of 4) from the DMA port
 * into the pipe's ring buffer with inl, handling one wrap.
 */
static void vx2_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime,
			 struct vx_pipe *pipe, int count)
{
	int offset = pipe->hw_ptr;
	u32 *addr = (u32 *)(runtime->dma_area + offset);
	unsigned long port = vx2_reg_addr(chip, VX_DMA);

	if (snd_BUG_ON(count % 4))
		return;

	vx2_setup_pseudo_dma(chip, 0);
	/* Transfer using pseudo-dma. */
	if (offset + count > pipe->buffer_bytes) {
		/* wrap: first fill up to the end of the ring buffer */
		int length = pipe->buffer_bytes - offset;
		count -= length;
		length >>= 2; /* in 32bit words */
		/* Transfer using pseudo-dma. */
		while (length-- > 0)
			*addr++ = le32_to_cpu(inl(port));
		addr = (u32 *)runtime->dma_area;
		pipe->hw_ptr = 0;
	}
	pipe->hw_ptr += count;
	count >>= 2; /* in 32bit words */
	/* Transfer using pseudo-dma. */
	while (count-- > 0)
		*addr++ = le32_to_cpu(inl(port));

	vx2_release_pseudo_dma(chip);
}

/* Bit masks in the PLX CNTRL/GPIOC registers used for xilinx loading. */
#define VX_XILINX_RESET_MASK	0x40000000
#define VX_USERBIT0_MASK	0x00000004	/* serial clock line */
#define VX_USERBIT1_MASK	0x00000020	/* serial data line */
#define VX_CNTRL_REGISTER_VALUE	0x00172012

/*
 * transfer counts bits to PLX
 *
 * Bit-bang @counts bits of @data (LSB first) into the xilinx through the
 * PLX user bits: data is set up while the clock bit is low, then the
 * clock bit is raised. Dummy reads flush each posted write.
 */
static int put_xilinx_data(struct vx_core *chip, unsigned int port,
			   unsigned int counts, unsigned char data)
{
	unsigned int i;

	for (i = 0; i < counts; i++) {
		unsigned int val;

		/* set the clock bit to 0. */
		val = VX_CNTRL_REGISTER_VALUE & ~VX_USERBIT0_MASK;
		vx2_outl(chip, port, val);
		vx2_inl(chip, port);
		udelay(1);

		if (data & (1 << i))
			val |= VX_USERBIT1_MASK;
		else
			val &= ~VX_USERBIT1_MASK;
		vx2_outl(chip, port, val);
		vx2_inl(chip, port);

		/* set the clock bit to 1. */
		val |= VX_USERBIT0_MASK;
		vx2_outl(chip, port, val);
		vx2_inl(chip, port);
		udelay(1);
	}
	return 0;
}

/*
 * load the xilinx image
 *
 * Pulses the xilinx reset, streams the firmware bit by bit through the
 * CNTRL (old board) or GPIOC (V2/Mic with PLX9030) register, appends a
 * 4-bit end signature, and on V2/Mic verifies GPIOC bit 8 as the
 * load-success flag. Returns 0 on success, -EINVAL on failure.
 */
static int vx2_load_xilinx_binary(struct vx_core *chip, const struct firmware *xilinx)
{
	unsigned int i;
	unsigned int port;
	const unsigned char *image;

	/* XILINX reset (wait at least 1 millisecond between reset on and off). */
	vx_outl(chip, CNTRL, VX_CNTRL_REGISTER_VALUE | VX_XILINX_RESET_MASK);
	vx_inl(chip, CNTRL);
	msleep(10);
	vx_outl(chip, CNTRL, VX_CNTRL_REGISTER_VALUE);
	vx_inl(chip, CNTRL);
	msleep(10);

	if (chip->type == VX_TYPE_BOARD)
		port = VX_CNTRL;
	else
		port = VX_GPIOC; /* VX222 V2 and VX222_MIC_BOARD with new PLX9030 use this register */

	image = xilinx->data;
	for (i = 0; i < xilinx->size; i++, image++) {
		if (put_xilinx_data(chip, port, 8, *image) < 0)
			return -EINVAL;
		/* don't take too much time in this loop... */
		cond_resched();
	}
	put_xilinx_data(chip, port, 4, 0xff); /* end signature */

	msleep(200);

	/* test after loading (is buggy with VX222) */
	if (chip->type != VX_TYPE_BOARD) {
		/* Test if load successful: test bit 8 of register GPIOC (VX222: use CNTRL) ! */
		i = vx_inl(chip, GPIOC);
		if (i & 0x0100)
			return 0;
		dev_err(chip->card->dev,
			"xilinx test failed after load, GPIOC=0x%x\n", i);
		return -EINVAL;
	}

	return 0;
}

/*
 * load the boot/dsp images
 *
 * Firmware stage dispatcher for the VX core: index 1 = xilinx image
 * (load + self-test), 2 = DSP boot code, 3 = DSP main image.
 */
static int vx2_load_dsp(struct vx_core *vx, int index, const struct firmware *dsp)
{
	int err;

	switch (index) {
	case 1:
		/* xilinx image */
		if ((err = vx2_load_xilinx_binary(vx, dsp)) < 0)
			return err;
		if ((err = vx2_test_xilinx(vx)) < 0)
			return err;
		return 0;
	case 2:
		/* DSP boot */
		return snd_vx_dsp_boot(vx, dsp);
	case 3:
		/* DSP image */
		return snd_vx_dsp_load(vx, dsp);
	default:
		snd_BUG();
		return -EINVAL;
	}
}

/*
 * vx_test_and_ack - test and acknowledge interrupt
 *
 * called from irq handler, too
 *
 * spinlock held!
 *
 * Returns -ENXIO before the xilinx is loaded, -EIO when the interrupt is
 * not ours, 0 after acknowledging via an ACQUIT pulse on STATUS.
 */
static int vx2_test_and_ack(struct vx_core *chip)
{
	/* not booted yet? */
	if (! (chip->chip_status & VX_STAT_XILINX_LOADED))
		return -ENXIO;

	if (! (vx_inl(chip, STATUS) & VX_STATUS_MEMIRQ_MASK))
		return -EIO;

	/* ok, interrupts generated, now ack it */
	/* set ACQUIT bit up and down */
	vx_outl(chip, STATUS, 0);
	/* useless read just to spend some time and maintain
	 * the ACQUIT signal up for a while ( a bus cycle )
	 */
	vx_inl(chip, STATUS);
	/* ack */
	vx_outl(chip, STATUS, VX_STATUS_MEMIRQ_MASK);
	/* useless read just to spend some time and maintain
	 * the ACQUIT signal up for a while ( a bus cycle ) */
	vx_inl(chip, STATUS);
	/* clear */
	vx_outl(chip, STATUS, 0);

	return 0;
}

/*
 * vx_validate_irq - enable/disable IRQ
 *
 * Toggles both the PCI-side interrupt enable (PLX INTCSR) and the
 * DSP-side valid-IRQ bit in the cached CDSP register.
 */
static void vx2_validate_irq(struct vx_core *_chip, int enable)
{
	struct snd_vx222 *chip = (struct snd_vx222 *)_chip;

	/* Set the interrupt enable bit to 1 in CDSP register */
	if (enable) {
		/* Set the PCI interrupt enable bit to 1.*/
		vx_outl(chip, INTCSR, VX_INTCSR_VALUE|VX_PCI_INTERRUPT_MASK);
		chip->regCDSP |= VX_CDSP_VALID_IRQ_MASK;
	} else {
		/* Set the PCI interrupt enable bit to 0. */
		vx_outl(chip, INTCSR, VX_INTCSR_VALUE&~VX_PCI_INTERRUPT_MASK);
		chip->regCDSP &= ~VX_CDSP_VALID_IRQ_MASK;
	}
	vx_outl(chip, CDSP, chip->regCDSP);
}

/*
 * write an AKM codec data (24bit)
 *
 * Bit-bangs a 24-bit codec command MSB-first on the DATA register;
 * the HIFREQ read opens and the RUER read closes codec register access.
 */
static void vx2_write_codec_reg(struct vx_core *chip, unsigned int data)
{
	unsigned int i;

	vx_inl(chip, HIFREQ);

	/* We have to send 24 bits (3 x 8 bits). Start with most signif. Bit */
	for (i = 0; i < 24; i++, data <<= 1)
		vx_outl(chip, DATA, ((data & 0x800000) ? VX_DATA_CODEC_MASK : 0));
	/* Terminate access to codec registers */
	vx_inl(chip, RUER);
}

/* AKM codec command words (register select in the high byte). */
#define AKM_CODEC_POWER_CONTROL_CMD 0xA007
#define AKM_CODEC_RESET_ON_CMD 0xA100
#define AKM_CODEC_RESET_OFF_CMD 0xA103
#define AKM_CODEC_CLOCK_FORMAT_CMD 0xA240
#define AKM_CODEC_MUTE_CMD 0xA38D
#define AKM_CODEC_UNMUTE_CMD 0xA30D
#define AKM_CODEC_LEFT_LEVEL_CMD 0xA400
#define AKM_CODEC_RIGHT_LEVEL_CMD 0xA500

/*
 * Driver level (index) to AKM attenuation-register value.
 * Index i corresponds to a target gain of -(i * 0.5) dB, from 0.0 dB at
 * [000] down to -73.5 dB at [147]; each entry is the AKM code whose real
 * attenuation is closest to that target (entry [147] = 0x00 = mute).
 * The mapping is non-linear, hence the lookup table.
 */
static const u8 vx2_akm_gains_lut[VX2_AKM_LEVEL_MAX+1] = {
	/* [000-007]   0.0 ..  -3.5 dB */
	0x7f, 0x7d, 0x7c, 0x7a, 0x79, 0x77, 0x76, 0x75,
	/* [008-015]  -4.0 ..  -7.5 dB */
	0x73, 0x72, 0x71, 0x70, 0x6f, 0x6d, 0x6c, 0x6a,
	/* [016-023]  -8.0 .. -11.5 dB */
	0x69, 0x67, 0x66, 0x65, 0x64, 0x62, 0x61, 0x60,
	/* [024-031] -12.0 .. -15.5 dB */
	0x5f, 0x5e, 0x5c, 0x5b, 0x59, 0x58, 0x56, 0x55,
	/* [032-039] -16.0 .. -19.5 dB */
	0x54, 0x53, 0x52, 0x51, 0x50, 0x4e, 0x4d, 0x4b,
	/* [040-047] -20.0 .. -23.5 dB */
	0x4a, 0x48, 0x47, 0x46, 0x44, 0x43, 0x42, 0x41,
	/* [048-055] -24.0 .. -27.5 dB */
	0x40, 0x3f, 0x3e, 0x3c, 0x3b, 0x39, 0x38, 0x37,
	/* [056-063] -28.0 .. -31.5 dB */
	0x36, 0x34, 0x33, 0x32, 0x31, 0x31, 0x30, 0x2e,
	/* [064-071] -32.0 .. -35.5 dB */
	0x2d, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x25, 0x24,
	/* [072-079] -36.0 .. -39.5 dB */
	0x24, 0x23, 0x22, 0x21, 0x20, 0x1f, 0x1e, 0x1d,
	/* [080-087] -40.0 .. -43.5 dB */
	0x1c, 0x1b, 0x1a, 0x19, 0x18, 0x17, 0x16, 0x15,
	/* [088-095] -44.0 .. -47.5 dB */
	0x14, 0x14, 0x13, 0x12, 0x12, 0x11, 0x11, 0x10,
	/* [096-103] -48.0 .. -51.5 dB */
	0x10, 0x0f, 0x0e, 0x0d, 0x0d, 0x0c, 0x0b, 0x0b,
	/* [104-111] -52.0 .. -55.5 dB */
	0x0a, 0x0a, 0x09, 0x09, 0x08, 0x08, 0x07, 0x07,
	/* [112-119] -56.0 .. -59.5 dB */
	0x06, 0x06, 0x06, 0x05, 0x05, 0x05, 0x05, 0x04,
	/* [120-127] -60.0 .. -63.5 dB */
	0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, 0x03,
	/* [128-135] -64.0 .. -67.5 dB */
	0x03, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
	/* [136-143] -68.0 .. -71.5 dB */
	0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01,
	/* [144-147] -72.0 .. -73.5 dB (last entry = mute) */
	0x01, 0x01, 0x01, 0x00};

/*
 * pseudo-codec write entry
 *
 * For the DAC control register, @data non-zero means mute; for the level
 * registers, @data is a driver level index translated via the LUT above.
 */
static void vx2_write_akm(struct vx_core *chip, int reg, unsigned int data)
{
	unsigned int val;

	if (reg == XX_CODEC_DAC_CONTROL_REGISTER) {
		vx2_write_codec_reg(chip, data ? AKM_CODEC_MUTE_CMD : AKM_CODEC_UNMUTE_CMD);
		return;
	}

	/* `data' is a value between 0x0 and VX2_AKM_LEVEL_MAX = 0x093, in the case of the AKM codecs, we need
	   a look up table, as there is no linear matching between the driver codec values
	   and the real dBu value
	*/
	if (snd_BUG_ON(data >= sizeof(vx2_akm_gains_lut)))
		return;

	switch (reg) {
	case XX_CODEC_LEVEL_LEFT_REGISTER:
		val = AKM_CODEC_LEFT_LEVEL_CMD;
		break;
	case XX_CODEC_LEVEL_RIGHT_REGISTER:
		val = AKM_CODEC_RIGHT_LEVEL_CMD;
		break;
	default:
		snd_BUG();
		return;
	}
	val |= vx2_akm_gains_lut[data];

	vx2_write_codec_reg(chip, val);
}

/*
 * write codec bit for old VX222 board
 * (same 24-bit MSB-first bit-bang as vx2_write_codec_reg; @codec unused)
 */
static void vx2_old_write_codec_bit(struct vx_core *chip, int codec, unsigned int data)
{
	int i;

	/* activate access to codec registers */
	vx_inl(chip, HIFREQ);

	for (i = 0; i < 24; i++, data <<= 1)
		vx_outl(chip, DATA, ((data & 0x800000) ? VX_DATA_CODEC_MASK : 0));

	/* Terminate access to codec registers */
	vx_inl(chip, RUER);
}

/*
 * reset codec bit
 *
 * Pulses the codec reset line, then (on V2/Mic boards with AKM codecs)
 * reprograms power, clock format, mute and reset-off, and on the Mic
 * board additionally initializes the microphone input selector.
 */
static void vx2_reset_codec(struct vx_core *_chip)
{
	struct snd_vx222 *chip = (struct snd_vx222 *)_chip;

	/* Set the reset CODEC bit to 0. */
	vx_outl(chip, CDSP, chip->regCDSP & ~VX_CDSP_CODEC_RESET_MASK);
	vx_inl(chip, CDSP);
	msleep(10);
	/* Set the reset CODEC bit to 1. */
	chip->regCDSP |= VX_CDSP_CODEC_RESET_MASK;
	vx_outl(chip, CDSP, chip->regCDSP);
	vx_inl(chip, CDSP);
	if (_chip->type == VX_TYPE_BOARD) {
		msleep(1);
		return;
	}

	msleep(5);  /* additionnel wait time for AKM's */

	vx2_write_codec_reg(_chip, AKM_CODEC_POWER_CONTROL_CMD); /* DAC power up, ADC power up, Vref power down */
	
	vx2_write_codec_reg(_chip, AKM_CODEC_CLOCK_FORMAT_CMD); /* default */
	vx2_write_codec_reg(_chip, AKM_CODEC_MUTE_CMD); /* Mute = ON ,Deemphasis = OFF */
	vx2_write_codec_reg(_chip, AKM_CODEC_RESET_OFF_CMD); /* DAC and ADC normal operation */

	if (_chip->type == VX_TYPE_MIC) {
		/* set up the micro input selector */
		chip->regSELMIC =  MICRO_SELECT_INPUT_NORM |
			MICRO_SELECT_PREAMPLI_G_0 |
			MICRO_SELECT_NOISE_T_52DB;

		/* reset phantom power supply */
		chip->regSELMIC &= ~MICRO_SELECT_PHANTOM_ALIM;

		vx_outl(_chip, SELMIC, chip->regSELMIC);
	}
}

/*
 * change the audio source - select digital or analog input via CFG.
 */
static void vx2_change_audio_source(struct vx_core *_chip, int src)
{
	struct snd_vx222 *chip = (struct snd_vx222 *)_chip;

	switch (src) {
	case VX_AUDIO_SRC_DIGITAL:
		chip->regCFG |= VX_CFG_DATAIN_SEL_MASK;
		break;
	default:
		chip->regCFG &= ~VX_CFG_DATAIN_SEL_MASK;
		break;
	}
	vx_outl(chip, CFG, chip->regCFG);
}

/*
 * set the clock source - internal quartz vs. external clock via CFG.
 */
static void vx2_set_clock_source(struct vx_core *_chip, int source)
{
	struct snd_vx222 *chip = (struct snd_vx222 *)_chip;

	if (source == INTERNAL_QUARTZ)
		chip->regCFG &= ~VX_CFG_CLOCKIN_SEL_MASK;
	else
		chip->regCFG |= VX_CFG_CLOCKIN_SEL_MASK;
	vx_outl(chip, CFG, chip->regCFG);
}

/*
 * reset the board - only reinitializes the cached register values; the
 * actual register writes happen in the subsequent reset/validate calls.
 */
static void vx2_reset_board(struct vx_core *_chip, int cold_reset)
{
	struct snd_vx222 *chip = (struct snd_vx222 *)_chip;

	/* initialize the register values */
	chip->regCDSP = VX_CDSP_CODEC_RESET_MASK | VX_CDSP_DSP_RESET_MASK ;
	chip->regCFG = 0;
}


/*
 * input level controls for VX222 Mic
 */

/* Micro level is specified to be adjustable from -96dB to 63 dB (board coded 0x00 ... 318),
 * 318 = 210 + 36 + 36 + 36  (210 = +9dB variable) (3 * 36 = 3 steps of 18dB pre ampli)
 * as we will mute if less than -110dB, so let's simply use line input coded levels and add constant offset !
 */
#define V2_MICRO_LEVEL_RANGE (318 - 255)

/*
 * Program the Mic board's analog input gains: split the mic level into a
 * pre-amp step (0..3 x 18 dB) plus a variable part (<= 210, i.e. +9 dB),
 * then bit-bang mic + both line levels as one 32-bit word on DATA.
 */
static void vx2_set_input_level(struct snd_vx222 *chip)
{
	int i, miclevel, preamp;
	unsigned int data;

	miclevel = chip->mic_level;
	miclevel += V2_MICRO_LEVEL_RANGE; /* add 318 - 0xff */
	preamp = 0;
        while (miclevel > 210) { /* limitation to +9dB of 3310 real gain */
		preamp++;	/* raise pre ampli + 18dB */
		miclevel -= (18 * 2);   /* lower level 18 dB (*2 because of 0.5 dB steps !) */
        }
	if (snd_BUG_ON(preamp >= 4))
		return;

	/* set pre-amp level */
	chip->regSELMIC &= ~MICRO_SELECT_PREAMPLI_MASK;
	chip->regSELMIC |= (preamp << MICRO_SELECT_PREAMPLI_OFFSET) &
		MICRO_SELECT_PREAMPLI_MASK;
	vx_outl(chip, SELMIC, chip->regSELMIC);

	/* pack mic level and both line input levels, MSB-first */
	data = (unsigned int)miclevel << 16 |
		(unsigned int)chip->input_level[1] << 8 |
		(unsigned int)chip->input_level[0];
	vx_inl(chip, DATA); /* Activate input level programming */

	/* We have to send 32 bits (4 x 8 bits) */
	for (i = 0; i < 32; i++, data <<= 1)
		vx_outl(chip, DATA, ((data & 0x80000000) ? VX_DATA_CODEC_MASK : 0));

	vx_inl(chip, RUER); /* Terminate input level programming */
}


#define MIC_LEVEL_MAX	0xff

/* -64.5 dB .. 0 dB in 0.5 dB steps */
static const DECLARE_TLV_DB_SCALE(db_scale_mic, -6450, 50, 0);

/*
 * controls API for input levels
 */

/* input levels */
static int vx_input_level_info(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = MIC_LEVEL_MAX;
	return 0;
}

static int vx_input_level_get(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct vx_core *_chip = snd_kcontrol_chip(kcontrol);
	struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
	mutex_lock(&_chip->mixer_mutex);
	ucontrol->value.integer.value[0] = chip->input_level[0];
	ucontrol->value.integer.value[1] = chip->input_level[1];
	mutex_unlock(&_chip->mixer_mutex);
	return 0;
}

static int vx_input_level_put(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct vx_core *_chip = snd_kcontrol_chip(kcontrol);
	struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
	if (ucontrol->value.integer.value[0] < 0 ||
	    ucontrol->value.integer.value[0] > MIC_LEVEL_MAX)
		return -EINVAL;
	if (ucontrol->value.integer.value[1] < 0 ||
	    ucontrol->value.integer.value[1] > MIC_LEVEL_MAX)
		return -EINVAL;
	mutex_lock(&_chip->mixer_mutex);
	if (chip->input_level[0] != ucontrol->value.integer.value[0] ||
	    chip->input_level[1] != ucontrol->value.integer.value[1]) {
		chip->input_level[0] = ucontrol->value.integer.value[0];
		chip->input_level[1] = ucontrol->value.integer.value[1];
		vx2_set_input_level(chip);
		mutex_unlock(&_chip->mixer_mutex);
		return 1;	/* value changed */
	}
	mutex_unlock(&_chip->mixer_mutex);
	return 0;
}

/* mic level */
static int vx_mic_level_info(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = MIC_LEVEL_MAX;
	return 0;
}

static int vx_mic_level_get(struct snd_kcontrol *kcontrol,
			    struct snd_ctl_elem_value *ucontrol)
{
	struct vx_core *_chip = snd_kcontrol_chip(kcontrol);
	struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
	ucontrol->value.integer.value[0] = chip->mic_level;
	return 0;
}

static int vx_mic_level_put(struct snd_kcontrol *kcontrol,
			    struct snd_ctl_elem_value *ucontrol)
{
	struct vx_core *_chip = snd_kcontrol_chip(kcontrol);
	struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
	if (ucontrol->value.integer.value[0] < 0 ||
	    ucontrol->value.integer.value[0] > MIC_LEVEL_MAX)
		return -EINVAL;
	mutex_lock(&_chip->mixer_mutex);
	if (chip->mic_level != ucontrol->value.integer.value[0]) {
		chip->mic_level = ucontrol->value.integer.value[0];
		vx2_set_input_level(chip);
		mutex_unlock(&_chip->mixer_mutex);
		return 1;	/* value changed */
	}
	mutex_unlock(&_chip->mixer_mutex);
	return 0;
}

static struct snd_kcontrol_new vx_control_input_level = {
	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
	.access =	(SNDRV_CTL_ELEM_ACCESS_READWRITE |
			 SNDRV_CTL_ELEM_ACCESS_TLV_READ),
	.name =		"Capture Volume",
	.info =		vx_input_level_info,
	.get =		vx_input_level_get,
	.put =		vx_input_level_put,
	.tlv = { .p = db_scale_mic },
};

static struct snd_kcontrol_new vx_control_mic_level = {
	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
	.access =	(SNDRV_CTL_ELEM_ACCESS_READWRITE |
			 SNDRV_CTL_ELEM_ACCESS_TLV_READ),
	.name =		"Mic Capture Volume",
	.info =		vx_mic_level_info,
	.get =		vx_mic_level_get,
	.put =		vx_mic_level_put,
	.tlv = { .p = db_scale_mic },
};

/*
 * FIXME: compressor/limiter implementation is missing yet...
 */

/*
 * Mic-board-only mixer setup: mute all input levels and register the
 * capture and mic volume controls. No-op on other board types.
 */
static int vx2_add_mic_controls(struct vx_core *_chip)
{
	struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
	int err;

	if (_chip->type != VX_TYPE_MIC)
		return 0;

	/* mute input levels */
	chip->input_level[0] = chip->input_level[1] = 0;
	chip->mic_level = 0;
	vx2_set_input_level(chip);

	/* controls */
	if ((err = snd_ctl_add(_chip->card, snd_ctl_new1(&vx_control_input_level, chip))) < 0)
		return err;
	if ((err = snd_ctl_add(_chip->card, snd_ctl_new1(&vx_control_mic_level, chip))) < 0)
		return err;

	return 0;
}


/*
 * callbacks
 */
/* ops table for V2/Mic boards (AKM codec, akm_write entry) */
struct snd_vx_ops vx222_ops = {
	.in8 = vx2_inb,
	.in32 = vx2_inl,
	.out8 = vx2_outb,
	.out32 = vx2_outl,
	.test_and_ack = vx2_test_and_ack,
	.validate_irq = vx2_validate_irq,
	.akm_write = vx2_write_akm,
	.reset_codec = vx2_reset_codec,
	.change_audio_source = vx2_change_audio_source,
	.set_clock_source = vx2_set_clock_source,
	.load_dsp = vx2_load_dsp,
	.reset_dsp = vx2_reset_dsp,
	.reset_board = vx2_reset_board,
	.dma_write = vx2_dma_write,
	.dma_read = vx2_dma_read,
	.add_controls = vx2_add_mic_controls,
};

/* for old VX222 board (direct codec bit write, no mic controls) */
struct snd_vx_ops vx222_old_ops = {
	.in8 = vx2_inb,
	.in32 = vx2_inl,
	.out8 = vx2_outb,
	.out32 = vx2_outl,
	.test_and_ack = vx2_test_and_ack,
	.validate_irq = vx2_validate_irq,
	.write_codec = vx2_old_write_codec_bit,
	.reset_codec = vx2_reset_codec,
	.change_audio_source = vx2_change_audio_source,
	.set_clock_source = vx2_set_clock_source,
	.load_dsp = vx2_load_dsp,
	.reset_dsp = vx2_reset_dsp,
	.reset_board = vx2_reset_board,
	.dma_write = vx2_dma_write,
	.dma_read = vx2_dma_read,
};
gpl-2.0
alinuredini/nova
drivers/block/xsysace.c
1168
33533
/*
 * Xilinx SystemACE device driver
 *
 * Copyright 2007 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

/*
 * The SystemACE chip is designed to configure FPGAs by loading an FPGA
 * bitstream from a file on a CF card and squirting it into FPGAs connected
 * to the SystemACE JTAG chain. It also has the advantage of providing an
 * MPU interface which can be used to control the FPGA configuration process
 * and to use the attached CF card for general purpose storage.
 *
 * This driver is a block device driver for the SystemACE.
 *
 * Initialization:
 *    The driver registers itself as a platform_device driver at module
 *    load time. The platform bus will take care of calling the
 *    ace_probe() method for all SystemACE instances in the system. Any
 *    number of SystemACE instances are supported. ace_probe() calls
 *    ace_setup() which initializes all data structures, reads the CF
 *    id structure and registers the device.
 *
 * Processing:
 *    Just about all of the heavy lifting in this driver is performed by
 *    a Finite State Machine (FSM). The driver needs to wait on a number
 *    of events; some raised by interrupts, some which need to be polled
 *    for. Describing all of the behaviour in a FSM seems to be the
 *    easiest way to keep the complexity low and make it easy to
 *    understand what the driver is doing. If the block ops or the
 *    request function need to interact with the hardware, then they
 *    simply need to flag the request and kick off FSM processing.
 *
 *    The FSM itself is atomic-safe code which can be run from any
 *    context. The general process flow is:
 *    1. obtain the ace->lock spinlock.
 *    2. loop on ace_fsm_dostate() until the ace->fsm_continue flag is
 *       cleared.
 *    3. release the lock.
 *
 *    Individual states do not sleep in any way. If a condition needs to
 *    be waited for then the state must clear the fsm_continue flag and
 *    either schedule the FSM to be run again at a later time, or expect
 *    an interrupt to call the FSM when the desired condition is met.
 *
 *    In normal operation, the FSM is processed at interrupt context
 *    either when the driver's tasklet is scheduled, or when an irq is
 *    raised by the hardware. The tasklet can be scheduled at any time.
 *    The request method in particular schedules the tasklet when a new
 *    request has been indicated by the block layer. Once started, the
 *    FSM proceeds as far as it can processing the request until it
 *    needs to wait on a hardware event. At this point, it must yield
 *    execution.
 *
 *    A state has two options when yielding execution:
 *    1. ace_fsm_yield()
 *       - Call if need to poll for event.
 *       - clears the fsm_continue flag to exit the processing loop
 *       - reschedules the tasklet to run again as soon as possible
 *    2. ace_fsm_yieldirq()
 *       - Call if an irq is expected from the HW
 *       - clears the fsm_continue flag to exit the processing loop
 *       - does not reschedule the tasklet so the FSM will not be processed
 *         again until an irq is received.
 *    After calling a yield function, the state must return control back
 *    to the FSM main loop.
 *
 *    Additionally, the driver maintains a kernel timer which can process
 *    the FSM. If the FSM gets stalled, typically due to a missed
 *    interrupt, then the kernel timer will expire and the driver can
 *    continue where it left off.
 *
 * To Do:
 *    - Add FPGA configuration control interface.
 *    - Request major number from lanana
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/ata.h>
#include <linux/hdreg.h>
#include <linux/platform_device.h>
#if defined(CONFIG_OF)
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#endif

MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
MODULE_DESCRIPTION("Xilinx SystemACE device driver");
MODULE_LICENSE("GPL");

/* SystemACE register definitions */
#define ACE_BUSMODE (0x00)

#define ACE_STATUS (0x04)
#define ACE_STATUS_CFGLOCK (0x00000001)
#define ACE_STATUS_MPULOCK (0x00000002)
#define ACE_STATUS_CFGERROR (0x00000004)	/* config controller error */
#define ACE_STATUS_CFCERROR (0x00000008)	/* CF controller error */
#define ACE_STATUS_CFDETECT (0x00000010)
#define ACE_STATUS_DATABUFRDY (0x00000020)
#define ACE_STATUS_DATABUFMODE (0x00000040)
#define ACE_STATUS_CFGDONE (0x00000080)
#define ACE_STATUS_RDYFORCFCMD (0x00000100)
#define ACE_STATUS_CFGMODEPIN (0x00000200)
#define ACE_STATUS_CFGADDR_MASK (0x0000e000)
#define ACE_STATUS_CFBSY (0x00020000)
#define ACE_STATUS_CFRDY (0x00040000)
#define ACE_STATUS_CFDWF (0x00080000)
#define ACE_STATUS_CFDSC (0x00100000)
#define ACE_STATUS_CFDRQ (0x00200000)
#define ACE_STATUS_CFCORR (0x00400000)
#define ACE_STATUS_CFERR (0x00800000)

#define ACE_ERROR (0x08)
#define ACE_CFGLBA (0x0c)
#define ACE_MPULBA (0x10)

#define ACE_SECCNTCMD (0x14)
#define ACE_SECCNTCMD_RESET (0x0100)
#define ACE_SECCNTCMD_IDENTIFY (0x0200)
#define ACE_SECCNTCMD_READ_DATA (0x0300)
#define ACE_SECCNTCMD_WRITE_DATA (0x0400)
#define ACE_SECCNTCMD_ABORT (0x0600)

#define ACE_VERSION (0x16)
#define ACE_VERSION_REVISION_MASK (0x00FF)
#define ACE_VERSION_MINOR_MASK (0x0F00)
#define ACE_VERSION_MAJOR_MASK (0xF000)

#define ACE_CTRL (0x18)
#define ACE_CTRL_FORCELOCKREQ (0x0001)
#define ACE_CTRL_LOCKREQ (0x0002)
#define ACE_CTRL_FORCECFGADDR (0x0004)
#define ACE_CTRL_FORCECFGMODE (0x0008)
#define ACE_CTRL_CFGMODE (0x0010)
#define ACE_CTRL_CFGSTART (0x0020)
#define ACE_CTRL_CFGSEL (0x0040)
#define ACE_CTRL_CFGRESET (0x0080)
#define ACE_CTRL_DATABUFRDYIRQ (0x0100)
#define ACE_CTRL_ERRORIRQ (0x0200)
#define ACE_CTRL_CFGDONEIRQ (0x0400)
#define ACE_CTRL_RESETIRQ (0x0800)
#define ACE_CTRL_CFGPROG (0x1000)
#define ACE_CTRL_CFGADDR_MASK (0xe000)

#define ACE_FATSTAT (0x1c)

#define ACE_NUM_MINORS 16
#define ACE_SECTOR_SIZE (512)
#define ACE_FIFO_SIZE (32)
/* number of FIFO-sized buffer transfers per 512-byte sector */
#define ACE_BUF_PER_SECTOR (ACE_SECTOR_SIZE / ACE_FIFO_SIZE)

#define ACE_BUS_WIDTH_8 0
#define ACE_BUS_WIDTH_16 1

struct ace_reg_ops;

/* Per-instance driver state; one of these per probed SystemACE device. */
struct ace_device {
	/* driver state data */
	int id;
	int media_change;
	int users;
	struct list_head list;

	/* finite state machine data */
	struct tasklet_struct fsm_tasklet;
	uint fsm_task;		/* Current activity (ACE_TASK_*) */
	uint fsm_state;		/* Current state (ACE_FSM_STATE_*) */
	uint fsm_continue_flag;	/* cleared to exit FSM mainloop */
	uint fsm_iter_num;
	struct timer_list stall_timer;

	/* Transfer state/result, use for both id and block request */
	struct request *req;	/* request being processed */
	void *data_ptr;		/* pointer to I/O buffer */
	int data_count;		/* number of buffers remaining */
	int data_result;	/* Result of transfer; 0 := success */

	int id_req_count;	/* count of id requests */
	int id_result;
	struct completion id_completion;	/* used when id req finishes */
	int in_irq;

	/* Details of hardware device */
	resource_size_t physaddr;
	void __iomem *baseaddr;
	int irq;
	int bus_width;		/* 0 := 8 bit; 1 := 16 bit */
	struct ace_reg_ops *reg_ops;
	int lock_count;

	/* Block device data structures */
	spinlock_t lock;
	struct device *dev;
	struct request_queue *queue;
	struct gendisk *gd;

	/* Inserted CF card parameters */
	u16 cf_id[ATA_ID_WORDS];
};

static DEFINE_MUTEX(xsysace_mutex);
static int ace_major;

/* ---------------------------------------------------------------------
 * Low level register access
 */

/* Accessor vtable; selected at probe time based on bus width/endianness. */
struct ace_reg_ops {
	u16(*in) (struct ace_device * ace, int reg);
	void (*out) (struct ace_device * ace, int reg, u16 val);
	void (*datain) (struct ace_device * ace);
	void (*dataout) (struct ace_device * ace);
};

/* 8 Bit bus width */
static u16 ace_in_8(struct ace_device *ace, int reg)
{
	void __iomem *r = ace->baseaddr + reg;
	/* low byte at reg, high byte at reg+1 */
	return in_8(r) | (in_8(r + 1) << 8);
}

static void ace_out_8(struct ace_device *ace, int reg, u16 val)
{
	void __iomem *r = ace->baseaddr + reg;
	out_8(r, val);
	out_8(r + 1, val >> 8);
}

/* Drain one FIFO-sized buffer (ACE_FIFO_SIZE bytes) into ace->data_ptr,
 * advancing the pointer. */
static void ace_datain_8(struct ace_device *ace)
{
	void __iomem *r = ace->baseaddr + 0x40;
	u8 *dst = ace->data_ptr;
	int i = ACE_FIFO_SIZE;
	while (i--)
		*dst++ = in_8(r++);
	ace->data_ptr = dst;
}

static void ace_dataout_8(struct ace_device *ace)
{
	void __iomem *r = ace->baseaddr + 0x40;
	u8 *src = ace->data_ptr;
	int i = ACE_FIFO_SIZE;
	while (i--)
		out_8(r++, *src++);
	ace->data_ptr = src;
}

static struct ace_reg_ops ace_reg_8_ops = {
	.in = ace_in_8,
	.out = ace_out_8,
	.datain = ace_datain_8,
	.dataout = ace_dataout_8,
};

/* 16 bit big endian bus attachment */
static u16 ace_in_be16(struct ace_device *ace, int reg)
{
	return in_be16(ace->baseaddr + reg);
}

static void ace_out_be16(struct ace_device *ace, int reg, u16 val)
{
	out_be16(ace->baseaddr + reg, val);
}

static void ace_datain_be16(struct ace_device *ace)
{
	int i = ACE_FIFO_SIZE / 2;
	u16 *dst = ace->data_ptr;
	while (i--)
		*dst++ = in_le16(ace->baseaddr + 0x40);
	ace->data_ptr = dst;
}

static void ace_dataout_be16(struct ace_device *ace)
{
	int i = ACE_FIFO_SIZE / 2;
	u16 *src = ace->data_ptr;
	while (i--)
		out_le16(ace->baseaddr + 0x40, *src++);
	ace->data_ptr = src;
}

/* 16 bit little endian bus attachment */
static u16 ace_in_le16(struct ace_device *ace, int reg)
{
	return in_le16(ace->baseaddr + reg);
}

static void ace_out_le16(struct ace_device *ace, int reg, u16 val)
{
	out_le16(ace->baseaddr + reg, val);
}

static void ace_datain_le16(struct ace_device *ace)
{
	int i = ACE_FIFO_SIZE / 2;
	u16 *dst = ace->data_ptr;
	while (i--)
		*dst++ = in_be16(ace->baseaddr + 0x40);
	ace->data_ptr = dst;
}

static void ace_dataout_le16(struct ace_device *ace)
{
	int i = ACE_FIFO_SIZE / 2;
	u16 *src = ace->data_ptr;
	while (i--)
		out_be16(ace->baseaddr + 0x40, *src++);
	ace->data_ptr = src;
}

static struct ace_reg_ops ace_reg_be16_ops = {
	.in = ace_in_be16,
	.out = ace_out_be16,
	.datain = ace_datain_be16,
	.dataout = ace_dataout_be16,
};

static struct ace_reg_ops ace_reg_le16_ops = {
	.in = ace_in_le16,
	.out = ace_out_le16,
	.datain = ace_datain_le16,
	.dataout = ace_dataout_le16,
};

static inline u16 ace_in(struct ace_device *ace, int reg)
{
	return ace->reg_ops->in(ace, reg);
}

/* 32-bit read composed from two 16-bit register reads (low word first) */
static inline u32 ace_in32(struct ace_device *ace, int reg)
{
	return ace_in(ace, reg) | (ace_in(ace, reg + 2) << 16);
}

static inline void ace_out(struct ace_device *ace, int reg, u16 val)
{
	ace->reg_ops->out(ace, reg, val);
}

static inline void ace_out32(struct ace_device *ace, int reg, u32 val)
{
	ace_out(ace, reg, val);
	ace_out(ace, reg + 2, val >> 16);
}

/* ---------------------------------------------------------------------
 * Debug support functions
 */

#if defined(DEBUG)
/* Hex+ASCII dump of a memory region, 16 bytes per line. */
static void ace_dump_mem(void *base, int len)
{
	const char *ptr = base;
	int i, j;

	for (i = 0; i < len; i += 16) {
		printk(KERN_INFO "%.8x:", i);
		for (j = 0; j < 16; j++) {
			if (!(j % 4))
				printk(" ");
			printk("%.2x", ptr[i + j]);
		}
		printk(" ");
		for (j = 0; j < 16; j++)
			printk("%c", isprint(ptr[i + j]) ? ptr[i + j] : '.');
		printk("\n");
	}
}
#else
static inline void ace_dump_mem(void *base, int len)
{
}
#endif

/* Dump the main SystemACE registers to the kernel log. */
static void ace_dump_regs(struct ace_device *ace)
{
	dev_info(ace->dev, "    ctrl:  %.8x  seccnt/cmd: %.4x      ver:%.4x\n"
		 "    status:%.8x  mpu_lba:%.8x  busmode:%4x\n"
		 "    error: %.8x  cfg_lba:%.8x  fatstat:%.4x\n",
		 ace_in32(ace, ACE_CTRL),
		 ace_in(ace, ACE_SECCNTCMD),
		 ace_in(ace, ACE_VERSION),
		 ace_in32(ace, ACE_STATUS),
		 ace_in32(ace, ACE_MPULBA),
		 ace_in(ace, ACE_BUSMODE),
		 ace_in32(ace, ACE_ERROR),
		 ace_in32(ace, ACE_CFGLBA), ace_in(ace, ACE_FATSTAT));
}

/* Byte-swap the ATA identify data on big-endian hosts (all half words
 * arrive little-endian from the device). */
void ace_fix_driveid(u16 *id)
{
#if defined(__BIG_ENDIAN)
	int i;

	/* All half words have wrong byte order; swap the bytes */
	for (i = 0; i < ATA_ID_WORDS; i++, id++)
		*id = le16_to_cpu(*id);
#endif
}

/* ---------------------------------------------------------------------
 * Finite State Machine (FSM) implementation
 */

/* FSM tasks; used to direct state transitions */
#define ACE_TASK_IDLE      0
#define ACE_TASK_IDENTIFY  1
#define ACE_TASK_READ      2
#define ACE_TASK_WRITE     3
#define ACE_FSM_NUM_TASKS  4

/* FSM state definitions */
#define ACE_FSM_STATE_IDLE               0
#define ACE_FSM_STATE_REQ_LOCK           1
#define ACE_FSM_STATE_WAIT_LOCK          2
#define ACE_FSM_STATE_WAIT_CFREADY       3
#define ACE_FSM_STATE_IDENTIFY_PREPARE   4
#define ACE_FSM_STATE_IDENTIFY_TRANSFER  5
#define ACE_FSM_STATE_IDENTIFY_COMPLETE  6
#define ACE_FSM_STATE_REQ_PREPARE        7
#define ACE_FSM_STATE_REQ_TRANSFER       8
#define ACE_FSM_STATE_REQ_COMPLETE       9
#define ACE_FSM_STATE_ERROR             10
#define ACE_FSM_NUM_STATES              11

/* Set flag to exit FSM loop and reschedule tasklet */
static inline void ace_fsm_yield(struct ace_device *ace)
{
	dev_dbg(ace->dev, "ace_fsm_yield()\n");
	tasklet_schedule(&ace->fsm_tasklet);
	ace->fsm_continue_flag = 0;
}

/* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet */
static inline void ace_fsm_yieldirq(struct ace_device *ace)
{
	dev_dbg(ace->dev, "ace_fsm_yieldirq()\n");

	if (ace->irq == NO_IRQ)
		/* No IRQ assigned, so need to poll */
		tasklet_schedule(&ace->fsm_tasklet);
	ace->fsm_continue_flag = 0;
}

/* Get the next read/write request; ending requests that we don't handle */
struct request *ace_get_next_request(struct request_queue * q)
{
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		if (req->cmd_type == REQ_TYPE_FS)
			break;
		/* non-filesystem request: complete it immediately with -EIO */
		blk_start_request(req);
		__blk_end_request_all(req, -EIO);
	}
	return req;
}

/*
 * Run one FSM step.  Caller must hold ace->lock; loops in the callers
 * re-invoke this until fsm_continue_flag is cleared by a yield.
 */
static void ace_fsm_dostate(struct ace_device *ace)
{
	struct request *req;
	u32 status;
	u16 val;
	int count;

#if defined(DEBUG)
	dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
		ace->fsm_state, ace->id_req_count);
#endif

	/* Verify that there is actually a CF in the slot. If not, then
	 * bail out back to the idle state and wake up all the waiters */
	status = ace_in32(ace, ACE_STATUS);
	if ((status & ACE_STATUS_CFDETECT) == 0) {
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->media_change = 1;
		set_capacity(ace->gd, 0);
		dev_info(ace->dev, "No CF in slot\n");

		/* Drop all in-flight and pending requests */
		if (ace->req) {
			__blk_end_request_all(ace->req, -EIO);
			ace->req = NULL;
		}
		while ((req = blk_fetch_request(ace->queue)) != NULL)
			__blk_end_request_all(req, -EIO);

		/* Drop back to IDLE state and notify waiters */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->id_result = -EIO;
		while (ace->id_req_count) {
			complete(&ace->id_completion);
			ace->id_req_count--;
		}
	}

	switch (ace->fsm_state) {
	case ACE_FSM_STATE_IDLE:
		/* See if there is anything to do */
		if (ace->id_req_count || ace_get_next_request(ace->queue)) {
			ace->fsm_iter_num++;
			ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
			mod_timer(&ace->stall_timer, jiffies + HZ);
			if (!timer_pending(&ace->stall_timer))
				add_timer(&ace->stall_timer);
			break;
		}
		del_timer(&ace->stall_timer);
		ace->fsm_continue_flag = 0;
		break;

	case ACE_FSM_STATE_REQ_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* Already have the lock, jump to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* Request the lock */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ);
		ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK;
		break;

	case ACE_FSM_STATE_WAIT_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* got the lock; move to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* wait a bit for the lock */
		ace_fsm_yield(ace);
		break;

	case ACE_FSM_STATE_WAIT_CFREADY:
		status = ace_in32(ace, ACE_STATUS);
		if (!(status & ACE_STATUS_RDYFORCFCMD) ||
		    (status & ACE_STATUS_CFBSY)) {
			/* CF card isn't ready; it needs to be polled */
			ace_fsm_yield(ace);
			break;
		}

		/* Device is ready for command; determine what to do next */
		if (ace->id_req_count)
			ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE;
		else
			ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE;
		break;

	case ACE_FSM_STATE_IDENTIFY_PREPARE:
		/* Send identify command */
		ace->fsm_task = ACE_TASK_IDENTIFY;
		ace->data_ptr = ace->cf_id;
		ace->data_count = ACE_BUF_PER_SECTOR;
		ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY);

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* irq handler takes over from this point; wait for the
		 * transfer to complete */
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER;
		ace_fsm_yieldirq(ace);
		break;

	case ACE_FSM_STATE_IDENTIFY_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				ace->data_count);
			ace_fsm_yield(ace);
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			ace_fsm_yield(ace);
			break;
		}

		/* Transfer the next buffer */
		ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transfers; jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* transfer finished; kick state machine */
		dev_dbg(ace->dev, "identify finished\n");
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE;
		break;

	case ACE_FSM_STATE_IDENTIFY_COMPLETE:
		ace_fix_driveid(ace->cf_id);
		ace_dump_mem(ace->cf_id, 512);	/* Debug: Dump out disk ID */

		if (ace->data_result) {
			/* Error occurred, disable the disk */
			ace->media_change = 1;
			set_capacity(ace->gd, 0);
			dev_err(ace->dev, "error fetching CF id (%i)\n",
				ace->data_result);
		} else {
			ace->media_change = 0;

			/* Record disk parameters */
			set_capacity(ace->gd,
				ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));
			dev_info(ace->dev, "capacity: %i sectors\n",
				ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));
		}

		/* We're done, drop to IDLE state and notify waiters */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->id_result = ace->data_result;
		while (ace->id_req_count) {
			complete(&ace->id_completion);
			ace->id_req_count--;
		}
		break;

	case ACE_FSM_STATE_REQ_PREPARE:
		req = ace_get_next_request(ace->queue);
		if (!req) {
			ace->fsm_state = ACE_FSM_STATE_IDLE;
			break;
		}
		blk_start_request(req);

		/* Okay, it's a data request, set it up for transfer */
		dev_dbg(ace->dev,
			"request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
			(unsigned long long)blk_rq_pos(req),
			blk_rq_sectors(req), blk_rq_cur_sectors(req),
			rq_data_dir(req));

		ace->req = req;
		ace->data_ptr = req->buffer;
		ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
		ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);

		count = blk_rq_sectors(req);
		if (rq_data_dir(req)) {
			/* Kick off write request */
			dev_dbg(ace->dev, "write data\n");
			ace->fsm_task = ACE_TASK_WRITE;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_WRITE_DATA);
		} else {
			/* Kick off read request */
			dev_dbg(ace->dev, "read data\n");
			ace->fsm_task = ACE_TASK_READ;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_READ_DATA);
		}

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* Move to the transfer state. The systemace will raise
		 * an interrupt once there is something to do
		 */
		ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER;
		if (ace->fsm_task == ACE_TASK_READ)
			ace_fsm_yieldirq(ace);	/* wait for data ready */
		break;

	case ACE_FSM_STATE_REQ_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev,
				"CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				blk_rq_cur_sectors(ace->req) * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yield(ace);	/* need to poll CFBSY bit */
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			dev_dbg(ace->dev,
				"DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				blk_rq_cur_sectors(ace->req) * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yieldirq(ace);
			break;
		}

		/* Transfer the next buffer */
		if (ace->fsm_task == ACE_TASK_WRITE)
			ace->reg_ops->dataout(ace);
		else
			ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transfers; jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* bio finished; is there another one? */
		if (__blk_end_request_cur(ace->req, 0)) {
			/* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
			 *      blk_rq_sectors(ace->req),
			 *      blk_rq_cur_sectors(ace->req));
			 */
			ace->data_ptr = ace->req->buffer;
			ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
			ace_fsm_yieldirq(ace);
			break;
		}

		ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE;
		break;

	case ACE_FSM_STATE_REQ_COMPLETE:
		ace->req = NULL;

		/* Finished request; go to idle state */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;

	default:
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;
	}
}

/* Tasklet entry point: run the FSM to completion under the lock. */
static void ace_fsm_tasklet(unsigned long data)
{
	struct ace_device *ace = (void *)data;
	unsigned long flags;

	spin_lock_irqsave(&ace->lock, flags);

	/* Loop over state machine until told to stop */
	ace->fsm_continue_flag = 1;
	while (ace->fsm_continue_flag)
		ace_fsm_dostate(ace);

	spin_unlock_irqrestore(&ace->lock, flags);
}

/* Watchdog: kick a stalled FSM (typically after a missed interrupt). */
static void ace_stall_timer(unsigned long data)
{
	struct ace_device *ace = (void *)data;
	unsigned long flags;

	dev_warn(ace->dev,
		 "kicking stalled fsm; state=%i task=%i iter=%i dc=%i\n",
		 ace->fsm_state, ace->fsm_task, ace->fsm_iter_num,
		 ace->data_count);
	spin_lock_irqsave(&ace->lock, flags);

	/* Rearm the stall timer *before* entering FSM (which may then
	 * delete the timer) */
	mod_timer(&ace->stall_timer, jiffies + HZ);

	/* Loop over state machine until told to stop */
	ace->fsm_continue_flag = 1;
	while (ace->fsm_continue_flag)
		ace_fsm_dostate(ace);

	spin_unlock_irqrestore(&ace->lock, flags);
}

/* ---------------------------------------------------------------------
 * Interrupt handling routines
 */

/* Returns -EIO if the hardware reports a transfer error, 0 otherwise. */
static int ace_interrupt_checkstate(struct ace_device *ace)
{
	u32 sreg = ace_in32(ace, ACE_STATUS);
	u16 creg = ace_in(ace, ACE_CTRL);

	/* Check for error occurrence */
	if ((sreg & (ACE_STATUS_CFGERROR | ACE_STATUS_CFCERROR)) &&
	    (creg & ACE_CTRL_ERRORIRQ)) {
		dev_err(ace->dev, "transfer failure\n");
		ace_dump_regs(ace);
		return -EIO;
	}

	return 0;
}

static irqreturn_t ace_interrupt(int irq, void *dev_id)
{
	u16 creg;
	struct ace_device *ace = dev_id;

	/* be safe and get the lock */
	spin_lock(&ace->lock);
	ace->in_irq = 1;

	/* clear the interrupt */
	creg = ace_in(ace, ACE_CTRL);
	ace_out(ace, ACE_CTRL, creg | ACE_CTRL_RESETIRQ);
	ace_out(ace, ACE_CTRL, creg);

	/* check for IO failures */
	if (ace_interrupt_checkstate(ace))
		ace->data_result = -EIO;

	if (ace->fsm_task == 0) {
		dev_err(ace->dev,
			"spurious irq; stat=%.8x ctrl=%.8x cmd=%.4x\n",
			ace_in32(ace, ACE_STATUS), ace_in32(ace, ACE_CTRL),
			ace_in(ace, ACE_SECCNTCMD));
		dev_err(ace->dev, "fsm_task=%i fsm_state=%i data_count=%i\n",
			ace->fsm_task, ace->fsm_state, ace->data_count);
	}

	/* Loop over state machine until told to stop */
	ace->fsm_continue_flag = 1;
	while (ace->fsm_continue_flag)
		ace_fsm_dostate(ace);

	/* done with interrupt; drop the lock */
	ace->in_irq = 0;
	spin_unlock(&ace->lock);

	return IRQ_HANDLED;
}

/* ---------------------------------------------------------------------
 * Block ops
 */

/* Block-layer request callback: just schedule the FSM tasklet. */
static void ace_request(struct request_queue * q)
{
	struct request *req;
	struct ace_device *ace;

	req = ace_get_next_request(q);

	if (req) {
		ace = req->rq_disk->private_data;
		tasklet_schedule(&ace->fsm_tasklet);
	}
}

static unsigned int ace_check_events(struct gendisk *gd, unsigned int clearing)
{
	struct ace_device *ace = gd->private_data;
	dev_dbg(ace->dev, "ace_check_events(): %i\n", ace->media_change);

	return ace->media_change ? DISK_EVENT_MEDIA_CHANGE : 0;
}

/* Re-read the CF identify data after a media change; blocks until the
 * FSM has completed the identify request. */
static int ace_revalidate_disk(struct gendisk *gd)
{
	struct ace_device *ace = gd->private_data;
	unsigned long flags;

	dev_dbg(ace->dev, "ace_revalidate_disk()\n");

	if (ace->media_change) {
		dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n");

		spin_lock_irqsave(&ace->lock, flags);
		ace->id_req_count++;
		spin_unlock_irqrestore(&ace->lock, flags);

		tasklet_schedule(&ace->fsm_tasklet);
		wait_for_completion(&ace->id_completion);
	}

	dev_dbg(ace->dev, "revalidate complete\n");
	return ace->id_result;
}

static int ace_open(struct block_device *bdev, fmode_t mode)
{
	struct ace_device *ace = bdev->bd_disk->private_data;
	unsigned long flags;

	dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1);

	mutex_lock(&xsysace_mutex);
	spin_lock_irqsave(&ace->lock, flags);
	ace->users++;
	spin_unlock_irqrestore(&ace->lock, flags);

	check_disk_change(bdev);
	mutex_unlock(&xsysace_mutex);

	return 0;
}

static int ace_release(struct gendisk *disk, fmode_t mode)
{
	struct ace_device *ace = disk->private_data;
	unsigned long flags;
	u16 val;

	dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1);

	mutex_lock(&xsysace_mutex);
	spin_lock_irqsave(&ace->lock, flags);
	ace->users--;
	if (ace->users == 0) {
		/* last user gone: release the MPU lock on the CF bus */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ);
	}
	spin_unlock_irqrestore(&ace->lock, flags);
	mutex_unlock(&xsysace_mutex);
	return 0;
}

/* Report CHS geometry from the cached ATA identify data. */
static int ace_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct ace_device *ace = bdev->bd_disk->private_data;
	u16 *cf_id = ace->cf_id;

	dev_dbg(ace->dev, "ace_getgeo()\n");

	geo->heads	= cf_id[ATA_ID_HEADS];
	geo->sectors	= cf_id[ATA_ID_SECTORS];
	geo->cylinders	= cf_id[ATA_ID_CYLS];

	return 0;
}

static const struct block_device_operations ace_fops = {
	.owner = THIS_MODULE,
	.open = ace_open,
	.release = ace_release,
	.check_events = ace_check_events,
	.revalidate_disk = ace_revalidate_disk,
	.getgeo = ace_getgeo,
};

/* --------------------------------------------------------------------
 *
 * SystemACE device setup/teardown code
 */

/*
 * Map the device, set up the FSM/timer/queue/gendisk, detect bus
 * endianness, hook up the irq handler and register the disk.
 * Returns 0 on success, -ENOMEM on any failure.
 */
static int __devinit ace_setup(struct ace_device *ace)
{
	u16 version;
	u16 val;
	int rc;

	dev_dbg(ace->dev, "ace_setup(ace=0x%p)\n", ace);
	dev_dbg(ace->dev, "physaddr=0x%llx irq=%i\n",
		(unsigned long long)ace->physaddr, ace->irq);

	spin_lock_init(&ace->lock);
	init_completion(&ace->id_completion);

	/*
	 * Map the device
	 */
	ace->baseaddr = ioremap(ace->physaddr, 0x80);
	if (!ace->baseaddr)
		goto err_ioremap;

	/*
	 * Initialize the state machine tasklet and stall timer
	 */
	tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
	setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);

	/*
	 * Initialize the request queue
	 */
	ace->queue = blk_init_queue(ace_request, &ace->lock);
	if (ace->queue == NULL)
		goto err_blk_initq;
	blk_queue_logical_block_size(ace->queue, 512);

	/*
	 * Allocate and initialize GD structure
	 */
	ace->gd = alloc_disk(ACE_NUM_MINORS);
	if (!ace->gd)
		goto err_alloc_disk;

	ace->gd->major = ace_major;
	ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
	ace->gd->fops = &ace_fops;
	ace->gd->queue = ace->queue;
	ace->gd->private_data = ace;
	snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');

	/* set bus width */
	if (ace->bus_width == ACE_BUS_WIDTH_16) {
		/* 0x0101 should work regardless of endianess */
		ace_out_le16(ace, ACE_BUSMODE, 0x0101);

		/* read it back to determine endianess */
		if (ace_in_le16(ace, ACE_BUSMODE) == 0x0001)
			ace->reg_ops = &ace_reg_le16_ops;
		else
			ace->reg_ops = &ace_reg_be16_ops;
	} else {
		ace_out_8(ace, ACE_BUSMODE, 0x00);
		ace->reg_ops = &ace_reg_8_ops;
	}

	/* Make sure version register is sane */
	version = ace_in(ace, ACE_VERSION);
	if ((version == 0) || (version == 0xFFFF))
		goto err_read;

	/* Put sysace in a sane state by clearing most control reg bits */
	ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE |
		ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);

	/* Now we can hook up the irq handler */
	if (ace->irq != NO_IRQ) {
		rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
		if (rc) {
			/* Failure - fall back to polled mode */
			dev_err(ace->dev, "request_irq failed\n");
			ace->irq = NO_IRQ;
		}
	}

	/* Enable interrupts */
	val = ace_in(ace, ACE_CTRL);
	val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ;
	ace_out(ace, ACE_CTRL, val);

	/* Print the identification */
	dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n",
		 (version >> 12) & 0xf, (version >> 8) & 0x0f,
		 version & 0xff);
	dev_dbg(ace->dev, "physaddr 0x%llx, mapped to 0x%p, irq=%i\n",
		(unsigned long long) ace->physaddr, ace->baseaddr, ace->irq);

	ace->media_change = 1;
	ace_revalidate_disk(ace->gd);

	/* Make the sysace device 'live' */
	add_disk(ace->gd);

	return 0;

err_read:
	put_disk(ace->gd);
err_alloc_disk:
	blk_cleanup_queue(ace->queue);
err_blk_initq:
	iounmap(ace->baseaddr);
err_ioremap:
	dev_info(ace->dev, "xsysace: error initializing device at 0x%llx\n",
		 (unsigned long long) ace->physaddr);
	return -ENOMEM;
}

/* Undo ace_setup(): unregister the disk, kill tasklet, free irq, unmap. */
static void __devexit ace_teardown(struct ace_device *ace)
{
	if (ace->gd) {
		del_gendisk(ace->gd);
		put_disk(ace->gd);
	}

	if (ace->queue)
		blk_cleanup_queue(ace->queue);

	tasklet_kill(&ace->fsm_tasklet);

	if (ace->irq != NO_IRQ)
		free_irq(ace->irq, ace);

	iounmap(ace->baseaddr);
}

/* Allocate an ace_device, record the hardware parameters and run setup. */
static int __devinit ace_alloc(struct device *dev, int id,
			       resource_size_t physaddr, int irq,
			       int bus_width)
{
	struct ace_device *ace;
	int rc;
	dev_dbg(dev, "ace_alloc(%p)\n", dev);

	if (!physaddr) {
		rc = -ENODEV;
		goto err_noreg;
	}

	/* Allocate and initialize the ace device structure */
	ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL);
	if (!ace) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	ace->dev = dev;
	ace->id = id;
	ace->physaddr = physaddr;
	ace->irq = irq;
	ace->bus_width = bus_width;

	/* Call the setup code */
	rc = ace_setup(ace);
	if (rc)
		goto err_setup;

	dev_set_drvdata(dev, ace);
	return 0;

err_setup:
	dev_set_drvdata(dev, NULL);
	kfree(ace);
err_alloc:
err_noreg:
	dev_err(dev, "could not initialize device, err=%i\n", rc);
	return rc;
}

static void __devexit ace_free(struct device *dev)
{
	struct ace_device *ace = dev_get_drvdata(dev);
	dev_dbg(dev, "ace_free(%p)\n", dev);

	if (ace) {
		ace_teardown(ace);
		dev_set_drvdata(dev, NULL);
		kfree(ace);
	}
}

/* ---------------------------------------------------------------------
 * Platform Bus Support
 */

static int __devinit ace_probe(struct platform_device *dev)
{
	resource_size_t physaddr = 0;
	int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
	u32 id = dev->id;
	int irq = NO_IRQ;
	int i;

	dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);

	/* device id and bus width */
	of_property_read_u32(dev->dev.of_node, "port-number", &id);
	/* NOTE(review): id is u32, so this check can never be true — the
	 * negative-id fallback looks intended for a signed value; confirm */
	if (id < 0)
		id = 0;
	if (of_find_property(dev->dev.of_node, "8-bit", NULL))
		bus_width = ACE_BUS_WIDTH_8;

	for (i = 0; i < dev->num_resources; i++) {
		if (dev->resource[i].flags & IORESOURCE_MEM)
			physaddr = dev->resource[i].start;
		if (dev->resource[i].flags & IORESOURCE_IRQ)
			irq = dev->resource[i].start;
	}

	/* Call the bus-independent setup code */
	return ace_alloc(&dev->dev, id, physaddr, irq, bus_width);
}

/*
 * Platform bus remove() method
 */
static int __devexit ace_remove(struct platform_device *dev)
{
	ace_free(&dev->dev);
	return 0;
}

#if defined(CONFIG_OF)
/* Match table for of_platform binding */
static const struct of_device_id ace_of_match[] __devinitconst = {
	{ .compatible = "xlnx,opb-sysace-1.00.b", },
	{ .compatible = "xlnx,opb-sysace-1.00.c", },
	{ .compatible = "xlnx,xps-sysace-1.00.a", },
	{ .compatible = "xlnx,sysace", },
	{},
};
MODULE_DEVICE_TABLE(of, ace_of_match);
#else /* CONFIG_OF */
#define ace_of_match NULL
#endif /* CONFIG_OF */

static struct platform_driver ace_platform_driver = {
	.probe = ace_probe,
	.remove = __devexit_p(ace_remove),
	.driver = {
		.owner = THIS_MODULE,
		.name = "xsysace",
		.of_match_table = ace_of_match,
	},
};

/* ---------------------------------------------------------------------
 * Module init/exit routines
 */

static int __init ace_init(void)
{
	int rc;

	ace_major = register_blkdev(ace_major, "xsysace");
	if (ace_major <= 0) {
		rc = -ENOMEM;
		goto err_blk;
	}

	rc = platform_driver_register(&ace_platform_driver);
if (rc) goto err_plat; pr_info("Xilinx SystemACE device driver, major=%i\n", ace_major); return 0; err_plat: unregister_blkdev(ace_major, "xsysace"); err_blk: printk(KERN_ERR "xsysace: registration failed; err=%i\n", rc); return rc; } module_init(ace_init); static void __exit ace_exit(void) { pr_debug("Unregistering Xilinx SystemACE driver\n"); platform_driver_unregister(&ace_platform_driver); unregister_blkdev(ace_major, "xsysace"); } module_exit(ace_exit);
gpl-2.0
gobzateloon/Gobza_Sprout-LP
arch/powerpc/kvm/book3s_pr.c
1936
34901
/* * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved. * * Authors: * Alexander Graf <agraf@suse.de> * Kevin Wolf <mail@kevin-wolf.de> * Paul Mackerras <paulus@samba.org> * * Description: * Functions relating to running KVM on Book 3S processors where * we don't have access to hypervisor mode, and we run the guest * in problem state (user mode). * * This file is derived from arch/powerpc/kvm/44x.c, * by Hollis Blanchard <hollisb@us.ibm.com>. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. */ #include <linux/kvm_host.h> #include <linux/export.h> #include <linux/err.h> #include <linux/slab.h> #include <asm/reg.h> #include <asm/cputable.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/mmu_context.h> #include <asm/switch_to.h> #include <asm/firmware.h> #include <asm/hvcall.h> #include <linux/gfp.h> #include <linux/sched.h> #include <linux/vmalloc.h> #include <linux/highmem.h> #include "trace.h" /* #define EXIT_DEBUG */ /* #define DEBUG_EXT */ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, ulong msr); /* Some compatibility defines */ #ifdef CONFIG_PPC_BOOK3S_32 #define MSR_USER32 MSR_USER #define MSR_USER64 MSR_USER #define HW_PAGE_SIZE PAGE_SIZE #endif void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { #ifdef CONFIG_PPC_BOOK3S_64 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu, sizeof(get_paca()->shadow_vcpu)); svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; svcpu_put(svcpu); #endif vcpu->cpu = smp_processor_id(); #ifdef CONFIG_PPC_BOOK3S_32 current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu; #endif } void 
kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) { #ifdef CONFIG_PPC_BOOK3S_64 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu, sizeof(get_paca()->shadow_vcpu)); to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; svcpu_put(svcpu); #endif kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); vcpu->cpu = -1; } int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) { int r = 1; /* Indicate we want to get back into the guest */ /* We misuse TLB_FLUSH to indicate that we want to clear all shadow cache entries */ if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) kvmppc_mmu_pte_flush(vcpu, 0, 0); return r; } /************* MMU Notifiers *************/ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) { trace_kvm_unmap_hva(hva); /* * Flush all shadow tlb entries everywhere. This is slow, but * we are 100% sure that we catch the to be unmapped page */ kvm_flush_remote_tlbs(kvm); return 0; } int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) { /* kvm_unmap_hva flushes everything anyways */ kvm_unmap_hva(kvm, start); return 0; } int kvm_age_hva(struct kvm *kvm, unsigned long hva) { /* XXX could be more clever ;) */ return 0; } int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) { /* XXX could be more clever ;) */ return 0; } void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) { /* The page will get remapped properly on its next fault */ kvm_unmap_hva(kvm, hva); } /*****************************************/ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) { ulong smsr = vcpu->arch.shared->msr; /* Guest MSR values */ smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE; /* Process MSR values */ smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE; /* External providers the guest reserved */ smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext); /* 64-bit Process MSR 
values */ #ifdef CONFIG_PPC_BOOK3S_64 smsr |= MSR_ISF | MSR_HV; #endif vcpu->arch.shadow_msr = smsr; } void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) { ulong old_msr = vcpu->arch.shared->msr; #ifdef EXIT_DEBUG printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr); #endif msr &= to_book3s(vcpu)->msr_mask; vcpu->arch.shared->msr = msr; kvmppc_recalc_shadow_msr(vcpu); if (msr & MSR_POW) { if (!vcpu->arch.pending_exceptions) { kvm_vcpu_block(vcpu); clear_bit(KVM_REQ_UNHALT, &vcpu->requests); vcpu->stat.halt_wakeup++; /* Unset POW bit after we woke up */ msr &= ~MSR_POW; vcpu->arch.shared->msr = msr; } } if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) != (old_msr & (MSR_PR|MSR_IR|MSR_DR))) { kvmppc_mmu_flush_segments(vcpu); kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); /* Preload magic page segment when in kernel mode */ if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) { struct kvm_vcpu_arch *a = &vcpu->arch; if (msr & MSR_DR) kvmppc_mmu_map_segment(vcpu, a->magic_page_ea); else kvmppc_mmu_map_segment(vcpu, a->magic_page_pa); } } /* * When switching from 32 to 64-bit, we may have a stale 32-bit * magic page around, we need to flush it. Typically 32-bit magic * page will be instanciated when calling into RTAS. Note: We * assume that such transition only happens while in kernel mode, * ie, we never transition from user 32-bit to kernel 64-bit with * a 32-bit magic page around. 
*/ if (vcpu->arch.magic_page_pa && !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) { /* going from RTAS to normal kernel code */ kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa, ~0xFFFUL); } /* Preload FPU if it's enabled */ if (vcpu->arch.shared->msr & MSR_FP) kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); } void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) { u32 host_pvr; vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB; vcpu->arch.pvr = pvr; #ifdef CONFIG_PPC_BOOK3S_64 if ((pvr >= 0x330000) && (pvr < 0x70330000)) { kvmppc_mmu_book3s_64_init(vcpu); if (!to_book3s(vcpu)->hior_explicit) to_book3s(vcpu)->hior = 0xfff00000; to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL; vcpu->arch.cpu_type = KVM_CPU_3S_64; } else #endif { kvmppc_mmu_book3s_32_init(vcpu); if (!to_book3s(vcpu)->hior_explicit) to_book3s(vcpu)->hior = 0; to_book3s(vcpu)->msr_mask = 0xffffffffULL; vcpu->arch.cpu_type = KVM_CPU_3S_32; } kvmppc_sanity_check(vcpu); /* If we are in hypervisor level on 970, we can tell the CPU to * treat DCBZ as 32 bytes store */ vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32; if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) && !strcmp(cur_cpu_spec->platform, "ppc970")) vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; /* Cell performs badly if MSR_FEx are set. So let's hope nobody really needs them in a VM on Cell and force disable them. 
*/ if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be")) to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1); #ifdef CONFIG_PPC_BOOK3S_32 /* 32 bit Book3S always has 32 byte dcbz */ vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; #endif /* On some CPUs we can execute paired single operations natively */ asm ( "mfpvr %0" : "=r"(host_pvr)); switch (host_pvr) { case 0x00080200: /* lonestar 2.0 */ case 0x00088202: /* lonestar 2.2 */ case 0x70000100: /* gekko 1.0 */ case 0x00080100: /* gekko 2.0 */ case 0x00083203: /* gekko 2.3a */ case 0x00083213: /* gekko 2.3b */ case 0x00083204: /* gekko 2.4 */ case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */ case 0x00087200: /* broadway */ vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS; /* Enable HID2.PSE - in case we need it later */ mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29)); } } /* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to * emulate 32 bytes dcbz length. * * The Book3s_64 inventors also realized this case and implemented a special bit * in the HID5 register, which is a hypervisor ressource. Thus we can't use it. * * My approach here is to patch the dcbz instruction on executing pages. 
*/ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) { struct page *hpage; u64 hpage_offset; u32 *page; int i; hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT); if (is_error_page(hpage)) return; hpage_offset = pte->raddr & ~PAGE_MASK; hpage_offset &= ~0xFFFULL; hpage_offset /= 4; get_page(hpage); page = kmap_atomic(hpage); /* patch dcbz into reserved instruction, so we trap */ for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++) if ((page[i] & 0xff0007ff) == INS_DCBZ) page[i] &= 0xfffffff7; kunmap_atomic(page); put_page(hpage); } static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { ulong mp_pa = vcpu->arch.magic_page_pa; if (!(vcpu->arch.shared->msr & MSR_SF)) mp_pa = (uint32_t)mp_pa; if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) { return 1; } return kvm_is_visible_gfn(vcpu->kvm, gfn); } int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, ulong eaddr, int vec) { bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE); int r = RESUME_GUEST; int relocated; int page_found = 0; struct kvmppc_pte pte; bool is_mmio = false; bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false; bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false; u64 vsid; relocated = data ? 
dr : ir; /* Resolve real address if translation turned on */ if (relocated) { page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data); } else { pte.may_execute = true; pte.may_read = true; pte.may_write = true; pte.raddr = eaddr & KVM_PAM; pte.eaddr = eaddr; pte.vpage = eaddr >> 12; } switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { case 0: pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12)); break; case MSR_DR: case MSR_IR: vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR) pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12)); else pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12)); pte.vpage |= vsid; if (vsid == -1) page_found = -EINVAL; break; } if (vcpu->arch.mmu.is_dcbz32(vcpu) && (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { /* * If we do the dcbz hack, we have to NX on every execution, * so we can patch the executing code. This renders our guest * NX-less. */ pte.may_execute = !data; } if (page_found == -ENOENT) { /* Page not found in guest PTE entries */ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); vcpu->arch.shared->dsisr = svcpu->fault_dsisr; vcpu->arch.shared->msr |= (svcpu->shadow_srr1 & 0x00000000f8000000ULL); svcpu_put(svcpu); kvmppc_book3s_queue_irqprio(vcpu, vec); } else if (page_found == -EPERM) { /* Storage protection */ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE; vcpu->arch.shared->dsisr |= DSISR_PROTFAULT; vcpu->arch.shared->msr |= svcpu->shadow_srr1 & 0x00000000f8000000ULL; svcpu_put(svcpu); kvmppc_book3s_queue_irqprio(vcpu, vec); } else if (page_found == -EINVAL) { /* Page not found in guest SLB */ vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); } else if (!is_mmio && kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { /* 
The guest's PTE is not mapped yet. Map on the host */ kvmppc_mmu_map_page(vcpu, &pte); if (data) vcpu->stat.sp_storage++; else if (vcpu->arch.mmu.is_dcbz32(vcpu) && (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) kvmppc_patch_dcbz(vcpu, &pte); } else { /* MMIO */ vcpu->stat.mmio_exits++; vcpu->arch.paddr_accessed = pte.raddr; vcpu->arch.vaddr_accessed = pte.eaddr; r = kvmppc_emulate_mmio(run, vcpu); if ( r == RESUME_HOST_NV ) r = RESUME_HOST; } return r; } static inline int get_fpr_index(int i) { return i * TS_FPRWIDTH; } /* Give up external provider (FPU, Altivec, VSX) */ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) { struct thread_struct *t = &current->thread; u64 *vcpu_fpr = vcpu->arch.fpr; #ifdef CONFIG_VSX u64 *vcpu_vsx = vcpu->arch.vsr; #endif u64 *thread_fpr = (u64*)t->fpr; int i; /* * VSX instructions can access FP and vector registers, so if * we are giving up VSX, make sure we give up FP and VMX as well. */ if (msr & MSR_VSX) msr |= MSR_FP | MSR_VEC; msr &= vcpu->arch.guest_owned_ext; if (!msr) return; #ifdef DEBUG_EXT printk(KERN_INFO "Giving up ext 0x%lx\n", msr); #endif if (msr & MSR_FP) { /* * Note that on CPUs with VSX, giveup_fpu stores * both the traditional FP registers and the added VSX * registers into thread.fpr[]. 
*/ giveup_fpu(current); for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) vcpu_fpr[i] = thread_fpr[get_fpr_index(i)]; vcpu->arch.fpscr = t->fpscr.val; #ifdef CONFIG_VSX if (cpu_has_feature(CPU_FTR_VSX)) for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++) vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1]; #endif } #ifdef CONFIG_ALTIVEC if (msr & MSR_VEC) { giveup_altivec(current); memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr)); vcpu->arch.vscr = t->vscr; } #endif vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX); kvmppc_recalc_shadow_msr(vcpu); } static int kvmppc_read_inst(struct kvm_vcpu *vcpu) { ulong srr0 = kvmppc_get_pc(vcpu); u32 last_inst = kvmppc_get_last_inst(vcpu); int ret; ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false); if (ret == -ENOENT) { ulong msr = vcpu->arch.shared->msr; msr = kvmppc_set_field(msr, 33, 33, 1); msr = kvmppc_set_field(msr, 34, 36, 0); vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0); kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE); return EMULATE_AGAIN; } return EMULATE_DONE; } static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr) { /* Need to do paired single emulation? */ if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)) return EMULATE_DONE; /* Read out the instruction */ if (kvmppc_read_inst(vcpu) == EMULATE_DONE) /* Need to emulate */ return EMULATE_FAIL; return EMULATE_AGAIN; } /* Handle external providers (FPU, Altivec, VSX) */ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, ulong msr) { struct thread_struct *t = &current->thread; u64 *vcpu_fpr = vcpu->arch.fpr; #ifdef CONFIG_VSX u64 *vcpu_vsx = vcpu->arch.vsr; #endif u64 *thread_fpr = (u64*)t->fpr; int i; /* When we have paired singles, we emulate in software */ if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) return RESUME_GUEST; if (!(vcpu->arch.shared->msr & msr)) { kvmppc_book3s_queue_irqprio(vcpu, exit_nr); return RESUME_GUEST; } if (msr == MSR_VSX) { /* No VSX? 
Give an illegal instruction interrupt */ #ifdef CONFIG_VSX if (!cpu_has_feature(CPU_FTR_VSX)) #endif { kvmppc_core_queue_program(vcpu, SRR1_PROGILL); return RESUME_GUEST; } /* * We have to load up all the FP and VMX registers before * we can let the guest use VSX instructions. */ msr = MSR_FP | MSR_VEC | MSR_VSX; } /* See if we already own all the ext(s) needed */ msr &= ~vcpu->arch.guest_owned_ext; if (!msr) return RESUME_GUEST; #ifdef DEBUG_EXT printk(KERN_INFO "Loading up ext 0x%lx\n", msr); #endif current->thread.regs->msr |= msr; if (msr & MSR_FP) { for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) thread_fpr[get_fpr_index(i)] = vcpu_fpr[i]; #ifdef CONFIG_VSX for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++) thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i]; #endif t->fpscr.val = vcpu->arch.fpscr; t->fpexc_mode = 0; kvmppc_load_up_fpu(); } if (msr & MSR_VEC) { #ifdef CONFIG_ALTIVEC memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr)); t->vscr = vcpu->arch.vscr; t->vrsave = -1; kvmppc_load_up_altivec(); #endif } vcpu->arch.guest_owned_ext |= msr; kvmppc_recalc_shadow_msr(vcpu); return RESUME_GUEST; } int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int exit_nr) { int r = RESUME_HOST; int s; vcpu->stat.sum_exits++; run->exit_reason = KVM_EXIT_UNKNOWN; run->ready_for_interrupt_injection = 1; /* We get here with MSR.EE=1 */ trace_kvm_exit(exit_nr, vcpu); kvm_guest_exit(); switch (exit_nr) { case BOOK3S_INTERRUPT_INST_STORAGE: { struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); ulong shadow_srr1 = svcpu->shadow_srr1; vcpu->stat.pf_instruc++; #ifdef CONFIG_PPC_BOOK3S_32 /* We set segments as unused segments when invalidating them. So * treat the respective fault as segment fault. 
*/ if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) { kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); r = RESUME_GUEST; svcpu_put(svcpu); break; } #endif svcpu_put(svcpu); /* only care about PTEG not found errors, but leave NX alone */ if (shadow_srr1 & 0x40000000) { r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr); vcpu->stat.sp_instruc++; } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { /* * XXX If we do the dcbz hack we use the NX bit to flush&patch the page, * so we can't use the NX bit inside the guest. Let's cross our fingers, * that no guest that needs the dcbz hack does NX. */ kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); r = RESUME_GUEST; } else { vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000; kvmppc_book3s_queue_irqprio(vcpu, exit_nr); r = RESUME_GUEST; } break; } case BOOK3S_INTERRUPT_DATA_STORAGE: { ulong dar = kvmppc_get_fault_dar(vcpu); struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); u32 fault_dsisr = svcpu->fault_dsisr; vcpu->stat.pf_storage++; #ifdef CONFIG_PPC_BOOK3S_32 /* We set segments as unused segments when invalidating them. So * treat the respective fault as segment fault. 
*/ if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) { kvmppc_mmu_map_segment(vcpu, dar); r = RESUME_GUEST; svcpu_put(svcpu); break; } #endif svcpu_put(svcpu); /* The only case we need to handle is missing shadow PTEs */ if (fault_dsisr & DSISR_NOHPTE) { r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); } else { vcpu->arch.shared->dar = dar; vcpu->arch.shared->dsisr = fault_dsisr; kvmppc_book3s_queue_irqprio(vcpu, exit_nr); r = RESUME_GUEST; } break; } case BOOK3S_INTERRUPT_DATA_SEGMENT: if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) { vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_SEGMENT); } r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_INST_SEGMENT: if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) { kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_SEGMENT); } r = RESUME_GUEST; break; /* We're good on these - the host merely wanted to get our attention */ case BOOK3S_INTERRUPT_DECREMENTER: case BOOK3S_INTERRUPT_HV_DECREMENTER: vcpu->stat.dec_exits++; r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_EXTERNAL: case BOOK3S_INTERRUPT_EXTERNAL_LEVEL: case BOOK3S_INTERRUPT_EXTERNAL_HV: vcpu->stat.ext_intr_exits++; r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_PERFMON: r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_PROGRAM: case BOOK3S_INTERRUPT_H_EMUL_ASSIST: { enum emulation_result er; struct kvmppc_book3s_shadow_vcpu *svcpu; ulong flags; program_interrupt: svcpu = svcpu_get(vcpu); flags = svcpu->shadow_srr1 & 0x1f0000ull; svcpu_put(svcpu); if (vcpu->arch.shared->msr & MSR_PR) { #ifdef EXIT_DEBUG printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); #endif if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) { kvmppc_core_queue_program(vcpu, flags); r = RESUME_GUEST; break; } } vcpu->stat.emulated_inst_exits++; er = kvmppc_emulate_instruction(run, vcpu); switch (er) { case 
EMULATE_DONE: r = RESUME_GUEST_NV; break; case EMULATE_AGAIN: r = RESUME_GUEST; break; case EMULATE_FAIL: printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); kvmppc_core_queue_program(vcpu, flags); r = RESUME_GUEST; break; case EMULATE_DO_MMIO: run->exit_reason = KVM_EXIT_MMIO; r = RESUME_HOST_NV; break; case EMULATE_EXIT_USER: r = RESUME_HOST_NV; break; default: BUG(); } break; } case BOOK3S_INTERRUPT_SYSCALL: if (vcpu->arch.papr_enabled && (kvmppc_get_last_inst(vcpu) == 0x44000022) && !(vcpu->arch.shared->msr & MSR_PR)) { /* SC 1 papr hypercalls */ ulong cmd = kvmppc_get_gpr(vcpu, 3); int i; #ifdef CONFIG_KVM_BOOK3S_64_PR if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) { r = RESUME_GUEST; break; } #endif run->papr_hcall.nr = cmd; for (i = 0; i < 9; ++i) { ulong gpr = kvmppc_get_gpr(vcpu, 4 + i); run->papr_hcall.args[i] = gpr; } run->exit_reason = KVM_EXIT_PAPR_HCALL; vcpu->arch.hcall_needed = 1; r = RESUME_HOST; } else if (vcpu->arch.osi_enabled && (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) && (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) { /* MOL hypercalls */ u64 *gprs = run->osi.gprs; int i; run->exit_reason = KVM_EXIT_OSI; for (i = 0; i < 32; i++) gprs[i] = kvmppc_get_gpr(vcpu, i); vcpu->arch.osi_needed = 1; r = RESUME_HOST_NV; } else if (!(vcpu->arch.shared->msr & MSR_PR) && (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { /* KVM PV hypercalls */ kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); r = RESUME_GUEST; } else { /* Guest syscalls */ vcpu->stat.syscall_exits++; kvmppc_book3s_queue_irqprio(vcpu, exit_nr); r = RESUME_GUEST; } break; case BOOK3S_INTERRUPT_FP_UNAVAIL: case BOOK3S_INTERRUPT_ALTIVEC: case BOOK3S_INTERRUPT_VSX: { int ext_msr = 0; switch (exit_nr) { case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP; break; case BOOK3S_INTERRUPT_ALTIVEC: ext_msr = MSR_VEC; break; case BOOK3S_INTERRUPT_VSX: ext_msr = MSR_VSX; break; } switch (kvmppc_check_ext(vcpu, exit_nr)) { 
case EMULATE_DONE: /* everything ok - let's enable the ext */ r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr); break; case EMULATE_FAIL: /* we need to emulate this instruction */ goto program_interrupt; break; default: /* nothing to worry about - go again */ break; } break; } case BOOK3S_INTERRUPT_ALIGNMENT: if (kvmppc_read_inst(vcpu) == EMULATE_DONE) { vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu, kvmppc_get_last_inst(vcpu)); vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu, kvmppc_get_last_inst(vcpu)); kvmppc_book3s_queue_irqprio(vcpu, exit_nr); } r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_MACHINE_CHECK: case BOOK3S_INTERRUPT_TRACE: kvmppc_book3s_queue_irqprio(vcpu, exit_nr); r = RESUME_GUEST; break; default: { struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); ulong shadow_srr1 = svcpu->shadow_srr1; svcpu_put(svcpu); /* Ugh - bork here! What did we get? */ printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", exit_nr, kvmppc_get_pc(vcpu), shadow_srr1); r = RESUME_HOST; BUG(); break; } } if (!(r & RESUME_HOST)) { /* To avoid clobbering exit_reason, only check for signals if * we aren't already exiting to userspace for some other * reason. */ /* * Interrupts could be timers for the guest which we have to * inject again, so let's postpone them until we're in the guest * and if we really did time things so badly, then we just exit * again due to a host external interrupt. 
*/ local_irq_disable(); s = kvmppc_prepare_to_enter(vcpu); if (s <= 0) { local_irq_enable(); r = s; } else { kvmppc_lazy_ee_enable(); } } trace_kvm_book3s_reenter(r, vcpu); return r; } int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); int i; sregs->pvr = vcpu->arch.pvr; sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1; if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { for (i = 0; i < 64; i++) { sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i; sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; } } else { for (i = 0; i < 16; i++) sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i]; for (i = 0; i < 8; i++) { sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw; sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw; } } return 0; } int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); int i; kvmppc_set_pvr(vcpu, sregs->pvr); vcpu3s->sdr1 = sregs->u.s.sdr1; if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { for (i = 0; i < 64; i++) { vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv, sregs->u.s.ppc64.slb[i].slbe); } } else { for (i = 0; i < 16; i++) { vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]); } for (i = 0; i < 8; i++) { kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false, (u32)sregs->u.s.ppc32.ibat[i]); kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true, (u32)(sregs->u.s.ppc32.ibat[i] >> 32)); kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false, (u32)sregs->u.s.ppc32.dbat[i]); kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true, (u32)(sregs->u.s.ppc32.dbat[i] >> 32)); } } /* Flush the MMU after messing with the segments */ kvmppc_mmu_pte_flush(vcpu, 0, 0); return 0; } int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) { int r = 0; switch (id) { case KVM_REG_PPC_HIOR: *val = get_reg_val(id, to_book3s(vcpu)->hior); break; #ifdef CONFIG_VSX case KVM_REG_PPC_VSR0 ... 
KVM_REG_PPC_VSR31: { long int i = id - KVM_REG_PPC_VSR0; if (!cpu_has_feature(CPU_FTR_VSX)) { r = -ENXIO; break; } val->vsxval[0] = vcpu->arch.fpr[i]; val->vsxval[1] = vcpu->arch.vsr[i]; break; } #endif /* CONFIG_VSX */ default: r = -EINVAL; break; } return r; } int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) { int r = 0; switch (id) { case KVM_REG_PPC_HIOR: to_book3s(vcpu)->hior = set_reg_val(id, *val); to_book3s(vcpu)->hior_explicit = true; break; #ifdef CONFIG_VSX case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: { long int i = id - KVM_REG_PPC_VSR0; if (!cpu_has_feature(CPU_FTR_VSX)) { r = -ENXIO; break; } vcpu->arch.fpr[i] = val->vsxval[0]; vcpu->arch.vsr[i] = val->vsxval[1]; break; } #endif /* CONFIG_VSX */ default: r = -EINVAL; break; } return r; } int kvmppc_core_check_processor_compat(void) { return 0; } struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) { struct kvmppc_vcpu_book3s *vcpu_book3s; struct kvm_vcpu *vcpu; int err = -ENOMEM; unsigned long p; vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s)); if (!vcpu_book3s) goto out; vcpu_book3s->shadow_vcpu = kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL); if (!vcpu_book3s->shadow_vcpu) goto free_vcpu; vcpu = &vcpu_book3s->vcpu; err = kvm_vcpu_init(vcpu, kvm, id); if (err) goto free_shadow_vcpu; p = __get_free_page(GFP_KERNEL|__GFP_ZERO); /* the real shared page fills the last 4k of our page */ vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096); if (!p) goto uninit_vcpu; #ifdef CONFIG_PPC_BOOK3S_64 /* default to book3s_64 (970fx) */ vcpu->arch.pvr = 0x3C0301; #else /* default to book3s_32 (750) */ vcpu->arch.pvr = 0x84202; #endif kvmppc_set_pvr(vcpu, vcpu->arch.pvr); vcpu->arch.slb_nr = 64; vcpu->arch.shadow_msr = MSR_USER64; err = kvmppc_mmu_init(vcpu); if (err < 0) goto uninit_vcpu; return vcpu; uninit_vcpu: kvm_vcpu_uninit(vcpu); free_shadow_vcpu: kfree(vcpu_book3s->shadow_vcpu); free_vcpu: vfree(vcpu_book3s); out: return ERR_PTR(err); } 
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); kvm_vcpu_uninit(vcpu); kfree(vcpu_book3s->shadow_vcpu); vfree(vcpu_book3s); } int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) { int ret; double fpr[32][TS_FPRWIDTH]; unsigned int fpscr; int fpexc_mode; #ifdef CONFIG_ALTIVEC vector128 vr[32]; vector128 vscr; unsigned long uninitialized_var(vrsave); int used_vr; #endif #ifdef CONFIG_VSX int used_vsr; #endif ulong ext_msr; /* Check if we can run the vcpu at all */ if (!vcpu->arch.sane) { kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; ret = -EINVAL; goto out; } /* * Interrupts could be timers for the guest which we have to inject * again, so let's postpone them until we're in the guest and if we * really did time things so badly, then we just exit again due to * a host external interrupt. */ local_irq_disable(); ret = kvmppc_prepare_to_enter(vcpu); if (ret <= 0) { local_irq_enable(); goto out; } /* Save FPU state in stack */ if (current->thread.regs->msr & MSR_FP) giveup_fpu(current); memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr)); fpscr = current->thread.fpscr.val; fpexc_mode = current->thread.fpexc_mode; #ifdef CONFIG_ALTIVEC /* Save Altivec state in stack */ used_vr = current->thread.used_vr; if (used_vr) { if (current->thread.regs->msr & MSR_VEC) giveup_altivec(current); memcpy(vr, current->thread.vr, sizeof(current->thread.vr)); vscr = current->thread.vscr; vrsave = current->thread.vrsave; } #endif #ifdef CONFIG_VSX /* Save VSX state in stack */ used_vsr = current->thread.used_vsr; if (used_vsr && (current->thread.regs->msr & MSR_VSX)) __giveup_vsx(current); #endif /* Remember the MSR with disabled extensions */ ext_msr = current->thread.regs->msr; /* Preload FPU if it's enabled */ if (vcpu->arch.shared->msr & MSR_FP) kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); kvmppc_lazy_ee_enable(); ret = 
__kvmppc_vcpu_run(kvm_run, vcpu); /* No need for kvm_guest_exit. It's done in handle_exit. We also get here with interrupts enabled. */ /* Make sure we save the guest FPU/Altivec/VSX state */ kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); current->thread.regs->msr = ext_msr; /* Restore FPU/VSX state from stack */ memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr)); current->thread.fpscr.val = fpscr; current->thread.fpexc_mode = fpexc_mode; #ifdef CONFIG_ALTIVEC /* Restore Altivec state from stack */ if (used_vr && current->thread.used_vr) { memcpy(current->thread.vr, vr, sizeof(current->thread.vr)); current->thread.vscr = vscr; current->thread.vrsave = vrsave; } current->thread.used_vr = used_vr; #endif #ifdef CONFIG_VSX current->thread.used_vsr = used_vsr; #endif out: vcpu->mode = OUTSIDE_GUEST_MODE; return ret; } /* * Get (and clear) the dirty memory log for a memory slot. */ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { struct kvm_memory_slot *memslot; struct kvm_vcpu *vcpu; ulong ga, ga_end; int is_dirty = 0; int r; unsigned long n; mutex_lock(&kvm->slots_lock); r = kvm_get_dirty_log(kvm, log, &is_dirty); if (r) goto out; /* If nothing is dirty, don't bother messing with page tables. 
*/ if (is_dirty) { memslot = id_to_memslot(kvm->memslots, log->slot); ga = memslot->base_gfn << PAGE_SHIFT; ga_end = ga + (memslot->npages << PAGE_SHIFT); kvm_for_each_vcpu(n, vcpu, kvm) kvmppc_mmu_pte_pflush(vcpu, ga, ga_end); n = kvm_dirty_bitmap_bytes(memslot); memset(memslot->dirty_bitmap, 0, n); } r = 0; out: mutex_unlock(&kvm->slots_lock); return r; } #ifdef CONFIG_PPC64 int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info) { /* No flags */ info->flags = 0; /* SLB is always 64 entries */ info->slb_size = 64; /* Standard 4k base page size segment */ info->sps[0].page_shift = 12; info->sps[0].slb_enc = 0; info->sps[0].enc[0].page_shift = 12; info->sps[0].enc[0].pte_enc = 0; /* Standard 16M large page size segment */ info->sps[1].page_shift = 24; info->sps[1].slb_enc = SLB_VSID_L; info->sps[1].enc[0].page_shift = 24; info->sps[1].enc[0].pte_enc = 0; return 0; } #endif /* CONFIG_PPC64 */ void kvmppc_core_free_memslot(struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { } int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) { return 0; } int kvmppc_core_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_userspace_memory_region *mem) { return 0; } void kvmppc_core_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old) { } void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) { } static unsigned int kvm_global_user_count = 0; static DEFINE_SPINLOCK(kvm_global_user_count_lock); int kvmppc_core_init_vm(struct kvm *kvm) { #ifdef CONFIG_PPC64 INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); INIT_LIST_HEAD(&kvm->arch.rtas_tokens); #endif if (firmware_has_feature(FW_FEATURE_SET_MODE)) { spin_lock(&kvm_global_user_count_lock); if (++kvm_global_user_count == 1) pSeries_disable_reloc_on_exc(); spin_unlock(&kvm_global_user_count_lock); } return 0; } void kvmppc_core_destroy_vm(struct kvm *kvm) { 
#ifdef CONFIG_PPC64 WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); #endif if (firmware_has_feature(FW_FEATURE_SET_MODE)) { spin_lock(&kvm_global_user_count_lock); BUG_ON(kvm_global_user_count == 0); if (--kvm_global_user_count == 0) pSeries_enable_reloc_on_exc(); spin_unlock(&kvm_global_user_count_lock); } } static int kvmppc_book3s_init(void) { int r; r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0, THIS_MODULE); if (r) return r; r = kvmppc_mmu_hpte_sysinit(); return r; } static void kvmppc_book3s_exit(void) { kvmppc_mmu_hpte_sysexit(); kvm_exit(); } module_init(kvmppc_book3s_init); module_exit(kvmppc_book3s_exit);
gpl-2.0
zf2-laser-dev/android_kernel_asus_Z00E
net/ipv6/netfilter.c
1936
5604
/*
 * IPv6 specific functions of netfilter core
 *
 * Rusty Russell (C) 2000 -- This code is GPL.
 * Patrick McHardy (C) 2006-2012
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/addrconf.h>
#include <net/dst.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/xfrm.h>
#include <net/ip6_checksum.h>
#include <net/netfilter/nf_queue.h>

/*
 * Re-route an skb after a netfilter target may have rewritten its
 * addresses/mark.  Performs a fresh route lookup keyed on the (possibly
 * modified) IPv6 header, replaces the skb's dst, optionally re-runs XFRM
 * policy lookup, and grows headroom if the new output device needs a
 * larger hard header.  Returns 0 on success or a negative errno.
 */
int ip6_route_me_harder(struct sk_buff *skb)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	unsigned int hh_len;
	struct dst_entry *dst;
	/* Flow key rebuilt from the current header/mark, bound to the
	 * socket's device if the socket is device-bound. */
	struct flowi6 fl6 = {
		.flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
		.flowi6_mark = skb->mark,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
	};
	int err;

	dst = ip6_route_output(net, skb->sk, &fl6);
	err = dst->error;
	if (err) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
		LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
		dst_release(dst);
		return err;
	}

	/* Drop old route. */
	skb_dst_drop(skb);

	skb_dst_set(skb, dst);

#ifdef CONFIG_XFRM
	/* If the packet was not already transformed, let IPsec policy pick
	 * (and attach) a transformed route for the new flow. */
	if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
	    xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
		skb_dst_set(skb, NULL);
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), skb->sk, 0);
		if (IS_ERR(dst))
			return PTR_ERR(dst);
		skb_dst_set(skb, dst);
	}
#endif

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
			     0, GFP_ATOMIC))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(ip6_route_me_harder);

/*
 * Extra routing may needed on local out, as the QUEUE target never
 * returns control to the table.
 */

/* Routing state saved per queued packet so it can be re-validated when
 * the packet is reinjected from userspace. */
struct ip6_rt_info {
	struct in6_addr daddr;
	struct in6_addr saddr;
	u_int32_t mark;
};

/* Snapshot addresses/mark of a locally generated packet before it is
 * handed to a userspace queue. */
static void nf_ip6_saveroute(const struct sk_buff *skb,
			     struct nf_queue_entry *entry)
{
	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->hook == NF_INET_LOCAL_OUT) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);

		rt_info->daddr = iph->daddr;
		rt_info->saddr = iph->saddr;
		rt_info->mark = skb->mark;
	}
}

/* On reinjection, re-route only if userspace changed anything that the
 * original route decision depended on (addresses or mark). */
static int nf_ip6_reroute(struct sk_buff *skb,
			  const struct nf_queue_entry *entry)
{
	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->hook == NF_INET_LOCAL_OUT) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);
		if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
		    !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
		    skb->mark != rt_info->mark)
			return ip6_route_me_harder(skb);
	}
	return 0;
}

/*
 * Route lookup helper for the generic nf_afinfo interface.  When @strict,
 * a fake bound socket is passed so ip6_route_output() restricts the lookup
 * to the requested interface (RT6_LOOKUP_F_IFACE).
 */
static int nf_ip6_route(struct net *net, struct dst_entry **dst,
			struct flowi *fl, bool strict)
{
	static const struct ipv6_pinfo fake_pinfo;
	static const struct inet_sock fake_sk = {
		/* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
		.sk.sk_bound_dev_if = 1,
		.pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
	};
	const void *sk = strict ? &fake_sk : NULL;
	struct dst_entry *result;
	int err;

	result = ip6_route_output(net, sk, &fl->u.ip6);
	err = result->error;
	if (err)
		dst_release(result);
	else
		*dst = result;
	return err;
}

/*
 * Verify the L4 checksum of an IPv6 packet whose transport payload starts
 * at @dataoff.  Returns 0 when the checksum is known-good (and marks the
 * skb CHECKSUM_UNNECESSARY); otherwise returns the folded complete
 * checksum from __skb_checksum_complete().
 */
__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
			unsigned int dataoff, u_int8_t protocol)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	__sum16 csum = 0;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		/* hw csum covers the frame only on the receive path */
		if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
			break;
		/* subtract the bytes before dataoff from the hw sum, then
		 * fold in the pseudo-header; zero result == valid */
		if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				     skb->len - dataoff, protocol,
				     csum_sub(skb->csum,
					      skb_checksum(skb, 0,
							   dataoff, 0)))) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
		/* fall through */
	case CHECKSUM_NONE:
		/* seed skb->csum with the pseudo-header sum (minus the
		 * pre-payload bytes) and do a full software verify */
		skb->csum = ~csum_unfold(
				csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					     skb->len - dataoff,
					     protocol,
					     csum_sub(0,
						      skb_checksum(skb, 0,
								   dataoff, 0))));
		csum = __skb_checksum_complete(skb);
	}
	return csum;
}
EXPORT_SYMBOL(nf_ip6_checksum);

/*
 * Like nf_ip6_checksum() but verifies only @len bytes starting at
 * @dataoff; delegates to the full routine when @len spans the whole
 * payload.
 */
static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
				       unsigned int dataoff, unsigned int len,
				       u_int8_t protocol)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	__wsum hsum;
	__sum16 csum = 0;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (len == skb->len - dataoff)
			return nf_ip6_checksum(skb, hook, dataoff, protocol);
		/* fall through */
	case CHECKSUM_NONE:
		hsum = skb_checksum(skb, 0, dataoff, 0);
		skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
							 &ip6h->daddr,
							 skb->len - dataoff,
							 protocol,
							 csum_sub(0, hsum)));
		skb->ip_summed = CHECKSUM_NONE;
		return __skb_checksum_complete_head(skb, dataoff + len);
	}
	return csum;
};

static const struct nf_ipv6_ops ipv6ops = {
	.chk_addr	= ipv6_chk_addr,
};

/* Address-family operations registered with the netfilter core. */
static const struct nf_afinfo nf_ip6_afinfo = {
	.family			= AF_INET6,
	.checksum		= nf_ip6_checksum,
	.checksum_partial	= nf_ip6_checksum_partial,
	.route			= nf_ip6_route,
	.saveroute		= nf_ip6_saveroute,
	.reroute		= nf_ip6_reroute,
	.route_key_size		= sizeof(struct ip6_rt_info),
};

int __init ipv6_netfilter_init(void)
{
	RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops);
	return nf_register_afinfo(&nf_ip6_afinfo);
}

/* This can be called from inet6_init() on errors, so it cannot
 * be marked __exit. -DaveM
 */
void ipv6_netfilter_fini(void)
{
	RCU_INIT_POINTER(nf_ipv6_ops, NULL);
	nf_unregister_afinfo(&nf_ip6_afinfo);
}
gpl-2.0
ghbhaha/android_kernel_oneplus_msm8974
arch/arm/plat-samsung/adc.c
2960
12543
/* arch/arm/plat-samsung/adc.c * * Copyright (c) 2008 Simtec Electronics * http://armlinux.simtec.co.uk/ * Ben Dooks <ben@simtec.co.uk>, <ben-linux@fluff.org> * * Samsung ADC device core * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/regulator/consumer.h> #include <plat/regs-adc.h> #include <plat/adc.h> /* This driver is designed to control the usage of the ADC block between * the touchscreen and any other drivers that may need to use it, such as * the hwmon driver. * * Priority will be given to the touchscreen driver, but as this itself is * rate limited it should not starve other requests which are processed in * order that they are received. * * Each user registers to get a client block which uniquely identifies it * and stores information such as the necessary functions to callback when * action is required. 
*/ enum s3c_cpu_type { TYPE_ADCV1, /* S3C24XX */ TYPE_ADCV11, /* S3C2443 */ TYPE_ADCV12, /* S3C2416, S3C2450 */ TYPE_ADCV2, /* S3C64XX, S5P64X0, S5PC100 */ TYPE_ADCV3, /* S5PV210, S5PC110, EXYNOS4210 */ }; struct s3c_adc_client { struct platform_device *pdev; struct list_head pend; wait_queue_head_t *wait; unsigned int nr_samples; int result; unsigned char is_ts; unsigned char channel; void (*select_cb)(struct s3c_adc_client *c, unsigned selected); void (*convert_cb)(struct s3c_adc_client *c, unsigned val1, unsigned val2, unsigned *samples_left); }; struct adc_device { struct platform_device *pdev; struct platform_device *owner; struct clk *clk; struct s3c_adc_client *cur; struct s3c_adc_client *ts_pend; void __iomem *regs; spinlock_t lock; unsigned int prescale; int irq; struct regulator *vdd; }; static struct adc_device *adc_dev; static LIST_HEAD(adc_pending); /* protected by adc_device.lock */ #define adc_dbg(_adc, msg...) dev_dbg(&(_adc)->pdev->dev, msg) static inline void s3c_adc_convert(struct adc_device *adc) { unsigned con = readl(adc->regs + S3C2410_ADCCON); con |= S3C2410_ADCCON_ENABLE_START; writel(con, adc->regs + S3C2410_ADCCON); } static inline void s3c_adc_select(struct adc_device *adc, struct s3c_adc_client *client) { unsigned con = readl(adc->regs + S3C2410_ADCCON); enum s3c_cpu_type cpu = platform_get_device_id(adc->pdev)->driver_data; client->select_cb(client, 1); if (cpu == TYPE_ADCV1 || cpu == TYPE_ADCV2) con &= ~S3C2410_ADCCON_MUXMASK; con &= ~S3C2410_ADCCON_STDBM; con &= ~S3C2410_ADCCON_STARTMASK; if (!client->is_ts) { if (cpu == TYPE_ADCV3) writel(client->channel & 0xf, adc->regs + S5P_ADCMUX); else if (cpu == TYPE_ADCV11 || cpu == TYPE_ADCV12) writel(client->channel & 0xf, adc->regs + S3C2443_ADCMUX); else con |= S3C2410_ADCCON_SELMUX(client->channel); } writel(con, adc->regs + S3C2410_ADCCON); } static void s3c_adc_dbgshow(struct adc_device *adc) { adc_dbg(adc, "CON=%08x, TSC=%08x, DLY=%08x\n", readl(adc->regs + S3C2410_ADCCON), 
readl(adc->regs + S3C2410_ADCTSC), readl(adc->regs + S3C2410_ADCDLY)); } static void s3c_adc_try(struct adc_device *adc) { struct s3c_adc_client *next = adc->ts_pend; if (!next && !list_empty(&adc_pending)) { next = list_first_entry(&adc_pending, struct s3c_adc_client, pend); list_del(&next->pend); } else adc->ts_pend = NULL; if (next) { adc_dbg(adc, "new client is %p\n", next); adc->cur = next; s3c_adc_select(adc, next); s3c_adc_convert(adc); s3c_adc_dbgshow(adc); } } int s3c_adc_start(struct s3c_adc_client *client, unsigned int channel, unsigned int nr_samples) { struct adc_device *adc = adc_dev; unsigned long flags; if (!adc) { printk(KERN_ERR "%s: failed to find adc\n", __func__); return -EINVAL; } if (client->is_ts && adc->ts_pend) return -EAGAIN; spin_lock_irqsave(&adc->lock, flags); client->channel = channel; client->nr_samples = nr_samples; if (client->is_ts) adc->ts_pend = client; else list_add_tail(&client->pend, &adc_pending); if (!adc->cur) s3c_adc_try(adc); spin_unlock_irqrestore(&adc->lock, flags); return 0; } EXPORT_SYMBOL_GPL(s3c_adc_start); static void s3c_convert_done(struct s3c_adc_client *client, unsigned v, unsigned u, unsigned *left) { client->result = v; wake_up(client->wait); } int s3c_adc_read(struct s3c_adc_client *client, unsigned int ch) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); int ret; client->convert_cb = s3c_convert_done; client->wait = &wake; client->result = -1; ret = s3c_adc_start(client, ch, 1); if (ret < 0) goto err; ret = wait_event_timeout(wake, client->result >= 0, HZ / 2); if (client->result < 0) { ret = -ETIMEDOUT; goto err; } client->convert_cb = NULL; return client->result; err: return ret; } EXPORT_SYMBOL_GPL(s3c_adc_read); static void s3c_adc_default_select(struct s3c_adc_client *client, unsigned select) { } struct s3c_adc_client *s3c_adc_register(struct platform_device *pdev, void (*select)(struct s3c_adc_client *client, unsigned int selected), void (*conv)(struct s3c_adc_client *client, unsigned d0, unsigned d1, 
unsigned *samples_left), unsigned int is_ts) { struct s3c_adc_client *client; WARN_ON(!pdev); if (!select) select = s3c_adc_default_select; if (!pdev) return ERR_PTR(-EINVAL); client = kzalloc(sizeof(struct s3c_adc_client), GFP_KERNEL); if (!client) { dev_err(&pdev->dev, "no memory for adc client\n"); return ERR_PTR(-ENOMEM); } client->pdev = pdev; client->is_ts = is_ts; client->select_cb = select; client->convert_cb = conv; return client; } EXPORT_SYMBOL_GPL(s3c_adc_register); void s3c_adc_release(struct s3c_adc_client *client) { unsigned long flags; spin_lock_irqsave(&adc_dev->lock, flags); /* We should really check that nothing is in progress. */ if (adc_dev->cur == client) adc_dev->cur = NULL; if (adc_dev->ts_pend == client) adc_dev->ts_pend = NULL; else { struct list_head *p, *n; struct s3c_adc_client *tmp; list_for_each_safe(p, n, &adc_pending) { tmp = list_entry(p, struct s3c_adc_client, pend); if (tmp == client) list_del(&tmp->pend); } } if (adc_dev->cur == NULL) s3c_adc_try(adc_dev); spin_unlock_irqrestore(&adc_dev->lock, flags); kfree(client); } EXPORT_SYMBOL_GPL(s3c_adc_release); static irqreturn_t s3c_adc_irq(int irq, void *pw) { struct adc_device *adc = pw; struct s3c_adc_client *client = adc->cur; enum s3c_cpu_type cpu = platform_get_device_id(adc->pdev)->driver_data; unsigned data0, data1; if (!client) { dev_warn(&adc->pdev->dev, "%s: no adc pending\n", __func__); goto exit; } data0 = readl(adc->regs + S3C2410_ADCDAT0); data1 = readl(adc->regs + S3C2410_ADCDAT1); adc_dbg(adc, "read %d: 0x%04x, 0x%04x\n", client->nr_samples, data0, data1); client->nr_samples--; if (cpu == TYPE_ADCV1 || cpu == TYPE_ADCV11) { data0 &= 0x3ff; data1 &= 0x3ff; } else { /* S3C2416/S3C64XX/S5P ADC resolution is 12-bit */ data0 &= 0xfff; data1 &= 0xfff; } if (client->convert_cb) (client->convert_cb)(client, data0, data1, &client->nr_samples); if (client->nr_samples > 0) { /* fire another conversion for this */ client->select_cb(client, 1); s3c_adc_convert(adc); } else { 
spin_lock(&adc->lock); (client->select_cb)(client, 0); adc->cur = NULL; s3c_adc_try(adc); spin_unlock(&adc->lock); } exit: if (cpu == TYPE_ADCV2 || cpu == TYPE_ADCV3) { /* Clear ADC interrupt */ writel(0, adc->regs + S3C64XX_ADCCLRINT); } return IRQ_HANDLED; } static int s3c_adc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct adc_device *adc; struct resource *regs; enum s3c_cpu_type cpu = platform_get_device_id(pdev)->driver_data; int ret; unsigned tmp; adc = kzalloc(sizeof(struct adc_device), GFP_KERNEL); if (adc == NULL) { dev_err(dev, "failed to allocate adc_device\n"); return -ENOMEM; } spin_lock_init(&adc->lock); adc->pdev = pdev; adc->prescale = S3C2410_ADCCON_PRSCVL(49); adc->vdd = regulator_get(dev, "vdd"); if (IS_ERR(adc->vdd)) { dev_err(dev, "operating without regulator \"vdd\" .\n"); ret = PTR_ERR(adc->vdd); goto err_alloc; } adc->irq = platform_get_irq(pdev, 1); if (adc->irq <= 0) { dev_err(dev, "failed to get adc irq\n"); ret = -ENOENT; goto err_reg; } ret = request_irq(adc->irq, s3c_adc_irq, 0, dev_name(dev), adc); if (ret < 0) { dev_err(dev, "failed to attach adc irq\n"); goto err_reg; } adc->clk = clk_get(dev, "adc"); if (IS_ERR(adc->clk)) { dev_err(dev, "failed to get adc clock\n"); ret = PTR_ERR(adc->clk); goto err_irq; } regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) { dev_err(dev, "failed to find registers\n"); ret = -ENXIO; goto err_clk; } adc->regs = ioremap(regs->start, resource_size(regs)); if (!adc->regs) { dev_err(dev, "failed to map registers\n"); ret = -ENXIO; goto err_clk; } ret = regulator_enable(adc->vdd); if (ret) goto err_ioremap; clk_enable(adc->clk); tmp = adc->prescale | S3C2410_ADCCON_PRSCEN; /* Enable 12-bit ADC resolution */ if (cpu == TYPE_ADCV12) tmp |= S3C2416_ADCCON_RESSEL; if (cpu == TYPE_ADCV2 || cpu == TYPE_ADCV3) tmp |= S3C64XX_ADCCON_RESSEL; writel(tmp, adc->regs + S3C2410_ADCCON); dev_info(dev, "attached adc driver\n"); platform_set_drvdata(pdev, adc); adc_dev = adc; 
return 0; err_ioremap: iounmap(adc->regs); err_clk: clk_put(adc->clk); err_irq: free_irq(adc->irq, adc); err_reg: regulator_put(adc->vdd); err_alloc: kfree(adc); return ret; } static int __devexit s3c_adc_remove(struct platform_device *pdev) { struct adc_device *adc = platform_get_drvdata(pdev); iounmap(adc->regs); free_irq(adc->irq, adc); clk_disable(adc->clk); regulator_disable(adc->vdd); regulator_put(adc->vdd); clk_put(adc->clk); kfree(adc); return 0; } #ifdef CONFIG_PM static int s3c_adc_suspend(struct device *dev) { struct platform_device *pdev = container_of(dev, struct platform_device, dev); struct adc_device *adc = platform_get_drvdata(pdev); unsigned long flags; u32 con; spin_lock_irqsave(&adc->lock, flags); con = readl(adc->regs + S3C2410_ADCCON); con |= S3C2410_ADCCON_STDBM; writel(con, adc->regs + S3C2410_ADCCON); disable_irq(adc->irq); spin_unlock_irqrestore(&adc->lock, flags); clk_disable(adc->clk); regulator_disable(adc->vdd); return 0; } static int s3c_adc_resume(struct device *dev) { struct platform_device *pdev = container_of(dev, struct platform_device, dev); struct adc_device *adc = platform_get_drvdata(pdev); enum s3c_cpu_type cpu = platform_get_device_id(pdev)->driver_data; int ret; unsigned long tmp; ret = regulator_enable(adc->vdd); if (ret) return ret; clk_enable(adc->clk); enable_irq(adc->irq); tmp = adc->prescale | S3C2410_ADCCON_PRSCEN; /* Enable 12-bit ADC resolution */ if (cpu == TYPE_ADCV12) tmp |= S3C2416_ADCCON_RESSEL; if (cpu == TYPE_ADCV2 || cpu == TYPE_ADCV3) tmp |= S3C64XX_ADCCON_RESSEL; writel(tmp, adc->regs + S3C2410_ADCCON); return 0; } #else #define s3c_adc_suspend NULL #define s3c_adc_resume NULL #endif static struct platform_device_id s3c_adc_driver_ids[] = { { .name = "s3c24xx-adc", .driver_data = TYPE_ADCV1, }, { .name = "s3c2443-adc", .driver_data = TYPE_ADCV11, }, { .name = "s3c2416-adc", .driver_data = TYPE_ADCV12, }, { .name = "s3c64xx-adc", .driver_data = TYPE_ADCV2, }, { .name = "samsung-adc-v3", .driver_data = 
TYPE_ADCV3, }, { } }; MODULE_DEVICE_TABLE(platform, s3c_adc_driver_ids); static const struct dev_pm_ops adc_pm_ops = { .suspend = s3c_adc_suspend, .resume = s3c_adc_resume, }; static struct platform_driver s3c_adc_driver = { .id_table = s3c_adc_driver_ids, .driver = { .name = "s3c-adc", .owner = THIS_MODULE, .pm = &adc_pm_ops, }, .probe = s3c_adc_probe, .remove = __devexit_p(s3c_adc_remove), }; static int __init adc_init(void) { int ret; ret = platform_driver_register(&s3c_adc_driver); if (ret) printk(KERN_ERR "%s: failed to add adc driver\n", __func__); return ret; } module_init(adc_init);
gpl-2.0
SiddheshK15/android_kernel_yu_msm8916_gcc5
drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
7568
9450
/******************************************************************************
 * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
 *
 * Based on the r8180 driver, which is:
 * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * wlanfae <wlanfae@realtek.com>
 *****************************************************************************/
#include "rtl_ps.h"
#include "rtl_core.h"
#include "r8192E_phy.h"
#include "r8192E_phyreg.h"
#include "r8190P_rtl8256.h" /* RTL8225 Radio frontend */
#include "r8192E_cmdpkt.h"

/* Put the RF into sleep state, unless an RF state change is already in
 * flight (in which case the request is silently dropped). */
static void rtl8192_hw_sleep_down(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	unsigned long flags = 0;

	spin_lock_irqsave(&priv->rf_ps_lock, flags);
	if (priv->RFChangeInProgress) {
		spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
		RT_TRACE(COMP_DBG, "rtl8192_hw_sleep_down(): RF Change in "
			 "progress!\n");
		return;
	}
	spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
	RT_TRACE(COMP_DBG, "%s()============>come to sleep down\n", __func__);

	MgntActSet_RF_State(dev, eRfSleep, RF_CHANGE_BY_PS, false);
}

/* Deferred-work wrapper around rtl8192_hw_sleep_down(). */
void rtl8192_hw_sleep_wq(void *data)
{
	struct rtllib_device *ieee = container_of_dwork_rsl(data,
				     struct rtllib_device, hw_sleep_wq);
	struct net_device *dev = ieee->dev;
	rtl8192_hw_sleep_down(dev);
}

/* Turn the RF back on.  If an RF change is in progress, retry via the
 * hw_wakeup workqueue 10 ms later instead of blocking. */
void rtl8192_hw_wakeup(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	unsigned long flags = 0;

	spin_lock_irqsave(&priv->rf_ps_lock, flags);
	if (priv->RFChangeInProgress) {
		spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
		RT_TRACE(COMP_DBG, "rtl8192_hw_wakeup(): RF Change in "
			 "progress!\n");
		queue_delayed_work_rsl(priv->rtllib->wq,
				       &priv->rtllib->hw_wakeup_wq, MSECS(10));
		return;
	}
	spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
	RT_TRACE(COMP_PS, "%s()============>come to wake up\n", __func__);
	MgntActSet_RF_State(dev, eRfOn, RF_CHANGE_BY_PS, false);
}

/* Deferred-work wrapper around rtl8192_hw_wakeup(). */
void rtl8192_hw_wakeup_wq(void *data)
{
	struct rtllib_device *ieee = container_of_dwork_rsl(data,
				     struct rtllib_device, hw_wakeup_wq);
	struct net_device *dev = ieee->dev;
	rtl8192_hw_wakeup(dev);
}

/* Bounds (in ms) on how long we are willing to doze. */
#define MIN_SLEEP_TIME 50
#define MAX_SLEEP_TIME 10000

/* Schedule an immediate sleep and a wakeup at absolute jiffies @time.
 * The 8+16+7 ms margin compensates for wakeup latency.
 * NOTE(review): @time is u64 while jiffies wraps; the (time - jiffies)
 * comparisons assume time > jiffies — confirm callers guarantee this. */
void rtl8192_hw_to_sleep(struct net_device *dev, u64 time)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 tmp;
	unsigned long flags;

	spin_lock_irqsave(&priv->ps_lock, flags);

	time -= MSECS(8+16+7);

	if ((time - jiffies) <= MSECS(MIN_SLEEP_TIME)) {
		spin_unlock_irqrestore(&priv->ps_lock, flags);
		printk(KERN_INFO "too short to sleep::%lld < %ld\n",
		       time - jiffies, MSECS(MIN_SLEEP_TIME));
		return;
	}

	if ((time - jiffies) > MSECS(MAX_SLEEP_TIME)) {
		printk(KERN_INFO "========>too long to sleep:%lld > %ld\n",
		       time - jiffies,  MSECS(MAX_SLEEP_TIME));
		spin_unlock_irqrestore(&priv->ps_lock, flags);
		return;
	}
	tmp = time - jiffies;
	queue_delayed_work_rsl(priv->rtllib->wq,
			       &priv->rtllib->hw_wakeup_wq, tmp);
	queue_delayed_work_rsl(priv->rtllib->wq,
			       (void *)&priv->rtllib->hw_sleep_wq, 0);
	spin_unlock_irqrestore(&priv->ps_lock, flags);
}

/* Apply the inactive-power-save state recorded in eInactivePowerState;
 * bSwRfProcessing flags that the switch is in progress. */
static void InactivePsWorkItemCallback(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
					&(priv->rtllib->PowerSaveControl);

	RT_TRACE(COMP_PS, "InactivePsWorkItemCallback() --------->\n");
	pPSC->bSwRfProcessing = true;

	RT_TRACE(COMP_PS, "InactivePsWorkItemCallback(): Set RF to %s.\n",
		 pPSC->eInactivePowerState == eRfOff ? "OFF" : "ON");
	MgntActSet_RF_State(dev, pPSC->eInactivePowerState, RF_CHANGE_BY_IPS,
			    false);

	pPSC->bSwRfProcessing = false;
	RT_TRACE(COMP_PS, "InactivePsWorkItemCallback() <---------\n");
}

/* Enter inactive power save: turn the RF off when IPS is enabled, the
 * RF is currently on, and we are neither linked nor acting as master. */
void IPSEnter(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
					&(priv->rtllib->PowerSaveControl);
	enum rt_rf_power_state rtState;

	if (pPSC->bInactivePs) {
		rtState = priv->rtllib->eRFPowerState;
		if (rtState == eRfOn && !pPSC->bSwRfProcessing &&
			(priv->rtllib->state != RTLLIB_LINKED) &&
			(priv->rtllib->iw_mode != IW_MODE_MASTER)) {
			RT_TRACE(COMP_PS, "IPSEnter(): Turn off RF.\n");
			pPSC->eInactivePowerState = eRfOff;
			priv->isRFOff = true;
			priv->bInPowerSaveMode = true;
			InactivePsWorkItemCallback(dev);
		}
	}
}

/* Leave inactive power save: turn the RF on again, but only if it was
 * turned off by IPS itself (RfOffReason <= RF_CHANGE_BY_IPS). */
void IPSLeave(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
					&(priv->rtllib->PowerSaveControl);
	enum rt_rf_power_state rtState;

	if (pPSC->bInactivePs) {
		rtState = priv->rtllib->eRFPowerState;
		if (rtState != eRfOn && !pPSC->bSwRfProcessing &&
		    priv->rtllib->RfOffReason <= RF_CHANGE_BY_IPS) {
			RT_TRACE(COMP_PS, "IPSLeave(): Turn on RF.\n");
			pPSC->eInactivePowerState = eRfOn;
			priv->bInPowerSaveMode = false;
			InactivePsWorkItemCallback(dev);
		}
	}
}

/* Workqueue entry point for leaving IPS, serialized by ips_sem. */
void IPSLeave_wq(void *data)
{
	struct rtllib_device *ieee = container_of_work_rsl(data,
				     struct rtllib_device, ips_leave_wq);
	struct net_device *dev = ieee->dev;
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);

	down(&priv->rtllib->ips_sem);
	IPSLeave(dev);
	up(&priv->rtllib->ips_sem);
}

/* Queue an IPS-leave, unless the RF was turned off for a reason other
 * than IPS (user/hardware rfkill), in which case we must not re-enable. */
void rtllib_ips_leave_wq(struct net_device *dev)
{
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
	enum rt_rf_power_state rtState;

	rtState = priv->rtllib->eRFPowerState;

	if (priv->rtllib->PowerSaveControl.bInactivePs) {
		if (rtState == eRfOff) {
			if (priv->rtllib->RfOffReason > RF_CHANGE_BY_IPS) {
				RT_TRACE(COMP_ERR, "%s(): RF is OFF.\n",
					 __func__);
				return;
			} else {
				printk(KERN_INFO "=========>%s(): IPSLeave\n",
				       __func__);
				queue_work_rsl(priv->rtllib->wq,
					       &priv->rtllib->ips_leave_wq);
			}
		}
	}
}

/* Synchronous IPS-leave, serialized by ips_sem. */
void rtllib_ips_leave(struct net_device *dev)
{
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
	down(&priv->rtllib->ips_sem);
	IPSLeave(dev);
	up(&priv->rtllib->ips_sem);
}

/* Switch the 802.11 power-save mode.  When leaving PS while the STA is
 * asleep, wake the hardware and send a null-function frame so the AP
 * stops buffering for us.  Returns false in ad-hoc mode (PS invalid). */
static bool MgntActSet_802_11_PowerSaveMode(struct net_device *dev,
					    u8 rtPsMode)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->rtllib->iw_mode == IW_MODE_ADHOC)
		return false;

	RT_TRACE(COMP_LPS, "%s(): set ieee->ps = %x\n", __func__, rtPsMode);
	if (!priv->ps_force)
		priv->rtllib->ps = rtPsMode;
	if (priv->rtllib->sta_sleep != LPS_IS_WAKE &&
	    rtPsMode == RTLLIB_PS_DISABLED) {
		unsigned long flags;

		rtl8192_hw_wakeup(dev);
		priv->rtllib->sta_sleep = LPS_IS_WAKE;

		spin_lock_irqsave(&(priv->rtllib->mgmt_tx_lock), flags);
		RT_TRACE(COMP_DBG, "LPS leave: notify AP we are awaked"
			 " ++++++++++ SendNullFunctionData\n");
		rtllib_sta_ps_send_null_frame(priv->rtllib, 0);
		spin_unlock_irqrestore(&(priv->rtllib->mgmt_tx_lock), flags);
	}

	return true;
}

/* Enter leisure power save (LPS) once the idle counter has reached
 * RT_CHECK_FOR_HANG_PERIOD; only meaningful when associated in
 * infrastructure mode. */
void LeisurePSEnter(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
					&(priv->rtllib->PowerSaveControl);

	RT_TRACE(COMP_PS, "LeisurePSEnter()...\n");
	RT_TRACE(COMP_PS, "pPSC->bLeisurePs = %d, ieee->ps = %d,pPSC->LpsIdle"
		 "Count is %d,RT_CHECK_FOR_HANG_PERIOD is %d\n",
		 pPSC->bLeisurePs, priv->rtllib->ps, pPSC->LpsIdleCount,
		 RT_CHECK_FOR_HANG_PERIOD);

	if (!((priv->rtllib->iw_mode == IW_MODE_INFRA) &&
	    (priv->rtllib->state == RTLLIB_LINKED))
	    || (priv->rtllib->iw_mode == IW_MODE_ADHOC) ||
	    (priv->rtllib->iw_mode == IW_MODE_MASTER))
		return;

	if (pPSC->bLeisurePs) {
		if (pPSC->LpsIdleCount >= RT_CHECK_FOR_HANG_PERIOD) {

			if (priv->rtllib->ps == RTLLIB_PS_DISABLED) {
				RT_TRACE(COMP_LPS, "LeisurePSEnter(): Enter "
					 "802.11 power save mode...\n");

				/* optionally let firmware manage LPS */
				if (!pPSC->bFwCtrlLPS) {
					if (priv->rtllib->SetFwCmdHandler)
						priv->rtllib->SetFwCmdHandler(
							dev, FW_CMD_LPS_ENTER);
				}
				MgntActSet_802_11_PowerSaveMode(dev,
						 RTLLIB_PS_MBCAST |
						 RTLLIB_PS_UNICAST);
			}
		} else
			pPSC->LpsIdleCount++;
	}
}

/* Leave leisure power save when traffic resumes. */
void LeisurePSLeave(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
					&(priv->rtllib->PowerSaveControl);

	RT_TRACE(COMP_PS, "LeisurePSLeave()...\n");
	RT_TRACE(COMP_PS, "pPSC->bLeisurePs = %d, ieee->ps = %d\n",
		pPSC->bLeisurePs, priv->rtllib->ps);

	if (pPSC->bLeisurePs) {
		if (priv->rtllib->ps != RTLLIB_PS_DISABLED) {
			RT_TRACE(COMP_LPS, "LeisurePSLeave(): Busy Traffic , "
				 "Leave 802.11 power save..\n");
			MgntActSet_802_11_PowerSaveMode(dev,
					 RTLLIB_PS_DISABLED);

			if (!pPSC->bFwCtrlLPS) {
				if (priv->rtllib->SetFwCmdHandler)
					priv->rtllib->SetFwCmdHandler(dev,
							 FW_CMD_LPS_LEAVE);
			}
		}
	}
}
gpl-2.0
scto/android_kernel_jide_sk1wg
arch/m68k/amiga/amiints.c
8592
4218
/*
 * Amiga Linux interrupt handling code
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/irq.h>

#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>
#include <asm/amipcmcia.h>


/*
 * Enable/disable a particular machine specific interrupt source.
 * Note that this may affect other interrupts in case of a shared interrupt.
 * This function should only be called for a _very_ short time to change some
 * internal data, that may not be changed by the interrupt at the same time.
 */

/* Set the source's bit in INTENA (IF_SETCLR selects "set" semantics). */
static void amiga_irq_enable(struct irq_data *data)
{
	amiga_custom.intena = IF_SETCLR | (1 << (data->irq - IRQ_USER));
}

/* Writing the bit without IF_SETCLR clears it, masking the source. */
static void amiga_irq_disable(struct irq_data *data)
{
	amiga_custom.intena = 1 << (data->irq - IRQ_USER);
}

static struct irq_chip amiga_irq_chip = {
	.name		= "amiga",
	.irq_enable	= amiga_irq_enable,
	.irq_disable	= amiga_irq_disable,
};

/*
 * The builtin Amiga hardware interrupt handlers.
 *
 * Each ami_intN below demultiplexes one autovector level: it reads the
 * pending-and-enabled sources (INTREQR & INTENAR), acknowledges each one
 * by writing its bit to INTREQ, then dispatches the per-source handler.
 */

/* Autovector level 1: serial TX, floppy DMA done, software interrupt. */
static void ami_int1(unsigned int irq, struct irq_desc *desc)
{
	unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;

	/* if serial transmit buffer empty, interrupt */
	if (ints & IF_TBE) {
		amiga_custom.intreq = IF_TBE;
		generic_handle_irq(IRQ_AMIGA_TBE);
	}

	/* if floppy disk transfer complete, interrupt */
	if (ints & IF_DSKBLK) {
		amiga_custom.intreq = IF_DSKBLK;
		generic_handle_irq(IRQ_AMIGA_DSKBLK);
	}

	/* if software interrupt set, interrupt */
	if (ints & IF_SOFT) {
		amiga_custom.intreq = IF_SOFT;
		generic_handle_irq(IRQ_AMIGA_SOFT);
	}
}

/* Autovector level 3: blitter, copper, vertical blank. */
static void ami_int3(unsigned int irq, struct irq_desc *desc)
{
	unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;

	/* if a blitter interrupt */
	if (ints & IF_BLIT) {
		amiga_custom.intreq = IF_BLIT;
		generic_handle_irq(IRQ_AMIGA_BLIT);
	}

	/* if a copper interrupt */
	if (ints & IF_COPER) {
		amiga_custom.intreq = IF_COPER;
		generic_handle_irq(IRQ_AMIGA_COPPER);
	}

	/* if a vertical blank interrupt */
	if (ints & IF_VERTB) {
		amiga_custom.intreq = IF_VERTB;
		generic_handle_irq(IRQ_AMIGA_VERTB);
	}
}

/* Autovector level 4: the four audio channels. */
static void ami_int4(unsigned int irq, struct irq_desc *desc)
{
	unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;

	/* if audio 0 interrupt */
	if (ints & IF_AUD0) {
		amiga_custom.intreq = IF_AUD0;
		generic_handle_irq(IRQ_AMIGA_AUD0);
	}

	/* if audio 1 interrupt */
	if (ints & IF_AUD1) {
		amiga_custom.intreq = IF_AUD1;
		generic_handle_irq(IRQ_AMIGA_AUD1);
	}

	/* if audio 2 interrupt */
	if (ints & IF_AUD2) {
		amiga_custom.intreq = IF_AUD2;
		generic_handle_irq(IRQ_AMIGA_AUD2);
	}

	/* if audio 3 interrupt */
	if (ints & IF_AUD3) {
		amiga_custom.intreq = IF_AUD3;
		generic_handle_irq(IRQ_AMIGA_AUD3);
	}
}

/* Autovector level 5: serial RX and disk sync. */
static void ami_int5(unsigned int irq, struct irq_desc *desc)
{
	unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;

	/* if serial receive buffer full interrupt */
	if (ints & IF_RBF) {
		/* acknowledge of IF_RBF must be done by the serial interrupt */
		generic_handle_irq(IRQ_AMIGA_RBF);
	}

	/* if a disk sync interrupt */
	if (ints & IF_DSKSYN) {
		amiga_custom.intreq = IF_DSKSYN;
		generic_handle_irq(IRQ_AMIGA_DSKSYN);
	}
}

/*
 * void amiga_init_IRQ(void)
 *
 * Parameters:	None
 *
 * Returns:	Nothing
 *
 * This function should be called during kernel startup to initialize
 * the amiga IRQ handling routines.
 */

void __init amiga_init_IRQ(void)
{
	m68k_setup_irq_controller(&amiga_irq_chip, handle_simple_irq, IRQ_USER,
				  AMI_STD_IRQS);

	irq_set_chained_handler(IRQ_AUTO_1, ami_int1);
	irq_set_chained_handler(IRQ_AUTO_3, ami_int3);
	irq_set_chained_handler(IRQ_AUTO_4, ami_int4);
	irq_set_chained_handler(IRQ_AUTO_5, ami_int5);

	/* turn off PCMCIA interrupts */
	if (AMIGAHW_PRESENT(PCMCIA))
		gayle.inten = GAYLE_IRQ_IDE;

	/* turn off all interrupts and enable the master interrupt bit */
	amiga_custom.intena = 0x7fff;
	amiga_custom.intreq = 0x7fff;
	amiga_custom.intena = IF_SETCLR | IF_INTEN;

	cia_init_IRQ(&ciaa_base);
	cia_init_IRQ(&ciab_base);
}
gpl-2.0
yank555-lu/Shamu-3.10-lollipop
arch/arm/mach-pxa/clock-pxa2xx.c
8592
1077
/* * linux/arch/arm/mach-pxa/clock-pxa2xx.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/syscore_ops.h> #include <mach/pxa2xx-regs.h> #include "clock.h" void clk_pxa2xx_cken_enable(struct clk *clk) { CKEN |= 1 << clk->cken; } void clk_pxa2xx_cken_disable(struct clk *clk) { CKEN &= ~(1 << clk->cken); } const struct clkops clk_pxa2xx_cken_ops = { .enable = clk_pxa2xx_cken_enable, .disable = clk_pxa2xx_cken_disable, }; #ifdef CONFIG_PM static uint32_t saved_cken; static int pxa2xx_clock_suspend(void) { saved_cken = CKEN; return 0; } static void pxa2xx_clock_resume(void) { CKEN = saved_cken; } #else #define pxa2xx_clock_suspend NULL #define pxa2xx_clock_resume NULL #endif struct syscore_ops pxa2xx_clock_syscore_ops = { .suspend = pxa2xx_clock_suspend, .resume = pxa2xx_clock_resume, };
gpl-2.0
mihadyuk/wandboard-linux
arch/mn10300/unit-asb2303/leds.c
13712
1471
/* ASB2303 peripheral 7-segment LEDs x1 support * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/param.h> #include <linux/init.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/intctl-regs.h> #include <asm/rtc-regs.h> #include <unit/leds.h> #if 0 static const u8 asb2303_led_hex_tbl[16] = { 0x80, 0xf2, 0x48, 0x60, 0x32, 0x24, 0x04, 0xf0, 0x00, 0x20, 0x10, 0x06, 0x8c, 0x42, 0x0c, 0x1c }; #endif static const u8 asb2303_led_chase_tbl[6] = { ~0x02, /* top - segA */ ~0x04, /* right top - segB */ ~0x08, /* right bottom - segC */ ~0x10, /* bottom - segD */ ~0x20, /* left bottom - segE */ ~0x40, /* left top - segF */ }; static unsigned asb2303_led_chase; void peripheral_leds_display_exception(enum exception_code code) { ASB2303_GPIO0DEF = 0x5555; /* configure as an output port */ ASB2303_7SEGLEDS = 0x6d; /* triple horizontal bar */ } void peripheral_leds_led_chase(void) { ASB2303_GPIO0DEF = 0x5555; /* configure as an output port */ ASB2303_7SEGLEDS = asb2303_led_chase_tbl[asb2303_led_chase]; asb2303_led_chase++; if (asb2303_led_chase >= 6) asb2303_led_chase = 0; }
gpl-2.0
BORETS24/Zenfone-2-500CL
linux/kernel/net/ceph/osd_client.c
145
69505
#include <linux/ceph/ceph_debug.h> #include <linux/module.h> #include <linux/err.h> #include <linux/highmem.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/uaccess.h> #ifdef CONFIG_BLOCK #include <linux/bio.h> #endif #include <linux/ceph/libceph.h> #include <linux/ceph/osd_client.h> #include <linux/ceph/messenger.h> #include <linux/ceph/decode.h> #include <linux/ceph/auth.h> #include <linux/ceph/pagelist.h> #define OSD_OP_FRONT_LEN 4096 #define OSD_OPREPLY_FRONT_LEN 512 static struct kmem_cache *ceph_osd_request_cache; static const struct ceph_connection_operations osd_con_ops; static void __send_queued(struct ceph_osd_client *osdc); static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd); static void __register_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req); static void __unregister_linger_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req); static void __send_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req); /* * Implement client access to distributed object storage cluster. * * All data objects are stored within a cluster/cloud of OSDs, or * "object storage devices." (Note that Ceph OSDs have _nothing_ to * do with the T10 OSD extensions to SCSI.) Ceph OSDs are simply * remote daemons serving up and coordinating consistent and safe * access to storage. * * Cluster membership and the mapping of data objects onto storage devices * are described by the osd map. * * We keep track of pending OSD requests (read, write), resubmit * requests to different OSDs when the cluster topology/data layout * change, or retry the affected requests when the communications * channel with an OSD is reset. */ /* * calculate the mapping of a file extent onto an object, and fill out the * request accordingly. shorten extent as necessary if it crosses an * object boundary. * * fill osd op in request message. 
*/ static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen, u64 *objnum, u64 *objoff, u64 *objlen) { u64 orig_len = *plen; int r; /* object extent? */ r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum, objoff, objlen); if (r < 0) return r; if (*objlen < orig_len) { *plen = *objlen; dout(" skipping last %llu, final file extent %llu~%llu\n", orig_len - *plen, off, *plen); } dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen); return 0; } static void ceph_osd_data_init(struct ceph_osd_data *osd_data) { memset(osd_data, 0, sizeof (*osd_data)); osd_data->type = CEPH_OSD_DATA_TYPE_NONE; } static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages) { osd_data->type = CEPH_OSD_DATA_TYPE_PAGES; osd_data->pages = pages; osd_data->length = length; osd_data->alignment = alignment; osd_data->pages_from_pool = pages_from_pool; osd_data->own_pages = own_pages; } static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data, struct ceph_pagelist *pagelist) { osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST; osd_data->pagelist = pagelist; } #ifdef CONFIG_BLOCK static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data, struct bio *bio, size_t bio_length) { osd_data->type = CEPH_OSD_DATA_TYPE_BIO; osd_data->bio = bio; osd_data->bio_length = bio_length; } #endif /* CONFIG_BLOCK */ #define osd_req_op_data(oreq, whch, typ, fld) \ ({ \ BUG_ON(whch >= (oreq)->r_num_ops); \ &(oreq)->r_ops[whch].typ.fld; \ }) static struct ceph_osd_data * osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which) { BUG_ON(which >= osd_req->r_num_ops); return &osd_req->r_ops[which].raw_data_in; } struct ceph_osd_data * osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req, unsigned int which) { return osd_req_op_data(osd_req, which, extent, osd_data); } EXPORT_SYMBOL(osd_req_op_extent_osd_data); struct ceph_osd_data * 
osd_req_op_cls_response_data(struct ceph_osd_request *osd_req, unsigned int which) { return osd_req_op_data(osd_req, which, cls, response_data); } EXPORT_SYMBOL(osd_req_op_cls_response_data); /* ??? */ void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req, unsigned int which, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages) { struct ceph_osd_data *osd_data; osd_data = osd_req_op_raw_data_in(osd_req, which); ceph_osd_data_pages_init(osd_data, pages, length, alignment, pages_from_pool, own_pages); } EXPORT_SYMBOL(osd_req_op_raw_data_in_pages); void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req, unsigned int which, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages) { struct ceph_osd_data *osd_data; osd_data = osd_req_op_data(osd_req, which, extent, osd_data); ceph_osd_data_pages_init(osd_data, pages, length, alignment, pages_from_pool, own_pages); } EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages); void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req, unsigned int which, struct ceph_pagelist *pagelist) { struct ceph_osd_data *osd_data; osd_data = osd_req_op_data(osd_req, which, extent, osd_data); ceph_osd_data_pagelist_init(osd_data, pagelist); } EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist); #ifdef CONFIG_BLOCK void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req, unsigned int which, struct bio *bio, size_t bio_length) { struct ceph_osd_data *osd_data; osd_data = osd_req_op_data(osd_req, which, extent, osd_data); ceph_osd_data_bio_init(osd_data, bio, bio_length); } EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio); #endif /* CONFIG_BLOCK */ static void osd_req_op_cls_request_info_pagelist( struct ceph_osd_request *osd_req, unsigned int which, struct ceph_pagelist *pagelist) { struct ceph_osd_data *osd_data; osd_data = osd_req_op_data(osd_req, which, cls, request_info); ceph_osd_data_pagelist_init(osd_data, pagelist); } void 
osd_req_op_cls_request_data_pagelist( struct ceph_osd_request *osd_req, unsigned int which, struct ceph_pagelist *pagelist) { struct ceph_osd_data *osd_data; osd_data = osd_req_op_data(osd_req, which, cls, request_data); ceph_osd_data_pagelist_init(osd_data, pagelist); } EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist); void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req, unsigned int which, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages) { struct ceph_osd_data *osd_data; osd_data = osd_req_op_data(osd_req, which, cls, request_data); ceph_osd_data_pages_init(osd_data, pages, length, alignment, pages_from_pool, own_pages); } EXPORT_SYMBOL(osd_req_op_cls_request_data_pages); void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req, unsigned int which, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages) { struct ceph_osd_data *osd_data; osd_data = osd_req_op_data(osd_req, which, cls, response_data); ceph_osd_data_pages_init(osd_data, pages, length, alignment, pages_from_pool, own_pages); } EXPORT_SYMBOL(osd_req_op_cls_response_data_pages); static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data) { switch (osd_data->type) { case CEPH_OSD_DATA_TYPE_NONE: return 0; case CEPH_OSD_DATA_TYPE_PAGES: return osd_data->length; case CEPH_OSD_DATA_TYPE_PAGELIST: return (u64)osd_data->pagelist->length; #ifdef CONFIG_BLOCK case CEPH_OSD_DATA_TYPE_BIO: return (u64)osd_data->bio_length; #endif /* CONFIG_BLOCK */ default: WARN(true, "unrecognized data type %d\n", (int)osd_data->type); return 0; } } static void ceph_osd_data_release(struct ceph_osd_data *osd_data) { if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) { int num_pages; num_pages = calc_pages_for((u64)osd_data->alignment, (u64)osd_data->length); ceph_release_page_vector(osd_data->pages, num_pages); } ceph_osd_data_init(osd_data); } static void osd_req_op_data_release(struct 
ceph_osd_request *osd_req, unsigned int which) { struct ceph_osd_req_op *op; BUG_ON(which >= osd_req->r_num_ops); op = &osd_req->r_ops[which]; switch (op->op) { case CEPH_OSD_OP_READ: case CEPH_OSD_OP_WRITE: ceph_osd_data_release(&op->extent.osd_data); break; case CEPH_OSD_OP_CALL: ceph_osd_data_release(&op->cls.request_info); ceph_osd_data_release(&op->cls.request_data); ceph_osd_data_release(&op->cls.response_data); break; default: break; } } /* * requests */ void ceph_osdc_release_request(struct kref *kref) { struct ceph_osd_request *req; unsigned int which; req = container_of(kref, struct ceph_osd_request, r_kref); if (req->r_request) ceph_msg_put(req->r_request); if (req->r_reply) { ceph_msg_revoke_incoming(req->r_reply); ceph_msg_put(req->r_reply); } for (which = 0; which < req->r_num_ops; which++) osd_req_op_data_release(req, which); ceph_put_snap_context(req->r_snapc); if (req->r_mempool) mempool_free(req, req->r_osdc->req_mempool); else kmem_cache_free(ceph_osd_request_cache, req); } EXPORT_SYMBOL(ceph_osdc_release_request); struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, struct ceph_snap_context *snapc, unsigned int num_ops, bool use_mempool, gfp_t gfp_flags) { struct ceph_osd_request *req; struct ceph_msg *msg; size_t msg_size; BUILD_BUG_ON(CEPH_OSD_MAX_OP > U16_MAX); BUG_ON(num_ops > CEPH_OSD_MAX_OP); msg_size = 4 + 4 + 8 + 8 + 4+8; msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */ msg_size += 1 + 8 + 4 + 4; /* pg_t */ msg_size += 4 + MAX_OBJ_NAME_SIZE; msg_size += 2 + num_ops*sizeof(struct ceph_osd_op); msg_size += 8; /* snapid */ msg_size += 8; /* snap_seq */ msg_size += 8 * (snapc ? 
snapc->num_snaps : 0); /* snaps */ msg_size += 4; if (use_mempool) { req = mempool_alloc(osdc->req_mempool, gfp_flags); memset(req, 0, sizeof(*req)); } else { req = kmem_cache_zalloc(ceph_osd_request_cache, gfp_flags); } if (req == NULL) return NULL; req->r_osdc = osdc; req->r_mempool = use_mempool; req->r_num_ops = num_ops; kref_init(&req->r_kref); init_completion(&req->r_completion); init_completion(&req->r_safe_completion); RB_CLEAR_NODE(&req->r_node); INIT_LIST_HEAD(&req->r_unsafe_item); INIT_LIST_HEAD(&req->r_linger_item); INIT_LIST_HEAD(&req->r_linger_osd); INIT_LIST_HEAD(&req->r_req_lru_item); INIT_LIST_HEAD(&req->r_osd_item); /* create reply message */ if (use_mempool) msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0); else msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, OSD_OPREPLY_FRONT_LEN, gfp_flags, true); if (!msg) { ceph_osdc_put_request(req); return NULL; } req->r_reply = msg; /* create request message; allow space for oid */ if (use_mempool) msg = ceph_msgpool_get(&osdc->msgpool_op, 0); else msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true); if (!msg) { ceph_osdc_put_request(req); return NULL; } memset(msg->front.iov_base, 0, msg->front.iov_len); req->r_request = msg; return req; } EXPORT_SYMBOL(ceph_osdc_alloc_request); static bool osd_req_opcode_valid(u16 opcode) { switch (opcode) { case CEPH_OSD_OP_READ: case CEPH_OSD_OP_STAT: case CEPH_OSD_OP_MAPEXT: case CEPH_OSD_OP_MASKTRUNC: case CEPH_OSD_OP_SPARSE_READ: case CEPH_OSD_OP_NOTIFY: case CEPH_OSD_OP_NOTIFY_ACK: case CEPH_OSD_OP_ASSERT_VER: case CEPH_OSD_OP_WRITE: case CEPH_OSD_OP_WRITEFULL: case CEPH_OSD_OP_TRUNCATE: case CEPH_OSD_OP_ZERO: case CEPH_OSD_OP_DELETE: case CEPH_OSD_OP_APPEND: case CEPH_OSD_OP_STARTSYNC: case CEPH_OSD_OP_SETTRUNC: case CEPH_OSD_OP_TRIMTRUNC: case CEPH_OSD_OP_TMAPUP: case CEPH_OSD_OP_TMAPPUT: case CEPH_OSD_OP_TMAPGET: case CEPH_OSD_OP_CREATE: case CEPH_OSD_OP_ROLLBACK: case CEPH_OSD_OP_WATCH: case CEPH_OSD_OP_OMAPGETKEYS: case CEPH_OSD_OP_OMAPGETVALS: case 
CEPH_OSD_OP_OMAPGETHEADER: case CEPH_OSD_OP_OMAPGETVALSBYKEYS: case CEPH_OSD_OP_OMAPSETVALS: case CEPH_OSD_OP_OMAPSETHEADER: case CEPH_OSD_OP_OMAPCLEAR: case CEPH_OSD_OP_OMAPRMKEYS: case CEPH_OSD_OP_OMAP_CMP: case CEPH_OSD_OP_CLONERANGE: case CEPH_OSD_OP_ASSERT_SRC_VERSION: case CEPH_OSD_OP_SRC_CMPXATTR: case CEPH_OSD_OP_GETXATTR: case CEPH_OSD_OP_GETXATTRS: case CEPH_OSD_OP_CMPXATTR: case CEPH_OSD_OP_SETXATTR: case CEPH_OSD_OP_SETXATTRS: case CEPH_OSD_OP_RESETXATTRS: case CEPH_OSD_OP_RMXATTR: case CEPH_OSD_OP_PULL: case CEPH_OSD_OP_PUSH: case CEPH_OSD_OP_BALANCEREADS: case CEPH_OSD_OP_UNBALANCEREADS: case CEPH_OSD_OP_SCRUB: case CEPH_OSD_OP_SCRUB_RESERVE: case CEPH_OSD_OP_SCRUB_UNRESERVE: case CEPH_OSD_OP_SCRUB_STOP: case CEPH_OSD_OP_SCRUB_MAP: case CEPH_OSD_OP_WRLOCK: case CEPH_OSD_OP_WRUNLOCK: case CEPH_OSD_OP_RDLOCK: case CEPH_OSD_OP_RDUNLOCK: case CEPH_OSD_OP_UPLOCK: case CEPH_OSD_OP_DNLOCK: case CEPH_OSD_OP_CALL: case CEPH_OSD_OP_PGLS: case CEPH_OSD_OP_PGLS_FILTER: return true; default: return false; } } /* * This is an osd op init function for opcodes that have no data or * other information associated with them. It also serves as a * common init routine for all the other init functions, below. 
*/ static struct ceph_osd_req_op * _osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode) { struct ceph_osd_req_op *op; BUG_ON(which >= osd_req->r_num_ops); BUG_ON(!osd_req_opcode_valid(opcode)); op = &osd_req->r_ops[which]; memset(op, 0, sizeof (*op)); op->op = opcode; return op; } void osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode) { (void)_osd_req_op_init(osd_req, which, opcode); } EXPORT_SYMBOL(osd_req_op_init); void osd_req_op_extent_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u64 offset, u64 length, u64 truncate_size, u32 truncate_seq) { struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode); size_t payload_len = 0; BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE); op->extent.offset = offset; op->extent.length = length; op->extent.truncate_size = truncate_size; op->extent.truncate_seq = truncate_seq; if (opcode == CEPH_OSD_OP_WRITE) payload_len += length; op->payload_len = payload_len; } EXPORT_SYMBOL(osd_req_op_extent_init); void osd_req_op_extent_update(struct ceph_osd_request *osd_req, unsigned int which, u64 length) { struct ceph_osd_req_op *op; u64 previous; BUG_ON(which >= osd_req->r_num_ops); op = &osd_req->r_ops[which]; previous = op->extent.length; if (length == previous) return; /* Nothing to do */ BUG_ON(length > previous); op->extent.length = length; op->payload_len -= previous - length; } EXPORT_SYMBOL(osd_req_op_extent_update); void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, const char *class, const char *method) { struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode); struct ceph_pagelist *pagelist; size_t payload_len = 0; size_t size; BUG_ON(opcode != CEPH_OSD_OP_CALL); pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS); BUG_ON(!pagelist); ceph_pagelist_init(pagelist); op->cls.class_name = class; size = strlen(class); BUG_ON(size > (size_t) U8_MAX); op->cls.class_len 
= size; ceph_pagelist_append(pagelist, class, size); payload_len += size; op->cls.method_name = method; size = strlen(method); BUG_ON(size > (size_t) U8_MAX); op->cls.method_len = size; ceph_pagelist_append(pagelist, method, size); payload_len += size; osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist); op->cls.argc = 0; /* currently unused */ op->payload_len = payload_len; } EXPORT_SYMBOL(osd_req_op_cls_init); void osd_req_op_watch_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u64 cookie, u64 version, int flag) { struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode); BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH); op->watch.cookie = cookie; op->watch.ver = version; if (opcode == CEPH_OSD_OP_WATCH && flag) op->watch.flag = (u8)1; } EXPORT_SYMBOL(osd_req_op_watch_init); static void ceph_osdc_msg_data_add(struct ceph_msg *msg, struct ceph_osd_data *osd_data) { u64 length = ceph_osd_data_length(osd_data); if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { BUG_ON(length > (u64) SIZE_MAX); if (length) ceph_msg_data_add_pages(msg, osd_data->pages, length, osd_data->alignment); } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) { BUG_ON(!length); ceph_msg_data_add_pagelist(msg, osd_data->pagelist); #ifdef CONFIG_BLOCK } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { ceph_msg_data_add_bio(msg, osd_data->bio, length); #endif } else { BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE); } } static u64 osd_req_encode_op(struct ceph_osd_request *req, struct ceph_osd_op *dst, unsigned int which) { struct ceph_osd_req_op *src; struct ceph_osd_data *osd_data; u64 request_data_len = 0; u64 data_length; BUG_ON(which >= req->r_num_ops); src = &req->r_ops[which]; if (WARN_ON(!osd_req_opcode_valid(src->op))) { pr_err("unrecognized osd opcode %d\n", src->op); return 0; } switch (src->op) { case CEPH_OSD_OP_STAT: osd_data = &src->raw_data_in; ceph_osdc_msg_data_add(req->r_reply, osd_data); break; 
case CEPH_OSD_OP_READ: case CEPH_OSD_OP_WRITE: if (src->op == CEPH_OSD_OP_WRITE) request_data_len = src->extent.length; dst->extent.offset = cpu_to_le64(src->extent.offset); dst->extent.length = cpu_to_le64(src->extent.length); dst->extent.truncate_size = cpu_to_le64(src->extent.truncate_size); dst->extent.truncate_seq = cpu_to_le32(src->extent.truncate_seq); osd_data = &src->extent.osd_data; if (src->op == CEPH_OSD_OP_WRITE) ceph_osdc_msg_data_add(req->r_request, osd_data); else ceph_osdc_msg_data_add(req->r_reply, osd_data); break; case CEPH_OSD_OP_CALL: dst->cls.class_len = src->cls.class_len; dst->cls.method_len = src->cls.method_len; osd_data = &src->cls.request_info; ceph_osdc_msg_data_add(req->r_request, osd_data); BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGELIST); request_data_len = osd_data->pagelist->length; osd_data = &src->cls.request_data; data_length = ceph_osd_data_length(osd_data); if (data_length) { BUG_ON(osd_data->type == CEPH_OSD_DATA_TYPE_NONE); dst->cls.indata_len = cpu_to_le32(data_length); ceph_osdc_msg_data_add(req->r_request, osd_data); src->payload_len += data_length; request_data_len += data_length; } osd_data = &src->cls.response_data; ceph_osdc_msg_data_add(req->r_reply, osd_data); break; case CEPH_OSD_OP_STARTSYNC: break; case CEPH_OSD_OP_NOTIFY_ACK: case CEPH_OSD_OP_WATCH: dst->watch.cookie = cpu_to_le64(src->watch.cookie); dst->watch.ver = cpu_to_le64(src->watch.ver); dst->watch.flag = src->watch.flag; break; default: pr_err("unsupported osd opcode %s\n", ceph_osd_op_name(src->op)); WARN_ON(1); return 0; } dst->op = cpu_to_le16(src->op); dst->payload_len = cpu_to_le32(src->payload_len); return request_data_len; } /* * build new request AND message, calculate layout, and adjust file * extent as needed. * * if the file was recently truncated, we include information about its * old and new size so that the object can be updated appropriately. (we * avoid synchronously deleting truncated objects because it's slow.) 
* * if @do_sync, include a 'startsync' command so that the osd will flush * data quickly. */ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, struct ceph_file_layout *layout, struct ceph_vino vino, u64 off, u64 *plen, int num_ops, int opcode, int flags, struct ceph_snap_context *snapc, u32 truncate_seq, u64 truncate_size, bool use_mempool) { struct ceph_osd_request *req; u64 objnum = 0; u64 objoff = 0; u64 objlen = 0; u32 object_size; u64 object_base; int r; BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE); req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool, GFP_NOFS); if (!req) return ERR_PTR(-ENOMEM); req->r_flags = flags; /* calculate max write size */ r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen); if (r < 0) { ceph_osdc_put_request(req); return ERR_PTR(r); } object_size = le32_to_cpu(layout->fl_object_size); object_base = off - objoff; if (truncate_size <= object_base) { truncate_size = 0; } else { truncate_size -= object_base; if (truncate_size > object_size) truncate_size = object_size; } osd_req_op_extent_init(req, 0, opcode, objoff, objlen, truncate_size, truncate_seq); /* * A second op in the ops array means the caller wants to * also issue a include a 'startsync' command so that the * osd will flush data quickly. */ if (num_ops > 1) osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC); req->r_file_layout = *layout; /* keep a copy */ snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, objnum); req->r_oid_len = strlen(req->r_oid); return req; } EXPORT_SYMBOL(ceph_osdc_new_request); /* * We keep osd requests in an rbtree, sorted by ->r_tid. 
*/ static void __insert_request(struct ceph_osd_client *osdc, struct ceph_osd_request *new) { struct rb_node **p = &osdc->requests.rb_node; struct rb_node *parent = NULL; struct ceph_osd_request *req = NULL; while (*p) { parent = *p; req = rb_entry(parent, struct ceph_osd_request, r_node); if (new->r_tid < req->r_tid) p = &(*p)->rb_left; else if (new->r_tid > req->r_tid) p = &(*p)->rb_right; else BUG(); } rb_link_node(&new->r_node, parent, p); rb_insert_color(&new->r_node, &osdc->requests); } static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc, u64 tid) { struct ceph_osd_request *req; struct rb_node *n = osdc->requests.rb_node; while (n) { req = rb_entry(n, struct ceph_osd_request, r_node); if (tid < req->r_tid) n = n->rb_left; else if (tid > req->r_tid) n = n->rb_right; else return req; } return NULL; } static struct ceph_osd_request * __lookup_request_ge(struct ceph_osd_client *osdc, u64 tid) { struct ceph_osd_request *req; struct rb_node *n = osdc->requests.rb_node; while (n) { req = rb_entry(n, struct ceph_osd_request, r_node); if (tid < req->r_tid) { if (!n->rb_left) return req; n = n->rb_left; } else if (tid > req->r_tid) { n = n->rb_right; } else { return req; } } return NULL; } /* * Resubmit requests pending on the given osd. */ static void __kick_osd_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd) { struct ceph_osd_request *req, *nreq; LIST_HEAD(resend); int err; dout("__kick_osd_requests osd%d\n", osd->o_osd); err = __reset_osd(osdc, osd); if (err) return; /* * Build up a list of requests to resend by traversing the * osd's list of requests. Requests for a given object are * sent in tid order, and that is also the order they're * kept on this list. Therefore all requests that are in * flight will be found first, followed by all requests that * have not yet been sent. And to resend requests while * preserving this order we will want to put any sent * requests back on the front of the osd client's unsent * list. 
* * So we build a separate ordered list of already-sent * requests for the affected osd and splice it onto the * front of the osd client's unsent list. Once we've seen a * request that has not yet been sent we're done. Those * requests are already sitting right where they belong. */ list_for_each_entry(req, &osd->o_requests, r_osd_item) { if (!req->r_sent) break; list_move_tail(&req->r_req_lru_item, &resend); dout("requeueing %p tid %llu osd%d\n", req, req->r_tid, osd->o_osd); if (!req->r_linger) req->r_flags |= CEPH_OSD_FLAG_RETRY; } list_splice(&resend, &osdc->req_unsent); /* * Linger requests are re-registered before sending, which * sets up a new tid for each. We add them to the unsent * list at the end to keep things in tid order. */ list_for_each_entry_safe(req, nreq, &osd->o_linger_requests, r_linger_osd) { /* * reregister request prior to unregistering linger so * that r_osd is preserved. */ BUG_ON(!list_empty(&req->r_req_lru_item)); __register_request(osdc, req); list_add_tail(&req->r_req_lru_item, &osdc->req_unsent); list_add_tail(&req->r_osd_item, &req->r_osd->o_requests); __unregister_linger_request(osdc, req); dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid, osd->o_osd); } } /* * If the osd connection drops, we need to resubmit all requests. */ static void osd_reset(struct ceph_connection *con) { struct ceph_osd *osd = con->private; struct ceph_osd_client *osdc; if (!osd) return; dout("osd_reset osd%d\n", osd->o_osd); osdc = osd->o_osdc; down_read(&osdc->map_sem); mutex_lock(&osdc->request_mutex); __kick_osd_requests(osdc, osd); __send_queued(osdc); mutex_unlock(&osdc->request_mutex); up_read(&osdc->map_sem); } /* * Track open sessions with osds. 
*/ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum) { struct ceph_osd *osd; osd = kzalloc(sizeof(*osd), GFP_NOFS); if (!osd) return NULL; atomic_set(&osd->o_ref, 1); osd->o_osdc = osdc; osd->o_osd = onum; RB_CLEAR_NODE(&osd->o_node); INIT_LIST_HEAD(&osd->o_requests); INIT_LIST_HEAD(&osd->o_linger_requests); INIT_LIST_HEAD(&osd->o_osd_lru); osd->o_incarnation = 1; ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr); INIT_LIST_HEAD(&osd->o_keepalive_item); return osd; } static struct ceph_osd *get_osd(struct ceph_osd *osd) { if (atomic_inc_not_zero(&osd->o_ref)) { dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1, atomic_read(&osd->o_ref)); return osd; } else { dout("get_osd %p FAIL\n", osd); return NULL; } } static void put_osd(struct ceph_osd *osd) { dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref), atomic_read(&osd->o_ref) - 1); if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) { struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth; ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer); kfree(osd); } } /* * remove an osd from our map */ static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) { dout("__remove_osd %p\n", osd); BUG_ON(!list_empty(&osd->o_requests)); rb_erase(&osd->o_node, &osdc->osds); list_del_init(&osd->o_osd_lru); ceph_con_close(&osd->o_con); put_osd(osd); } static void remove_all_osds(struct ceph_osd_client *osdc) { dout("%s %p\n", __func__, osdc); mutex_lock(&osdc->request_mutex); while (!RB_EMPTY_ROOT(&osdc->osds)) { struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), struct ceph_osd, o_node); __remove_osd(osdc, osd); } mutex_unlock(&osdc->request_mutex); } static void __move_osd_to_lru(struct ceph_osd_client *osdc, struct ceph_osd *osd) { dout("__move_osd_to_lru %p\n", osd); BUG_ON(!list_empty(&osd->o_osd_lru)); list_add_tail(&osd->o_osd_lru, &osdc->osd_lru); osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ; } 
/* Take @osd off the idle LRU (no-op if it isn't on it). */
static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	dout("__remove_osd_from_lru %p\n", osd);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
}

/* Reap osds that have been idle past their lru_ttl. */
static void remove_old_osds(struct ceph_osd_client *osdc)
{
	struct ceph_osd *osd, *nosd;

	dout("__remove_old_osds %p\n", osdc);
	mutex_lock(&osdc->request_mutex);
	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
		/* list is in expiry order; first non-expired ends the scan */
		if (time_before(jiffies, osd->lru_ttl))
			break;
		__remove_osd(osdc, osd);
	}
	mutex_unlock(&osdc->request_mutex);
}

/*
 * reset osd connect
 *
 * Returns -ENODEV if the (idle) osd was simply removed, -EAGAIN if the
 * address is unchanged and the messenger should keep retrying, or 0 if
 * the connection was closed and reopened (new incarnation).
 */
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
	if (list_empty(&osd->o_requests) &&
	    list_empty(&osd->o_linger_requests)) {
		__remove_osd(osdc, osd);
		return -ENODEV;
	}

	peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct ceph_osd_request *req;

		/* NOTE(review): this dout string lacks a trailing \n */
		dout(" osd addr hasn't changed and connection never opened,"
		     " letting msgr retry");
		/* touch each r_stamp for handle_timeout()'s benefit */
		list_for_each_entry(req, &osd->o_requests, r_osd_item)
			req->r_stamp = jiffies;
		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}

/* Insert @new into the osd rbtree, keyed (uniquely) by osd number. */
static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
{
	struct rb_node **p = &osdc->osds.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd *osd = NULL;

	dout("__insert_osd %p osd%d\n", new, new->o_osd);
	while (*p) {
		parent = *p;
		osd = rb_entry(parent, struct ceph_osd, o_node);
		if (new->o_osd < osd->o_osd)
			p = &(*p)->rb_left;
		else if (new->o_osd > osd->o_osd)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->o_node, parent, p);
	rb_insert_color(&new->o_node, &osdc->osds);
}

/* Find the session for osd number @o, or NULL if we have none. */
static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
{
	struct ceph_osd *osd;
	struct rb_node *n = osdc->osds.rb_node;

	while (n) {
		osd = rb_entry(n, struct ceph_osd, o_node);
		if (o < osd->o_osd)
			n = n->rb_left;
		else if (o > osd->o_osd)
			n = n->rb_right;
		else
			return osd;
	}
	return NULL;
}

/* Arm the request keepalive/timeout timer. */
static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
{
	schedule_delayed_work(&osdc->timeout_work,
			osdc->client->options->osd_keepalive_timeout * HZ);
}

static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
{
	cancel_delayed_work(&osdc->timeout_work);
}

/*
 * Register request, assign tid.  If this is the first request, set up
 * the timeout event.
 */
static void __register_request(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req)
{
	req->r_tid = ++osdc->last_tid;
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	__insert_request(osdc, req);
	ceph_osdc_get_request(req);	/* tree holds a ref */
	osdc->num_requests++;
	if (osdc->num_requests == 1) {
		dout(" first request, scheduling timeout\n");
		__schedule_osd_timeout(osdc);
	}
}

/*
 * called under osdc->request_mutex
 */
static void __unregister_request(struct ceph_osd_client *osdc,
				 struct ceph_osd_request *req)
{
	if (RB_EMPTY_NODE(&req->r_node)) {
		dout("__unregister_request %p tid %lld not registered\n",
		     req, req->r_tid);
		return;
	}

	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	rb_erase(&req->r_node, &osdc->requests);
	osdc->num_requests--;

	if (req->r_osd) {
		/* make sure the original request isn't in flight.
		 */
		ceph_msg_revoke(req->r_request);

		list_del_init(&req->r_osd_item);
		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		/* keep r_osd if this is still registered as a linger */
		if (list_empty(&req->r_linger_item))
			req->r_osd = NULL;
	}

	list_del_init(&req->r_req_lru_item);
	ceph_osdc_put_request(req);	/* drop the tree's ref */

	if (osdc->num_requests == 0) {
		dout(" no requests, canceling timeout\n");
		__cancel_osd_timeout(osdc);
	}
}

/*
 * Cancel a previously queued request message
 */
static void __cancel_request(struct ceph_osd_request *req)
{
	if (req->r_sent && req->r_osd) {
		ceph_msg_revoke(req->r_request);
		req->r_sent = 0;
	}
}

/* Add @req to the global linger list and its osd's linger list. */
static void __register_linger_request(struct ceph_osd_client *osdc,
				    struct ceph_osd_request *req)
{
	dout("__register_linger_request %p\n", req);
	list_add_tail(&req->r_linger_item, &osdc->req_linger);
	if (req->r_osd)
		list_add_tail(&req->r_linger_osd,
			      &req->r_osd->o_linger_requests);
}

/* Remove @req from the linger lists; may idle-LRU the osd. */
static void __unregister_linger_request(struct ceph_osd_client *osdc,
					struct ceph_osd_request *req)
{
	dout("__unregister_linger_request %p\n", req);
	list_del_init(&req->r_linger_item);
	if (req->r_osd) {
		list_del_init(&req->r_linger_osd);

		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		if (list_empty(&req->r_osd_item))
			req->r_osd = NULL;
	}
}

void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
					 struct ceph_osd_request *req)
{
	mutex_lock(&osdc->request_mutex);
	if (req->r_linger) {
		__unregister_linger_request(osdc, req);
		req->r_linger = 0;
		ceph_osdc_put_request(req);	/* drop the linger ref */
	}
	mutex_unlock(&osdc->request_mutex);
}
EXPORT_SYMBOL(ceph_osdc_unregister_linger_request);

void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
				  struct ceph_osd_request *req)
{
	if (!req->r_linger) {
		dout("set_request_linger %p\n", req);
		req->r_linger = 1;
		/*
		 * caller is now responsible for calling
		 * unregister_linger_request
		 */
		ceph_osdc_get_request(req);
	}
}
EXPORT_SYMBOL(ceph_osdc_set_request_linger);

/*
 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
 * (as needed), and set the request r_osd appropriately.  If there is
 * no up osd, set r_osd to NULL.  Move the request to the appropriate list
 * (unsent, homeless) or leave on in-flight lru.
 *
 * Return 0 if unchanged, 1 if changed, or negative on error.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static int __map_request(struct ceph_osd_client *osdc,
			 struct ceph_osd_request *req, int force_resend)
{
	struct ceph_pg pgid;
	int acting[CEPH_PG_MAX_SIZE];
	int o = -1, num = 0;
	int err;

	dout("map_request %p tid %lld\n", req, req->r_tid);
	err = ceph_calc_ceph_pg(&pgid, req->r_oid, osdc->osdmap,
				ceph_file_layout_pg_pool(req->r_file_layout));
	if (err) {
		list_move(&req->r_req_lru_item, &osdc->req_notarget);
		return err;
	}
	req->r_pgid = pgid;

	err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting);
	if (err > 0) {
		o = acting[0];	/* primary */
		num = err;
	}

	if ((!force_resend &&
	     req->r_osd && req->r_osd->o_osd == o &&
	     req->r_sent >= req->r_osd->o_incarnation &&
	     req->r_num_pg_osds == num &&
	     memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
	    (req->r_osd == NULL && o == -1))
		return 0;  /* no change */

	dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
	     req->r_tid, pgid.pool, pgid.seed, o,
	     req->r_osd ?
	     req->r_osd->o_osd : -1);

	/* record full pg acting set */
	memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
	req->r_num_pg_osds = num;

	if (req->r_osd) {
		__cancel_request(req);
		list_del_init(&req->r_osd_item);
		req->r_osd = NULL;
	}

	req->r_osd = __lookup_osd(osdc, o);
	if (!req->r_osd && o >= 0) {
		err = -ENOMEM;
		req->r_osd = create_osd(osdc, o);
		if (!req->r_osd) {
			list_move(&req->r_req_lru_item, &osdc->req_notarget);
			goto out;
		}

		dout("map_request osd %p is osd%d\n", req->r_osd, o);
		__insert_osd(osdc, req->r_osd);

		ceph_con_open(&req->r_osd->o_con,
			      CEPH_ENTITY_TYPE_OSD, o,
			      &osdc->osdmap->osd_addr[o]);
	}

	if (req->r_osd) {
		__remove_osd_from_lru(req->r_osd);
		list_add_tail(&req->r_osd_item, &req->r_osd->o_requests);
		list_move_tail(&req->r_req_lru_item, &osdc->req_unsent);
	} else {
		/* no up osd for this pg: park on the notarget list */
		list_move_tail(&req->r_req_lru_item, &osdc->req_notarget);
	}
	err = 1;   /* osd or pg changed */

out:
	return err;
}

/*
 * caller should hold map_sem (for read) and request_mutex
 */
static void __send_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	void *p;

	dout("send_request %p tid %llu to osd%d flags %d pg %lld.%x\n",
	     req, req->r_tid, req->r_osd->o_osd, req->r_flags,
	     (unsigned long long)req->r_pgid.pool, req->r_pgid.seed);

	/* fill in message content that changes each time we send it */
	put_unaligned_le32(osdc->osdmap->epoch, req->r_request_osdmap_epoch);
	put_unaligned_le32(req->r_flags, req->r_request_flags);
	put_unaligned_le64(req->r_pgid.pool, req->r_request_pool);
	p = req->r_request_pgid;
	ceph_encode_64(&p, req->r_pgid.pool);
	ceph_encode_32(&p, req->r_pgid.seed);
	put_unaligned_le64(1, req->r_request_attempts);  /* FIXME */
	memcpy(req->r_request_reassert_version, &req->r_reassert_version,
	       sizeof(req->r_reassert_version));

	req->r_stamp = jiffies;
	list_move_tail(&req->r_req_lru_item, &osdc->req_lru);

	ceph_msg_get(req->r_request); /* send consumes a ref */

	/* Mark the request unsafe if this is the first time it's being sent. */
	if (!req->r_sent && req->r_unsafe_callback)
		req->r_unsafe_callback(req, true);
	req->r_sent = req->r_osd->o_incarnation;

	ceph_con_send(&req->r_osd->o_con, req->r_request);
}

/*
 * Send any requests in the queue (req_unsent).
 */
static void __send_queued(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req, *tmp;

	dout("__send_queued\n");
	list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item)
		__send_request(osdc, req);
}

/*
 * Timeout callback, called every N seconds when 1 or more osd
 * requests has been active for more than N seconds.  When this
 * happens, we ping all OSDs with requests who have timed out to
 * ensure any communications channel reset is detected.  Reset the
 * request timeouts another N seconds in the future as we go.
 * Reschedule the timeout event another N seconds in future (unless
 * there are no open requests).
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_osd_request *req;
	struct ceph_osd *osd;
	unsigned long keepalive =
		osdc->client->options->osd_keepalive_timeout * HZ;
	struct list_head slow_osds;
	dout("timeout\n");
	down_read(&osdc->map_sem);

	ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);

	/*
	 * ping osds that are a bit slow.  this ensures that if there
	 * is a break in the TCP connection we will notice, and reopen
	 * a connection with that osd (from the fault callback).
	 */
	INIT_LIST_HEAD(&slow_osds);
	list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
		/* req_lru is in r_stamp order; first fresh one ends scan */
		if (time_before(jiffies, req->r_stamp + keepalive))
			break;

		osd = req->r_osd;
		BUG_ON(!osd);
		dout(" tid %llu is slow, will send keepalive on osd%d\n",
		     req->r_tid, osd->o_osd);
		list_move_tail(&osd->o_keepalive_item, &slow_osds);
	}
	while (!list_empty(&slow_osds)) {
		osd = list_entry(slow_osds.next, struct ceph_osd,
				 o_keepalive_item);
		list_del_init(&osd->o_keepalive_item);
		ceph_con_keepalive(&osd->o_con);
	}

	__schedule_osd_timeout(osdc);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
}

/* Periodic reaper for idle osd sessions (runs every osd_idle_ttl/4). */
static void handle_osds_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client,
			     osds_timeout_work.work);
	unsigned long delay =
		osdc->client->options->osd_idle_ttl * HZ >> 2;

	dout("osds timeout\n");
	down_read(&osdc->map_sem);
	remove_old_osds(osdc);
	up_read(&osdc->map_sem);

	schedule_delayed_work(&osdc->osds_timeout_work,
			      round_jiffies_relative(delay));
}

/* Deliver the "safe" (on-disk) completion for @req. */
static void complete_request(struct ceph_osd_request *req)
{
	if (req->r_unsafe_callback)
		req->r_unsafe_callback(req, false);
	complete_all(&req->r_safe_completion);  /* fsync waiter */
}

/*
 * handle osd op reply.  either call the callback if it is specified,
 * or do the completion to wake up the waiting thread.
*/ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, struct ceph_connection *con) { void *p, *end; struct ceph_osd_request *req; u64 tid; int object_len; unsigned int numops; int payload_len, flags; s32 result; s32 retry_attempt; struct ceph_pg pg; int err; u32 reassert_epoch; u64 reassert_version; u32 osdmap_epoch; int already_completed; u32 bytes; unsigned int i; tid = le64_to_cpu(msg->hdr.tid); dout("handle_reply %p tid %llu\n", msg, tid); p = msg->front.iov_base; end = p + msg->front.iov_len; ceph_decode_need(&p, end, 4, bad); object_len = ceph_decode_32(&p); ceph_decode_need(&p, end, object_len, bad); p += object_len; err = ceph_decode_pgid(&p, end, &pg); if (err) goto bad; ceph_decode_need(&p, end, 8 + 4 + 4 + 8 + 4, bad); flags = ceph_decode_64(&p); result = ceph_decode_32(&p); reassert_epoch = ceph_decode_32(&p); reassert_version = ceph_decode_64(&p); osdmap_epoch = ceph_decode_32(&p); /* lookup */ mutex_lock(&osdc->request_mutex); req = __lookup_request(osdc, tid); if (req == NULL) { dout("handle_reply tid %llu dne\n", tid); goto bad_mutex; } ceph_osdc_get_request(req); dout("handle_reply %p tid %llu req %p result %d\n", msg, tid, req, result); ceph_decode_need(&p, end, 4, bad); numops = ceph_decode_32(&p); if (numops > CEPH_OSD_MAX_OP) goto bad_put; if (numops != req->r_num_ops) goto bad_put; payload_len = 0; ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad); for (i = 0; i < numops; i++) { struct ceph_osd_op *op = p; int len; len = le32_to_cpu(op->payload_len); req->r_reply_op_len[i] = len; dout(" op %d has %d bytes\n", i, len); payload_len += len; p += sizeof(*op); } bytes = le32_to_cpu(msg->hdr.data_len); if (payload_len != bytes) { pr_warning("sum of op payload lens %d != data_len %d", payload_len, bytes); goto bad_put; } ceph_decode_need(&p, end, 4 + numops * 4, bad); retry_attempt = ceph_decode_32(&p); for (i = 0; i < numops; i++) req->r_reply_op_result[i] = ceph_decode_32(&p); if (!req->r_got_reply) { 
req->r_result = result; dout("handle_reply result %d bytes %d\n", req->r_result, bytes); if (req->r_result == 0) req->r_result = bytes; /* in case this is a write and we need to replay, */ req->r_reassert_version.epoch = cpu_to_le32(reassert_epoch); req->r_reassert_version.version = cpu_to_le64(reassert_version); req->r_got_reply = 1; } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) { dout("handle_reply tid %llu dup ack\n", tid); mutex_unlock(&osdc->request_mutex); goto done; } dout("handle_reply tid %llu flags %d\n", tid, flags); if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK)) __register_linger_request(osdc, req); /* either this is a read, or we got the safe response */ if (result < 0 || (flags & CEPH_OSD_FLAG_ONDISK) || ((flags & CEPH_OSD_FLAG_WRITE) == 0)) __unregister_request(osdc, req); already_completed = req->r_completed; req->r_completed = 1; mutex_unlock(&osdc->request_mutex); if (already_completed) goto done; if (req->r_callback) req->r_callback(req, msg); else complete_all(&req->r_completion); if (flags & CEPH_OSD_FLAG_ONDISK) complete_request(req); done: dout("req=%p req->r_linger=%d\n", req, req->r_linger); ceph_osdc_put_request(req); return; bad_put: ceph_osdc_put_request(req); bad_mutex: mutex_unlock(&osdc->request_mutex); bad: pr_err("corrupt osd_op_reply got %d %d\n", (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len)); ceph_msg_dump(msg); } static void reset_changed_osds(struct ceph_osd_client *osdc) { struct rb_node *p, *n; for (p = rb_first(&osdc->osds); p; p = n) { struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node); n = rb_next(p); if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) || memcmp(&osd->o_con.peer_addr, ceph_osd_addr(osdc->osdmap, osd->o_osd), sizeof(struct ceph_entity_addr)) != 0) __reset_osd(osdc, osd); } } /* * Requeue requests whose mapping to an OSD has changed. If requests map to * no osd, request a new map. * * Caller should hold map_sem for read. 
 */
static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
{
	struct ceph_osd_request *req, *nreq;
	struct rb_node *p;
	int needmap = 0;
	int err;

	dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
	mutex_lock(&osdc->request_mutex);
	for (p = rb_first(&osdc->requests); p; ) {
		req = rb_entry(p, struct ceph_osd_request, r_node);
		p = rb_next(p);

		/*
		 * For linger requests that have not yet been
		 * registered, move them to the linger list; they'll
		 * be sent to the osd in the loop below.  Unregister
		 * the request before re-registering it as a linger
		 * request to ensure the __map_request() below
		 * will decide it needs to be sent.
		 */
		if (req->r_linger && list_empty(&req->r_linger_item)) {
			dout("%p tid %llu restart on osd%d\n",
			     req, req->r_tid,
			     req->r_osd ? req->r_osd->o_osd : -1);
			__unregister_request(osdc, req);
			__register_linger_request(osdc, req);
			continue;
		}

		err = __map_request(osdc, req, force_resend);
		if (err < 0)
			continue;  /* error */
		if (req->r_osd == NULL) {
			dout("%p tid %llu maps to no osd\n", req, req->r_tid);
			needmap++;  /* request a newer map */
		} else if (err > 0) {
			if (!req->r_linger) {
				dout("%p tid %llu requeued on osd%d\n", req,
				     req->r_tid,
				     req->r_osd ? req->r_osd->o_osd : -1);
				req->r_flags |= CEPH_OSD_FLAG_RETRY;
			}
		}
	}

	list_for_each_entry_safe(req, nreq, &osdc->req_linger,
				 r_linger_item) {
		dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);

		err = __map_request(osdc, req, force_resend);
		dout("__map_request returned %d\n", err);
		if (err == 0)
			continue;  /* no change and no osd was specified */
		if (err < 0)
			continue;  /* hrm! */
		if (req->r_osd == NULL) {
			dout("tid %llu maps to no valid osd\n", req->r_tid);
			needmap++;  /* request a newer map */
			continue;
		}

		dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
		     req->r_osd ? req->r_osd->o_osd : -1);
		/* re-register with a fresh tid, then drop the linger entry */
		__register_request(osdc, req);
		__unregister_linger_request(osdc, req);
	}
	reset_changed_osds(osdc);
	mutex_unlock(&osdc->request_mutex);

	if (needmap) {
		dout("%d requests for down osds, need new map\n", needmap);
		ceph_monc_request_next_osdmap(&osdc->client->monc);
	}
}

/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p, *end, *next;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_osdmap *newmap = NULL, *oldmap;
	int err;
	struct ceph_fsid fsid;

	dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	/* verify fsid */
	/*
	 * NOTE(review): a decode failure here jumps to "bad", which does
	 * up_write() even though map_sem has not been taken yet at this
	 * point -- needs a separate early error label; confirm against
	 * later upstream kernels.
	 */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(osdc->client, &fsid) < 0)
		return;

	down_write(&osdc->map_sem);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		next = p + maplen;
		if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			newmap = osdmap_apply_incremental(&p, next,
							  osdc->osdmap,
							  &osdc->client->msgr);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			BUG_ON(!newmap);
			if (newmap != osdc->osdmap) {
				ceph_osdmap_destroy(osdc->osdmap);
				osdc->osdmap = newmap;
			}
			kick_requests(osdc, 0);
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p = next;
		nr_maps--;
	}
	if (newmap)
		goto done;

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			int skipped_map = 0;

			dout("taking full map %u len %d\n", epoch, maplen);
			newmap = osdmap_decode(&p, p+maplen);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			BUG_ON(!newmap);
			oldmap = osdc->osdmap;
			osdc->osdmap = newmap;
			if (oldmap) {
				/* epoch gap: force a full resend below */
				if (oldmap->epoch + 1 < newmap->epoch)
					skipped_map = 1;
				ceph_osdmap_destroy(oldmap);
			}
			kick_requests(osdc, skipped_map);
		}
		p += maplen;
		nr_maps--;
	}

done:
	downgrade_write(&osdc->map_sem);
	ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);

	/*
	 * subscribe to subsequent osdmap updates if full to ensure
	 * we find out when we are no longer full and stop returning
	 * ENOSPC.
	 */
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
		ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	wake_up_all(&osdc->client->auth_wq);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	ceph_msg_dump(msg);
	up_write(&osdc->map_sem);
	return;
}

/*
 * watch/notify callback event infrastructure
 *
 * These callbacks are used both for watch and notify operations.
 */
/* kref release: free the event once the last reference is dropped. */
static void __release_event(struct kref *kref)
{
	struct ceph_osd_event *event =
		container_of(kref, struct ceph_osd_event, kref);

	dout("__release_event %p\n", event);
	kfree(event);
}

static void get_event(struct ceph_osd_event *event)
{
	kref_get(&event->kref);
}

void ceph_osdc_put_event(struct ceph_osd_event *event)
{
	kref_put(&event->kref, __release_event);
}
EXPORT_SYMBOL(ceph_osdc_put_event);

/* Insert @new into the event rbtree, keyed (uniquely) by cookie. */
static void __insert_event(struct ceph_osd_client *osdc,
			     struct ceph_osd_event *new)
{
	struct rb_node **p = &osdc->event_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_event *event = NULL;

	while (*p) {
		parent = *p;
		event = rb_entry(parent, struct ceph_osd_event, node);
		if (new->cookie < event->cookie)
			p = &(*p)->rb_left;
		else if (new->cookie > event->cookie)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, &osdc->event_tree);
}

/* Find the registered event with @cookie, or NULL. */
static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc,
					        u64 cookie)
{
	struct rb_node **p = &osdc->event_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_event *event = NULL;

	while (*p) {
		parent = *p;
		event = rb_entry(parent, struct ceph_osd_event, node);
		if (cookie < event->cookie)
			p = &(*p)->rb_left;
		else if (cookie > event->cookie)
			p = &(*p)->rb_right;
		else
			return event;
	}
	return NULL;
}

/* Unlink @event from the tree and drop the tree's ref (if linked). */
static void __remove_event(struct ceph_osd_event *event)
{
	struct ceph_osd_client *osdc = event->osdc;

	if (!RB_EMPTY_NODE(&event->node)) {
		dout("__remove_event removed %p\n", event);
		rb_erase(&event->node, &osdc->event_tree);
		ceph_osdc_put_event(event);
	} else {
		dout("__remove_event didn't remove %p\n", event);
	}
}

/*
 * Allocate and register a watch/notify event.  On success *pevent holds
 * a reference owned by the caller (release with ceph_osdc_cancel_event).
 * Returns 0 or -ENOMEM.
 */
int ceph_osdc_create_event(struct ceph_osd_client *osdc,
			   void (*event_cb)(u64, u64, u8, void *),
			   void *data, struct ceph_osd_event **pevent)
{
	struct ceph_osd_event *event;

	event = kmalloc(sizeof(*event), GFP_NOIO);
	if (!event)
		return -ENOMEM;

	dout("create_event %p\n", event);
	event->cb = event_cb;
	event->one_shot = 0;
	event->data = data;
	event->osdc = osdc;
	INIT_LIST_HEAD(&event->osd_node);
	RB_CLEAR_NODE(&event->node);
	kref_init(&event->kref);   /* one ref for us */
	kref_get(&event->kref);    /* one ref for the caller */

	spin_lock(&osdc->event_lock);
	event->cookie = ++osdc->event_count;
	__insert_event(osdc, event);
	spin_unlock(&osdc->event_lock);

	*pevent = event;
	return 0;
}
EXPORT_SYMBOL(ceph_osdc_create_event);

/* Unregister @event and drop the caller's reference. */
void ceph_osdc_cancel_event(struct ceph_osd_event *event)
{
	struct ceph_osd_client *osdc = event->osdc;

	dout("cancel_event %p\n", event);
	spin_lock(&osdc->event_lock);
	__remove_event(event);
	spin_unlock(&osdc->event_lock);
	ceph_osdc_put_event(event); /* caller's */
}
EXPORT_SYMBOL(ceph_osdc_cancel_event);

/* Workqueue handler: invoke the user callback outside the msgr thread. */
static void do_event_work(struct work_struct *work)
{
	struct ceph_osd_event_work *event_work =
		container_of(work, struct ceph_osd_event_work, work);
	struct ceph_osd_event *event = event_work->event;
	u64 ver = event_work->ver;
	u64 notify_id = event_work->notify_id;
	u8 opcode = event_work->opcode;

	dout("do_event_work completing %p\n", event);
	event->cb(ver, notify_id, opcode, event->data);
	dout("do_event_work completed %p\n", event);
	ceph_osdc_put_event(event);
	kfree(event_work);
}

/*
 * Process osd watch notifications
 */
static void handle_watch_notify(struct ceph_osd_client *osdc,
				struct ceph_msg *msg)
{
	void *p, *end;
	u8 proto_ver;
	u64 cookie, ver, notify_id;
	u8 opcode;
	struct ceph_osd_event *event;
	struct ceph_osd_event_work *event_work;

	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	ceph_decode_8_safe(&p, end, proto_ver, bad);
	ceph_decode_8_safe(&p, end, opcode, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);
	ceph_decode_64_safe(&p, end, ver, bad);
	ceph_decode_64_safe(&p, end, notify_id, bad);

	spin_lock(&osdc->event_lock);
	event = __find_event(osdc, cookie);
	if (event) {
		BUG_ON(event->one_shot);
		get_event(event);
	}
	spin_unlock(&osdc->event_lock);
	dout("handle_watch_notify cookie %lld ver %lld event %p\n",
	     cookie, ver, event);
	if (event) {
		event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
		if
		    (!event_work) {
			dout("ERROR: could not allocate event_work\n");
			goto done_err;
		}
		INIT_WORK(&event_work->work, do_event_work);
		event_work->event = event;
		event_work->ver = ver;
		event_work->notify_id = notify_id;
		event_work->opcode = opcode;
		if (!queue_work(osdc->notify_wq, &event_work->work)) {
			dout("WARNING: failed to queue notify event work\n");
			goto done_err;
		}
	}

	return;

done_err:
	ceph_osdc_put_event(event);
	return;

bad:
	pr_err("osdc handle_watch_notify corrupt msg\n");
	return;
}

/*
 * build new request AND message
 *
 * Encodes the full v4 MOSDOp front and records pointers to the fields
 * (epoch, flags, pgid, attempts, reassert version) that __send_request()
 * re-fills on every (re)send.
 */
void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off,
			     struct ceph_snap_context *snapc, u64 snap_id,
			     struct timespec *mtime)
{
	struct ceph_msg *msg = req->r_request;
	void *p;
	size_t msg_size;
	int flags = req->r_flags;
	u64 data_len;
	unsigned int i;

	req->r_snapid = snap_id;
	req->r_snapc = ceph_get_snap_context(snapc);

	/* encode request */
	msg->hdr.version = cpu_to_le16(4);

	p = msg->front.iov_base;
	ceph_encode_32(&p, 1);   /* client_inc is always 1 */
	req->r_request_osdmap_epoch = p;
	p += 4;
	req->r_request_flags = p;
	p += 4;
	if (req->r_flags & CEPH_OSD_FLAG_WRITE)
		ceph_encode_timespec(p, mtime);
	p += sizeof(struct ceph_timespec);
	req->r_request_reassert_version = p;
	p += sizeof(struct ceph_eversion); /* will get filled in */

	/* oloc */
	ceph_encode_8(&p, 4);
	ceph_encode_8(&p, 4);
	ceph_encode_32(&p, 8 + 4 + 4);
	req->r_request_pool = p;
	p += 8;
	ceph_encode_32(&p, -1);  /* preferred */
	ceph_encode_32(&p, 0);   /* key len */

	ceph_encode_8(&p, 1);
	req->r_request_pgid = p;
	p += 8 + 4;
	ceph_encode_32(&p, -1);  /* preferred */

	/* oid */
	ceph_encode_32(&p, req->r_oid_len);
	memcpy(p, req->r_oid, req->r_oid_len);
	dout("oid '%.*s' len %d\n", req->r_oid_len, req->r_oid, req->r_oid_len);
	p += req->r_oid_len;

	/* ops--can imply data */
	ceph_encode_16(&p, (u16)req->r_num_ops);
	data_len = 0;
	for (i = 0; i < req->r_num_ops; i++) {
		data_len += osd_req_encode_op(req, p, i);
		p += sizeof(struct ceph_osd_op);
	}

	/* snaps */
	ceph_encode_64(&p, req->r_snapid);
	ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0);
	ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0);
	if (req->r_snapc) {
		for (i = 0; i < snapc->num_snaps; i++) {
			ceph_encode_64(&p, req->r_snapc->snaps[i]);
		}
	}

	req->r_request_attempts = p;
	p += 4;

	/* data */
	if (flags & CEPH_OSD_FLAG_WRITE) {
		u16 data_off;

		/*
		 * The header "data_off" is a hint to the receiver
		 * allowing it to align received data into its
		 * buffers such that there's no need to re-copy
		 * it before writing it to disk (direct I/O).
		 */
		data_off = (u16) (off & 0xffff);
		req->r_request->hdr.data_off = cpu_to_le16(data_off);
	}
	req->r_request->hdr.data_len = cpu_to_le32(data_len);

	BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
	msg_size = p - msg->front.iov_base;
	msg->front.iov_len = msg_size;
	msg->hdr.front_len = cpu_to_le32(msg_size);

	dout("build_request msg_size was %d\n", (int)msg_size);
}
EXPORT_SYMBOL(ceph_osdc_build_request);

/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	int rc = 0;

	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	__register_request(osdc, req);
	req->r_sent = 0;
	req->r_got_reply = 0;
	req->r_completed = 0;
	rc = __map_request(osdc, req, 0);
	if (rc < 0) {
		if (nofail) {
			/* leave registered; a future map update retries it */
			dout("osdc_start_request failed map, "
				" will retry %lld\n", req->r_tid);
			rc = 0;
		} else {
			__unregister_request(osdc, req);
		}
		goto out_unlock;
	}
	if (req->r_osd == NULL) {
		dout("send_request %p no up osds in pg\n", req);
		ceph_monc_request_next_osdmap(&osdc->client->monc);
	} else {
		__send_queued(osdc);
	}
	rc = 0;
out_unlock:
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_start_request);

/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	int rc;

	rc = wait_for_completion_interruptible(&req->r_completion);
	if (rc < 0) {
		mutex_lock(&osdc->request_mutex);
__cancel_request(req); __unregister_request(osdc, req); mutex_unlock(&osdc->request_mutex); complete_request(req); dout("wait_request tid %llu canceled/timed out\n", req->r_tid); return rc; } dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result); return req->r_result; } EXPORT_SYMBOL(ceph_osdc_wait_request); /* * sync - wait for all in-flight requests to flush. avoid starvation. */ void ceph_osdc_sync(struct ceph_osd_client *osdc) { struct ceph_osd_request *req; u64 last_tid, next_tid = 0; mutex_lock(&osdc->request_mutex); last_tid = osdc->last_tid; while (1) { req = __lookup_request_ge(osdc, next_tid); if (!req) break; if (req->r_tid > last_tid) break; next_tid = req->r_tid + 1; if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0) continue; ceph_osdc_get_request(req); mutex_unlock(&osdc->request_mutex); dout("sync waiting on tid %llu (last is %llu)\n", req->r_tid, last_tid); wait_for_completion(&req->r_safe_completion); mutex_lock(&osdc->request_mutex); ceph_osdc_put_request(req); } mutex_unlock(&osdc->request_mutex); dout("sync done (thru tid %llu)\n", last_tid); } EXPORT_SYMBOL(ceph_osdc_sync); /* * init, shutdown */ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) { int err; dout("init\n"); osdc->client = client; osdc->osdmap = NULL; init_rwsem(&osdc->map_sem); init_completion(&osdc->map_waiters); osdc->last_requested_map = 0; mutex_init(&osdc->request_mutex); osdc->last_tid = 0; osdc->osds = RB_ROOT; INIT_LIST_HEAD(&osdc->osd_lru); osdc->requests = RB_ROOT; INIT_LIST_HEAD(&osdc->req_lru); INIT_LIST_HEAD(&osdc->req_unsent); INIT_LIST_HEAD(&osdc->req_notarget); INIT_LIST_HEAD(&osdc->req_linger); osdc->num_requests = 0; INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout); INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout); spin_lock_init(&osdc->event_lock); osdc->event_tree = RB_ROOT; osdc->event_count = 0; schedule_delayed_work(&osdc->osds_timeout_work, 
round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ)); err = -ENOMEM; osdc->req_mempool = mempool_create_kmalloc_pool(10, sizeof(struct ceph_osd_request)); if (!osdc->req_mempool) goto out; err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP, OSD_OP_FRONT_LEN, 10, true, "osd_op"); if (err < 0) goto out_mempool; err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY, OSD_OPREPLY_FRONT_LEN, 10, true, "osd_op_reply"); if (err < 0) goto out_msgpool; osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify"); if (IS_ERR(osdc->notify_wq)) { err = PTR_ERR(osdc->notify_wq); osdc->notify_wq = NULL; goto out_msgpool; } return 0; out_msgpool: ceph_msgpool_destroy(&osdc->msgpool_op); out_mempool: mempool_destroy(osdc->req_mempool); out: return err; } void ceph_osdc_stop(struct ceph_osd_client *osdc) { flush_workqueue(osdc->notify_wq); destroy_workqueue(osdc->notify_wq); cancel_delayed_work_sync(&osdc->timeout_work); cancel_delayed_work_sync(&osdc->osds_timeout_work); if (osdc->osdmap) { ceph_osdmap_destroy(osdc->osdmap); osdc->osdmap = NULL; } remove_all_osds(osdc); mempool_destroy(osdc->req_mempool); ceph_msgpool_destroy(&osdc->msgpool_op); ceph_msgpool_destroy(&osdc->msgpool_op_reply); } /* * Read some contiguous pages. If we cross a stripe boundary, shorten * *plen. Return number of bytes read, or error. 
 */
/*
 * Synchronous read of up to num_pages pages at file offset @off.
 * On return *plen holds the (possibly shortened) length actually
 * requested; the return value is the byte count read or a negative
 * errno.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			u32 truncate_seq, u64 truncate_size,
			struct page **pages, int num_pages, int page_align)
{
	struct ceph_osd_request *req;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	/* new_request may trim *plen at an object/stripe boundary */
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, truncate_seq, truncate_size,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short read due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0,
				pages, *plen, page_align, false, false);

	dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
	     off, *plen, *plen, page_align);

	ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);

	/* submit and block until the reply (or interruption) arrives */
	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);

/*
 * do a synchronous write on N pages
 */
/*
 * Synchronous write of @len bytes from @pages at file offset @off.
 * Returns the number of bytes written (len) on success or a negative
 * errno.  Snapshots are read-only, hence the BUG_ON below.
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc,
			 u64 off, u64 len,
			 u32 truncate_seq, u64 truncate_size,
			 struct timespec *mtime,
			 struct page **pages, int num_pages)
{
	struct ceph_osd_request *req;
	int rc = 0;
	int page_align = off & ~PAGE_MASK;

	BUG_ON(vino.snap != CEPH_NOSNAP);	/* snapshots aren't writeable */
	/* new_request may shorten len at an object boundary */
	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1,
				    CEPH_OSD_OP_WRITE,
				    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
				    snapc, truncate_seq, truncate_size,
				    true);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short write due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
				false, false);
	dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);

	ceph_osdc_build_request(req, off, snapc, CEPH_NOSNAP, mtime);

	rc = ceph_osdc_start_request(osdc, req, true);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	if (rc == 0)
		rc = len;	/* success: report full length written */
	dout("writepages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);

/*
 * One-time module setup: create the slab cache for OSD requests.
 * Returns 0 or -ENOMEM.
 */
int ceph_osdc_setup(void)
{
	BUG_ON(ceph_osd_request_cache);
	ceph_osd_request_cache = kmem_cache_create("ceph_osd_request",
					sizeof (struct ceph_osd_request),
					__alignof__(struct ceph_osd_request),
					0, NULL);

	return ceph_osd_request_cache ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(ceph_osdc_setup);

/* Module teardown counterpart of ceph_osdc_setup(). */
void ceph_osdc_cleanup(void)
{
	BUG_ON(!ceph_osd_request_cache);
	kmem_cache_destroy(ceph_osd_request_cache);
	ceph_osd_request_cache = NULL;
}
EXPORT_SYMBOL(ceph_osdc_cleanup);

/*
 * handle incoming message
 */
/*
 * Messenger dispatch callback: route an incoming message by type.
 * Always consumes (puts) the message reference.
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;
	int type = le16_to_cpu(msg->hdr.type);

	if (!osd)
		goto out;	/* connection no longer tied to an osd */
	osdc = osd->o_osdc;

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osdc, msg, con);
		break;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}

/*
 * lookup and return message for incoming reply.  set up reply message
 * pages.
 */
/*
 * Find the in-flight request matching the incoming OPREPLY header and
 * hand back its preallocated reply message (growing it if the front is
 * too small).  Sets *skip = 1 to drop replies for unknown tids or
 * replies carrying more data than we have pages ready for.
 * Called with a ref on con->private; takes request_mutex.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m;
	struct ceph_osd_request *req;
	int front = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid;

	tid = le64_to_cpu(hdr->tid);
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (!req) {
		/* stale or duplicate reply: tell the messenger to skip it */
		*skip = 1;
		m = NULL;
		dout("get_reply unknown tid %llu from osd%d\n", tid,
		     osd->o_osd);
		goto out;
	}

	/* revoke the reply msg from any earlier connection it was queued on */
	if (req->r_reply->con)
		dout("%s revoking msg %p from old con %p\n", __func__,
		     req->r_reply, req->r_reply->con);
	ceph_msg_revoke_incoming(req->r_reply);

	if (front > req->r_reply->front.iov_len) {
		/* preallocated front too small: replace with a bigger msg */
		pr_warning("get_reply front %d > preallocated %d\n",
			   front, (int)req->r_reply->front.iov_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
		if (!m)
			goto out;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}
	m = ceph_msg_get(req->r_reply);

	if (data_len > 0) {
		struct ceph_osd_data *osd_data;

		/*
		 * XXX This is assuming there is only one op containing
		 * XXX page data.  Probably OK for reads, but this
		 * XXX ought to be done more generally.
		 */
		osd_data = osd_req_op_extent_osd_data(req, 0);
		if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
			if (osd_data->pages &&
				unlikely(osd_data->length < data_len)) {

				/* reply larger than our receive buffer */
				pr_warning("tid %lld reply has %d bytes "
					"we had only %llu bytes ready\n",
					tid, data_len, osd_data->length);
				*skip = 1;
				ceph_msg_put(m);
				m = NULL;
				goto out;
			}
		}
	}
	*skip = 0;
	dout("get_reply tid %lld %p\n", tid, m);

out:
	mutex_unlock(&osdc->request_mutex);
	return m;
}

/*
 * Messenger alloc_msg callback: allocate (or look up) a message buffer
 * for an incoming frame based on its type.  Unknown types are skipped.
 */
static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	int type = le16_to_cpu(hdr->type);
	int front = le32_to_cpu(hdr->front_len);

	*skip = 0;
	switch (type) {
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_WATCH_NOTIFY:
		return ceph_msg_new(type, front, GFP_NOFS, false);
	case CEPH_MSG_OSD_OPREPLY:
		return get_reply(con, hdr, skip);
	default:
		pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
			osd->o_osd);
		*skip = 1;
		return NULL;
	}
}

/*
 * Wrappers to refcount containing ceph_osd struct
 */
/* Take a ref on the osd backing this connection; NULL if it is going away. */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	if (get_osd(osd))
		return con;
	return NULL;
}

/* Drop the ref taken by get_osd_con(). */
static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	put_osd(osd);
}

/*
 * authentication
 */

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
*/ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, int *proto, int force_new) { struct ceph_osd *o = con->private; struct ceph_osd_client *osdc = o->o_osdc; struct ceph_auth_client *ac = osdc->client->monc.auth; struct ceph_auth_handshake *auth = &o->o_auth; if (force_new && auth->authorizer) { ceph_auth_destroy_authorizer(ac, auth->authorizer); auth->authorizer = NULL; } if (!auth->authorizer) { int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD, auth); if (ret) return ERR_PTR(ret); } else { int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD, auth); if (ret) return ERR_PTR(ret); } *proto = ac->protocol; return auth; } static int verify_authorizer_reply(struct ceph_connection *con, int len) { struct ceph_osd *o = con->private; struct ceph_osd_client *osdc = o->o_osdc; struct ceph_auth_client *ac = osdc->client->monc.auth; return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len); } static int invalidate_authorizer(struct ceph_connection *con) { struct ceph_osd *o = con->private; struct ceph_osd_client *osdc = o->o_osdc; struct ceph_auth_client *ac = osdc->client->monc.auth; ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD); return ceph_monc_validate_auth(&osdc->client->monc); } static const struct ceph_connection_operations osd_con_ops = { .get = get_osd_con, .put = put_osd_con, .dispatch = dispatch, .get_authorizer = get_authorizer, .verify_authorizer_reply = verify_authorizer_reply, .invalidate_authorizer = invalidate_authorizer, .alloc_msg = alloc_msg, .fault = osd_reset, };
gpl-2.0
nache303/Project-WoW-Legacy
dep/mysqllite/mysys/my_file.c
401
3867
/* Copyright (C) 2000 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */

#include "mysys_priv.h"
#include "my_static.h"
#include <m_string.h>

/*
  Set how many open files we want to be able to handle

  SYNOPSIS
    set_maximum_open_files()
    max_file_limit	Files to open

  NOTES
    The request may not be fulfilled because of system limitations

  RETURN
    Files available to open.
    May be more or less than max_file_limit!
*/

#if defined(HAVE_GETRLIMIT) && defined(RLIMIT_NOFILE)

#ifndef RLIM_INFINITY
#define RLIM_INFINITY ((uint) 0xffffffff)
#endif

/*
  Raise (or read back) the process RLIMIT_NOFILE soft limit to
  max_file_limit.  If setrlimit() fails the original limit is kept.
*/
static uint set_max_open_files(uint max_file_limit)
{
  struct rlimit rlimit;
  uint old_cur;
  DBUG_ENTER("set_max_open_files");
  DBUG_PRINT("enter",("files: %u", max_file_limit));

  if (!getrlimit(RLIMIT_NOFILE,&rlimit))
  {
    old_cur= (uint) rlimit.rlim_cur;
    DBUG_PRINT("info", ("rlim_cur: %u rlim_max: %u",
			(uint) rlimit.rlim_cur,
			(uint) rlimit.rlim_max));
    if (rlimit.rlim_cur == RLIM_INFINITY)
      rlimit.rlim_cur = max_file_limit;
    if (rlimit.rlim_cur >= max_file_limit)
      DBUG_RETURN(rlimit.rlim_cur);		/* purecov: inspected */
    rlimit.rlim_cur= rlimit.rlim_max= max_file_limit;
    if (setrlimit(RLIMIT_NOFILE, &rlimit))
      max_file_limit= old_cur;			/* Use original value */
    else
    {
      rlimit.rlim_cur= 0;			/* Safety if next call fails */
      (void) getrlimit(RLIMIT_NOFILE,&rlimit);
      DBUG_PRINT("info", ("rlim_cur: %u", (uint) rlimit.rlim_cur));
      if (rlimit.rlim_cur)			/* If call didn't fail */
	max_file_limit= (uint) rlimit.rlim_cur;
    }
  }
  DBUG_PRINT("exit",("max_file_limit: %u", max_file_limit));
  DBUG_RETURN(max_file_limit);
}
#else
static uint set_max_open_files(uint max_file_limit)
{
  /* We don't know the limit. Return best guess */
  return min(max_file_limit, OS_FILE_LIMIT);
}
#endif

/*
  Change number of open files

  SYNOPSIS:
    my_set_max_open_files()
    files		Number of requested files

  RETURN
    number of files available for open
*/

uint my_set_max_open_files(uint files)
{
  struct st_my_file_info *tmp;
  DBUG_ENTER("my_set_max_open_files");
  DBUG_PRINT("enter",("files: %u my_file_limit: %u", files, my_file_limit));

  /* Reserve slots for stdin/stdout/stderr etc., then clamp to OS limit */
  files+= MY_FILE_MIN;
  files= set_max_open_files(min(files, OS_FILE_LIMIT));
  if (files <= MY_NFILE)
    DBUG_RETURN(files);		/* static default table is big enough */

  if (!(tmp= (struct st_my_file_info*) my_malloc(sizeof(*tmp) * files,
						 MYF(MY_WME))))
    DBUG_RETURN(MY_NFILE);

  /* Copy any initialized files */
  memcpy((char*) tmp, (char*) my_file_info,
	 sizeof(*tmp) * min(my_file_limit, files));
  /*
    NOTE(review): when my_file_limit > files, "tmp + my_file_limit"
    computes a pointer past the end of the new allocation; the bzero
    length is then 0 so nothing is written, but the computation itself
    is technically out of bounds — confirm whether that case can occur.
  */
  bzero((char*) (tmp + my_file_limit),
	max((int) (files- my_file_limit), 0)*sizeof(*tmp));
  my_free_open_file_info();			/* Free if already allocated */
  my_file_info= tmp;
  my_file_limit= files;
  DBUG_PRINT("exit",("files: %u", files));
  DBUG_RETURN(files);
}

/*
  Release a dynamically allocated file-info table and fall back to the
  static default table, preserving its contents for my_print_open_files.
*/
void my_free_open_file_info()
{
  /* NOTE(review): DBUG tag says "my_free_file_info", not the function name */
  DBUG_ENTER("my_free_file_info");
  if (my_file_info != my_file_info_default)
  {
    /* Copy data back for my_print_open_files */
    memcpy((char*) my_file_info_default, my_file_info,
	   sizeof(*my_file_info_default)* MY_NFILE);
    my_free(my_file_info);
    my_file_info= my_file_info_default;
    my_file_limit= MY_NFILE;
  }
  DBUG_VOID_RETURN;
}
gpl-2.0
TeamHackYU/SKernel_Yu
drivers/clk/qcom/clock-gcc-8994.c
657
91594
/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/err.h> #include <linux/ctype.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/module.h> #include <linux/clk/msm-clock-generic.h> #include <dt-bindings/clock/msm-clocks-8994.h> #include <soc/qcom/clock-local2.h> #include <soc/qcom/clock-voter.h> #include <soc/qcom/clock-pll.h> #include <soc/qcom/clock-alpha-pll.h> #include "vdd-level-8994.h" static void __iomem *virt_base; static void __iomem *virt_dbgbase; #define GCC_REG_BASE(x) (void __iomem *)(virt_base + (x)) #define gcc_xo_source_val 0 #define gpll0_out_main_source_val 1 #define gpll4_out_main_source_val 5 #define pcie_pipe_source_val 2 #define FIXDIV(div) (div ? 
(2 * (div) - 1) : (0)) #define F(f, s, div, m, n) \ { \ .freq_hz = (f), \ .src_clk = &s.c, \ .m_val = (m), \ .n_val = ~((n)-(m)) * !!(n), \ .d_val = ~(n),\ .div_src_val = BVAL(4, 0, (int)FIXDIV(div)) \ | BVAL(10, 8, s##_source_val), \ } #define F_EXT(f, s, div, m, n) \ { \ .freq_hz = (f), \ .m_val = (m), \ .n_val = ~((n)-(m)) * !!(n), \ .d_val = ~(n),\ .div_src_val = BVAL(4, 0, (int)FIXDIV(div)) \ | BVAL(10, 8, s##_source_val), \ } static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner, NULL); #define GPLL0_MODE (0x0000) #define SYS_NOC_USB3_AXI_CBCR (0x03FC) #define SYS_NOC_UFS_AXI_CBCR (0x1D7C) #define MSS_CFG_AHB_CBCR (0x0280) #define MSS_Q6_BIMC_AXI_CBCR (0x0284) #define USB_30_BCR (0x03C0) #define USB30_MASTER_CBCR (0x03C8) #define USB30_SLEEP_CBCR (0x03CC) #define USB30_MOCK_UTMI_CBCR (0x03D0) #define USB30_MASTER_CMD_RCGR (0x03D4) #define USB30_MOCK_UTMI_CMD_RCGR (0x03E8) #define USB3_PHY_BCR (0x1400) #define USB3PHY_PHY_BCR (0x1404) #define USB3_PHY_AUX_CBCR (0x1408) #define USB3_PHY_PIPE_CBCR (0x140C) #define USB3_PHY_AUX_CMD_RCGR (0x1414) #define USB_HS_BCR (0x0480) #define USB_HS_SYSTEM_CBCR (0x0484) #define USB_HS_AHB_CBCR (0x0488) #define USB_HS_SYSTEM_CMD_RCGR (0x0490) #define USB2_HS_PHY_SLEEP_CBCR (0x04AC) #define USB2_HS_PHY_ONLY_BCR (0x04B0) #define QUSB2_PHY_BCR (0x04B8) #define USB_PHY_CFG_AHB2PHY_CBCR (0x1A84) #define SDCC1_APPS_CMD_RCGR (0x04D0) #define SDCC1_APPS_CBCR (0x04C4) #define SDCC1_AHB_CBCR (0x04C8) #define SDCC2_APPS_CMD_RCGR (0x0510) #define SDCC2_APPS_CBCR (0x0504) #define SDCC2_AHB_CBCR (0x0508) #define SDCC3_APPS_CMD_RCGR (0x0550) #define SDCC3_APPS_CBCR (0x0544) #define SDCC3_AHB_CBCR (0x0548) #define SDCC4_APPS_CMD_RCGR (0x0590) #define SDCC4_APPS_CBCR (0x0584) #define SDCC4_AHB_CBCR (0x0588) #define BLSP1_AHB_CBCR (0x05C4) #define BLSP1_QUP1_SPI_APPS_CBCR (0x0644) #define BLSP1_QUP1_I2C_APPS_CBCR (0x0648) #define BLSP1_QUP1_I2C_APPS_CMD_RCGR (0x0660) #define BLSP1_QUP2_I2C_APPS_CMD_RCGR (0x06E0) #define 
BLSP1_QUP3_I2C_APPS_CMD_RCGR (0x0760) #define BLSP1_QUP4_I2C_APPS_CMD_RCGR (0x07E0) #define BLSP1_QUP5_I2C_APPS_CMD_RCGR (0x0860) #define BLSP1_QUP6_I2C_APPS_CMD_RCGR (0x08E0) #define BLSP2_QUP1_I2C_APPS_CMD_RCGR (0x09A0) #define BLSP2_QUP2_I2C_APPS_CMD_RCGR (0x0A20) #define BLSP2_QUP3_I2C_APPS_CMD_RCGR (0x0AA0) #define BLSP2_QUP4_I2C_APPS_CMD_RCGR (0x0B20) #define BLSP2_QUP5_I2C_APPS_CMD_RCGR (0x0BA0) #define BLSP2_QUP6_I2C_APPS_CMD_RCGR (0x0C20) #define BLSP1_QUP1_SPI_APPS_CMD_RCGR (0x064C) #define BLSP1_UART1_APPS_CBCR (0x0684) #define BLSP1_UART1_APPS_CMD_RCGR (0x068C) #define BLSP1_QUP2_SPI_APPS_CBCR (0x06C4) #define BLSP1_QUP2_I2C_APPS_CBCR (0x06C8) #define BLSP1_QUP2_SPI_APPS_CMD_RCGR (0x06CC) #define BLSP1_UART2_APPS_CBCR (0x0704) #define BLSP1_UART2_APPS_CMD_RCGR (0x070C) #define BLSP1_QUP3_SPI_APPS_CBCR (0x0744) #define BLSP1_QUP3_I2C_APPS_CBCR (0x0748) #define BLSP1_QUP3_SPI_APPS_CMD_RCGR (0x074C) #define BLSP1_UART3_APPS_CBCR (0x0784) #define BLSP1_UART3_APPS_CMD_RCGR (0x078C) #define BLSP1_QUP4_SPI_APPS_CBCR (0x07C4) #define BLSP1_QUP4_I2C_APPS_CBCR (0x07C8) #define BLSP1_QUP4_SPI_APPS_CMD_RCGR (0x07CC) #define BLSP1_UART4_APPS_CBCR (0x0804) #define BLSP1_UART4_APPS_CMD_RCGR (0x080C) #define BLSP1_QUP5_SPI_APPS_CBCR (0x0844) #define BLSP1_QUP5_I2C_APPS_CBCR (0x0848) #define BLSP1_QUP5_SPI_APPS_CMD_RCGR (0x084C) #define BLSP1_UART5_APPS_CBCR (0x0884) #define BLSP1_UART5_APPS_CMD_RCGR (0x088C) #define BLSP1_QUP6_SPI_APPS_CBCR (0x08C4) #define BLSP1_QUP6_I2C_APPS_CBCR (0x08C8) #define BLSP1_QUP6_SPI_APPS_CMD_RCGR (0x08CC) #define BLSP1_UART6_APPS_CBCR (0x0904) #define BLSP1_UART6_APPS_CMD_RCGR (0x090C) #define BLSP2_AHB_CBCR (0x0944) #define BLSP2_QUP1_SPI_APPS_CBCR (0x0984) #define BLSP2_QUP1_I2C_APPS_CBCR (0x0988) #define BLSP2_QUP1_SPI_APPS_CMD_RCGR (0x098C) #define BLSP2_UART1_APPS_CBCR (0x09C4) #define BLSP2_UART1_APPS_CMD_RCGR (0x09CC) #define BLSP2_QUP2_SPI_APPS_CBCR (0x0A04) #define BLSP2_QUP2_I2C_APPS_CBCR (0x0A08) #define 
BLSP2_QUP2_SPI_APPS_CMD_RCGR (0x0A0C) #define BLSP2_UART2_APPS_CBCR (0x0A44) #define BLSP2_UART2_APPS_CMD_RCGR (0x0A4C) #define BLSP2_QUP3_SPI_APPS_CBCR (0x0A84) #define BLSP2_QUP3_I2C_APPS_CBCR (0x0A88) #define BLSP2_QUP3_SPI_APPS_CMD_RCGR (0x0A8C) #define BLSP2_UART3_APPS_CBCR (0x0AC4) #define BLSP2_UART3_APPS_CMD_RCGR (0x0ACC) #define BLSP2_QUP4_SPI_APPS_CBCR (0x0B04) #define BLSP2_QUP4_I2C_APPS_CBCR (0x0B08) #define BLSP2_QUP4_SPI_APPS_CMD_RCGR (0x0B0C) #define BLSP2_UART4_APPS_CBCR (0x0B44) #define BLSP2_UART4_APPS_CMD_RCGR (0x0B4C) #define BLSP2_QUP5_SPI_APPS_CBCR (0x0B84) #define BLSP2_QUP5_I2C_APPS_CBCR (0x0B88) #define BLSP2_QUP5_SPI_APPS_CMD_RCGR (0x0B8C) #define BLSP2_UART5_APPS_CBCR (0x0BC4) #define BLSP2_UART5_APPS_CMD_RCGR (0x0BCC) #define BLSP2_QUP6_SPI_APPS_CBCR (0x0C04) #define BLSP2_QUP6_I2C_APPS_CBCR (0x0C08) #define BLSP2_QUP6_SPI_APPS_CMD_RCGR (0x0C0C) #define BLSP2_UART6_APPS_CBCR (0x0C44) #define BLSP2_UART6_APPS_CMD_RCGR (0x0C4C) #define PDM_AHB_CBCR (0x0CC4) #define PDM2_CBCR (0x0CCC) #define PDM2_CMD_RCGR (0x0CD0) #define PRNG_AHB_CBCR (0x0D04) #define BAM_DMA_AHB_CBCR (0x0D44) #define TSIF_AHB_CBCR (0x0D84) #define TSIF_REF_CBCR (0x0D88) #define TSIF_REF_CMD_RCGR (0x0D90) #define BOOT_ROM_AHB_CBCR (0x0E04) #define GCC_XO_DIV4_CBCR (0x10C8) #define LPASS_Q6_AXI_CBCR (0x11C0) #define APCS_GPLL_ENA_VOTE (0x1480) #define APCS_CLOCK_BRANCH_ENA_VOTE (0x1484) #define GCC_DEBUG_CLK_CTL (0x1880) #define CLOCK_FRQ_MEASURE_CTL (0x1884) #define CLOCK_FRQ_MEASURE_STATUS (0x1888) #define PLLTEST_PAD_CFG (0x188C) #define GP1_CBCR (0x1900) #define GP1_CMD_RCGR (0x1904) #define GP2_CBCR (0x1940) #define GP2_CMD_RCGR (0x1944) #define GP3_CBCR (0x1980) #define GP3_CMD_RCGR (0x1984) #define GPLL4_MODE (0x1DC0) #define PCIE_0_SLV_AXI_CBCR (0x1AC8) #define PCIE_0_MSTR_AXI_CBCR (0x1ACC) #define PCIE_0_CFG_AHB_CBCR (0x1AD0) #define PCIE_0_AUX_CBCR (0x1AD4) #define PCIE_0_PIPE_CBCR (0x1AD8) #define PCIE_0_PIPE_CMD_RCGR (0x1ADC) #define PCIE_0_AUX_CMD_RCGR 
(0x1B00) #define PCIE_PHY_0_PHY_BCR (0x1B14) #define PCIE_PHY_0_BCR (0x1B18) #define PCIE_1_SLV_AXI_CBCR (0x1B48) #define PCIE_1_MSTR_AXI_CBCR (0x1B4C) #define PCIE_1_CFG_AHB_CBCR (0x1B50) #define PCIE_1_AUX_CBCR (0x1B54) #define PCIE_1_PIPE_CBCR (0x1B58) #define PCIE_1_PIPE_CMD_RCGR (0x1B5C) #define PCIE_1_AUX_CMD_RCGR (0x1B80) #define PCIE_PHY_1_PHY_BCR (0x1B94) #define PCIE_PHY_1_BCR (0x1B98) #define UFS_AXI_CBCR (0x1D48) #define UFS_AHB_CBCR (0x1D4C) #define UFS_TX_CFG_CBCR (0x1D50) #define UFS_RX_CFG_CBCR (0x1D54) #define UFS_TX_SYMBOL_0_CBCR (0x1D58) #define UFS_TX_SYMBOL_1_CBCR (0x1D5C) #define UFS_RX_SYMBOL_0_CBCR (0x1D60) #define UFS_RX_SYMBOL_1_CBCR (0x1D64) #define UFS_AXI_CMD_RCGR (0x1D68) #define PCIE_0_PHY_LDO_EN (0x1E00) #define PCIE_1_PHY_LDO_EN (0x1E04) #define USB_SS_PHY_LDO_EN (0x1E08) #define UFS_PHY_LDO_EN (0x1E0C) #define AXI_CMD_RCGR (0x5040) DEFINE_EXT_CLK(gcc_xo, NULL); DEFINE_EXT_CLK(gcc_xo_a_clk, NULL); DEFINE_EXT_CLK(debug_mmss_clk, NULL); DEFINE_EXT_CLK(debug_rpm_clk, NULL); DEFINE_EXT_CLK(debug_cpu_clk, NULL); static unsigned int soft_vote_gpll0; static struct pll_vote_clk gpll0 = { .en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE, .en_mask = BIT(0), .status_reg = (void __iomem *)GPLL0_MODE, .status_mask = BIT(30), .soft_vote = &soft_vote_gpll0, .soft_vote_mask = PLL_SOFT_VOTE_PRIMARY, .base = &virt_base, .c = { .rate = 600000000, .parent = &gcc_xo.c, .dbg_name = "gpll0", .ops = &clk_ops_pll_acpu_vote, CLK_INIT(gpll0.c), }, }; static struct pll_vote_clk gpll0_ao = { .en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE, .en_mask = BIT(0), .status_reg = (void __iomem *)GPLL0_MODE, .status_mask = BIT(30), .soft_vote = &soft_vote_gpll0, .soft_vote_mask = PLL_SOFT_VOTE_ACPU, .base = &virt_base, .c = { .rate = 600000000, .parent = &gcc_xo_a_clk.c, .dbg_name = "gpll0_ao", .ops = &clk_ops_pll_acpu_vote, CLK_INIT(gpll0_ao.c), }, }; DEFINE_EXT_CLK(gpll0_out_main, &gpll0.c); static struct pll_vote_clk gpll4 = { .en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE, 
.en_mask = BIT(4), .status_reg = (void __iomem *)GPLL4_MODE, .status_mask = BIT(30), .base = &virt_base, .c = { .rate = 1536000000, .parent = &gcc_xo.c, .dbg_name = "gpll4", .ops = &clk_ops_pll_vote, VDD_DIG_FMAX_MAP3(LOWER, 400000000, LOW, 800000000, NOMINAL, 1600000000), CLK_INIT(gpll4.c), }, }; DEFINE_FIXED_SLAVE_DIV_CLK(gpll4_out_main, 4, &gpll4.c); static struct clk_freq_tbl ftbl_ufs_axi_clk_src[] = { F( 50000000, gpll0_out_main, 12, 0, 0), F( 100000000, gpll0_out_main, 6, 0, 0), F( 150000000, gpll0_out_main, 4, 0, 0), F( 171430000, gpll0_out_main, 3.5, 0, 0), F_END }; static struct clk_freq_tbl ftbl_ufs_axi_clk_src_v2[] = { F( 50000000, gpll0_out_main, 12, 0, 0), F( 100000000, gpll0_out_main, 6, 0, 0), F( 150000000, gpll0_out_main, 4, 0, 0), F( 171430000, gpll0_out_main, 3.5, 0, 0), F( 200000000, gpll0_out_main, 3, 0, 0), F( 240000000, gpll0_out_main, 2.5, 0, 0), F_END }; static struct rcg_clk ufs_axi_clk_src = { .cmd_rcgr_reg = UFS_AXI_CMD_RCGR, .set_rate = set_rate_mnd, .freq_tbl = ftbl_ufs_axi_clk_src, .current_freq = &rcg_dummy_freq, .base = &virt_base, .c = { .dbg_name = "ufs_axi_clk_src", .ops = &clk_ops_rcg_mnd, VDD_DIG_FMAX_MAP4(LOWER, 50000000, LOW, 100000000, NOMINAL, 150000000, HIGH, 171430000), CLK_INIT(ufs_axi_clk_src.c), }, }; static struct clk_freq_tbl ftbl_usb30_master_clk_src[] = { F( 19200000, gcc_xo, 1, 0, 0), F( 125000000, gpll0_out_main, 1, 5, 24), F_END }; static struct rcg_clk usb30_master_clk_src = { .cmd_rcgr_reg = USB30_MASTER_CMD_RCGR, .set_rate = set_rate_mnd, .freq_tbl = ftbl_usb30_master_clk_src, .current_freq = &rcg_dummy_freq, .base = &virt_base, .c = { .dbg_name = "usb30_master_clk_src", .ops = &clk_ops_rcg_mnd, VDD_DIG_FMAX_MAP2(LOWER, 62500000, LOW, 125000000), CLK_INIT(usb30_master_clk_src.c), }, }; static struct clk_freq_tbl ftbl_blsp_i2c_apps_clk_src[] = { F( 19200000, gcc_xo, 1, 0, 0), F( 50000000, gpll0_out_main, 12, 0, 0), F_END }; static struct rcg_clk blsp1_qup1_i2c_apps_clk_src = { .cmd_rcgr_reg = 
BLSP1_QUP1_I2C_APPS_CMD_RCGR, .set_rate = set_rate_hid, .freq_tbl = ftbl_blsp_i2c_apps_clk_src, .current_freq = &rcg_dummy_freq, .base = &virt_base, .c = { .dbg_name = "blsp1_qup1_i2c_apps_clk_src", .ops = &clk_ops_rcg, VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000), CLK_INIT(blsp1_qup1_i2c_apps_clk_src.c), }, }; static struct clk_freq_tbl ftbl_blspqup_spi_apps_clk_src_v2[] = { F( 960000, gcc_xo, 10, 1, 2), F( 4800000, gcc_xo, 4, 0, 0), F( 9600000, gcc_xo, 2, 0, 0), F( 15000000, gpll0_out_main, 10, 1, 4), F( 19200000, gcc_xo, 1, 0, 0), F( 24000000, gpll0_out_main, 12.5, 1, 2), F( 25000000, gpll0_out_main, 12, 1, 2), F( 48000000, gpll0_out_main, 12.5, 0, 0), F( 50000000, gpll0_out_main, 12, 0, 0), F_END }; static struct clk_freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = { F( 960000, gcc_xo, 10, 1, 2), F( 4800000, gcc_xo, 4, 0, 0), F( 9600000, gcc_xo, 2, 0, 0), F( 15000000, gpll0_out_main, 10, 1, 4), F( 19200000, gcc_xo, 1, 0, 0), F( 24000000, gpll0_out_main, 12.5, 1, 2), F( 25000000, gpll0_out_main, 12, 1, 2), F( 48000000, gpll0_out_main, 12.5, 0, 0), F( 50000000, gpll0_out_main, 12, 0, 0), F_END }; static struct rcg_clk blsp1_qup1_spi_apps_clk_src = { .cmd_rcgr_reg = BLSP1_QUP1_SPI_APPS_CMD_RCGR, .set_rate = set_rate_mnd, .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src, .current_freq = &rcg_dummy_freq, .base = &virt_base, .c = { .dbg_name = "blsp1_qup1_spi_apps_clk_src", .ops = &clk_ops_rcg_mnd, VDD_DIG_FMAX_MAP4(LOWER, 12500000, LOW, 25000000, NOMINAL, 48000000, HIGH, 50000000), CLK_INIT(blsp1_qup1_spi_apps_clk_src.c), }, }; static struct rcg_clk blsp1_qup2_i2c_apps_clk_src = { .cmd_rcgr_reg = BLSP1_QUP2_I2C_APPS_CMD_RCGR, .set_rate = set_rate_hid, .freq_tbl = ftbl_blsp_i2c_apps_clk_src, .current_freq = &rcg_dummy_freq, .base = &virt_base, .c = { .dbg_name = "blsp1_qup2_i2c_apps_clk_src", .ops = &clk_ops_rcg, VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000), CLK_INIT(blsp1_qup2_i2c_apps_clk_src.c), }, }; static struct clk_freq_tbl 
ftbl_blsp1_qup2_spi_apps_clk_src[] = { F( 960000, gcc_xo, 10, 1, 2), F( 4800000, gcc_xo, 4, 0, 0), F( 9600000, gcc_xo, 2, 0, 0), F( 15000000, gpll0_out_main, 10, 1, 4), F( 19200000, gcc_xo, 1, 0, 0), F( 24000000, gpll0_out_main, 12.5, 1, 2), F( 25000000, gpll0_out_main, 12, 1, 2), F( 42860000, gpll0_out_main, 14, 0, 0), F( 46150000, gpll0_out_main, 13, 0, 0), F_END }; static struct rcg_clk blsp1_qup2_spi_apps_clk_src = { .cmd_rcgr_reg = BLSP1_QUP2_SPI_APPS_CMD_RCGR, .set_rate = set_rate_mnd, .freq_tbl = ftbl_blsp1_qup2_spi_apps_clk_src, .current_freq = &rcg_dummy_freq, .base = &virt_base, .c = { .dbg_name = "blsp1_qup2_spi_apps_clk_src", .ops = &clk_ops_rcg_mnd, VDD_DIG_FMAX_MAP4(LOWER, 12500000, LOW, 25000000, NOMINAL, 42860000, HIGH, 46150000), CLK_INIT(blsp1_qup2_spi_apps_clk_src.c), }, }; static struct rcg_clk blsp1_qup3_i2c_apps_clk_src = { .cmd_rcgr_reg = BLSP1_QUP3_I2C_APPS_CMD_RCGR, .set_rate = set_rate_hid, .freq_tbl = ftbl_blsp_i2c_apps_clk_src, .current_freq = &rcg_dummy_freq, .base = &virt_base, .c = { .dbg_name = "blsp1_qup3_i2c_apps_clk_src", .ops = &clk_ops_rcg, VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000), CLK_INIT(blsp1_qup3_i2c_apps_clk_src.c), }, }; static struct clk_freq_tbl ftbl_blsp1_qup3_spi_apps_clk_src[] = { F( 960000, gcc_xo, 10, 1, 2), F( 4800000, gcc_xo, 4, 0, 0), F( 9600000, gcc_xo, 2, 0, 0), F( 15000000, gpll0_out_main, 10, 1, 4), F( 19200000, gcc_xo, 1, 0, 0), F( 24000000, gpll0_out_main, 12.5, 1, 2), F( 25000000, gpll0_out_main, 12, 1, 2), F( 42860000, gpll0_out_main, 14, 0, 0), F( 44440000, gpll0_out_main, 13.5, 0, 0), F_END }; static struct rcg_clk blsp1_qup3_spi_apps_clk_src = { .cmd_rcgr_reg = BLSP1_QUP3_SPI_APPS_CMD_RCGR, .set_rate = set_rate_mnd, .freq_tbl = ftbl_blsp1_qup3_spi_apps_clk_src, .current_freq = &rcg_dummy_freq, .base = &virt_base, .c = { .dbg_name = "blsp1_qup3_spi_apps_clk_src", .ops = &clk_ops_rcg_mnd, VDD_DIG_FMAX_MAP4(LOWER, 12500000, LOW, 25000000, NOMINAL, 42860000, HIGH, 44440000), 
CLK_INIT(blsp1_qup3_spi_apps_clk_src.c), }, }; static struct rcg_clk blsp1_qup4_i2c_apps_clk_src = { .cmd_rcgr_reg = BLSP1_QUP4_I2C_APPS_CMD_RCGR, .set_rate = set_rate_hid, .freq_tbl = ftbl_blsp_i2c_apps_clk_src, .current_freq = &rcg_dummy_freq, .base = &virt_base, .c = { .dbg_name = "blsp1_qup4_i2c_apps_clk_src", .ops = &clk_ops_rcg, VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000), CLK_INIT(blsp1_qup4_i2c_apps_clk_src.c), }, }; static struct clk_freq_tbl ftbl_blsp1_qup4_spi_apps_clk_src[] = { F( 960000, gcc_xo, 10, 1, 2), F( 4800000, gcc_xo, 4, 0, 0), F( 9600000, gcc_xo, 2, 0, 0), F( 15000000, gpll0_out_main, 10, 1, 4), F( 19200000, gcc_xo, 1, 0, 0), F( 24000000, gpll0_out_main, 12.5, 1, 2), F( 25000000, gpll0_out_main, 12, 1, 2), F( 42860000, gpll0_out_main, 14, 0, 0), F( 44440000, gpll0_out_main, 13.5, 0, 0), F_END }; static struct rcg_clk blsp1_qup4_spi_apps_clk_src = { .cmd_rcgr_reg = BLSP1_QUP4_SPI_APPS_CMD_RCGR, .set_rate = set_rate_mnd, .freq_tbl = ftbl_blsp1_qup4_spi_apps_clk_src, .current_freq = &rcg_dummy_freq, .base = &virt_base, .c = { .dbg_name = "blsp1_qup4_spi_apps_clk_src", .ops = &clk_ops_rcg_mnd, VDD_DIG_FMAX_MAP4(LOWER, 12500000, LOW, 25000000, NOMINAL, 42860000, HIGH, 44440000), CLK_INIT(blsp1_qup4_spi_apps_clk_src.c), }, }; static struct rcg_clk blsp1_qup5_i2c_apps_clk_src = { .cmd_rcgr_reg = BLSP1_QUP5_I2C_APPS_CMD_RCGR, .set_rate = set_rate_hid, .freq_tbl = ftbl_blsp_i2c_apps_clk_src, .current_freq = &rcg_dummy_freq, .base = &virt_base, .c = { .dbg_name = "blsp1_qup5_i2c_apps_clk_src", .ops = &clk_ops_rcg, VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000), CLK_INIT(blsp1_qup5_i2c_apps_clk_src.c), }, }; static struct clk_freq_tbl ftbl_blsp1_qup5_spi_apps_clk_src[] = { F( 960000, gcc_xo, 10, 1, 2), F( 4800000, gcc_xo, 4, 0, 0), F( 9600000, gcc_xo, 2, 0, 0), F( 15000000, gpll0_out_main, 10, 1, 4), F( 19200000, gcc_xo, 1, 0, 0), F( 24000000, gpll0_out_main, 12.5, 1, 2), F( 25000000, gpll0_out_main, 12, 1, 2), F( 40000000, gpll0_out_main, 15, 
/* Tail of ftbl_blsp1_qup5_spi_apps_clk_src — the table's opening rows are
 * above this chunk.  Each F() row presumably encodes (freq_hz, source,
 * divider, m, n); TODO confirm argument order against the F() macro. */
0, 0),
	F( 42860000, gpll0_out_main, 14, 0, 0),
	F_END
};

/* RCG source for the BLSP1 QUP5 SPI core clock (MND counter for
 * fractional rates).  VDD_DIG_FMAX_MAPn pairs each digital-rail voltage
 * corner with the maximum frequency allowed at that corner. */
static struct rcg_clk blsp1_qup5_spi_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_QUP5_SPI_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_blsp1_qup5_spi_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp1_qup5_spi_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP4(LOWER, 12500000, LOW, 25000000, NOMINAL,
			40000000, HIGH, 42860000),
		CLK_INIT(blsp1_qup5_spi_apps_clk_src.c),
	},
};

/* BLSP1 QUP6 I2C core clock: all I2C QUPs share
 * ftbl_blsp_i2c_apps_clk_src and use the plain (HID, no MND) RCG ops. */
static struct rcg_clk blsp1_qup6_i2c_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_QUP6_I2C_APPS_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp1_qup6_i2c_apps_clk_src",
		.ops = &clk_ops_rcg,
		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
		CLK_INIT(blsp1_qup6_i2c_apps_clk_src.c),
	},
};

/* BLSP1 QUP6 SPI frequency table.  Fractional dividers (12.5, 14.5)
 * rely on the F() macro's half-step divider encoding. */
static struct clk_freq_tbl ftbl_blsp1_qup6_spi_apps_clk_src[] = {
	F(    960000,         gcc_xo,   10,    1,     2),
	F(   4800000,         gcc_xo,    4,    0,     0),
	F(   9600000,         gcc_xo,    2,    0,     0),
	F(  15000000, gpll0_out_main,   10,    1,     4),
	F(  19200000,         gcc_xo,    1,    0,     0),
	F(  24000000, gpll0_out_main, 12.5,    1,     2),
	F(  25000000, gpll0_out_main,   12,    1,     2),
	F(  41380000, gpll0_out_main, 14.5,    0,     0),
	F(  42860000, gpll0_out_main,   14,    0,     0),
	F_END
};

static struct rcg_clk blsp1_qup6_spi_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_QUP6_SPI_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_blsp1_qup6_spi_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp1_qup6_spi_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP4(LOWER, 12500000, LOW, 25000000, NOMINAL,
			41380000, HIGH, 42860000),
		CLK_INIT(blsp1_qup6_spi_apps_clk_src.c),
	},
};

/* Frequency table shared by every BLSP UART core clock; the large M/N
 * ratios derive standard baud-rate multiples (3.6864 MHz family) from
 * gpll0. */
static struct clk_freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
	F(   3686400, gpll0_out_main,    1,   96, 15625),
	F(   7372800, gpll0_out_main,    1,  192, 15625),
	F(  14745600, gpll0_out_main,    1,  384, 15625),
	F(  16000000, gpll0_out_main,    5,    2,    15),
	F(  19200000,         gcc_xo,    1,    0,     0),
	F(  24000000, gpll0_out_main,    5,    1,     5),
	F(  32000000, gpll0_out_main,    1,    4,    75),
	F(  40000000, gpll0_out_main,   15,    0,     0),
	F(  46400000, gpll0_out_main,    1,   29,   375),
	F(  48000000, gpll0_out_main, 12.5,    0,     0),
	F(  51200000, gpll0_out_main,    1,   32,   375),
	F(  56000000, gpll0_out_main,    1,    7,    75),
	F(  58982400, gpll0_out_main,    1, 1536, 15625),
	F(  60000000, gpll0_out_main,   10,    0,     0),
	F(  63160000, gpll0_out_main,  9.5,    0,     0),
	F_END
};

/* BLSP1 UART1..6 core clock sources — identical except for the
 * CMD_RCGR register offset; all share ftbl_blsp_uart_apps_clk_src. */
static struct rcg_clk blsp1_uart1_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_UART1_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp1_uart1_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP3(LOWER, 15790000, LOW, 31580000, NOMINAL,
			63160000),
		CLK_INIT(blsp1_uart1_apps_clk_src.c),
	},
};

static struct rcg_clk blsp1_uart2_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_UART2_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp1_uart2_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP3(LOWER, 15790000, LOW, 31580000, NOMINAL,
			63160000),
		CLK_INIT(blsp1_uart2_apps_clk_src.c),
	},
};

static struct rcg_clk blsp1_uart3_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_UART3_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp1_uart3_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP3(LOWER, 15790000, LOW, 31580000, NOMINAL,
			63160000),
		CLK_INIT(blsp1_uart3_apps_clk_src.c),
	},
};

static struct rcg_clk blsp1_uart4_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_UART4_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp1_uart4_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP3(LOWER, 15790000, LOW, 31580000, NOMINAL,
			63160000),
		CLK_INIT(blsp1_uart4_apps_clk_src.c),
	},
};

static struct rcg_clk blsp1_uart5_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_UART5_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp1_uart5_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP3(LOWER, 15790000, LOW, 31580000, NOMINAL,
			63160000),
		CLK_INIT(blsp1_uart5_apps_clk_src.c),
	},
};

static struct rcg_clk blsp1_uart6_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_UART6_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp1_uart6_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP3(LOWER, 15790000, LOW, 31580000, NOMINAL,
			63160000),
		CLK_INIT(blsp1_uart6_apps_clk_src.c),
	},
};

/* BLSP2 QUP clock sources follow the same i2c/spi pattern as BLSP1. */
static struct rcg_clk blsp2_qup1_i2c_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP1_I2C_APPS_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp2_qup1_i2c_apps_clk_src",
		.ops = &clk_ops_rcg,
		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
		CLK_INIT(blsp2_qup1_i2c_apps_clk_src.c),
	},
};

static struct clk_freq_tbl ftbl_blsp2_qup1_spi_apps_clk_src[] = {
	F(    960000,         gcc_xo,   10,    1,     2),
	F(   4800000,         gcc_xo,    4,    0,     0),
	F(   9600000,         gcc_xo,    2,    0,     0),
	F(  15000000, gpll0_out_main,   10,    1,     4),
	F(  19200000,         gcc_xo,    1,    0,     0),
	F(  24000000, gpll0_out_main, 12.5,    1,     2),
	F(  25000000, gpll0_out_main,   12,    1,     2),
	F(  42860000, gpll0_out_main,   14,    0,     0),
	F(  44440000, gpll0_out_main, 13.5,    0,     0),
	F_END
};

static struct rcg_clk blsp2_qup1_spi_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP1_SPI_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_blsp2_qup1_spi_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp2_qup1_spi_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP4(LOWER, 12500000, LOW, 25000000, NOMINAL,
			42860000, HIGH, 44440000),
		CLK_INIT(blsp2_qup1_spi_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_qup2_i2c_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP2_I2C_APPS_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp2_qup2_i2c_apps_clk_src",
		.ops = &clk_ops_rcg,
		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
		CLK_INIT(blsp2_qup2_i2c_apps_clk_src.c),
	},
};

static struct clk_freq_tbl ftbl_blsp2_qup2_spi_apps_clk_src[] = {
	F(    960000,         gcc_xo,   10,    1,     2),
	F(   4800000,         gcc_xo,    4,    0,     0),
	F(   9600000,         gcc_xo,    2,    0,     0),
	F(  15000000, gpll0_out_main,   10,    1,     4),
	F(  19200000,         gcc_xo,    1,    0,     0),
	F(  24000000, gpll0_out_main, 12.5,    1,     2),
	F(  25000000, gpll0_out_main,   12,    1,     2),
	F(  42860000, gpll0_out_main,   14,    0,     0),
	F(  44440000, gpll0_out_main, 13.5,    0,     0),
	F_END
};

static struct rcg_clk blsp2_qup2_spi_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP2_SPI_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_blsp2_qup2_spi_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp2_qup2_spi_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP4(LOWER, 12500000, LOW, 25000000, NOMINAL,
			42860000, HIGH, 44440000),
		CLK_INIT(blsp2_qup2_spi_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_qup3_i2c_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP3_I2C_APPS_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp2_qup3_i2c_apps_clk_src",
		.ops = &clk_ops_rcg,
		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
		CLK_INIT(blsp2_qup3_i2c_apps_clk_src.c),
	},
};

/* BLSP2 QUP3 SPI frequency table — the final rows continue in the next
 * chunk of the file. */
static struct clk_freq_tbl ftbl_blsp2_qup3_spi_apps_clk_src[] = {
	F(    960000,         gcc_xo,   10,    1,     2),
	F(   4800000,         gcc_xo,    4,    0,     0),
	F(   9600000,         gcc_xo,    2,    0,     0),
	F(  15000000, gpll0_out_main,   10,    1,     4),
	F(  19200000,         gcc_xo,    1,    0,     0),
	F(  24000000, gpll0_out_main, 12.5,    1,     2),
	F(  25000000, gpll0_out_main,   12,    1,     2),
	F(
/* Remainder of ftbl_blsp2_qup3_spi_apps_clk_src (opened in the previous
 * chunk). */
42860000, gpll0_out_main, 14, 0, 0),
	F(  48000000, gpll0_out_main, 12.5, 0, 0),
	F_END
};

static struct rcg_clk blsp2_qup3_spi_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP3_SPI_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_blsp2_qup3_spi_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp2_qup3_spi_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP4(LOWER, 12500000, LOW, 25000000, NOMINAL,
			42860000, HIGH, 48000000),
		CLK_INIT(blsp2_qup3_spi_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_qup4_i2c_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP4_I2C_APPS_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp2_qup4_i2c_apps_clk_src",
		.ops = &clk_ops_rcg,
		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
		CLK_INIT(blsp2_qup4_i2c_apps_clk_src.c),
	},
};

static struct clk_freq_tbl ftbl_blsp2_qup4_spi_apps_clk_src[] = {
	F(    960000,         gcc_xo,   10,    1,     2),
	F(   4800000,         gcc_xo,    4,    0,     0),
	F(   9600000,         gcc_xo,    2,    0,     0),
	F(  15000000, gpll0_out_main,   10,    1,     4),
	F(  19200000,         gcc_xo,    1,    0,     0),
	F(  24000000, gpll0_out_main, 12.5,    1,     2),
	F(  25000000, gpll0_out_main,   12,    1,     2),
	F(  42860000, gpll0_out_main,   14,    0,     0),
	F(  48000000, gpll0_out_main, 12.5,    0,     0),
	F_END
};

static struct rcg_clk blsp2_qup4_spi_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP4_SPI_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_blsp2_qup4_spi_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp2_qup4_spi_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP4(LOWER, 12500000, LOW, 25000000, NOMINAL,
			42860000, HIGH, 48000000),
		CLK_INIT(blsp2_qup4_spi_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_qup5_i2c_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP5_I2C_APPS_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp2_qup5_i2c_apps_clk_src",
		.ops = &clk_ops_rcg,
		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
		CLK_INIT(blsp2_qup5_i2c_apps_clk_src.c),
	},
};

/* QUP5 tops out at 50 MHz (12.5/12 dividers) — note per-QUP fmax values
 * differ across this file. */
static struct clk_freq_tbl ftbl_blsp2_qup5_spi_apps_clk_src[] = {
	F(    960000,         gcc_xo,   10,    1,     2),
	F(   4800000,         gcc_xo,    4,    0,     0),
	F(   9600000,         gcc_xo,    2,    0,     0),
	F(  15000000, gpll0_out_main,   10,    1,     4),
	F(  19200000,         gcc_xo,    1,    0,     0),
	F(  24000000, gpll0_out_main, 12.5,    1,     2),
	F(  25000000, gpll0_out_main,   12,    1,     2),
	F(  48000000, gpll0_out_main, 12.5,    0,     0),
	F(  50000000, gpll0_out_main,   12,    0,     0),
	F_END
};

static struct rcg_clk blsp2_qup5_spi_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP5_SPI_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_blsp2_qup5_spi_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp2_qup5_spi_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP4(LOWER, 12500000, LOW, 25000000, NOMINAL,
			48000000, HIGH, 50000000),
		CLK_INIT(blsp2_qup5_spi_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_qup6_i2c_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP6_I2C_APPS_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp2_qup6_i2c_apps_clk_src",
		.ops = &clk_ops_rcg,
		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
		CLK_INIT(blsp2_qup6_i2c_apps_clk_src.c),
	},
};

static struct clk_freq_tbl ftbl_blsp2_qup6_spi_apps_clk_src[] = {
	F(    960000,         gcc_xo,   10,    1,     2),
	F(   4800000,         gcc_xo,    4,    0,     0),
	F(   9600000,         gcc_xo,    2,    0,     0),
	F(  15000000, gpll0_out_main,   10,    1,     4),
	F(  19200000,         gcc_xo,    1,    0,     0),
	F(  24000000, gpll0_out_main, 12.5,    1,     2),
	F(  25000000, gpll0_out_main,   12,    1,     2),
	F(  44440000, gpll0_out_main, 13.5,    0,     0),
	F(  48000000, gpll0_out_main, 12.5,    0,     0),
	F_END
};

static struct rcg_clk blsp2_qup6_spi_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP6_SPI_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_blsp2_qup6_spi_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp2_qup6_spi_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP4(LOWER, 12500000, LOW, 25000000, NOMINAL,
			44440000, HIGH, 48000000),
		CLK_INIT(blsp2_qup6_spi_apps_clk_src.c),
	},
};

/* BLSP2 UART1..6 core clock sources — same pattern as the BLSP1 UARTs,
 * sharing ftbl_blsp_uart_apps_clk_src. */
static struct rcg_clk blsp2_uart1_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_UART1_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp2_uart1_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP3(LOWER, 15790000, LOW, 31580000, NOMINAL,
			63160000),
		CLK_INIT(blsp2_uart1_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_uart2_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_UART2_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp2_uart2_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP3(LOWER, 15790000, LOW, 31580000, NOMINAL,
			63160000),
		CLK_INIT(blsp2_uart2_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_uart3_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_UART3_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp2_uart3_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP3(LOWER, 15790000, LOW, 31580000, NOMINAL,
			63160000),
		CLK_INIT(blsp2_uart3_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_uart4_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_UART4_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp2_uart4_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP3(LOWER, 15790000, LOW, 31580000, NOMINAL,
			63160000),
		CLK_INIT(blsp2_uart4_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_uart5_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_UART5_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp2_uart5_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP3(LOWER, 15790000, LOW, 31580000, NOMINAL,
			63160000),
		CLK_INIT(blsp2_uart5_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_uart6_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_UART6_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_blsp_uart_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "blsp2_uart6_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP3(LOWER, 15790000, LOW, 31580000, NOMINAL,
			63160000),
		CLK_INIT(blsp2_uart6_apps_clk_src.c),
	},
};

/* General-purpose (GP1..GP3) clock sources, e.g. for external timers
 * or pulse outputs — presumably; the consumers are not visible here. */
static struct clk_freq_tbl ftbl_gp1_clk_src[] = {
	F(  19200000,         gcc_xo,    1,    0,     0),
	F( 100000000, gpll0_out_main,    6,    0,     0),
	F( 200000000, gpll0_out_main,    3,    0,     0),
	F_END
};

static struct rcg_clk gp1_clk_src = {
	.cmd_rcgr_reg = GP1_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gp1_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "gp1_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000, NOMINAL,
			200000000),
		CLK_INIT(gp1_clk_src.c),
	},
};

static struct clk_freq_tbl ftbl_gp2_clk_src[] = {
	F(  19200000,         gcc_xo,    1,    0,     0),
	F( 100000000, gpll0_out_main,    6,    0,     0),
	F( 200000000, gpll0_out_main,    3,    0,     0),
	F_END
};

static struct rcg_clk gp2_clk_src = {
	.cmd_rcgr_reg = GP2_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gp2_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "gp2_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000, NOMINAL,
			200000000),
		CLK_INIT(gp2_clk_src.c),
	},
};

static struct clk_freq_tbl ftbl_gp3_clk_src[] = {
	F(  19200000,         gcc_xo,    1,    0,     0),
	F( 100000000, gpll0_out_main,    6,    0,     0),
	F( 200000000, gpll0_out_main,    3,    0,     0),
	F_END
};

/* gp3_clk_src — initializer continues in the next chunk. */
static struct rcg_clk gp3_clk_src = {
	.cmd_rcgr_reg = GP3_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gp3_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
/* Remainder of gp3_clk_src (opened in the previous chunk). */
.dbg_name = "gp3_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000, NOMINAL,
			200000000),
		CLK_INIT(gp3_clk_src.c),
	},
};

/* PCIe auxiliary clocks: ~1.011 MHz derived from the 19.2 MHz XO via
 * M/N = 1/19. */
static struct clk_freq_tbl ftbl_pcie_0_aux_clk_src[] = {
	F(   1011000,         gcc_xo,    1,    1,    19),
	F_END
};

static struct rcg_clk pcie_0_aux_clk_src = {
	.cmd_rcgr_reg = PCIE_0_AUX_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_pcie_0_aux_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "pcie_0_aux_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP1(LOWER, 1011000),
		CLK_INIT(pcie_0_aux_clk_src.c),
	},
};

/* Pipe clock is sourced externally from the PCIe PHY (F_EXT entry);
 * shared by both PCIe pipe RCGs below. */
static struct clk_freq_tbl ftbl_pcie_pipe_clk_src[] = {
	F_EXT( 125000000,    pcie_pipe,    1,    0,     0),
	F_END
};

static struct rcg_clk pcie_0_pipe_clk_src = {
	.cmd_rcgr_reg = PCIE_0_PIPE_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_pcie_pipe_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "pcie_0_pipe_clk_src",
		.ops = &clk_ops_rcg,
		VDD_DIG_FMAX_MAP2(LOWER, 62500000, LOW, 125000000),
		CLK_INIT(pcie_0_pipe_clk_src.c),
	},
};

static struct clk_freq_tbl ftbl_pcie_1_aux_clk_src[] = {
	F(   1011000,         gcc_xo,    1,    1,    19),
	F_END
};

static struct rcg_clk pcie_1_aux_clk_src = {
	.cmd_rcgr_reg = PCIE_1_AUX_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_pcie_1_aux_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "pcie_1_aux_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP1(LOWER, 1011000),
		CLK_INIT(pcie_1_aux_clk_src.c),
	},
};

static struct rcg_clk pcie_1_pipe_clk_src = {
	.cmd_rcgr_reg = PCIE_1_PIPE_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_pcie_pipe_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "pcie_1_pipe_clk_src",
		.ops = &clk_ops_rcg,
		VDD_DIG_FMAX_MAP2(LOWER, 62500000, LOW, 125000000),
		CLK_INIT(pcie_1_pipe_clk_src.c),
	},
};

/* PDM2 (pulse-density modulation) clock. */
static struct clk_freq_tbl ftbl_pdm2_clk_src[] = {
	F(  60000000, gpll0_out_main,   10,    0,     0),
	F_END
};

static struct rcg_clk pdm2_clk_src = {
	.cmd_rcgr_reg = PDM2_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_pdm2_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "pdm2_clk_src",
		.ops = &clk_ops_rcg,
		VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 60000000),
		CLK_INIT(pdm2_clk_src.c),
	},
};

/* SDCC1 (eMMC) — top rates (192/384 MHz) come from gpll4, unlike the
 * other SDCC slots which stay on gpll0. */
static struct clk_freq_tbl ftbl_sdcc1_apps_clk_src[] = {
	F(    144000,         gcc_xo,   16,    3,    25),
	F(    400000,         gcc_xo,   12,    1,     4),
	F(  20000000, gpll0_out_main,   15,    1,     2),
	F(  25000000, gpll0_out_main,   12,    1,     2),
	F(  50000000, gpll0_out_main,   12,    0,     0),
	F( 100000000, gpll0_out_main,    6,    0,     0),
	F( 192000000, gpll4_out_main,    2,    0,     0),
	F( 384000000, gpll4_out_main,    1,    0,     0),
	F_END
};

static struct rcg_clk sdcc1_apps_clk_src = {
	.cmd_rcgr_reg = SDCC1_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_sdcc1_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "sdcc1_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP3(LOWER, 100000000, LOW, 200000000, NOMINAL,
			400000000),
		CLK_INIT(sdcc1_apps_clk_src.c),
	},
};

/* Frequency table shared by SDCC2..SDCC4. */
static struct clk_freq_tbl ftbl_sdcc2_4_apps_clk_src[] = {
	F(    144000,         gcc_xo,   16,    3,    25),
	F(    400000,         gcc_xo,   12,    1,     4),
	F(  20000000, gpll0_out_main,   15,    1,     2),
	F(  25000000, gpll0_out_main,   12,    1,     2),
	F(  50000000, gpll0_out_main,   12,    0,     0),
	F( 100000000, gpll0_out_main,    6,    0,     0),
	F( 200000000, gpll0_out_main,    3,    0,     0),
	F_END
};

static struct rcg_clk sdcc2_apps_clk_src = {
	.cmd_rcgr_reg = SDCC2_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_sdcc2_4_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "sdcc2_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000, NOMINAL,
			200000000),
		CLK_INIT(sdcc2_apps_clk_src.c),
	},
};

static struct rcg_clk sdcc3_apps_clk_src = {
	.cmd_rcgr_reg = SDCC3_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_sdcc2_4_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "sdcc3_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000, NOMINAL,
			200000000),
		CLK_INIT(sdcc3_apps_clk_src.c),
	},
};

/* SDCC4 carries lower fmax limits than SDCC2/3 despite sharing the
 * same frequency table. */
static struct rcg_clk sdcc4_apps_clk_src = {
	.cmd_rcgr_reg = SDCC4_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_sdcc2_4_apps_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "sdcc4_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 50000000, NOMINAL,
			100000000),
		CLK_INIT(sdcc4_apps_clk_src.c),
	},
};

/* TSIF (transport-stream interface) reference clock, ~105.5 kHz. */
static struct clk_freq_tbl ftbl_tsif_ref_clk_src[] = {
	F(    105500,         gcc_xo,    1,    1,   182),
	F_END
};

static struct rcg_clk tsif_ref_clk_src = {
	.cmd_rcgr_reg = TSIF_REF_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_tsif_ref_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "tsif_ref_clk_src",
		.ops = &clk_ops_rcg_mnd,
		VDD_DIG_FMAX_MAP1(LOWER, 105500),
		CLK_INIT(tsif_ref_clk_src.c),
	},
};

/* USB3.0 controller mock-UTMI clock. */
static struct clk_freq_tbl ftbl_usb30_mock_utmi_clk_src[] = {
	F(  19200000,         gcc_xo,    1,    0,     0),
	F(  60000000, gpll0_out_main,   10,    0,     0),
	F_END
};

static struct rcg_clk usb30_mock_utmi_clk_src = {
	.cmd_rcgr_reg = USB30_MOCK_UTMI_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_usb30_mock_utmi_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "usb30_mock_utmi_clk_src",
		.ops = &clk_ops_rcg,
		VDD_DIG_FMAX_MAP2(LOWER, 40000000, LOW, 60000000),
		CLK_INIT(usb30_mock_utmi_clk_src.c),
	},
};

static struct clk_freq_tbl ftbl_usb3_phy_aux_clk_src[] = {
	F(   1200000,         gcc_xo,   16,    0,     0),
	F_END
};

static struct rcg_clk usb3_phy_aux_clk_src = {
	.cmd_rcgr_reg = USB3_PHY_AUX_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_usb3_phy_aux_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "usb3_phy_aux_clk_src",
		.ops = &clk_ops_rcg,
		VDD_DIG_FMAX_MAP1(LOWER, 1200000),
		CLK_INIT(usb3_phy_aux_clk_src.c),
	},
};

static struct clk_freq_tbl ftbl_usb_hs_system_clk_src[] = {
	F(  75000000, gpll0_out_main,    8,    0,     0),
	F_END
};

static struct rcg_clk usb_hs_system_clk_src = {
	.cmd_rcgr_reg = USB_HS_SYSTEM_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_usb_hs_system_clk_src,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_base,
	.c = {
		.dbg_name = "usb_hs_system_clk_src",
		.ops = &clk_ops_rcg,
		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 60000000, NOMINAL,
			75000000),
		CLK_INIT(usb_hs_system_clk_src.c),
	},
};

/* Block-reset controls exposed as clocks (BCR registers). */
static struct reset_clk gcc_pcie_phy_0_reset = {
	.reset_reg = PCIE_PHY_0_BCR,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_pcie_phy_0_reset",
		.ops = &clk_ops_rst,
		CLK_INIT(gcc_pcie_phy_0_reset.c),
	},
};

static struct reset_clk gcc_pcie_phy_1_reset = {
	.reset_reg = PCIE_PHY_1_BCR,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_pcie_phy_1_reset",
		.ops = &clk_ops_rst,
		CLK_INIT(gcc_pcie_phy_1_reset.c),
	},
};

static struct reset_clk gcc_qusb2_phy_reset = {
	.reset_reg = QUSB2_PHY_BCR,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_qusb2_phy_reset",
		.ops = &clk_ops_rst,
		CLK_INIT(gcc_qusb2_phy_reset.c),
	},
};

static struct reset_clk gcc_usb3_phy_reset = {
	.reset_reg = USB3_PHY_BCR,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_usb3_phy_reset",
		.ops = &clk_ops_rst,
		CLK_INIT(gcc_usb3_phy_reset.c),
	},
};

/* Voteable gates feeding gpll0 to the multimedia (MMSS) and modem
 * (MSS) subsystem clock controllers via the APCS branch-enable vote
 * register. */
static struct gate_clk gpll0_out_mmsscc = {
	.en_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
	.en_mask = BIT(26),
	.delay_us = 1,
	.base = &virt_base,
	.c = {
		.parent = &gpll0_out_main.c,
		.dbg_name = "gpll0_out_mmsscc",
		.ops = &clk_ops_gate,
		CLK_INIT(gpll0_out_mmsscc.c),
	},
};

/* NOTE(review): unlike gpll0_out_mmsscc this gate sets no .parent —
 * verify whether that is intentional. */
static struct gate_clk gpll0_out_msscc = {
	.en_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
	.en_mask = BIT(27),
	.delay_us = 1,
	.base = &virt_base,
	.c = {
		.dbg_name = "gpll0_out_msscc",
		.ops = &clk_ops_gate,
		CLK_INIT(gpll0_out_msscc.c),
	},
};

/* PHY LDO enables, modelled as gate clocks. */
static struct gate_clk pcie_0_phy_ldo = {
	.en_reg = PCIE_0_PHY_LDO_EN,
	.en_mask = BIT(0),
	.base = &virt_base,
	.c = {
		.dbg_name = "pcie_0_phy_ldo",
		.ops = &clk_ops_gate,
		CLK_INIT(pcie_0_phy_ldo.c),
	},
};

/* pcie_1_phy_ldo — initializer continues in the next chunk. */
static struct gate_clk pcie_1_phy_ldo = {
	.en_reg = PCIE_1_PHY_LDO_EN,
	.en_mask = BIT(0),
	.base = &virt_base,
	.c = {
		.dbg_name =
/* Remainder of pcie_1_phy_ldo (opened in the previous chunk). */
"pcie_1_phy_ldo",
		.ops = &clk_ops_gate,
		CLK_INIT(pcie_1_phy_ldo.c),
	},
};

static struct gate_clk ufs_phy_ldo = {
	.en_reg = UFS_PHY_LDO_EN,
	.en_mask = BIT(0),
	.base = &virt_base,
	.c = {
		.dbg_name = "ufs_phy_ldo",
		.ops = &clk_ops_gate,
		CLK_INIT(ufs_phy_ldo.c),
	},
};

static struct gate_clk usb_ss_phy_ldo = {
	.en_reg = USB_SS_PHY_LDO_EN,
	.en_mask = BIT(0),
	.base = &virt_base,
	.c = {
		.dbg_name = "usb_ss_phy_ldo",
		.ops = &clk_ops_gate,
		CLK_INIT(usb_ss_phy_ldo.c),
	},
};

/* AHB bus clocks that are enabled via the shared APCS vote register
 * (multiple masters may hold a vote). */
static struct local_vote_clk gcc_bam_dma_ahb_clk = {
	.cbcr_reg = BAM_DMA_AHB_CBCR,
	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
	.en_mask = BIT(12),
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_bam_dma_ahb_clk",
		.ops = &clk_ops_vote,
		CLK_INIT(gcc_bam_dma_ahb_clk.c),
	},
};

static struct local_vote_clk gcc_blsp1_ahb_clk = {
	.cbcr_reg = BLSP1_AHB_CBCR,
	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
	.en_mask = BIT(17),
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp1_ahb_clk",
		.ops = &clk_ops_vote,
		CLK_INIT(gcc_blsp1_ahb_clk.c),
	},
};

/* Branch (leaf gate) clocks for the BLSP1 QUP/UART cores.  Each is the
 * sole user of its RCG (.has_sibling = 0) and parents the matching
 * *_clk_src defined earlier in this file. */
static struct branch_clk gcc_blsp1_qup1_i2c_apps_clk = {
	.cbcr_reg = BLSP1_QUP1_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp1_qup1_i2c_apps_clk",
		.parent = &blsp1_qup1_i2c_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup1_i2c_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup1_spi_apps_clk = {
	.cbcr_reg = BLSP1_QUP1_SPI_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp1_qup1_spi_apps_clk",
		.parent = &blsp1_qup1_spi_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup1_spi_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup2_i2c_apps_clk = {
	.cbcr_reg = BLSP1_QUP2_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp1_qup2_i2c_apps_clk",
		.parent = &blsp1_qup2_i2c_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup2_i2c_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup2_spi_apps_clk = {
	.cbcr_reg = BLSP1_QUP2_SPI_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp1_qup2_spi_apps_clk",
		.parent = &blsp1_qup2_spi_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup2_spi_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup3_i2c_apps_clk = {
	.cbcr_reg = BLSP1_QUP3_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp1_qup3_i2c_apps_clk",
		.parent = &blsp1_qup3_i2c_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup3_i2c_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup3_spi_apps_clk = {
	.cbcr_reg = BLSP1_QUP3_SPI_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp1_qup3_spi_apps_clk",
		.parent = &blsp1_qup3_spi_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup3_spi_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup4_i2c_apps_clk = {
	.cbcr_reg = BLSP1_QUP4_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp1_qup4_i2c_apps_clk",
		.parent = &blsp1_qup4_i2c_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup4_i2c_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup4_spi_apps_clk = {
	.cbcr_reg = BLSP1_QUP4_SPI_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp1_qup4_spi_apps_clk",
		.parent = &blsp1_qup4_spi_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup4_spi_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup5_i2c_apps_clk = {
	.cbcr_reg = BLSP1_QUP5_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp1_qup5_i2c_apps_clk",
		.parent = &blsp1_qup5_i2c_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup5_i2c_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup5_spi_apps_clk = {
	.cbcr_reg = BLSP1_QUP5_SPI_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp1_qup5_spi_apps_clk",
		.parent = &blsp1_qup5_spi_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup5_spi_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup6_i2c_apps_clk = {
	.cbcr_reg = BLSP1_QUP6_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp1_qup6_i2c_apps_clk",
		.parent = &blsp1_qup6_i2c_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup6_i2c_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup6_spi_apps_clk = {
	.cbcr_reg = BLSP1_QUP6_SPI_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp1_qup6_spi_apps_clk",
		.parent = &blsp1_qup6_spi_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup6_spi_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_uart1_apps_clk = {
	.cbcr_reg = BLSP1_UART1_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp1_uart1_apps_clk",
		.parent = &blsp1_uart1_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_uart1_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_uart2_apps_clk = {
	.cbcr_reg = BLSP1_UART2_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp1_uart2_apps_clk",
		.parent = &blsp1_uart2_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_uart2_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_uart3_apps_clk = {
	.cbcr_reg = BLSP1_UART3_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp1_uart3_apps_clk",
		.parent = &blsp1_uart3_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_uart3_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_uart4_apps_clk = {
	.cbcr_reg = BLSP1_UART4_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp1_uart4_apps_clk",
		.parent = &blsp1_uart4_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_uart4_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_uart5_apps_clk = {
	.cbcr_reg = BLSP1_UART5_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp1_uart5_apps_clk",
		.parent = &blsp1_uart5_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_uart5_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_uart6_apps_clk = {
	.cbcr_reg = BLSP1_UART6_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp1_uart6_apps_clk",
		.parent = &blsp1_uart6_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_uart6_apps_clk.c),
	},
};

static struct local_vote_clk gcc_blsp2_ahb_clk = {
	.cbcr_reg = BLSP2_AHB_CBCR,
	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
	.en_mask = BIT(15),
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp2_ahb_clk",
		.ops = &clk_ops_vote,
		CLK_INIT(gcc_blsp2_ahb_clk.c),
	},
};

/* Branch clocks for the BLSP2 QUP cores — same pattern as BLSP1. */
static struct branch_clk gcc_blsp2_qup1_i2c_apps_clk = {
	.cbcr_reg = BLSP2_QUP1_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp2_qup1_i2c_apps_clk",
		.parent = &blsp2_qup1_i2c_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_qup1_i2c_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp2_qup1_spi_apps_clk = {
	.cbcr_reg = BLSP2_QUP1_SPI_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp2_qup1_spi_apps_clk",
		.parent = &blsp2_qup1_spi_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_qup1_spi_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp2_qup2_i2c_apps_clk = {
	.cbcr_reg = BLSP2_QUP2_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp2_qup2_i2c_apps_clk",
		.parent = &blsp2_qup2_i2c_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_qup2_i2c_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp2_qup2_spi_apps_clk = {
	.cbcr_reg = BLSP2_QUP2_SPI_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp2_qup2_spi_apps_clk",
		.parent = &blsp2_qup2_spi_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_qup2_spi_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp2_qup3_i2c_apps_clk = {
	.cbcr_reg = BLSP2_QUP3_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_base,
	.c = {
		.dbg_name = "gcc_blsp2_qup3_i2c_apps_clk",
		.parent = &blsp2_qup3_i2c_apps_clk_src.c,
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_qup3_i2c_apps_clk.c),
	},
};
static struct branch_clk gcc_blsp2_qup3_spi_apps_clk = { .cbcr_reg = BLSP2_QUP3_SPI_APPS_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_blsp2_qup3_spi_apps_clk", .parent = &blsp2_qup3_spi_apps_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_blsp2_qup3_spi_apps_clk.c), }, }; static struct branch_clk gcc_blsp2_qup4_i2c_apps_clk = { .cbcr_reg = BLSP2_QUP4_I2C_APPS_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_blsp2_qup4_i2c_apps_clk", .parent = &blsp2_qup4_i2c_apps_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_blsp2_qup4_i2c_apps_clk.c), }, }; static struct branch_clk gcc_blsp2_qup4_spi_apps_clk = { .cbcr_reg = BLSP2_QUP4_SPI_APPS_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_blsp2_qup4_spi_apps_clk", .parent = &blsp2_qup4_spi_apps_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_blsp2_qup4_spi_apps_clk.c), }, }; static struct branch_clk gcc_blsp2_qup5_i2c_apps_clk = { .cbcr_reg = BLSP2_QUP5_I2C_APPS_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_blsp2_qup5_i2c_apps_clk", .parent = &blsp2_qup5_i2c_apps_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_blsp2_qup5_i2c_apps_clk.c), }, }; static struct branch_clk gcc_blsp2_qup5_spi_apps_clk = { .cbcr_reg = BLSP2_QUP5_SPI_APPS_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_blsp2_qup5_spi_apps_clk", .parent = &blsp2_qup5_spi_apps_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_blsp2_qup5_spi_apps_clk.c), }, }; static struct branch_clk gcc_blsp2_qup6_i2c_apps_clk = { .cbcr_reg = BLSP2_QUP6_I2C_APPS_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_blsp2_qup6_i2c_apps_clk", .parent = &blsp2_qup6_i2c_apps_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_blsp2_qup6_i2c_apps_clk.c), }, }; static struct branch_clk gcc_blsp2_qup6_spi_apps_clk = { .cbcr_reg = BLSP2_QUP6_SPI_APPS_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_blsp2_qup6_spi_apps_clk", .parent = 
&blsp2_qup6_spi_apps_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_blsp2_qup6_spi_apps_clk.c), }, }; static struct branch_clk gcc_blsp2_uart1_apps_clk = { .cbcr_reg = BLSP2_UART1_APPS_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_blsp2_uart1_apps_clk", .parent = &blsp2_uart1_apps_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_blsp2_uart1_apps_clk.c), }, }; static struct branch_clk gcc_blsp2_uart2_apps_clk = { .cbcr_reg = BLSP2_UART2_APPS_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_blsp2_uart2_apps_clk", .parent = &blsp2_uart2_apps_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_blsp2_uart2_apps_clk.c), }, }; static struct branch_clk gcc_blsp2_uart3_apps_clk = { .cbcr_reg = BLSP2_UART3_APPS_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_blsp2_uart3_apps_clk", .parent = &blsp2_uart3_apps_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_blsp2_uart3_apps_clk.c), }, }; static struct branch_clk gcc_blsp2_uart4_apps_clk = { .cbcr_reg = BLSP2_UART4_APPS_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_blsp2_uart4_apps_clk", .parent = &blsp2_uart4_apps_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_blsp2_uart4_apps_clk.c), }, }; static struct branch_clk gcc_blsp2_uart5_apps_clk = { .cbcr_reg = BLSP2_UART5_APPS_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_blsp2_uart5_apps_clk", .parent = &blsp2_uart5_apps_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_blsp2_uart5_apps_clk.c), }, }; static struct branch_clk gcc_blsp2_uart6_apps_clk = { .cbcr_reg = BLSP2_UART6_APPS_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_blsp2_uart6_apps_clk", .parent = &blsp2_uart6_apps_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_blsp2_uart6_apps_clk.c), }, }; static struct local_vote_clk gcc_boot_rom_ahb_clk = { .cbcr_reg = BOOT_ROM_AHB_CBCR, .vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE, .en_mask = BIT(10), .base = &virt_base, .c = { .dbg_name = "gcc_boot_rom_ahb_clk", 
.ops = &clk_ops_vote, CLK_INIT(gcc_boot_rom_ahb_clk.c), }, }; static struct branch_clk gcc_gp1_clk = { .cbcr_reg = GP1_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_gp1_clk", .parent = &gp1_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_gp1_clk.c), }, }; static struct branch_clk gcc_gp2_clk = { .cbcr_reg = GP2_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_gp2_clk", .parent = &gp2_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_gp2_clk.c), }, }; static struct branch_clk gcc_gp3_clk = { .cbcr_reg = GP3_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_gp3_clk", .parent = &gp3_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_gp3_clk.c), }, }; static struct branch_clk gcc_lpass_q6_axi_clk = { .cbcr_reg = LPASS_Q6_AXI_CBCR, .has_sibling = 1, .base = &virt_base, .halt_check = DELAY, .c = { .dbg_name = "gcc_lpass_q6_axi_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_lpass_q6_axi_clk.c), }, }; static struct branch_clk gcc_mss_q6_bimc_axi_clk = { .cbcr_reg = MSS_Q6_BIMC_AXI_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_mss_q6_bimc_axi_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_mss_q6_bimc_axi_clk.c), }, }; static struct branch_clk gcc_pcie_0_aux_clk = { .cbcr_reg = PCIE_0_AUX_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_pcie_0_aux_clk", .parent = &pcie_0_aux_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_pcie_0_aux_clk.c), }, }; static struct branch_clk gcc_pcie_0_cfg_ahb_clk = { .cbcr_reg = PCIE_0_CFG_AHB_CBCR, .has_sibling = 1, .base = &virt_base, .c = { .dbg_name = "gcc_pcie_0_cfg_ahb_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_pcie_0_cfg_ahb_clk.c), }, }; static struct branch_clk gcc_pcie_0_mstr_axi_clk = { .cbcr_reg = PCIE_0_MSTR_AXI_CBCR, .has_sibling = 1, .base = &virt_base, .c = { .dbg_name = "gcc_pcie_0_mstr_axi_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_pcie_0_mstr_axi_clk.c), }, }; static struct branch_clk gcc_pcie_0_pipe_clk = { .cbcr_reg = PCIE_0_PIPE_CBCR, 
.bcr_reg = PCIE_PHY_0_PHY_BCR, .has_sibling = 0, .base = &virt_base, .halt_check = DELAY, .c = { .dbg_name = "gcc_pcie_0_pipe_clk", .parent = &pcie_0_pipe_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_pcie_0_pipe_clk.c), }, }; static struct branch_clk gcc_pcie_0_slv_axi_clk = { .cbcr_reg = PCIE_0_SLV_AXI_CBCR, .has_sibling = 1, .base = &virt_base, .c = { .dbg_name = "gcc_pcie_0_slv_axi_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_pcie_0_slv_axi_clk.c), }, }; static struct branch_clk gcc_pcie_1_aux_clk = { .cbcr_reg = PCIE_1_AUX_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_pcie_1_aux_clk", .parent = &pcie_1_aux_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_pcie_1_aux_clk.c), }, }; static struct branch_clk gcc_pcie_1_cfg_ahb_clk = { .cbcr_reg = PCIE_1_CFG_AHB_CBCR, .has_sibling = 1, .base = &virt_base, .c = { .dbg_name = "gcc_pcie_1_cfg_ahb_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_pcie_1_cfg_ahb_clk.c), }, }; static struct branch_clk gcc_pcie_1_mstr_axi_clk = { .cbcr_reg = PCIE_1_MSTR_AXI_CBCR, .has_sibling = 1, .base = &virt_base, .c = { .dbg_name = "gcc_pcie_1_mstr_axi_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_pcie_1_mstr_axi_clk.c), }, }; static struct branch_clk gcc_pcie_1_pipe_clk = { .cbcr_reg = PCIE_1_PIPE_CBCR, .bcr_reg = PCIE_PHY_1_PHY_BCR, .has_sibling = 0, .base = &virt_base, .halt_check = DELAY, .c = { .dbg_name = "gcc_pcie_1_pipe_clk", .parent = &pcie_1_pipe_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_pcie_1_pipe_clk.c), }, }; static struct branch_clk gcc_pcie_1_slv_axi_clk = { .cbcr_reg = PCIE_1_SLV_AXI_CBCR, .has_sibling = 1, .base = &virt_base, .c = { .dbg_name = "gcc_pcie_1_slv_axi_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_pcie_1_slv_axi_clk.c), }, }; static struct branch_clk gcc_pdm2_clk = { .cbcr_reg = PDM2_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_pdm2_clk", .parent = &pdm2_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_pdm2_clk.c), }, }; static struct branch_clk gcc_pdm_ahb_clk = { 
.cbcr_reg = PDM_AHB_CBCR, .has_sibling = 1, .base = &virt_base, .c = { .dbg_name = "gcc_pdm_ahb_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_pdm_ahb_clk.c), }, }; static struct local_vote_clk gcc_prng_ahb_clk = { .cbcr_reg = PRNG_AHB_CBCR, .vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE, .en_mask = BIT(13), .base = &virt_base, .c = { .dbg_name = "gcc_prng_ahb_clk", .ops = &clk_ops_vote, CLK_INIT(gcc_prng_ahb_clk.c), }, }; static struct branch_clk gcc_sdcc1_ahb_clk = { .cbcr_reg = SDCC1_AHB_CBCR, .has_sibling = 1, .base = &virt_base, .c = { .dbg_name = "gcc_sdcc1_ahb_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_sdcc1_ahb_clk.c), }, }; static struct branch_clk gcc_sdcc1_apps_clk = { .cbcr_reg = SDCC1_APPS_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_sdcc1_apps_clk", .parent = &sdcc1_apps_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_sdcc1_apps_clk.c), }, }; static struct branch_clk gcc_sdcc2_ahb_clk = { .cbcr_reg = SDCC2_AHB_CBCR, .has_sibling = 1, .base = &virt_base, .c = { .dbg_name = "gcc_sdcc2_ahb_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_sdcc2_ahb_clk.c), }, }; static struct branch_clk gcc_sdcc2_apps_clk = { .cbcr_reg = SDCC2_APPS_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_sdcc2_apps_clk", .parent = &sdcc2_apps_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_sdcc2_apps_clk.c), }, }; static struct branch_clk gcc_sdcc3_ahb_clk = { .cbcr_reg = SDCC3_AHB_CBCR, .has_sibling = 1, .base = &virt_base, .c = { .dbg_name = "gcc_sdcc3_ahb_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_sdcc3_ahb_clk.c), }, }; static struct branch_clk gcc_sdcc3_apps_clk = { .cbcr_reg = SDCC3_APPS_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_sdcc3_apps_clk", .parent = &sdcc3_apps_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_sdcc3_apps_clk.c), }, }; static struct branch_clk gcc_sdcc4_ahb_clk = { .cbcr_reg = SDCC4_AHB_CBCR, .has_sibling = 1, .base = &virt_base, .c = { .dbg_name = "gcc_sdcc4_ahb_clk", .ops = &clk_ops_branch, 
CLK_INIT(gcc_sdcc4_ahb_clk.c), }, }; static struct branch_clk gcc_sdcc4_apps_clk = { .cbcr_reg = SDCC4_APPS_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_sdcc4_apps_clk", .parent = &sdcc4_apps_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_sdcc4_apps_clk.c), }, }; static struct branch_clk gcc_sys_noc_ufs_axi_clk = { .cbcr_reg = SYS_NOC_UFS_AXI_CBCR, .has_sibling = 1, .base = &virt_base, .c = { .dbg_name = "gcc_sys_noc_ufs_axi_clk", .parent = &ufs_axi_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_sys_noc_ufs_axi_clk.c), }, }; static struct branch_clk gcc_sys_noc_usb3_axi_clk = { .cbcr_reg = SYS_NOC_USB3_AXI_CBCR, .has_sibling = 1, .base = &virt_base, .c = { .dbg_name = "gcc_sys_noc_usb3_axi_clk", .parent = &usb30_master_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_sys_noc_usb3_axi_clk.c), }, }; static struct branch_clk gcc_tsif_ahb_clk = { .cbcr_reg = TSIF_AHB_CBCR, .has_sibling = 1, .base = &virt_base, .c = { .dbg_name = "gcc_tsif_ahb_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_tsif_ahb_clk.c), }, }; static struct branch_clk gcc_tsif_ref_clk = { .cbcr_reg = TSIF_REF_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_tsif_ref_clk", .parent = &tsif_ref_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_tsif_ref_clk.c), }, }; static struct branch_clk gcc_ufs_ahb_clk = { .cbcr_reg = UFS_AHB_CBCR, .has_sibling = 1, .base = &virt_base, .c = { .dbg_name = "gcc_ufs_ahb_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_ufs_ahb_clk.c), }, }; static struct branch_clk gcc_ufs_axi_clk = { .cbcr_reg = UFS_AXI_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_ufs_axi_clk", .parent = &ufs_axi_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_ufs_axi_clk.c), }, }; static struct branch_clk gcc_ufs_rx_cfg_clk = { .cbcr_reg = UFS_RX_CFG_CBCR, .has_sibling = 1, .max_div = 16, .base = &virt_base, .c = { .dbg_name = "gcc_ufs_rx_cfg_clk", .parent = &ufs_axi_clk_src.c, .ops = &clk_ops_branch, .rate = 1, CLK_INIT(gcc_ufs_rx_cfg_clk.c), }, }; 
static struct branch_clk gcc_ufs_rx_symbol_0_clk = { .cbcr_reg = UFS_RX_SYMBOL_0_CBCR, .has_sibling = 1, .base = &virt_base, .halt_check = DELAY, .c = { .dbg_name = "gcc_ufs_rx_symbol_0_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_ufs_rx_symbol_0_clk.c), }, }; static struct branch_clk gcc_ufs_rx_symbol_1_clk = { .cbcr_reg = UFS_RX_SYMBOL_1_CBCR, .has_sibling = 1, .base = &virt_base, .halt_check = DELAY, .c = { .dbg_name = "gcc_ufs_rx_symbol_1_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_ufs_rx_symbol_1_clk.c), }, }; static struct branch_clk gcc_ufs_tx_cfg_clk = { .cbcr_reg = UFS_TX_CFG_CBCR, .has_sibling = 1, .max_div = 16, .base = &virt_base, .c = { .dbg_name = "gcc_ufs_tx_cfg_clk", .parent = &ufs_axi_clk_src.c, .ops = &clk_ops_branch, .rate = 1, CLK_INIT(gcc_ufs_tx_cfg_clk.c), }, }; static struct branch_clk gcc_ufs_tx_symbol_0_clk = { .cbcr_reg = UFS_TX_SYMBOL_0_CBCR, .has_sibling = 1, .base = &virt_base, .halt_check = DELAY, .c = { .dbg_name = "gcc_ufs_tx_symbol_0_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_ufs_tx_symbol_0_clk.c), }, }; static struct branch_clk gcc_ufs_tx_symbol_1_clk = { .cbcr_reg = UFS_TX_SYMBOL_1_CBCR, .has_sibling = 1, .base = &virt_base, .halt_check = DELAY, .c = { .dbg_name = "gcc_ufs_tx_symbol_1_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_ufs_tx_symbol_1_clk.c), }, }; static struct branch_clk gcc_usb2_hs_phy_sleep_clk = { .cbcr_reg = USB2_HS_PHY_SLEEP_CBCR, .bcr_reg = USB2_HS_PHY_ONLY_BCR, .has_sibling = 1, .base = &virt_base, .c = { .dbg_name = "gcc_usb2_hs_phy_sleep_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_usb2_hs_phy_sleep_clk.c), }, }; static struct branch_clk gcc_usb30_master_clk = { .cbcr_reg = USB30_MASTER_CBCR, .bcr_reg = USB_30_BCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_usb30_master_clk", .parent = &usb30_master_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_usb30_master_clk.c), .depends = &gcc_sys_noc_usb3_axi_clk.c, }, }; static struct branch_clk gcc_usb30_mock_utmi_clk = { .cbcr_reg = USB30_MOCK_UTMI_CBCR, 
.has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_usb30_mock_utmi_clk", .parent = &usb30_mock_utmi_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_usb30_mock_utmi_clk.c), }, }; static struct branch_clk gcc_usb30_sleep_clk = { .cbcr_reg = USB30_SLEEP_CBCR, .has_sibling = 1, .base = &virt_base, .c = { .dbg_name = "gcc_usb30_sleep_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_usb30_sleep_clk.c), }, }; static struct branch_clk gcc_usb3_phy_aux_clk = { .cbcr_reg = USB3_PHY_AUX_CBCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_usb3_phy_aux_clk", .parent = &usb3_phy_aux_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_usb3_phy_aux_clk.c), }, }; static struct gate_clk gcc_usb3_phy_pipe_clk = { .en_reg = USB3_PHY_PIPE_CBCR, .en_mask = BIT(0), .delay_us = 50, .base = &virt_base, .c = { .dbg_name = "gcc_usb3_phy_pipe_clk", .ops = &clk_ops_gate, CLK_INIT(gcc_usb3_phy_pipe_clk.c), }, }; static struct reset_clk gcc_usb3phy_phy_reset = { .reset_reg = USB3PHY_PHY_BCR, .base = &virt_base, .c = { .dbg_name = "gcc_usb3phy_phy_reset", .ops = &clk_ops_rst, CLK_INIT(gcc_usb3phy_phy_reset.c), }, }; static struct branch_clk gcc_usb_hs_ahb_clk = { .cbcr_reg = USB_HS_AHB_CBCR, .has_sibling = 1, .base = &virt_base, .c = { .dbg_name = "gcc_usb_hs_ahb_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_usb_hs_ahb_clk.c), }, }; static struct branch_clk gcc_usb_hs_system_clk = { .cbcr_reg = USB_HS_SYSTEM_CBCR, .bcr_reg = USB_HS_BCR, .has_sibling = 0, .base = &virt_base, .c = { .dbg_name = "gcc_usb_hs_system_clk", .parent = &usb_hs_system_clk_src.c, .ops = &clk_ops_branch, CLK_INIT(gcc_usb_hs_system_clk.c), }, }; static struct branch_clk gcc_usb_phy_cfg_ahb2phy_clk = { .cbcr_reg = USB_PHY_CFG_AHB2PHY_CBCR, .has_sibling = 1, .base = &virt_base, .c = { .dbg_name = "gcc_usb_phy_cfg_ahb2phy_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_usb_phy_cfg_ahb2phy_clk.c), }, }; static struct mux_clk gcc_debug_mux; static struct clk_ops clk_ops_debug_mux; static struct clk_mux_ops 
gcc_debug_mux_ops; static struct measure_clk_data debug_mux_priv = { .cxo = &gcc_xo.c, .plltest_reg = PLLTEST_PAD_CFG, .plltest_val = 0x51A00, .xo_div4_cbcr = GCC_XO_DIV4_CBCR, .ctl_reg = CLOCK_FRQ_MEASURE_CTL, .status_reg = CLOCK_FRQ_MEASURE_STATUS, .base = &virt_base, }; static int gcc_set_mux_sel(struct mux_clk *clk, int sel) { u32 regval; /* Zero out CDIV bits in top level debug mux register */ regval = readl_relaxed(GCC_REG_BASE(GCC_DEBUG_CLK_CTL)); regval &= ~BM(15, 12); writel_relaxed(regval, GCC_REG_BASE(GCC_DEBUG_CLK_CTL)); /* * RPM clocks use the same GCC debug mux. Don't reprogram * the mux (selection) register. */ if (sel == 0xFFFF) return 0; mux_reg_ops.set_mux_sel(clk, sel); return 0; } static struct mux_clk gcc_debug_mux = { .priv = &debug_mux_priv, .ops = &gcc_debug_mux_ops, .en_mask = BIT(16), .mask = 0x3FF, .base = &virt_dbgbase, MUX_REC_SRC_LIST( &debug_mmss_clk.c, &debug_rpm_clk.c, &debug_cpu_clk.c, ), MUX_SRC_LIST( { &debug_cpu_clk.c, 0x016A }, { &debug_mmss_clk.c, 0x002b }, { &debug_rpm_clk.c, 0xffff }, { &gcc_sys_noc_usb3_axi_clk.c, 0x0006 }, { &gcc_mss_q6_bimc_axi_clk.c, 0x0031 }, { &gcc_usb30_master_clk.c, 0x0050 }, { &gcc_usb30_sleep_clk.c, 0x0051 }, { &gcc_usb30_mock_utmi_clk.c, 0x0052 }, { &gcc_usb3_phy_aux_clk.c, 0x0053 }, { &gcc_usb3_phy_pipe_clk.c, 0x0054 }, { &gcc_sys_noc_ufs_axi_clk.c, 0x0058 }, { &gcc_usb_hs_system_clk.c, 0x0060 }, { &gcc_usb_hs_ahb_clk.c, 0x0061 }, { &gcc_usb2_hs_phy_sleep_clk.c, 0x0063 }, { &gcc_usb_phy_cfg_ahb2phy_clk.c, 0x0064 }, { &gcc_sdcc1_apps_clk.c, 0x0068 }, { &gcc_sdcc1_ahb_clk.c, 0x0069 }, { &gcc_sdcc2_apps_clk.c, 0x0070 }, { &gcc_sdcc2_ahb_clk.c, 0x0071 }, { &gcc_sdcc3_apps_clk.c, 0x0078 }, { &gcc_sdcc3_ahb_clk.c, 0x0079 }, { &gcc_sdcc4_apps_clk.c, 0x0080 }, { &gcc_sdcc4_ahb_clk.c, 0x0081 }, { &gcc_blsp1_ahb_clk.c, 0x0088 }, { &gcc_blsp1_qup1_spi_apps_clk.c, 0x008a }, { &gcc_blsp1_qup1_i2c_apps_clk.c, 0x008b }, { &gcc_blsp1_uart1_apps_clk.c, 0x008c }, { &gcc_blsp1_qup2_spi_apps_clk.c, 0x008e }, { 
&gcc_blsp1_qup2_i2c_apps_clk.c, 0x0090 }, { &gcc_blsp1_uart2_apps_clk.c, 0x0091 }, { &gcc_blsp1_qup3_spi_apps_clk.c, 0x0093 }, { &gcc_blsp1_qup3_i2c_apps_clk.c, 0x0094 }, { &gcc_blsp1_uart3_apps_clk.c, 0x0095 }, { &gcc_blsp1_qup4_spi_apps_clk.c, 0x0098 }, { &gcc_blsp1_qup4_i2c_apps_clk.c, 0x0099 }, { &gcc_blsp1_uart4_apps_clk.c, 0x009a }, { &gcc_blsp1_qup5_spi_apps_clk.c, 0x009c }, { &gcc_blsp1_qup5_i2c_apps_clk.c, 0x009d }, { &gcc_blsp1_uart5_apps_clk.c, 0x009e }, { &gcc_blsp1_qup6_spi_apps_clk.c, 0x00a1 }, { &gcc_blsp1_qup6_i2c_apps_clk.c, 0x00a2 }, { &gcc_blsp1_uart6_apps_clk.c, 0x00a3 }, { &gcc_blsp2_ahb_clk.c, 0x00a8 }, { &gcc_blsp2_qup1_spi_apps_clk.c, 0x00aa }, { &gcc_blsp2_qup1_i2c_apps_clk.c, 0x00ab }, { &gcc_blsp2_uart1_apps_clk.c, 0x00ac }, { &gcc_blsp2_qup2_spi_apps_clk.c, 0x00ae }, { &gcc_blsp2_qup2_i2c_apps_clk.c, 0x00b0 }, { &gcc_blsp2_uart2_apps_clk.c, 0x00b1 }, { &gcc_blsp2_qup3_spi_apps_clk.c, 0x00b3 }, { &gcc_blsp2_qup3_i2c_apps_clk.c, 0x00b4 }, { &gcc_blsp2_uart3_apps_clk.c, 0x00b5 }, { &gcc_blsp2_qup4_spi_apps_clk.c, 0x00b8 }, { &gcc_blsp2_qup4_i2c_apps_clk.c, 0x00b9 }, { &gcc_blsp2_uart4_apps_clk.c, 0x00ba }, { &gcc_blsp2_qup5_spi_apps_clk.c, 0x00bc }, { &gcc_blsp2_qup5_i2c_apps_clk.c, 0x00bd }, { &gcc_blsp2_uart5_apps_clk.c, 0x00be }, { &gcc_blsp2_qup6_spi_apps_clk.c, 0x00c1 }, { &gcc_blsp2_qup6_i2c_apps_clk.c, 0x00c2 }, { &gcc_blsp2_uart6_apps_clk.c, 0x00c3 }, { &gcc_pdm_ahb_clk.c, 0x00d0 }, { &gcc_pdm2_clk.c, 0x00d2 }, { &gcc_prng_ahb_clk.c, 0x00d8 }, { &gcc_bam_dma_ahb_clk.c, 0x00e0 }, { &gcc_tsif_ahb_clk.c, 0x00e8 }, { &gcc_tsif_ref_clk.c, 0x00e9 }, { &gcc_boot_rom_ahb_clk.c, 0x00f8 }, { &gcc_lpass_q6_axi_clk.c, 0x0160 }, { &gcc_pcie_0_slv_axi_clk.c, 0x01e8 }, { &gcc_pcie_0_mstr_axi_clk.c, 0x01e9 }, { &gcc_pcie_0_cfg_ahb_clk.c, 0x01ea }, { &gcc_pcie_0_aux_clk.c, 0x01eb }, { &gcc_pcie_0_pipe_clk.c, 0x01ec }, { &gcc_pcie_1_slv_axi_clk.c, 0x01f0 }, { &gcc_pcie_1_mstr_axi_clk.c, 0x01f1 }, { &gcc_pcie_1_cfg_ahb_clk.c, 0x01f2 }, { 
&gcc_pcie_1_aux_clk.c, 0x01f3 }, { &gcc_pcie_1_pipe_clk.c, 0x01f4 }, { &gcc_ufs_axi_clk.c, 0x0230 }, { &gcc_ufs_ahb_clk.c, 0x0231 }, { &gcc_ufs_tx_cfg_clk.c, 0x0232 }, { &gcc_ufs_rx_cfg_clk.c, 0x0233 }, { &gcc_ufs_tx_symbol_0_clk.c, 0x0234 }, { &gcc_ufs_tx_symbol_1_clk.c, 0x0235 }, { &gcc_ufs_rx_symbol_0_clk.c, 0x0236 }, { &gcc_ufs_rx_symbol_1_clk.c, 0x0237 }, ), .c = { .dbg_name = "gcc_debug_mux", .ops = &clk_ops_debug_mux, .flags = CLKFLAG_NO_RATE_CACHE | CLKFLAG_MEASURE, CLK_INIT(gcc_debug_mux.c), }, }; static struct clk_lookup gcc_clocks_8994_v1[] = { CLK_LIST(gcc_bam_dma_ahb_clk), }; static struct clk_lookup gcc_clocks_8994_common[] = { CLK_LIST(gcc_xo), CLK_LIST(gcc_xo_a_clk), CLK_LIST(gpll0), CLK_LIST(gpll0_ao), CLK_LIST(gpll0_out_main), CLK_LIST(gpll4), CLK_LIST(gpll4_out_main), CLK_LIST(ufs_axi_clk_src), CLK_LIST(usb30_master_clk_src), CLK_LIST(blsp1_qup1_i2c_apps_clk_src), CLK_LIST(blsp1_qup1_spi_apps_clk_src), CLK_LIST(blsp1_qup2_i2c_apps_clk_src), CLK_LIST(blsp1_qup2_spi_apps_clk_src), CLK_LIST(blsp1_qup3_i2c_apps_clk_src), CLK_LIST(blsp1_qup3_spi_apps_clk_src), CLK_LIST(blsp1_qup4_i2c_apps_clk_src), CLK_LIST(blsp1_qup4_spi_apps_clk_src), CLK_LIST(blsp1_qup5_i2c_apps_clk_src), CLK_LIST(blsp1_qup5_spi_apps_clk_src), CLK_LIST(blsp1_qup6_i2c_apps_clk_src), CLK_LIST(blsp1_qup6_spi_apps_clk_src), CLK_LIST(blsp1_uart1_apps_clk_src), CLK_LIST(blsp1_uart2_apps_clk_src), CLK_LIST(blsp1_uart3_apps_clk_src), CLK_LIST(blsp1_uart4_apps_clk_src), CLK_LIST(blsp1_uart5_apps_clk_src), CLK_LIST(blsp1_uart6_apps_clk_src), CLK_LIST(blsp2_qup1_i2c_apps_clk_src), CLK_LIST(blsp2_qup1_spi_apps_clk_src), CLK_LIST(blsp2_qup2_i2c_apps_clk_src), CLK_LIST(blsp2_qup2_spi_apps_clk_src), CLK_LIST(blsp2_qup3_i2c_apps_clk_src), CLK_LIST(blsp2_qup3_spi_apps_clk_src), CLK_LIST(blsp2_qup4_i2c_apps_clk_src), CLK_LIST(blsp2_qup4_spi_apps_clk_src), CLK_LIST(blsp2_qup5_i2c_apps_clk_src), CLK_LIST(blsp2_qup5_spi_apps_clk_src), CLK_LIST(blsp2_qup6_i2c_apps_clk_src), 
CLK_LIST(blsp2_qup6_spi_apps_clk_src), CLK_LIST(blsp2_uart1_apps_clk_src), CLK_LIST(blsp2_uart2_apps_clk_src), CLK_LIST(blsp2_uart3_apps_clk_src), CLK_LIST(blsp2_uart4_apps_clk_src), CLK_LIST(blsp2_uart5_apps_clk_src), CLK_LIST(blsp2_uart6_apps_clk_src), CLK_LIST(gp1_clk_src), CLK_LIST(gp2_clk_src), CLK_LIST(gp3_clk_src), CLK_LIST(pcie_0_aux_clk_src), CLK_LIST(pcie_0_pipe_clk_src), CLK_LIST(pcie_1_aux_clk_src), CLK_LIST(pcie_1_pipe_clk_src), CLK_LIST(pdm2_clk_src), CLK_LIST(sdcc1_apps_clk_src), CLK_LIST(sdcc2_apps_clk_src), CLK_LIST(sdcc3_apps_clk_src), CLK_LIST(sdcc4_apps_clk_src), CLK_LIST(tsif_ref_clk_src), CLK_LIST(usb30_mock_utmi_clk_src), CLK_LIST(usb3_phy_aux_clk_src), CLK_LIST(usb_hs_system_clk_src), CLK_LIST(gcc_pcie_phy_0_reset), CLK_LIST(gcc_pcie_phy_1_reset), CLK_LIST(gcc_qusb2_phy_reset), CLK_LIST(gcc_usb3_phy_reset), CLK_LIST(gpll0_out_mmsscc), CLK_LIST(gpll0_out_msscc), CLK_LIST(pcie_0_phy_ldo), CLK_LIST(pcie_1_phy_ldo), CLK_LIST(ufs_phy_ldo), CLK_LIST(usb_ss_phy_ldo), CLK_LIST(gcc_blsp1_ahb_clk), CLK_LIST(gcc_blsp1_qup1_i2c_apps_clk), CLK_LIST(gcc_blsp1_qup1_spi_apps_clk), CLK_LIST(gcc_blsp1_qup2_i2c_apps_clk), CLK_LIST(gcc_blsp1_qup2_spi_apps_clk), CLK_LIST(gcc_blsp1_qup3_i2c_apps_clk), CLK_LIST(gcc_blsp1_qup3_spi_apps_clk), CLK_LIST(gcc_blsp1_qup4_i2c_apps_clk), CLK_LIST(gcc_blsp1_qup4_spi_apps_clk), CLK_LIST(gcc_blsp1_qup5_i2c_apps_clk), CLK_LIST(gcc_blsp1_qup5_spi_apps_clk), CLK_LIST(gcc_blsp1_qup6_i2c_apps_clk), CLK_LIST(gcc_blsp1_qup6_spi_apps_clk), CLK_LIST(gcc_blsp1_uart1_apps_clk), CLK_LIST(gcc_blsp1_uart2_apps_clk), CLK_LIST(gcc_blsp1_uart3_apps_clk), CLK_LIST(gcc_blsp1_uart4_apps_clk), CLK_LIST(gcc_blsp1_uart5_apps_clk), CLK_LIST(gcc_blsp1_uart6_apps_clk), CLK_LIST(gcc_blsp2_ahb_clk), CLK_LIST(gcc_blsp2_qup1_i2c_apps_clk), CLK_LIST(gcc_blsp2_qup1_spi_apps_clk), CLK_LIST(gcc_blsp2_qup2_i2c_apps_clk), CLK_LIST(gcc_blsp2_qup2_spi_apps_clk), CLK_LIST(gcc_blsp2_qup3_i2c_apps_clk), CLK_LIST(gcc_blsp2_qup3_spi_apps_clk), 
CLK_LIST(gcc_blsp2_qup4_i2c_apps_clk), CLK_LIST(gcc_blsp2_qup4_spi_apps_clk), CLK_LIST(gcc_blsp2_qup5_i2c_apps_clk), CLK_LIST(gcc_blsp2_qup5_spi_apps_clk), CLK_LIST(gcc_blsp2_qup6_i2c_apps_clk), CLK_LIST(gcc_blsp2_qup6_spi_apps_clk), CLK_LIST(gcc_blsp2_uart1_apps_clk), CLK_LIST(gcc_blsp2_uart2_apps_clk), CLK_LIST(gcc_blsp2_uart3_apps_clk), CLK_LIST(gcc_blsp2_uart4_apps_clk), CLK_LIST(gcc_blsp2_uart5_apps_clk), CLK_LIST(gcc_blsp2_uart6_apps_clk), CLK_LIST(gcc_boot_rom_ahb_clk), CLK_LIST(gcc_gp1_clk), CLK_LIST(gcc_gp2_clk), CLK_LIST(gcc_gp3_clk), CLK_LIST(gcc_lpass_q6_axi_clk), CLK_LIST(gcc_mss_q6_bimc_axi_clk), CLK_LIST(gcc_pcie_0_aux_clk), CLK_LIST(gcc_pcie_0_cfg_ahb_clk), CLK_LIST(gcc_pcie_0_mstr_axi_clk), CLK_LIST(gcc_pcie_0_pipe_clk), CLK_LIST(gcc_pcie_0_slv_axi_clk), CLK_LIST(gcc_pcie_1_aux_clk), CLK_LIST(gcc_pcie_1_cfg_ahb_clk), CLK_LIST(gcc_pcie_1_mstr_axi_clk), CLK_LIST(gcc_pcie_1_pipe_clk), CLK_LIST(gcc_pcie_1_slv_axi_clk), CLK_LIST(gcc_pdm2_clk), CLK_LIST(gcc_pdm_ahb_clk), CLK_LIST(gcc_prng_ahb_clk), CLK_LIST(gcc_sdcc1_ahb_clk), CLK_LIST(gcc_sdcc1_apps_clk), CLK_LIST(gcc_sdcc2_ahb_clk), CLK_LIST(gcc_sdcc2_apps_clk), CLK_LIST(gcc_sdcc3_ahb_clk), CLK_LIST(gcc_sdcc3_apps_clk), CLK_LIST(gcc_sdcc4_ahb_clk), CLK_LIST(gcc_sdcc4_apps_clk), CLK_LIST(gcc_sys_noc_ufs_axi_clk), CLK_LIST(gcc_sys_noc_usb3_axi_clk), CLK_LIST(gcc_tsif_ahb_clk), CLK_LIST(gcc_tsif_ref_clk), CLK_LIST(gcc_ufs_ahb_clk), CLK_LIST(gcc_ufs_axi_clk), CLK_LIST(gcc_ufs_rx_cfg_clk), CLK_LIST(gcc_ufs_rx_symbol_0_clk), CLK_LIST(gcc_ufs_rx_symbol_1_clk), CLK_LIST(gcc_ufs_tx_cfg_clk), CLK_LIST(gcc_ufs_tx_symbol_0_clk), CLK_LIST(gcc_ufs_tx_symbol_1_clk), CLK_LIST(gcc_usb2_hs_phy_sleep_clk), CLK_LIST(gcc_usb30_master_clk), CLK_LIST(gcc_usb30_mock_utmi_clk), CLK_LIST(gcc_usb30_sleep_clk), CLK_LIST(gcc_usb3_phy_aux_clk), CLK_LIST(gcc_usb3_phy_pipe_clk), CLK_LIST(gcc_usb3phy_phy_reset), CLK_LIST(gcc_usb_hs_ahb_clk), CLK_LIST(gcc_usb_hs_system_clk), CLK_LIST(gcc_usb_phy_cfg_ahb2phy_clk), }; static void 
msm_gcc_8994v2_fixup(void) { ufs_axi_clk_src.freq_tbl = ftbl_ufs_axi_clk_src_v2; ufs_axi_clk_src.c.fmax[VDD_DIG_NOMINAL] = 200000000; ufs_axi_clk_src.c.fmax[VDD_DIG_HIGH] = 240000000; blsp1_qup1_spi_apps_clk_src.freq_tbl = ftbl_blspqup_spi_apps_clk_src_v2; blsp1_qup2_spi_apps_clk_src.freq_tbl = ftbl_blspqup_spi_apps_clk_src_v2; blsp1_qup3_spi_apps_clk_src.freq_tbl = ftbl_blspqup_spi_apps_clk_src_v2; blsp1_qup4_spi_apps_clk_src.freq_tbl = ftbl_blspqup_spi_apps_clk_src_v2; blsp1_qup5_spi_apps_clk_src.freq_tbl = ftbl_blspqup_spi_apps_clk_src_v2; blsp1_qup6_spi_apps_clk_src.freq_tbl = ftbl_blspqup_spi_apps_clk_src_v2; blsp2_qup1_spi_apps_clk_src.freq_tbl = ftbl_blspqup_spi_apps_clk_src_v2; blsp2_qup2_spi_apps_clk_src.freq_tbl = ftbl_blspqup_spi_apps_clk_src_v2; blsp2_qup3_spi_apps_clk_src.freq_tbl = ftbl_blspqup_spi_apps_clk_src_v2; blsp2_qup4_spi_apps_clk_src.freq_tbl = ftbl_blspqup_spi_apps_clk_src_v2; blsp2_qup5_spi_apps_clk_src.freq_tbl = ftbl_blspqup_spi_apps_clk_src_v2; blsp2_qup6_spi_apps_clk_src.freq_tbl = ftbl_blspqup_spi_apps_clk_src_v2; blsp1_qup1_spi_apps_clk_src.c.fmax[VDD_DIG_NOMINAL] = 50000000; blsp1_qup2_spi_apps_clk_src.c.fmax[VDD_DIG_NOMINAL] = 50000000; blsp1_qup3_spi_apps_clk_src.c.fmax[VDD_DIG_NOMINAL] = 50000000; blsp1_qup4_spi_apps_clk_src.c.fmax[VDD_DIG_NOMINAL] = 50000000; blsp1_qup5_spi_apps_clk_src.c.fmax[VDD_DIG_NOMINAL] = 50000000; blsp1_qup6_spi_apps_clk_src.c.fmax[VDD_DIG_NOMINAL] = 50000000; blsp2_qup1_spi_apps_clk_src.c.fmax[VDD_DIG_NOMINAL] = 50000000; blsp2_qup2_spi_apps_clk_src.c.fmax[VDD_DIG_NOMINAL] = 50000000; blsp2_qup3_spi_apps_clk_src.c.fmax[VDD_DIG_NOMINAL] = 50000000; blsp2_qup4_spi_apps_clk_src.c.fmax[VDD_DIG_NOMINAL] = 50000000; blsp2_qup5_spi_apps_clk_src.c.fmax[VDD_DIG_NOMINAL] = 50000000; blsp2_qup6_spi_apps_clk_src.c.fmax[VDD_DIG_NOMINAL] = 50000000; blsp1_qup1_spi_apps_clk_src.c.fmax[VDD_DIG_HIGH] = 0; blsp1_qup2_spi_apps_clk_src.c.fmax[VDD_DIG_HIGH] = 0; blsp1_qup3_spi_apps_clk_src.c.fmax[VDD_DIG_HIGH] = 0; 
blsp1_qup4_spi_apps_clk_src.c.fmax[VDD_DIG_HIGH] = 0; blsp1_qup5_spi_apps_clk_src.c.fmax[VDD_DIG_HIGH] = 0; blsp1_qup6_spi_apps_clk_src.c.fmax[VDD_DIG_HIGH] = 0; blsp2_qup1_spi_apps_clk_src.c.fmax[VDD_DIG_HIGH] = 0; blsp2_qup2_spi_apps_clk_src.c.fmax[VDD_DIG_HIGH] = 0; blsp2_qup3_spi_apps_clk_src.c.fmax[VDD_DIG_HIGH] = 0; blsp2_qup4_spi_apps_clk_src.c.fmax[VDD_DIG_HIGH] = 0; blsp2_qup5_spi_apps_clk_src.c.fmax[VDD_DIG_HIGH] = 0; blsp2_qup6_spi_apps_clk_src.c.fmax[VDD_DIG_HIGH] = 0; } static int msm_gcc_8994_probe(struct platform_device *pdev) { struct resource *res; struct clk *tmp_clk; int ret; const char *compat = NULL; int compatlen = 0; bool is_v2 = false; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base"); if (!res) { dev_err(&pdev->dev, "Failed to get CC base.\n"); return -EINVAL; } virt_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!virt_base) { dev_err(&pdev->dev, "Failed to map in CC registers.\n"); return -ENOMEM; } vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig"); if (IS_ERR(vdd_dig.regulator[0])) { if (!(PTR_ERR(vdd_dig.regulator[0]) == -EPROBE_DEFER)) dev_err(&pdev->dev, "Unable to get vdd_dig regulator!"); return PTR_ERR(vdd_dig.regulator[0]); } tmp_clk = gcc_xo.c.parent = devm_clk_get(&pdev->dev, "xo"); if (IS_ERR(tmp_clk)) { if (!(PTR_ERR(tmp_clk) == -EPROBE_DEFER)) dev_err(&pdev->dev, "Unable to get xo clock!"); return PTR_ERR(tmp_clk); } tmp_clk = gcc_xo_a_clk.c.parent = devm_clk_get(&pdev->dev, "xo_a_clk"); if (IS_ERR(tmp_clk)) { if (!(PTR_ERR(tmp_clk) == -EPROBE_DEFER)) dev_err(&pdev->dev, "Unable to get xo_a_clk clock!"); return PTR_ERR(tmp_clk); } /* Perform revision specific fixes */ compat = of_get_property(pdev->dev.of_node, "compatible", &compatlen); if (!compat || (compatlen <= 0)) return -EINVAL; is_v2 = !strcmp(compat, "qcom,gcc-8994v2"); if (is_v2) msm_gcc_8994v2_fixup(); /* register common clock table */ ret = of_msm_clock_register(pdev->dev.of_node, gcc_clocks_8994_common, 
ARRAY_SIZE(gcc_clocks_8994_common)); if (ret) return ret; if (!is_v2) { /* register v1 specific clocks */ ret = of_msm_clock_register(pdev->dev.of_node, gcc_clocks_8994_v1, ARRAY_SIZE(gcc_clocks_8994_v1)); if (ret) return ret; } dev_info(&pdev->dev, "Registered GCC clocks.\n"); return 0; } static struct of_device_id msm_clock_gcc_match_table[] = { { .compatible = "qcom,gcc-8994" }, { .compatible = "qcom,gcc-8994v2" }, {} }; static struct platform_driver msm_clock_gcc_driver = { .probe = msm_gcc_8994_probe, .driver = { .name = "qcom,gcc-8994", .of_match_table = msm_clock_gcc_match_table, .owner = THIS_MODULE, }, }; int __init msm_gcc_8994_init(void) { return platform_driver_register(&msm_clock_gcc_driver); } arch_initcall(msm_gcc_8994_init); /* ======== Clock Debug Controller ======== */ static struct clk_lookup msm_clocks_measure_8994[] = { CLK_LIST(debug_mmss_clk), CLK_LIST(debug_rpm_clk), CLK_LIST(debug_cpu_clk), CLK_LOOKUP_OF("measure", gcc_debug_mux, "debug"), }; static struct of_device_id msm_clock_debug_match_table[] = { { .compatible = "qcom,cc-debug-8994" }, {} }; static int msm_clock_debug_8994_probe(struct platform_device *pdev) { struct resource *res; int ret; clk_ops_debug_mux = clk_ops_gen_mux; clk_ops_debug_mux.get_rate = measure_get_rate; gcc_debug_mux_ops = mux_reg_ops; gcc_debug_mux_ops.set_mux_sel = gcc_set_mux_sel; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base"); if (!res) { dev_err(&pdev->dev, "Failed to get CC base.\n"); return -EINVAL; } virt_dbgbase = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!virt_dbgbase) { dev_err(&pdev->dev, "Failed to map in CC registers.\n"); return -ENOMEM; } debug_mmss_clk.dev = &pdev->dev; debug_mmss_clk.clk_id = "debug_mmss_clk"; debug_rpm_clk.dev = &pdev->dev; debug_rpm_clk.clk_id = "debug_rpm_clk"; debug_cpu_clk.dev = &pdev->dev; debug_cpu_clk.clk_id = "debug_cpu_clk"; ret = of_msm_clock_register(pdev->dev.of_node, msm_clocks_measure_8994, 
ARRAY_SIZE(msm_clocks_measure_8994)); if (ret) return ret; dev_info(&pdev->dev, "Registered debug mux.\n"); return ret; } static struct platform_driver msm_clock_debug_driver = { .probe = msm_clock_debug_8994_probe, .driver = { .name = "qcom,cc-debug-8994", .of_match_table = msm_clock_debug_match_table, .owner = THIS_MODULE, }, }; int __init msm_clock_debug_8994_init(void) { return platform_driver_register(&msm_clock_debug_driver); } late_initcall(msm_clock_debug_8994_init);
gpl-2.0
enleightond/linux
drivers/rtc/rtc-bq4802.c
1425
4300
/* rtc-bq4802.c: TI BQ4802 RTC driver. * * Copyright (C) 2008 David S. Miller <davem@davemloft.net> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/rtc.h> #include <linux/bcd.h> #include <linux/slab.h> MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); MODULE_DESCRIPTION("TI BQ4802 RTC driver"); MODULE_LICENSE("GPL"); struct bq4802 { void __iomem *regs; unsigned long ioport; struct rtc_device *rtc; spinlock_t lock; struct resource *r; u8 (*read)(struct bq4802 *, int); void (*write)(struct bq4802 *, int, u8); }; static u8 bq4802_read_io(struct bq4802 *p, int off) { return inb(p->ioport + off); } static void bq4802_write_io(struct bq4802 *p, int off, u8 val) { outb(val, p->ioport + off); } static u8 bq4802_read_mem(struct bq4802 *p, int off) { return readb(p->regs + off); } static void bq4802_write_mem(struct bq4802 *p, int off, u8 val) { writeb(val, p->regs + off); } static int bq4802_read_time(struct device *dev, struct rtc_time *tm) { struct platform_device *pdev = to_platform_device(dev); struct bq4802 *p = platform_get_drvdata(pdev); unsigned long flags; unsigned int century; u8 val; spin_lock_irqsave(&p->lock, flags); val = p->read(p, 0x0e); p->write(p, 0xe, val | 0x08); tm->tm_sec = p->read(p, 0x00); tm->tm_min = p->read(p, 0x02); tm->tm_hour = p->read(p, 0x04); tm->tm_mday = p->read(p, 0x06); tm->tm_mon = p->read(p, 0x09); tm->tm_year = p->read(p, 0x0a); tm->tm_wday = p->read(p, 0x08); century = p->read(p, 0x0f); p->write(p, 0x0e, val); spin_unlock_irqrestore(&p->lock, flags); tm->tm_sec = bcd2bin(tm->tm_sec); tm->tm_min = bcd2bin(tm->tm_min); tm->tm_hour = bcd2bin(tm->tm_hour); tm->tm_mday = bcd2bin(tm->tm_mday); tm->tm_mon = bcd2bin(tm->tm_mon); tm->tm_year = bcd2bin(tm->tm_year); tm->tm_wday = bcd2bin(tm->tm_wday); century = bcd2bin(century); tm->tm_year += (century * 100); tm->tm_year -= 1900; tm->tm_mon--; return 0; } static int 
bq4802_set_time(struct device *dev, struct rtc_time *tm) { struct platform_device *pdev = to_platform_device(dev); struct bq4802 *p = platform_get_drvdata(pdev); u8 sec, min, hrs, day, mon, yrs, century, val; unsigned long flags; unsigned int year; year = tm->tm_year + 1900; century = year / 100; yrs = year % 100; mon = tm->tm_mon + 1; /* tm_mon starts at zero */ day = tm->tm_mday; hrs = tm->tm_hour; min = tm->tm_min; sec = tm->tm_sec; sec = bin2bcd(sec); min = bin2bcd(min); hrs = bin2bcd(hrs); day = bin2bcd(day); mon = bin2bcd(mon); yrs = bin2bcd(yrs); century = bin2bcd(century); spin_lock_irqsave(&p->lock, flags); val = p->read(p, 0x0e); p->write(p, 0x0e, val | 0x08); p->write(p, 0x00, sec); p->write(p, 0x02, min); p->write(p, 0x04, hrs); p->write(p, 0x06, day); p->write(p, 0x09, mon); p->write(p, 0x0a, yrs); p->write(p, 0x0f, century); p->write(p, 0x0e, val); spin_unlock_irqrestore(&p->lock, flags); return 0; } static const struct rtc_class_ops bq4802_ops = { .read_time = bq4802_read_time, .set_time = bq4802_set_time, }; static int bq4802_probe(struct platform_device *pdev) { struct bq4802 *p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL); int err = -ENOMEM; if (!p) goto out; spin_lock_init(&p->lock); p->r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!p->r) { p->r = platform_get_resource(pdev, IORESOURCE_IO, 0); err = -EINVAL; if (!p->r) goto out; } if (p->r->flags & IORESOURCE_IO) { p->ioport = p->r->start; p->read = bq4802_read_io; p->write = bq4802_write_io; } else if (p->r->flags & IORESOURCE_MEM) { p->regs = devm_ioremap(&pdev->dev, p->r->start, resource_size(p->r)); p->read = bq4802_read_mem; p->write = bq4802_write_mem; } else { err = -EINVAL; goto out; } platform_set_drvdata(pdev, p); p->rtc = devm_rtc_device_register(&pdev->dev, "bq4802", &bq4802_ops, THIS_MODULE); if (IS_ERR(p->rtc)) { err = PTR_ERR(p->rtc); goto out; } err = 0; out: return err; } /* work with hotplug and coldplug */ MODULE_ALIAS("platform:rtc-bq4802"); static struct 
platform_driver bq4802_driver = { .driver = { .name = "rtc-bq4802", }, .probe = bq4802_probe, }; module_platform_driver(bq4802_driver);
gpl-2.0
wRieDen/imapx210-nb-linux-kernel
drivers/ata/pata_palmld.c
1681
3531
/* * drivers/ata/pata_palmld.c * * Driver for IDE channel in Palm LifeDrive * * Based on research of: * Alex Osborne <ato@meshy.org> * * Rewrite for mainline: * Marek Vasut <marek.vasut@gmail.com> * * Rewritten version based on pata_ixp4xx_cf.c: * ixp4xx PATA/Compact Flash driver * Copyright (C) 2006-07 Tower Technologies * Author: Alessandro Zummo <a.zummo@towertech.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/libata.h> #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/gpio.h> #include <scsi/scsi_host.h> #include <mach/palmld.h> #define DRV_NAME "pata_palmld" static struct scsi_host_template palmld_sht = { ATA_PIO_SHT(DRV_NAME), }; static struct ata_port_operations palmld_port_ops = { .inherits = &ata_sff_port_ops, .sff_data_xfer = ata_sff_data_xfer_noirq, .cable_detect = ata_cable_40wire, }; static __devinit int palmld_pata_probe(struct platform_device *pdev) { struct ata_host *host; struct ata_port *ap; void __iomem *mem; int ret; /* allocate host */ host = ata_host_alloc(&pdev->dev, 1); if (!host) return -ENOMEM; /* remap drive's physical memory address */ mem = devm_ioremap(&pdev->dev, PALMLD_IDE_PHYS, 0x1000); if (!mem) return -ENOMEM; /* request and activate power GPIO, IRQ GPIO */ ret = gpio_request(GPIO_NR_PALMLD_IDE_PWEN, "HDD PWR"); if (ret) goto err1; ret = gpio_direction_output(GPIO_NR_PALMLD_IDE_PWEN, 1); if (ret) goto err2; ret = gpio_request(GPIO_NR_PALMLD_IDE_RESET, "HDD RST"); if (ret) goto err2; ret = gpio_direction_output(GPIO_NR_PALMLD_IDE_RESET, 0); if (ret) goto err3; /* reset the drive */ gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 0); msleep(30); gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 1); msleep(30); /* setup the ata port */ ap = host->ports[0]; ap->ops = &palmld_port_ops; 
ap->pio_mask = ATA_PIO4; ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY | ATA_FLAG_PIO_POLLING; /* memory mapping voodoo */ ap->ioaddr.cmd_addr = mem + 0x10; ap->ioaddr.altstatus_addr = mem + 0xe; ap->ioaddr.ctl_addr = mem + 0xe; /* start the port */ ata_sff_std_ports(&ap->ioaddr); /* activate host */ return ata_host_activate(host, 0, NULL, IRQF_TRIGGER_RISING, &palmld_sht); err3: gpio_free(GPIO_NR_PALMLD_IDE_RESET); err2: gpio_free(GPIO_NR_PALMLD_IDE_PWEN); err1: return ret; } static __devexit int palmld_pata_remove(struct platform_device *dev) { struct ata_host *host = platform_get_drvdata(dev); ata_host_detach(host); /* power down the HDD */ gpio_set_value(GPIO_NR_PALMLD_IDE_PWEN, 0); gpio_free(GPIO_NR_PALMLD_IDE_RESET); gpio_free(GPIO_NR_PALMLD_IDE_PWEN); return 0; } static struct platform_driver palmld_pata_platform_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, .probe = palmld_pata_probe, .remove = __devexit_p(palmld_pata_remove), }; static int __init palmld_pata_init(void) { return platform_driver_register(&palmld_pata_platform_driver); } static void __exit palmld_pata_exit(void) { platform_driver_unregister(&palmld_pata_platform_driver); } MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>"); MODULE_DESCRIPTION("PalmLD PATA driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME); module_init(palmld_pata_init); module_exit(palmld_pata_exit);
gpl-2.0
VorkTeam/vorkKernel-DESIRE
drivers/ata/pata_palmld.c
1681
3531
/* * drivers/ata/pata_palmld.c * * Driver for IDE channel in Palm LifeDrive * * Based on research of: * Alex Osborne <ato@meshy.org> * * Rewrite for mainline: * Marek Vasut <marek.vasut@gmail.com> * * Rewritten version based on pata_ixp4xx_cf.c: * ixp4xx PATA/Compact Flash driver * Copyright (C) 2006-07 Tower Technologies * Author: Alessandro Zummo <a.zummo@towertech.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/libata.h> #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/gpio.h> #include <scsi/scsi_host.h> #include <mach/palmld.h> #define DRV_NAME "pata_palmld" static struct scsi_host_template palmld_sht = { ATA_PIO_SHT(DRV_NAME), }; static struct ata_port_operations palmld_port_ops = { .inherits = &ata_sff_port_ops, .sff_data_xfer = ata_sff_data_xfer_noirq, .cable_detect = ata_cable_40wire, }; static __devinit int palmld_pata_probe(struct platform_device *pdev) { struct ata_host *host; struct ata_port *ap; void __iomem *mem; int ret; /* allocate host */ host = ata_host_alloc(&pdev->dev, 1); if (!host) return -ENOMEM; /* remap drive's physical memory address */ mem = devm_ioremap(&pdev->dev, PALMLD_IDE_PHYS, 0x1000); if (!mem) return -ENOMEM; /* request and activate power GPIO, IRQ GPIO */ ret = gpio_request(GPIO_NR_PALMLD_IDE_PWEN, "HDD PWR"); if (ret) goto err1; ret = gpio_direction_output(GPIO_NR_PALMLD_IDE_PWEN, 1); if (ret) goto err2; ret = gpio_request(GPIO_NR_PALMLD_IDE_RESET, "HDD RST"); if (ret) goto err2; ret = gpio_direction_output(GPIO_NR_PALMLD_IDE_RESET, 0); if (ret) goto err3; /* reset the drive */ gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 0); msleep(30); gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 1); msleep(30); /* setup the ata port */ ap = host->ports[0]; ap->ops = &palmld_port_ops; 
ap->pio_mask = ATA_PIO4; ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY | ATA_FLAG_PIO_POLLING; /* memory mapping voodoo */ ap->ioaddr.cmd_addr = mem + 0x10; ap->ioaddr.altstatus_addr = mem + 0xe; ap->ioaddr.ctl_addr = mem + 0xe; /* start the port */ ata_sff_std_ports(&ap->ioaddr); /* activate host */ return ata_host_activate(host, 0, NULL, IRQF_TRIGGER_RISING, &palmld_sht); err3: gpio_free(GPIO_NR_PALMLD_IDE_RESET); err2: gpio_free(GPIO_NR_PALMLD_IDE_PWEN); err1: return ret; } static __devexit int palmld_pata_remove(struct platform_device *dev) { struct ata_host *host = platform_get_drvdata(dev); ata_host_detach(host); /* power down the HDD */ gpio_set_value(GPIO_NR_PALMLD_IDE_PWEN, 0); gpio_free(GPIO_NR_PALMLD_IDE_RESET); gpio_free(GPIO_NR_PALMLD_IDE_PWEN); return 0; } static struct platform_driver palmld_pata_platform_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, .probe = palmld_pata_probe, .remove = __devexit_p(palmld_pata_remove), }; static int __init palmld_pata_init(void) { return platform_driver_register(&palmld_pata_platform_driver); } static void __exit palmld_pata_exit(void) { platform_driver_unregister(&palmld_pata_platform_driver); } MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>"); MODULE_DESCRIPTION("PalmLD PATA driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME); module_init(palmld_pata_init); module_exit(palmld_pata_exit);
gpl-2.0
ffosilva/kernel
arch/s390/pci/pci_clp.c
1937
7533
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * s390 PCI: Call Logical Processor (CLP) firmware interface.  All
 * requests go through fixed-size request/response blocks handed to the
 * CLP instruction; callers are responsible for retrying busy responses.
 */
#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <asm/pci_debug.h>
#include <asm/pci_clp.h>

/*
 * Call Logical Processor
 * Retry logic is handled by the caller.
 */
/*
 * Issue the CLP instruction on a CLP_BLK_SIZE request/response block.
 * Returns the condition code extracted from the PSW (0 == success);
 * the block itself is updated in place by the firmware.
 */
static inline u8 clp_instr(void *data)
{
	struct { u8 _[CLP_BLK_SIZE]; } *req = data;
	u64 ignored;
	u8 cc;

	asm volatile (
		" .insn rrf,0xb9a00000,%[ign],%[req],0x0,0x2\n"
		" ipm %[cc]\n"
		" srl %[cc],28\n"
		: [cc] "=d" (cc), [ign] "=d" (ignored), "+m" (*req)
		: [req] "a" (req)
		: "cc");
	return cc;
}

/* Allocate one naturally-sized CLP request/response block. */
static void *clp_alloc_block(void)
{
	return (void *) __get_free_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE));
}

static void clp_free_block(void *ptr)
{
	free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
}

/*
 * Copy the "query PCI function group" response fields into the zdev.
 * NOTE(review): response->version 1 is mapped to 5.0 GT/s; other
 * versions are reported as unknown bus speed — presumably only v1
 * hardware existed when this was written; confirm for newer machines.
 */
static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
				      struct clp_rsp_query_pci_grp *response)
{
	zdev->tlb_refresh = response->refresh;
	zdev->dma_mask = response->dasm;
	zdev->msi_addr = response->msia;
	zdev->fmb_update = response->mui;

	pr_debug("Supported number of MSI vectors: %u\n", response->noi);
	switch (response->version) {
	case 1:
		zdev->max_bus_speed = PCIE_SPEED_5_0GT;
		break;
	default:
		zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
		break;
	}
}

/*
 * Query the function group identified by pfgid and store the result in
 * zdev.  Returns 0 on success, -ENOMEM on allocation failure, -EIO if
 * the firmware rejected the request.
 */
static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
{
	struct clp_req_rsp_query_pci_grp *rrb;
	int rc;

	rrb = clp_alloc_block();
	if (!rrb)
		return -ENOMEM;

	/* The block must be zeroed and the headers sized exactly. */
	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.pfgid = pfgid;

	rc = clp_instr(rrb);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
		clp_store_query_pci_fngrp(zdev, &rrb->response);
	else {
		pr_err("Query PCI FNGRP failed with response: %x cc: %d\n",
			rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}

/*
 * Copy the "query PCI function" response (BARs, DMA window, physical
 * channel id, function group id) into the zdev.  Always returns 0.
 */
static int clp_store_query_pci_fn(struct zpci_dev *zdev,
				  struct clp_rsp_query_pci *response)
{
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		zdev->bars[i].val = le32_to_cpu(response->bar[i]);
		zdev->bars[i].size = response->bar_size[i];
	}
	zdev->start_dma = response->sdma;
	zdev->end_dma = response->edma;
	zdev->pchid = response->pchid;
	zdev->pfgid = response->pfgid;
	return 0;
}

/*
 * Query the PCI function behind handle fh, then — if the function
 * belongs to a group — chain into a function-group query as well.
 */
static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
{
	struct clp_req_rsp_query_pci *rrb;
	int rc;

	rrb = clp_alloc_block();
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.fh = fh;

	rc = clp_instr(rrb);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		rc = clp_store_query_pci_fn(zdev, &rrb->response);
		if (rc)
			goto out;
		if (rrb->response.pfgid)
			rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
	} else {
		pr_err("Query PCI failed with response: %x cc: %d\n",
			rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
out:
	clp_free_block(rrb);
	return rc;
}

/*
 * Allocate a zpci_dev for the function (fid/fh), query its properties
 * from firmware and register it.  The zdev is freed again on any error.
 */
int clp_add_pci_device(u32 fid, u32 fh, int configured)
{
	struct zpci_dev *zdev;
	int rc;

	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
	zdev = zpci_alloc_device();
	if (IS_ERR(zdev))
		return PTR_ERR(zdev);

	zdev->fh = fh;
	zdev->fid = fid;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev, fh);
	if (rc)
		goto error;

	if (configured)
		zdev->state = ZPCI_FN_STATE_CONFIGURED;
	else
		zdev->state = ZPCI_FN_STATE_STANDBY;

	rc = zpci_create_device(zdev);
	if (rc)
		goto error;
	return 0;

error:
	zpci_free_device(zdev);
	return rc;
}

/*
 * Enable/Disable a given PCI function defined by its function handle.
 */
/*
 * Issue SET PCI FUNCTION with operation `command`, retrying up to 1000
 * times (1 ms apart) while the firmware reports BUSY.  On success *fh
 * is replaced with the new handle returned by the firmware.
 */
static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
{
	struct clp_req_rsp_set_pci *rrb;
	int rc, retries = 1000;

	rrb = clp_alloc_block();
	if (!rrb)
		return -ENOMEM;

	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_SET_PCI_FN;
		rrb->response.hdr.len = sizeof(rrb->response);
		rrb->request.fh = *fh;
		rrb->request.oc = command;
		rrb->request.ndas = nr_dma_as;

		rc = clp_instr(rrb);
		if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
			retries--;
			if (retries < 0)
				break;
			msleep(1);
		}
	} while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);

	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
		*fh = rrb->response.fh;
	else {
		zpci_dbg(0, "SPF fh:%x, cc:%d, resp:%x\n", *fh, rc,
			 rrb->response.hdr.rsp);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}

/* Enable the function and store the new (enabled) handle in zdev. */
int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
{
	u32 fh = zdev->fh;
	int rc;

	rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
	if (!rc)
		/* Success -> store enabled handle in zdev */
		zdev->fh = fh;

	zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
	return rc;
}

/* Disable the function; a no-op if it is not currently enabled. */
int clp_disable_fh(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int rc;

	if (!zdev_enabled(zdev))
		return 0;

	dev_info(&zdev->pdev->dev, "disabling fn handle: 0x%x\n", fh);
	rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
	if (!rc)
		/* Success -> store disabled handle in zdev */
		zdev->fh = fh;

	zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
	return rc;
}

/*
 * Process one entry from the firmware's function list: skip functions
 * already in use, tear down functions that moved to stand-by, and add
 * any newly discovered function.
 */
static void clp_check_pcifn_entry(struct clp_fh_list_entry *entry)
{
	int present, rc;

	if (!entry->vendor_id)
		return;

	/* TODO: be a little bit more scalable */
	present = zpci_fid_present(entry->fid);

	if (present)
		pr_debug("%s: device %x already present\n", __func__,
			 entry->fid);

	/* skip already used functions */
	if (present && entry->config_state)
		return;

	/* aev 306: function moved to stand-by state */
	if (present && !entry->config_state) {
		/*
		 * The handle is already disabled, that means no iota/irq freeing via
		 * the firmware interfaces anymore. Need to free resources manually
		 * (DMA memory, debug, sysfs)...
		 */
		zpci_stop_device(get_zdev_by_fid(entry->fid));
		return;
	}

	rc = clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
	if (rc)
		pr_err("Failed to add fid: 0x%x\n", entry->fid);
}

/*
 * Enumerate all PCI functions known to the firmware via LIST PCI,
 * following the resume token until the list is exhausted, and feed
 * each entry to clp_check_pcifn_entry().
 */
int clp_find_pci_devices(void)
{
	struct clp_req_rsp_list_pci *rrb;
	u64 resume_token = 0;
	int entries, i, rc;

	rrb = clp_alloc_block();
	if (!rrb)
		return -ENOMEM;

	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_LIST_PCI;
		/* store as many entries as possible */
		rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
		rrb->request.resume_token = resume_token;

		/* Get PCI function handle list */
		rc = clp_instr(rrb);
		if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
			pr_err("List PCI failed with response: 0x%x cc: %d\n",
				rrb->response.hdr.rsp, rc);
			rc = -EIO;
			goto out;
		}

		WARN_ON_ONCE(rrb->response.entry_size !=
			sizeof(struct clp_fh_list_entry));

		entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
			rrb->response.entry_size;
		pr_info("Detected number of PCI functions: %u\n", entries);

		/* Store the returned resume token as input for the next call */
		resume_token = rrb->response.resume_token;

		for (i = 0; i < entries; i++)
			clp_check_pcifn_entry(&rrb->response.fh_list[i]);
	} while (resume_token);

	pr_debug("Maximum number of supported PCI functions: %u\n",
		rrb->response.max_fn);
out:
	clp_free_block(rrb);
	return rc;
}
gpl-2.0
1N4148/kernel_golden
arch/mips/kernel/i8259.c
2193
9238
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000  Ralf Baechle
 */
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <linux/irq.h>

#include <asm/i8259.h>
#include <asm/io.h>

/*
 * This is the 'legacy' 8259A Programmable Interrupt Controller,
 * present in the majority of PC/AT boxes.
 * plus some generic x86 specific things if generic specifics makes
 * any sense at all.
 * this file should become arch/i386/kernel/irq.c when the old irq.c
 * moves to arch independent land
 */

/* -1 means init_8259A() has not run yet (guards resume/shutdown). */
static int i8259A_auto_eoi = -1;
DEFINE_RAW_SPINLOCK(i8259A_lock);
static void disable_8259A_irq(struct irq_data *d);
static void enable_8259A_irq(struct irq_data *d);
static void mask_and_ack_8259A(struct irq_data *d);
static void init_8259A(int auto_eoi);

/* irq_mask_ack is patched in init_8259A() depending on AEOI mode. */
static struct irq_chip i8259A_chip = {
	.name			= "XT-PIC",
	.irq_mask		= disable_8259A_irq,
	.irq_disable		= disable_8259A_irq,
	.irq_unmask		= enable_8259A_irq,
	.irq_mask_ack		= mask_and_ack_8259A,
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
	.irq_set_affinity	= plat_set_irq_affinity,
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
};

/*
 * 8259A PIC functions to handle ISA devices:
 */

/*
 * This contains the irq mask for both 8259A irq controllers,
 * low byte = master PIC (IRQ0-7), high byte = slave PIC (IRQ8-15).
 * A set bit means the IRQ is masked.
 */
static unsigned int cached_irq_mask = 0xffff;

#define cached_master_mask	(cached_irq_mask)
#define cached_slave_mask	(cached_irq_mask >> 8)

/* Mask one IRQ line in the cached mask and push it to the right PIC. */
static void disable_8259A_irq(struct irq_data *d)
{
	unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
	unsigned long flags;

	mask = 1 << irq;
	raw_spin_lock_irqsave(&i8259A_lock, flags);
	cached_irq_mask |= mask;
	if (irq & 8)
		outb(cached_slave_mask, PIC_SLAVE_IMR);
	else
		outb(cached_master_mask, PIC_MASTER_IMR);
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

/* Unmask one IRQ line and push the updated mask to the right PIC. */
static void enable_8259A_irq(struct irq_data *d)
{
	unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
	unsigned long flags;

	mask = ~(1 << irq);
	raw_spin_lock_irqsave(&i8259A_lock, flags);
	cached_irq_mask &= mask;
	if (irq & 8)
		outb(cached_slave_mask, PIC_SLAVE_IMR);
	else
		outb(cached_master_mask, PIC_MASTER_IMR);
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

/*
 * Return non-zero if the IRQ is pending in the PIC's request register
 * (reads the IRR, which is what the command port returns by default).
 */
int i8259A_irq_pending(unsigned int irq)
{
	unsigned int mask;
	unsigned long flags;
	int ret;

	irq -= I8259A_IRQ_BASE;
	mask = 1 << irq;
	raw_spin_lock_irqsave(&i8259A_lock, flags);
	if (irq < 8)
		ret = inb(PIC_MASTER_CMD) & mask;
	else
		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	return ret;
}

/* Rebind an existing irq number to the i8259A chip/handler. */
void make_8259A_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
	enable_irq(irq);
}

/*
 * This function assumes to be called rarely. Switching between
 * 8259A registers is slow.
 * This has to be protected by the irq controller spinlock
 * before being called.
 */
static inline int i8259A_irq_real(unsigned int irq)
{
	int value;
	int irqmask = 1 << irq;

	if (irq < 8) {
		outb(0x0B, PIC_MASTER_CMD);	/* ISR register */
		value = inb(PIC_MASTER_CMD) & irqmask;
		outb(0x0A, PIC_MASTER_CMD);	/* back to the IRR register */
		return value;
	}
	outb(0x0B, PIC_SLAVE_CMD);	/* ISR register */
	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
	outb(0x0A, PIC_SLAVE_CMD);	/* back to the IRR register */
	return value;
}

/*
 * Careful! The 8259A is a fragile beast, it pretty
 * much _has_ to be done exactly like this (mask it
 * first, _then_ send the EOI, and the order of EOI
 * to the two 8259s is important!
 */
static void mask_and_ack_8259A(struct irq_data *d)
{
	unsigned int irqmask, irq = d->irq - I8259A_IRQ_BASE;
	unsigned long flags;

	irqmask = 1 << irq;
	raw_spin_lock_irqsave(&i8259A_lock, flags);
	/*
	 * Lightweight spurious IRQ detection. We do not want
	 * to overdo spurious IRQ handling - it's usually a sign
	 * of hardware problems, so we only do the checks we can
	 * do without slowing down good hardware unnecessarily.
	 *
	 * Note that IRQ7 and IRQ15 (the two spurious IRQs
	 * usually resulting from the 8259A-1|2 PICs) occur
	 * even if the IRQ is masked in the 8259A. Thus we
	 * can check spurious 8259A IRQs without doing the
	 * quite slow i8259A_irq_real() call for every IRQ.
	 * This does not cover 100% of spurious interrupts,
	 * but should be enough to warn the user that there
	 * is something bad going on ...
	 */
	if (cached_irq_mask & irqmask)
		goto spurious_8259A_irq;
	cached_irq_mask |= irqmask;

handle_real_irq:
	if (irq & 8) {
		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
		outb(cached_slave_mask, PIC_SLAVE_IMR);
		outb(0x60+(irq&7), PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
		outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
	} else {
		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
		outb(cached_master_mask, PIC_MASTER_IMR);
		outb(0x60+irq, PIC_MASTER_CMD);	/* 'Specific EOI' to master */
	}
	smtc_im_ack_irq(irq);
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
	return;

spurious_8259A_irq:
	/*
	 * this is the slow path - should happen rarely.
	 */
	if (i8259A_irq_real(irq))
		/*
		 * oops, the IRQ _is_ in service according to the
		 * 8259A - not spurious, go handle it.
		 */
		goto handle_real_irq;

	{
		static int spurious_irq_mask;
		/*
		 * At this point we can be sure the IRQ is spurious,
		 * lets ACK and report it. [once per IRQ]
		 */
		if (!(spurious_irq_mask & irqmask)) {
			printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
			spurious_irq_mask |= irqmask;
		}
		atomic_inc(&irq_err_count);
		/*
		 * Theoretically we do not have to handle this IRQ,
		 * but in Linux this does not cause problems and is
		 * simpler for us.
		 */
		goto handle_real_irq;
	}
}

/* Re-initialize the PICs after a suspend, with the saved AEOI mode. */
static void i8259A_resume(void)
{
	if (i8259A_auto_eoi >= 0)
		init_8259A(i8259A_auto_eoi);
}

static void i8259A_shutdown(void)
{
	/* Put the i8259A into a quiescent state that
	 * the kernel initialization code can get it
	 * out of.
	 */
	if (i8259A_auto_eoi >= 0) {
		outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
		outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
	}
}

static struct syscore_ops i8259_syscore_ops = {
	.resume = i8259A_resume,
	.shutdown = i8259A_shutdown,
};

static int __init i8259A_init_sysfs(void)
{
	register_syscore_ops(&i8259_syscore_ops);
	return 0;
}

device_initcall(i8259A_init_sysfs);

/*
 * Run the full ICW1-ICW4 initialization sequence on both PICs.
 * Must not be interrupted: the whole sequence runs under i8259A_lock
 * with both PICs fully masked, and the cached mask is restored at the
 * end.
 */
static void init_8259A(int auto_eoi)
{
	unsigned long flags;

	i8259A_auto_eoi = auto_eoi;

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */

	/*
	 * outb_p - this has to work on a wide range of PC hardware.
	 */
	outb_p(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
	outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR);	/* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */
	outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* 8259A-1 (the master) has a slave on IR2 */
	if (auto_eoi)	/* master does Auto EOI */
		outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
	else		/* master expects normal EOI */
		outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);

	outb_p(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
	outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR);	/* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */
	outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* 8259A-2 is a slave on master's IR2 */
	outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
	if (auto_eoi)
		/*
		 * In AEOI mode we just have to mask the interrupt
		 * when acking.
		 */
		i8259A_chip.irq_mask_ack = disable_8259A_irq;
	else
		i8259A_chip.irq_mask_ack = mask_and_ack_8259A;

	udelay(100);		/* wait for 8259A to initialize */

	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
	outb(cached_slave_mask, PIC_SLAVE_IMR);	/* restore slave IRQ mask */

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

/*
 * IRQ2 is cascade interrupt to second interrupt controller
 */
static struct irqaction irq2 = {
	.handler = no_action,
	.name = "cascade",
};

static struct resource pic1_io_resource = {
	.name	= "pic1",
	.start	= PIC_MASTER_CMD,
	.end	= PIC_MASTER_IMR,
	.flags	= IORESOURCE_BUSY
};

static struct resource pic2_io_resource = {
	.name	= "pic2",
	.start	= PIC_SLAVE_CMD,
	.end	= PIC_SLAVE_IMR,
	.flags	= IORESOURCE_BUSY
};

/*
 * On systems with i8259-style interrupt controllers we assume for
 * driver compatibility reasons interrupts 0 - 15 to be the i8259
 * interrupts even if the hardware uses a different interrupt numbering.
 */
void __init init_i8259_irqs(void)
{
	int i;

	insert_resource(&ioport_resource, &pic1_io_resource);
	insert_resource(&ioport_resource, &pic2_io_resource);

	init_8259A(0);

	for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) {
		irq_set_chip_and_handler(i, &i8259A_chip, handle_level_irq);
		irq_set_probe(i);
	}

	setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
}
gpl-2.0
deafnote/kernel-gigabyte-rior1
arch/x86/xen/spinlock.c
2449
10779
/* * Split spinlock implementation out into its own file, so it can be * compiled in a FTRACE-compatible way. */ #include <linux/kernel_stat.h> #include <linux/spinlock.h> #include <linux/debugfs.h> #include <linux/log2.h> #include <linux/gfp.h> #include <asm/paravirt.h> #include <xen/interface/xen.h> #include <xen/events.h> #include "xen-ops.h" #include "debugfs.h" #ifdef CONFIG_XEN_DEBUG_FS static struct xen_spinlock_stats { u64 taken; u32 taken_slow; u32 taken_slow_nested; u32 taken_slow_pickup; u32 taken_slow_spurious; u32 taken_slow_irqenable; u64 released; u32 released_slow; u32 released_slow_kicked; #define HISTO_BUCKETS 30 u32 histo_spin_total[HISTO_BUCKETS+1]; u32 histo_spin_spinning[HISTO_BUCKETS+1]; u32 histo_spin_blocked[HISTO_BUCKETS+1]; u64 time_total; u64 time_spinning; u64 time_blocked; } spinlock_stats; static u8 zero_stats; static unsigned lock_timeout = 1 << 10; #define TIMEOUT lock_timeout static inline void check_zero(void) { if (unlikely(zero_stats)) { memset(&spinlock_stats, 0, sizeof(spinlock_stats)); zero_stats = 0; } } #define ADD_STATS(elem, val) \ do { check_zero(); spinlock_stats.elem += (val); } while(0) static inline u64 spin_time_start(void) { return xen_clocksource_read(); } static void __spin_time_accum(u64 delta, u32 *array) { unsigned index = ilog2(delta); check_zero(); if (index < HISTO_BUCKETS) array[index]++; else array[HISTO_BUCKETS]++; } static inline void spin_time_accum_spinning(u64 start) { u32 delta = xen_clocksource_read() - start; __spin_time_accum(delta, spinlock_stats.histo_spin_spinning); spinlock_stats.time_spinning += delta; } static inline void spin_time_accum_total(u64 start) { u32 delta = xen_clocksource_read() - start; __spin_time_accum(delta, spinlock_stats.histo_spin_total); spinlock_stats.time_total += delta; } static inline void spin_time_accum_blocked(u64 start) { u32 delta = xen_clocksource_read() - start; __spin_time_accum(delta, spinlock_stats.histo_spin_blocked); spinlock_stats.time_blocked += delta; 
} #else /* !CONFIG_XEN_DEBUG_FS */ #define TIMEOUT (1 << 10) #define ADD_STATS(elem, val) do { (void)(val); } while(0) static inline u64 spin_time_start(void) { return 0; } static inline void spin_time_accum_total(u64 start) { } static inline void spin_time_accum_spinning(u64 start) { } static inline void spin_time_accum_blocked(u64 start) { } #endif /* CONFIG_XEN_DEBUG_FS */ struct xen_spinlock { unsigned char lock; /* 0 -> free; 1 -> locked */ unsigned short spinners; /* count of waiting cpus */ }; static int xen_spin_is_locked(struct arch_spinlock *lock) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; return xl->lock != 0; } static int xen_spin_is_contended(struct arch_spinlock *lock) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; /* Not strictly true; this is only the count of contended lock-takers entering the slow path. */ return xl->spinners != 0; } static int xen_spin_trylock(struct arch_spinlock *lock) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; u8 old = 1; asm("xchgb %b0,%1" : "+q" (old), "+m" (xl->lock) : : "memory"); return old == 0; } static DEFINE_PER_CPU(int, lock_kicker_irq) = -1; static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners); /* * Mark a cpu as interested in a lock. Returns the CPU's previous * lock of interest, in case we got preempted by an interrupt. */ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl) { struct xen_spinlock *prev; prev = __this_cpu_read(lock_spinners); __this_cpu_write(lock_spinners, xl); wmb(); /* set lock of interest before count */ asm(LOCK_PREFIX " incw %0" : "+m" (xl->spinners) : : "memory"); return prev; } /* * Mark a cpu as no longer interested in a lock. Restores previous * lock of interest (NULL for none). 
*/ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev) { asm(LOCK_PREFIX " decw %0" : "+m" (xl->spinners) : : "memory"); wmb(); /* decrement count before restoring lock */ __this_cpu_write(lock_spinners, prev); } static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; struct xen_spinlock *prev; int irq = __this_cpu_read(lock_kicker_irq); int ret; u64 start; /* If kicker interrupts not initialized yet, just spin */ if (irq == -1) return 0; start = spin_time_start(); /* announce we're spinning */ prev = spinning_lock(xl); ADD_STATS(taken_slow, 1); ADD_STATS(taken_slow_nested, prev != NULL); do { unsigned long flags; /* clear pending */ xen_clear_irq_pending(irq); /* check again make sure it didn't become free while we weren't looking */ ret = xen_spin_trylock(lock); if (ret) { ADD_STATS(taken_slow_pickup, 1); /* * If we interrupted another spinlock while it * was blocking, make sure it doesn't block * without rechecking the lock. */ if (prev != NULL) xen_set_irq_pending(irq); goto out; } flags = arch_local_save_flags(); if (irq_enable) { ADD_STATS(taken_slow_irqenable, 1); raw_local_irq_enable(); } /* * Block until irq becomes pending. If we're * interrupted at this point (after the trylock but * before entering the block), then the nested lock * handler guarantees that the irq will be left * pending if there's any chance the lock became free; * xen_poll_irq() returns immediately if the irq is * pending. 
*/ xen_poll_irq(irq); raw_local_irq_restore(flags); ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq)); } while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */ kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); out: unspinning_lock(xl, prev); spin_time_accum_blocked(start); return ret; } static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; unsigned timeout; u8 oldval; u64 start_spin; ADD_STATS(taken, 1); start_spin = spin_time_start(); do { u64 start_spin_fast = spin_time_start(); timeout = TIMEOUT; asm("1: xchgb %1,%0\n" " testb %1,%1\n" " jz 3f\n" "2: rep;nop\n" " cmpb $0,%0\n" " je 1b\n" " dec %2\n" " jnz 2b\n" "3:\n" : "+m" (xl->lock), "=q" (oldval), "+r" (timeout) : "1" (1) : "memory"); spin_time_accum_spinning(start_spin_fast); } while (unlikely(oldval != 0 && (TIMEOUT == ~0 || !xen_spin_lock_slow(lock, irq_enable)))); spin_time_accum_total(start_spin); } static void xen_spin_lock(struct arch_spinlock *lock) { __xen_spin_lock(lock, false); } static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags) { __xen_spin_lock(lock, !raw_irqs_disabled_flags(flags)); } static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl) { int cpu; ADD_STATS(released_slow, 1); for_each_online_cpu(cpu) { /* XXX should mix up next cpu selection */ if (per_cpu(lock_spinners, cpu) == xl) { ADD_STATS(released_slow_kicked, 1); xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); break; } } } static void xen_spin_unlock(struct arch_spinlock *lock) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; ADD_STATS(released, 1); smp_wmb(); /* make sure no writes get moved after unlock */ xl->lock = 0; /* release lock */ /* * Make sure unlock happens before checking for waiting * spinners. We need a strong barrier to enforce the * write-read ordering to different memory locations, as the * CPU makes no implied guarantees about their ordering. 
*/ mb(); if (unlikely(xl->spinners)) xen_spin_unlock_slow(xl); } static irqreturn_t dummy_handler(int irq, void *dev_id) { BUG(); return IRQ_HANDLED; } void __cpuinit xen_init_lock_cpu(int cpu) { int irq; const char *name; name = kasprintf(GFP_KERNEL, "spinlock%d", cpu); irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR, cpu, dummy_handler, IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING, name, NULL); if (irq >= 0) { disable_irq(irq); /* make sure it's never delivered */ per_cpu(lock_kicker_irq, cpu) = irq; } printk("cpu %d spinlock event irq %d\n", cpu, irq); } void xen_uninit_lock_cpu(int cpu) { unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL); } void __init xen_init_spinlocks(void) { pv_lock_ops.spin_is_locked = xen_spin_is_locked; pv_lock_ops.spin_is_contended = xen_spin_is_contended; pv_lock_ops.spin_lock = xen_spin_lock; pv_lock_ops.spin_lock_flags = xen_spin_lock_flags; pv_lock_ops.spin_trylock = xen_spin_trylock; pv_lock_ops.spin_unlock = xen_spin_unlock; } #ifdef CONFIG_XEN_DEBUG_FS static struct dentry *d_spin_debug; static int __init xen_spinlock_debugfs(void) { struct dentry *d_xen = xen_init_debugfs(); if (d_xen == NULL) return -ENOMEM; d_spin_debug = debugfs_create_dir("spinlocks", d_xen); debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats); debugfs_create_u32("timeout", 0644, d_spin_debug, &lock_timeout); debugfs_create_u64("taken", 0444, d_spin_debug, &spinlock_stats.taken); debugfs_create_u32("taken_slow", 0444, d_spin_debug, &spinlock_stats.taken_slow); debugfs_create_u32("taken_slow_nested", 0444, d_spin_debug, &spinlock_stats.taken_slow_nested); debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug, &spinlock_stats.taken_slow_pickup); debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug, &spinlock_stats.taken_slow_spurious); debugfs_create_u32("taken_slow_irqenable", 0444, d_spin_debug, &spinlock_stats.taken_slow_irqenable); debugfs_create_u64("released", 0444, d_spin_debug, &spinlock_stats.released); 
debugfs_create_u32("released_slow", 0444, d_spin_debug, &spinlock_stats.released_slow); debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug, &spinlock_stats.released_slow_kicked); debugfs_create_u64("time_spinning", 0444, d_spin_debug, &spinlock_stats.time_spinning); debugfs_create_u64("time_blocked", 0444, d_spin_debug, &spinlock_stats.time_blocked); debugfs_create_u64("time_total", 0444, d_spin_debug, &spinlock_stats.time_total); xen_debugfs_create_u32_array("histo_total", 0444, d_spin_debug, spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1); xen_debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug, spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1); xen_debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug, spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1); return 0; } fs_initcall(xen_spinlock_debugfs); #endif /* CONFIG_XEN_DEBUG_FS */
gpl-2.0
psachin/apc-rock-II-kernel
drivers/tty/hvc/hvc_console.c
3985
23665
/*
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2004 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
 * Copyright (C) 2004 IBM Corporation
 *
 * Additional Author(s):
 *  Ryan S. Arnold <rsa@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/console.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kbd_kern.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/major.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/slab.h>
#include <linux/serial_core.h>

#include <asm/uaccess.h>

#include "hvc_console.h"

#define HVC_MAJOR	229
#define HVC_MINOR	0

/*
 * Wait this long per iteration while trying to push buffered data to the
 * hypervisor before allowing the tty to complete a close operation.
 */
#define HVC_CLOSE_WAIT (HZ/100) /* 1/100 of a second */

/*
 * These sizes are most efficient for vio, because they are the
 * native transfer size. We could make them selectable in the
 * future to better deal with backends that want other buffer sizes.
 */
#define N_OUTBUF	16
#define N_INBUF		16

#define __ALIGNED__ __attribute__((__aligned__(sizeof(long))))

static struct tty_driver *hvc_driver;
static struct task_struct *hvc_task;

/* Picks up late kicks after list walk but before schedule() */
static int hvc_kicked;

static int hvc_init(void);

#ifdef CONFIG_MAGIC_SYSRQ
static int sysrq_pressed;
#endif

/* dynamic list of hvc_struct instances */
static LIST_HEAD(hvc_structs);

/*
 * Protect the list of hvc_struct instances from inserts and removals during
 * list traversal.
 */
static DEFINE_SPINLOCK(hvc_structs_lock);

/*
 * This value is used to assign a tty->index value to a hvc_struct based
 * upon order of exposure via hvc_probe(), when we can not match it to
 * a console candidate registered with hvc_instantiate().
 */
static int last_hvc = -1;

/*
 * Do not call this function with either the hvc_structs_lock or the hvc_struct
 * lock held.  If successful, this function increments the kref reference
 * count against the target hvc_struct so it should be released when finished.
 */
static struct hvc_struct *hvc_get_by_index(int index)
{
	struct hvc_struct *hp;
	unsigned long flags;

	spin_lock(&hvc_structs_lock);

	list_for_each_entry(hp, &hvc_structs, next) {
		spin_lock_irqsave(&hp->lock, flags);
		if (hp->index == index) {
			kref_get(&hp->kref);
			spin_unlock_irqrestore(&hp->lock, flags);
			spin_unlock(&hvc_structs_lock);
			return hp;
		}
		spin_unlock_irqrestore(&hp->lock, flags);
	}
	hp = NULL;

	spin_unlock(&hvc_structs_lock);
	return hp;
}


/*
 * Initial console vtermnos for console API usage prior to full console
 * initialization.  Any vty adapter outside this range will not have usable
 * console interfaces but can still be used as a tty device.  This has to be
 * static because kmalloc will not work during early console init.
 */
static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
	{[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};

/*
 * Console APIs, NOT TTY.  These APIs are available immediately when
 * hvc_console_setup() finds adapters.
 */

/*
 * console->write hook: copy @b into a small bounce buffer, translating
 * '\n' to "\r\n", and hand it to the backend's put_chars.  On -EAGAIN
 * the buffer is retried; any other error discards it.
 */
static void hvc_console_print(struct console *co, const char *b,
			      unsigned count)
{
	char c[N_OUTBUF] __ALIGNED__;
	unsigned i = 0, n = 0;
	int r, donecr = 0, index = co->index;

	/* Console access attempt outside of acceptable console range. */
	if (index >= MAX_NR_HVC_CONSOLES)
		return;

	/* This console adapter was removed so it is not usable. */
	if (vtermnos[index] == -1)
		return;

	while (count > 0 || i > 0) {
		if (count > 0 && i < sizeof(c)) {
			if (b[n] == '\n' && !donecr) {
				c[i++] = '\r';
				donecr = 1;
			} else {
				c[i++] = b[n++];
				donecr = 0;
				--count;
			}
		} else {
			r = cons_ops[index]->put_chars(vtermnos[index], c, i);
			if (r <= 0) {
				/* throw away characters on error
				 * but spin in case of -EAGAIN */
				if (r != -EAGAIN)
					i = 0;
			} else if (r > 0) {
				i -= r;
				if (i > 0)
					memmove(c, c+r, i);
			}
		}
	}
}

/* console->device hook: report the tty driver backing console @c. */
static struct tty_driver *hvc_console_device(struct console *c, int *index)
{
	if (vtermnos[c->index] == -1)
		return NULL;

	*index = c->index;
	return hvc_driver;
}

/* console->setup hook: accept only indices with a registered vterm. */
static int __init hvc_console_setup(struct console *co, char *options)
{
	if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES)
		return -ENODEV;

	if (vtermnos[co->index] == -1)
		return -ENODEV;

	return 0;
}

static struct console hvc_console = {
	.name		= "hvc",
	.write		= hvc_console_print,
	.device		= hvc_console_device,
	.setup		= hvc_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};

/*
 * Early console initialization.  Precedes driver initialization.
 *
 * (1) we are first, and the user specified another driver
 * -- index will remain -1
 * (2) we are first and the user specified no driver
 * -- index will be set to 0, then we will fail setup.
 * (3) we are first and the user specified our driver
 * -- index will be set to user specified driver, and we will fail
 * (4) we are after driver, and this initcall will register us
 * -- if the user didn't specify a driver then the console will match
 *
 * Note that for cases 2 and 3, we will match later when the io driver
 * calls hvc_instantiate() and call register again.
 */
static int __init hvc_console_init(void)
{
	register_console(&hvc_console);
	return 0;
}
console_initcall(hvc_console_init);

/* callback when the kobject ref count reaches zero. */
static void destroy_hvc_struct(struct kref *kref)
{
	struct hvc_struct *hp = container_of(kref, struct hvc_struct, kref);
	unsigned long flags;

	spin_lock(&hvc_structs_lock);

	spin_lock_irqsave(&hp->lock, flags);
	list_del(&(hp->next));
	spin_unlock_irqrestore(&hp->lock, flags);

	spin_unlock(&hvc_structs_lock);

	kfree(hp);
}

/*
 * hvc_instantiate() is an early console discovery method which locates
 * consoles prior to the vio subsystem discovering them.  Hotplugged
 * vty adapters do NOT get an hvc_instantiate() callback since they
 * appear after early console init.
 */
int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
{
	struct hvc_struct *hp;

	if (index < 0 || index >= MAX_NR_HVC_CONSOLES)
		return -1;

	if (vtermnos[index] != -1)
		return -1;

	/* make sure no tty has been registered in this index */
	hp = hvc_get_by_index(index);
	if (hp) {
		kref_put(&hp->kref, destroy_hvc_struct);
		return -1;
	}

	vtermnos[index] = vtermno;
	cons_ops[index] = ops;

	/* reserve all indices up to and including this index */
	if (last_hvc < index)
		last_hvc = index;

	/* if this index is what the user requested, then register
	 * now (setup won't fail at this point).  It's ok to just
	 * call register again if previously .setup failed.
	 */
	if (index == hvc_console.index)
		register_console(&hvc_console);

	return 0;
}
EXPORT_SYMBOL_GPL(hvc_instantiate);

/* Wake the sleeping khvcd */
void hvc_kick(void)
{
	hvc_kicked = 1;
	wake_up_process(hvc_task);
}
EXPORT_SYMBOL_GPL(hvc_kick);

/* tty->unthrottle hook: poll again now that the flip buffer has room. */
static void hvc_unthrottle(struct tty_struct *tty)
{
	hvc_kick();
}

/*
 * The TTY interface won't be used until after the vio layer has exposed the vty
 * adapter to the kernel.
 */
static int hvc_open(struct tty_struct *tty, struct file * filp)
{
	struct hvc_struct *hp;
	unsigned long flags;
	int rc = 0;

	/* Auto increments kref reference if found. */
	if (!(hp = hvc_get_by_index(tty->index)))
		return -ENODEV;

	spin_lock_irqsave(&hp->lock, flags);
	/* Check and then increment for fast path open. */
	if (hp->count++ > 0) {
		tty_kref_get(tty);
		spin_unlock_irqrestore(&hp->lock, flags);
		hvc_kick();
		return 0;
	} /* else count == 0 */

	tty->driver_data = hp;

	hp->tty = tty_kref_get(tty);

	spin_unlock_irqrestore(&hp->lock, flags);

	if (hp->ops->notifier_add)
		rc = hp->ops->notifier_add(hp, hp->data);

	/*
	 * If the notifier fails we return an error.  The tty layer
	 * will call hvc_close() after a failed open but we don't want to clean
	 * up there so we'll clean up here and clear out the previously set
	 * tty fields and return the kref reference.
	 */
	if (rc) {
		spin_lock_irqsave(&hp->lock, flags);
		hp->tty = NULL;
		spin_unlock_irqrestore(&hp->lock, flags);
		tty_kref_put(tty);
		tty->driver_data = NULL;
		kref_put(&hp->kref, destroy_hvc_struct);
		printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc);
	}
	/* Force wakeup of the polling thread */
	hvc_kick();

	return rc;
}

/*
 * tty->close hook: on the final close, detach the tty, run the backend
 * notifier_del, and drain any buffered output before returning.
 */
static void hvc_close(struct tty_struct *tty, struct file * filp)
{
	struct hvc_struct *hp;
	unsigned long flags;

	if (tty_hung_up_p(filp))
		return;

	/*
	 * No driver_data means that this close was issued after a failed
	 * hvc_open by the tty layer's release_dev() function and we can just
	 * exit cleanly because the kref reference wasn't made.
	 */
	if (!tty->driver_data)
		return;

	hp = tty->driver_data;

	spin_lock_irqsave(&hp->lock, flags);

	if (--hp->count == 0) {
		/* We are done with the tty pointer now. */
		hp->tty = NULL;
		spin_unlock_irqrestore(&hp->lock, flags);

		if (hp->ops->notifier_del)
			hp->ops->notifier_del(hp, hp->data);

		/* cancel pending tty resize work */
		cancel_work_sync(&hp->tty_resize);

		/*
		 * Chain calls chars_in_buffer() and returns immediately if
		 * there is no buffered data otherwise sleeps on a wait queue
		 * waking periodically to check chars_in_buffer().
		 */
		tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
	} else {
		if (hp->count < 0)
			printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
				hp->vtermno, hp->count);
		spin_unlock_irqrestore(&hp->lock, flags);
	}

	tty_kref_put(tty);
	kref_put(&hp->kref, destroy_hvc_struct);
}

/*
 * tty->hangup hook: drop every open reference at once, discarding any
 * buffered output, then notify the backend.
 */
static void hvc_hangup(struct tty_struct *tty)
{
	struct hvc_struct *hp = tty->driver_data;
	unsigned long flags;
	int temp_open_count;

	if (!hp)
		return;

	/* cancel pending tty resize work */
	cancel_work_sync(&hp->tty_resize);

	spin_lock_irqsave(&hp->lock, flags);

	/*
	 * The N_TTY line discipline has problems such that in a close vs
	 * open->hangup case this can be called after the final close so prevent
	 * that from happening for now.
	 */
	if (hp->count <= 0) {
		spin_unlock_irqrestore(&hp->lock, flags);
		return;
	}

	temp_open_count = hp->count;
	hp->count = 0;
	hp->n_outbuf = 0;
	hp->tty = NULL;

	spin_unlock_irqrestore(&hp->lock, flags);

	if (hp->ops->notifier_hangup)
		hp->ops->notifier_hangup(hp, hp->data);

	while(temp_open_count) {
		--temp_open_count;
		tty_kref_put(tty);
		kref_put(&hp->kref, destroy_hvc_struct);
	}
}

/*
 * Push buffered characters whether they were just recently buffered or waiting
 * on a blocked hypervisor.  Call this function with hp->lock held.
 */
static int hvc_push(struct hvc_struct *hp)
{
	int n;

	n = hp->ops->put_chars(hp->vtermno, hp->outbuf, hp->n_outbuf);
	if (n <= 0) {
		if (n == 0 || n == -EAGAIN) {
			hp->do_wakeup = 1;
			return 0;
		}
		/* throw away output on error; this happens when
		   there is no session connected to the vterm. */
		hp->n_outbuf = 0;
	} else
		hp->n_outbuf -= n;
	if (hp->n_outbuf > 0)
		memmove(hp->outbuf, hp->outbuf + n, hp->n_outbuf);
	else
		hp->do_wakeup = 1;

	return n;
}

/*
 * tty->write hook: copy as much of @buf as fits into hp->outbuf,
 * pushing to the backend as we go.  Returns the number of bytes
 * accepted (written OR buffered), per the write_room() contract below.
 */
static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	struct hvc_struct *hp = tty->driver_data;
	unsigned long flags;
	int rsize, written = 0;

	/* This write was probably executed during a tty close. */
	if (!hp)
		return -EPIPE;

	if (hp->count <= 0)
		return -EIO;

	spin_lock_irqsave(&hp->lock, flags);

	/* Push pending writes */
	if (hp->n_outbuf > 0)
		hvc_push(hp);

	while (count > 0 && (rsize = hp->outbuf_size - hp->n_outbuf) > 0) {
		if (rsize > count)
			rsize = count;
		memcpy(hp->outbuf + hp->n_outbuf, buf, rsize);
		count -= rsize;
		buf += rsize;
		hp->n_outbuf += rsize;
		written += rsize;
		hvc_push(hp);
	}
	spin_unlock_irqrestore(&hp->lock, flags);

	/*
	 * Racy, but harmless, kick thread if there is still pending data.
	 */
	if (hp->n_outbuf)
		hvc_kick();

	return written;
}

/**
 * hvc_set_winsz() - Resize the hvc tty terminal window.
 * @work:	work structure.
 *
 * The routine shall not be called within an atomic context because it
 * might sleep.
 *
 * Locking:	hp->lock
 */
static void hvc_set_winsz(struct work_struct *work)
{
	struct hvc_struct *hp;
	unsigned long hvc_flags;
	struct tty_struct *tty;
	struct winsize ws;

	hp = container_of(work, struct hvc_struct, tty_resize);

	spin_lock_irqsave(&hp->lock, hvc_flags);
	if (!hp->tty) {
		spin_unlock_irqrestore(&hp->lock, hvc_flags);
		return;
	}
	ws  = hp->ws;
	tty = tty_kref_get(hp->tty);
	spin_unlock_irqrestore(&hp->lock, hvc_flags);

	tty_do_resize(tty, &ws);
	tty_kref_put(tty);
}

/*
 * This is actually a contract between the driver and the tty layer outlining
 * how much write room the driver can guarantee will be sent OR BUFFERED.  This
 * driver MUST honor the return value.
 */
static int hvc_write_room(struct tty_struct *tty)
{
	struct hvc_struct *hp = tty->driver_data;

	if (!hp)
		return -1;

	return hp->outbuf_size - hp->n_outbuf;
}

/* tty->chars_in_buffer hook: bytes buffered but not yet pushed. */
static int hvc_chars_in_buffer(struct tty_struct *tty)
{
	struct hvc_struct *hp = tty->driver_data;

	if (!hp)
		return 0;
	return hp->n_outbuf;
}

/*
 * timeout will vary between the MIN and MAX values defined here.  By default
 * and during console activity we will use a default MIN_TIMEOUT of 10.  When
 * the console is idle, we increase the timeout value on each pass through
 * msleep until we reach the max.  This may be noticeable as a brief (average
 * one second) delay on the console before the console responds to input when
 * there has been no input for some time.
 */
#define MIN_TIMEOUT		(10)
#define MAX_TIMEOUT		(2000)
static u32 timeout = MIN_TIMEOUT;

#define HVC_POLL_READ	0x00000001
#define HVC_POLL_WRITE	0x00000002

/*
 * One service pass for an adapter: flush pending output, then pull input
 * from the backend into the tty flip buffer (handling SysRq on the console
 * instance).  Returns a HVC_POLL_* mask telling khvcd whether to reschedule.
 */
int hvc_poll(struct hvc_struct *hp)
{
	struct tty_struct *tty;
	int i, n, poll_mask = 0;
	char buf[N_INBUF] __ALIGNED__;
	unsigned long flags;
	int read_total = 0;
	int written_total = 0;

	spin_lock_irqsave(&hp->lock, flags);

	/* Push pending writes */
	if (hp->n_outbuf > 0)
		written_total = hvc_push(hp);

	/* Reschedule us if still some write pending */
	if (hp->n_outbuf > 0) {
		poll_mask |= HVC_POLL_WRITE;
		/* If hvc_push() was not able to write, sleep a few msecs */
		timeout = (written_total) ? 0 : MIN_TIMEOUT;
	}

	/* No tty attached, just skip */
	tty = tty_kref_get(hp->tty);
	if (tty == NULL)
		goto bail;

	/* Now check if we can get data (are we throttled ?) */
	if (test_bit(TTY_THROTTLED, &tty->flags))
		goto throttled;

	/* If we aren't notifier driven and aren't throttled, we always
	 * request a reschedule
	 */
	if (!hp->irq_requested)
		poll_mask |= HVC_POLL_READ;

	/* Read data if any */
	for (;;) {
		int count = tty_buffer_request_room(tty, N_INBUF);

		/* If flip is full, just reschedule a later read */
		if (count == 0) {
			poll_mask |= HVC_POLL_READ;
			break;
		}

		n = hp->ops->get_chars(hp->vtermno, buf, count);
		if (n <= 0) {
			/* Hangup the tty when disconnected from host */
			if (n == -EPIPE) {
				spin_unlock_irqrestore(&hp->lock, flags);
				tty_hangup(tty);
				spin_lock_irqsave(&hp->lock, flags);
			} else if ( n == -EAGAIN ) {
				/*
				 * Some back-ends can only ensure a certain min
				 * num of bytes read, which may be > 'count'.
				 * Let the tty clear the flip buff to make room.
				 */
				poll_mask |= HVC_POLL_READ;
			}
			break;
		}
		for (i = 0; i < n; ++i) {
#ifdef CONFIG_MAGIC_SYSRQ
			if (hp->index == hvc_console.index) {
				/* Handle the SysRq Hack */
				/* XXX should support a sequence */
				if (buf[i] == '\x0f') {	/* ^O */
					/* if ^O is pressed again, reset
					 * sysrq_pressed and flip ^O char */
					sysrq_pressed = !sysrq_pressed;
					if (sysrq_pressed)
						continue;
				} else if (sysrq_pressed) {
					handle_sysrq(buf[i]);
					sysrq_pressed = 0;
					continue;
				}
			}
#endif /* CONFIG_MAGIC_SYSRQ */
			tty_insert_flip_char(tty, buf[i], 0);
		}

		read_total += n;
	}
 throttled:
	/* Wakeup write queue if necessary */
	if (hp->do_wakeup) {
		hp->do_wakeup = 0;
		tty_wakeup(tty);
	}
 bail:
	spin_unlock_irqrestore(&hp->lock, flags);

	if (read_total) {
		/* Activity is occurring, so reset the polling backoff value to
		   a minimum for performance. */
		timeout = MIN_TIMEOUT;

		tty_flip_buffer_push(tty);
	}
	if (tty)
		tty_kref_put(tty);

	return poll_mask;
}
EXPORT_SYMBOL_GPL(hvc_poll);

/**
 * __hvc_resize() - Update terminal window size information.
 * @hp:		HVC console pointer
 * @ws:		Terminal window size structure
 *
 * Stores the specified window size information in the hvc structure of @hp.
 * The function schedule the tty resize update.
 *
 * Locking:	Locking free; the function MUST be called holding hp->lock
 */
void __hvc_resize(struct hvc_struct *hp, struct winsize ws)
{
	hp->ws = ws;
	schedule_work(&hp->tty_resize);
}
EXPORT_SYMBOL_GPL(__hvc_resize);

/*
 * This kthread is either polling or interrupt driven.  This is determined by
 * calling hvc_poll() who determines whether a console adapter support
 * interrupts.
 */
static int khvcd(void *unused)
{
	int poll_mask;
	struct hvc_struct *hp;

	set_freezable();
	do {
		poll_mask = 0;
		hvc_kicked = 0;
		try_to_freeze();
		wmb();
		if (!cpus_are_in_xmon()) {
			spin_lock(&hvc_structs_lock);
			list_for_each_entry(hp, &hvc_structs, next) {
				poll_mask |= hvc_poll(hp);
			}
			spin_unlock(&hvc_structs_lock);
		} else
			poll_mask |= HVC_POLL_READ;
		if (hvc_kicked)
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		if (!hvc_kicked) {
			if (poll_mask == 0)
				schedule();
			else {
				/* back off polling while the console is idle */
				if (timeout < MAX_TIMEOUT)
					timeout += (timeout >> 6) + 1;

				msleep_interruptible(timeout);
			}
		}
		__set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());

	return 0;
}

/* tty->tiocmget hook: delegate to the backend if it provides one. */
static int hvc_tiocmget(struct tty_struct *tty)
{
	struct hvc_struct *hp = tty->driver_data;

	if (!hp || !hp->ops->tiocmget)
		return -EINVAL;
	return hp->ops->tiocmget(hp);
}

/* tty->tiocmset hook: delegate to the backend if it provides one. */
static int hvc_tiocmset(struct tty_struct *tty,
			unsigned int set, unsigned int clear)
{
	struct hvc_struct *hp = tty->driver_data;

	if (!hp || !hp->ops->tiocmset)
		return -EINVAL;
	return hp->ops->tiocmset(hp, set, clear);
}

#ifdef CONFIG_CONSOLE_POLL
int hvc_poll_init(struct tty_driver *driver, int line, char *options)
{
	return 0;
}

/* kgdb-style polled read: fetch one character, no interrupts.
 * NOTE(review): uses driver->ttys[0] regardless of @line — presumably
 * only a single polled instance is expected; verify against callers. */
static int hvc_poll_get_char(struct tty_driver *driver, int line)
{
	struct tty_struct *tty = driver->ttys[0];
	struct hvc_struct *hp = tty->driver_data;
	int n;
	char ch;

	n = hp->ops->get_chars(hp->vtermno, &ch, 1);

	if (n == 0)
		return NO_POLL_CHAR;

	return ch;
}

/* kgdb-style polled write: busy-loop until the backend accepts @ch. */
static void hvc_poll_put_char(struct tty_driver *driver, int line, char ch)
{
	struct tty_struct *tty = driver->ttys[0];
	struct hvc_struct *hp = tty->driver_data;
	int n;

	do {
		n = hp->ops->put_chars(hp->vtermno, &ch, 1);
	} while (n <= 0);
}
#endif

static const struct tty_operations hvc_ops = {
	.open = hvc_open,
	.close = hvc_close,
	.write = hvc_write,
	.hangup = hvc_hangup,
	.unthrottle = hvc_unthrottle,
	.write_room = hvc_write_room,
	.chars_in_buffer = hvc_chars_in_buffer,
	.tiocmget = hvc_tiocmget,
	.tiocmset = hvc_tiocmset,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init = hvc_poll_init,
	.poll_get_char = hvc_poll_get_char,
	.poll_put_char = hvc_poll_put_char,
#endif
};

/*
 * Register a new vterm with the infrastructure.  Allocates the
 * hvc_struct and its output buffer in one chunk, picks a tty index
 * (matching a console slot if one was reserved by hvc_instantiate()),
 * and links it onto hvc_structs.  Returns the new instance or ERR_PTR.
 */
struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
			     const struct hv_ops *ops, int outbuf_size)
{
	struct hvc_struct *hp;
	int i;

	/* We wait until a driver actually comes along */
	if (!hvc_driver) {
		int err = hvc_init();
		if (err)
			return ERR_PTR(err);
	}

	hp = kzalloc(ALIGN(sizeof(*hp), sizeof(long)) + outbuf_size,
			GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	hp->vtermno = vtermno;
	hp->data = data;
	hp->ops = ops;
	hp->outbuf_size = outbuf_size;
	/* outbuf lives in the same allocation, after the aligned struct */
	hp->outbuf = &((char *)hp)[ALIGN(sizeof(*hp), sizeof(long))];

	kref_init(&hp->kref);

	INIT_WORK(&hp->tty_resize, hvc_set_winsz);
	spin_lock_init(&hp->lock);
	spin_lock(&hvc_structs_lock);

	/*
	 * find index to use:
	 * see if this vterm id matches one registered for console.
	 */
	for (i=0; i < MAX_NR_HVC_CONSOLES; i++)
		if (vtermnos[i] == hp->vtermno &&
		    cons_ops[i] == hp->ops)
			break;

	/* no matching slot, just use a counter */
	if (i >= MAX_NR_HVC_CONSOLES)
		i = ++last_hvc;

	hp->index = i;

	list_add_tail(&(hp->next), &hvc_structs);
	spin_unlock(&hvc_structs_lock);

	return hp;
}
EXPORT_SYMBOL_GPL(hvc_alloc);

/*
 * Unregister a vterm: invalidate its console slot, drop the allocation
 * reference, and hang up any attached tty (which releases the remaining
 * per-open references).
 */
int hvc_remove(struct hvc_struct *hp)
{
	unsigned long flags;
	struct tty_struct *tty;

	spin_lock_irqsave(&hp->lock, flags);
	tty = tty_kref_get(hp->tty);

	if (hp->index < MAX_NR_HVC_CONSOLES)
		vtermnos[hp->index] = -1;

	/* Don't whack hp->irq because tty_hangup() will need to free the irq. */

	spin_unlock_irqrestore(&hp->lock, flags);

	/*
	 * We 'put' the instance that was grabbed when the kref instance
	 * was initialized using kref_init().  Let the last holder of this
	 * kref cause it to be removed, which will probably be the tty_vhangup
	 * below.
	 */
	kref_put(&hp->kref, destroy_hvc_struct);

	/*
	 * This function call will auto chain call hvc_hangup.
	 */
	if (tty) {
		tty_vhangup(tty);
		tty_kref_put(tty);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(hvc_remove);

/* Driver initialization: called as soon as someone uses hvc_alloc(). */
static int hvc_init(void)
{
	struct tty_driver *drv;
	int err;

	/* We need more than hvc_count adapters due to hotplug additions. */
	drv = alloc_tty_driver(HVC_ALLOC_TTY_ADAPTERS);
	if (!drv) {
		err = -ENOMEM;
		goto out;
	}

	drv->driver_name = "hvc";
	drv->name = "hvc";
	drv->major = HVC_MAJOR;
	drv->minor_start = HVC_MINOR;
	drv->type = TTY_DRIVER_TYPE_SYSTEM;
	drv->init_termios = tty_std_termios;
	drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS;
	tty_set_operations(drv, &hvc_ops);

	/* Always start the kthread because there can be hotplug vty adapters
	 * added later. */
	hvc_task = kthread_run(khvcd, NULL, "khvcd");
	if (IS_ERR(hvc_task)) {
		printk(KERN_ERR "Couldn't create kthread for console.\n");
		err = PTR_ERR(hvc_task);
		goto put_tty;
	}

	err = tty_register_driver(drv);
	if (err) {
		printk(KERN_ERR "Couldn't register hvc console driver\n");
		goto stop_thread;
	}

	/*
	 * Make sure tty is fully registered before allowing it to be
	 * found by hvc_console_device.
	 */
	smp_mb();
	hvc_driver = drv;
	return 0;

stop_thread:
	kthread_stop(hvc_task);
	hvc_task = NULL;
put_tty:
	put_tty_driver(drv);
out:
	return err;
}

/* This isn't particularly necessary due to this being a console driver
 * but it is nice to be thorough.
 */
static void __exit hvc_exit(void)
{
	if (hvc_driver) {
		kthread_stop(hvc_task);

		tty_unregister_driver(hvc_driver);
		/* return tty_struct instances allocated in hvc_init(). */
		put_tty_driver(hvc_driver);
		unregister_console(&hvc_console);
	}
}
module_exit(hvc_exit);
gpl-2.0
Kuzma30/NT34K
security/apparmor/lsm.c
4753
25133
/* * AppArmor security module * * This file contains AppArmor LSM hooks. * * Copyright (C) 1998-2008 Novell/SUSE * Copyright 2009-2010 Canonical Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. */ #include <linux/security.h> #include <linux/moduleparam.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/ptrace.h> #include <linux/ctype.h> #include <linux/sysctl.h> #include <linux/audit.h> #include <linux/user_namespace.h> #include <net/sock.h> #include "include/apparmor.h" #include "include/apparmorfs.h" #include "include/audit.h" #include "include/capability.h" #include "include/context.h" #include "include/file.h" #include "include/ipc.h" #include "include/path.h" #include "include/policy.h" #include "include/procattr.h" /* Flag indicating whether initialization completed */ int apparmor_initialized __initdata; /* * LSM hook functions */ /* * free the associated aa_task_cxt and put its profiles */ static void apparmor_cred_free(struct cred *cred) { aa_free_task_context(cred->security); cred->security = NULL; } /* * allocate the apparmor part of blank credentials */ static int apparmor_cred_alloc_blank(struct cred *cred, gfp_t gfp) { /* freed by apparmor_cred_free */ struct aa_task_cxt *cxt = aa_alloc_task_context(gfp); if (!cxt) return -ENOMEM; cred->security = cxt; return 0; } /* * prepare new aa_task_cxt for modification by prepare_cred block */ static int apparmor_cred_prepare(struct cred *new, const struct cred *old, gfp_t gfp) { /* freed by apparmor_cred_free */ struct aa_task_cxt *cxt = aa_alloc_task_context(gfp); if (!cxt) return -ENOMEM; aa_dup_task_context(cxt, old->security); new->security = cxt; return 0; } /* * transfer the apparmor data to a blank set of creds */ static void apparmor_cred_transfer(struct cred *new, const struct cred 
*old) { const struct aa_task_cxt *old_cxt = old->security; struct aa_task_cxt *new_cxt = new->security; aa_dup_task_context(new_cxt, old_cxt); } static int apparmor_ptrace_access_check(struct task_struct *child, unsigned int mode) { int error = cap_ptrace_access_check(child, mode); if (error) return error; return aa_ptrace(current, child, mode); } static int apparmor_ptrace_traceme(struct task_struct *parent) { int error = cap_ptrace_traceme(parent); if (error) return error; return aa_ptrace(parent, current, PTRACE_MODE_ATTACH); } /* Derived from security/commoncap.c:cap_capget */ static int apparmor_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) { struct aa_profile *profile; const struct cred *cred; rcu_read_lock(); cred = __task_cred(target); profile = aa_cred_profile(cred); *effective = cred->cap_effective; *inheritable = cred->cap_inheritable; *permitted = cred->cap_permitted; if (!unconfined(profile) && !COMPLAIN_MODE(profile)) { *effective = cap_intersect(*effective, profile->caps.allow); *permitted = cap_intersect(*permitted, profile->caps.allow); } rcu_read_unlock(); return 0; } static int apparmor_capable(const struct cred *cred, struct user_namespace *ns, int cap, int audit) { struct aa_profile *profile; /* cap_capable returns 0 on success, else -EPERM */ int error = cap_capable(cred, ns, cap, audit); if (!error) { profile = aa_cred_profile(cred); if (!unconfined(profile)) error = aa_capable(current, profile, cap, audit); } return error; } /** * common_perm - basic common permission check wrapper fn for paths * @op: operation being checked * @path: path to check permission of (NOT NULL) * @mask: requested permissions mask * @cond: conditional info for the permission request (NOT NULL) * * Returns: %0 else error code if error or permission denied */ static int common_perm(int op, struct path *path, u32 mask, struct path_cond *cond) { struct aa_profile *profile; int error = 0; profile = 
__aa_current_profile(); if (!unconfined(profile)) error = aa_path_perm(op, profile, path, 0, mask, cond); return error; } /** * common_perm_dir_dentry - common permission wrapper when path is dir, dentry * @op: operation being checked * @dir: directory of the dentry (NOT NULL) * @dentry: dentry to check (NOT NULL) * @mask: requested permissions mask * @cond: conditional info for the permission request (NOT NULL) * * Returns: %0 else error code if error or permission denied */ static int common_perm_dir_dentry(int op, struct path *dir, struct dentry *dentry, u32 mask, struct path_cond *cond) { struct path path = { dir->mnt, dentry }; return common_perm(op, &path, mask, cond); } /** * common_perm_mnt_dentry - common permission wrapper when mnt, dentry * @op: operation being checked * @mnt: mount point of dentry (NOT NULL) * @dentry: dentry to check (NOT NULL) * @mask: requested permissions mask * * Returns: %0 else error code if error or permission denied */ static int common_perm_mnt_dentry(int op, struct vfsmount *mnt, struct dentry *dentry, u32 mask) { struct path path = { mnt, dentry }; struct path_cond cond = { dentry->d_inode->i_uid, dentry->d_inode->i_mode }; return common_perm(op, &path, mask, &cond); } /** * common_perm_rm - common permission wrapper for operations doing rm * @op: operation being checked * @dir: directory that the dentry is in (NOT NULL) * @dentry: dentry being rm'd (NOT NULL) * @mask: requested permission mask * * Returns: %0 else error code if error or permission denied */ static int common_perm_rm(int op, struct path *dir, struct dentry *dentry, u32 mask) { struct inode *inode = dentry->d_inode; struct path_cond cond = { }; if (!inode || !dir->mnt || !mediated_filesystem(inode)) return 0; cond.uid = inode->i_uid; cond.mode = inode->i_mode; return common_perm_dir_dentry(op, dir, dentry, mask, &cond); } /** * common_perm_create - common permission wrapper for operations doing create * @op: operation being checked * @dir: directory that 
dentry will be created in (NOT NULL) * @dentry: dentry to create (NOT NULL) * @mask: request permission mask * @mode: created file mode * * Returns: %0 else error code if error or permission denied */ static int common_perm_create(int op, struct path *dir, struct dentry *dentry, u32 mask, umode_t mode) { struct path_cond cond = { current_fsuid(), mode }; if (!dir->mnt || !mediated_filesystem(dir->dentry->d_inode)) return 0; return common_perm_dir_dentry(op, dir, dentry, mask, &cond); } static int apparmor_path_unlink(struct path *dir, struct dentry *dentry) { return common_perm_rm(OP_UNLINK, dir, dentry, AA_MAY_DELETE); } static int apparmor_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode) { return common_perm_create(OP_MKDIR, dir, dentry, AA_MAY_CREATE, S_IFDIR); } static int apparmor_path_rmdir(struct path *dir, struct dentry *dentry) { return common_perm_rm(OP_RMDIR, dir, dentry, AA_MAY_DELETE); } static int apparmor_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev) { return common_perm_create(OP_MKNOD, dir, dentry, AA_MAY_CREATE, mode); } static int apparmor_path_truncate(struct path *path) { struct path_cond cond = { path->dentry->d_inode->i_uid, path->dentry->d_inode->i_mode }; if (!path->mnt || !mediated_filesystem(path->dentry->d_inode)) return 0; return common_perm(OP_TRUNC, path, MAY_WRITE | AA_MAY_META_WRITE, &cond); } static int apparmor_path_symlink(struct path *dir, struct dentry *dentry, const char *old_name) { return common_perm_create(OP_SYMLINK, dir, dentry, AA_MAY_CREATE, S_IFLNK); } static int apparmor_path_link(struct dentry *old_dentry, struct path *new_dir, struct dentry *new_dentry) { struct aa_profile *profile; int error = 0; if (!mediated_filesystem(old_dentry->d_inode)) return 0; profile = aa_current_profile(); if (!unconfined(profile)) error = aa_path_link(profile, old_dentry, new_dir, new_dentry); return error; } static int apparmor_path_rename(struct path *old_dir, struct dentry 
*old_dentry, struct path *new_dir, struct dentry *new_dentry) { struct aa_profile *profile; int error = 0; if (!mediated_filesystem(old_dentry->d_inode)) return 0; profile = aa_current_profile(); if (!unconfined(profile)) { struct path old_path = { old_dir->mnt, old_dentry }; struct path new_path = { new_dir->mnt, new_dentry }; struct path_cond cond = { old_dentry->d_inode->i_uid, old_dentry->d_inode->i_mode }; error = aa_path_perm(OP_RENAME_SRC, profile, &old_path, 0, MAY_READ | AA_MAY_META_READ | MAY_WRITE | AA_MAY_META_WRITE | AA_MAY_DELETE, &cond); if (!error) error = aa_path_perm(OP_RENAME_DEST, profile, &new_path, 0, MAY_WRITE | AA_MAY_META_WRITE | AA_MAY_CREATE, &cond); } return error; } static int apparmor_path_chmod(struct path *path, umode_t mode) { if (!mediated_filesystem(path->dentry->d_inode)) return 0; return common_perm_mnt_dentry(OP_CHMOD, path->mnt, path->dentry, AA_MAY_CHMOD); } static int apparmor_path_chown(struct path *path, uid_t uid, gid_t gid) { struct path_cond cond = { path->dentry->d_inode->i_uid, path->dentry->d_inode->i_mode }; if (!mediated_filesystem(path->dentry->d_inode)) return 0; return common_perm(OP_CHOWN, path, AA_MAY_CHOWN, &cond); } static int apparmor_inode_getattr(struct vfsmount *mnt, struct dentry *dentry) { if (!mediated_filesystem(dentry->d_inode)) return 0; return common_perm_mnt_dentry(OP_GETATTR, mnt, dentry, AA_MAY_META_READ); } static int apparmor_dentry_open(struct file *file, const struct cred *cred) { struct aa_file_cxt *fcxt = file->f_security; struct aa_profile *profile; int error = 0; if (!mediated_filesystem(file->f_path.dentry->d_inode)) return 0; /* If in exec, permission is handled by bprm hooks. * Cache permissions granted by the previous exec check, with * implicit read and executable mmap which are required to * actually execute the image. 
*/ if (current->in_execve) { fcxt->allow = MAY_EXEC | MAY_READ | AA_EXEC_MMAP; return 0; } profile = aa_cred_profile(cred); if (!unconfined(profile)) { struct inode *inode = file->f_path.dentry->d_inode; struct path_cond cond = { inode->i_uid, inode->i_mode }; error = aa_path_perm(OP_OPEN, profile, &file->f_path, 0, aa_map_file_to_perms(file), &cond); /* todo cache full allowed permissions set and state */ fcxt->allow = aa_map_file_to_perms(file); } return error; } static int apparmor_file_alloc_security(struct file *file) { /* freed by apparmor_file_free_security */ file->f_security = aa_alloc_file_context(GFP_KERNEL); if (!file->f_security) return -ENOMEM; return 0; } static void apparmor_file_free_security(struct file *file) { struct aa_file_cxt *cxt = file->f_security; aa_free_file_context(cxt); } static int common_file_perm(int op, struct file *file, u32 mask) { struct aa_file_cxt *fcxt = file->f_security; struct aa_profile *profile, *fprofile = aa_cred_profile(file->f_cred); int error = 0; BUG_ON(!fprofile); if (!file->f_path.mnt || !mediated_filesystem(file->f_path.dentry->d_inode)) return 0; profile = __aa_current_profile(); /* revalidate access, if task is unconfined, or the cached cred * doesn't match or if the request is for more permissions than * was granted. 
* * Note: the test for !unconfined(fprofile) is to handle file * delegation from unconfined tasks */ if (!unconfined(profile) && !unconfined(fprofile) && ((fprofile != profile) || (mask & ~fcxt->allow))) error = aa_file_perm(op, profile, file, mask); return error; } static int apparmor_file_permission(struct file *file, int mask) { return common_file_perm(OP_FPERM, file, mask); } static int apparmor_file_lock(struct file *file, unsigned int cmd) { u32 mask = AA_MAY_LOCK; if (cmd == F_WRLCK) mask |= MAY_WRITE; return common_file_perm(OP_FLOCK, file, mask); } static int common_mmap(int op, struct file *file, unsigned long prot, unsigned long flags) { struct dentry *dentry; int mask = 0; if (!file || !file->f_security) return 0; if (prot & PROT_READ) mask |= MAY_READ; /* * Private mappings don't require write perms since they don't * write back to the files */ if ((prot & PROT_WRITE) && !(flags & MAP_PRIVATE)) mask |= MAY_WRITE; if (prot & PROT_EXEC) mask |= AA_EXEC_MMAP; dentry = file->f_path.dentry; return common_file_perm(op, file, mask); } static int apparmor_file_mmap(struct file *file, unsigned long reqprot, unsigned long prot, unsigned long flags, unsigned long addr, unsigned long addr_only) { int rc = 0; /* do DAC check */ rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only); if (rc || addr_only) return rc; return common_mmap(OP_FMMAP, file, prot, flags); } static int apparmor_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot) { return common_mmap(OP_FMPROT, vma->vm_file, prot, !(vma->vm_flags & VM_SHARED) ? 
MAP_PRIVATE : 0); } static int apparmor_getprocattr(struct task_struct *task, char *name, char **value) { int error = -ENOENT; struct aa_profile *profile; /* released below */ const struct cred *cred = get_task_cred(task); struct aa_task_cxt *cxt = cred->security; profile = aa_cred_profile(cred); if (strcmp(name, "current") == 0) error = aa_getprocattr(aa_newest_version(cxt->profile), value); else if (strcmp(name, "prev") == 0 && cxt->previous) error = aa_getprocattr(aa_newest_version(cxt->previous), value); else if (strcmp(name, "exec") == 0 && cxt->onexec) error = aa_getprocattr(aa_newest_version(cxt->onexec), value); else error = -EINVAL; put_cred(cred); return error; } static int apparmor_setprocattr(struct task_struct *task, char *name, void *value, size_t size) { char *command, *args = value; size_t arg_size; int error; if (size == 0) return -EINVAL; /* args points to a PAGE_SIZE buffer, AppArmor requires that * the buffer must be null terminated or have size <= PAGE_SIZE -1 * so that AppArmor can null terminate them */ if (args[size - 1] != '\0') { if (size == PAGE_SIZE) return -EINVAL; args[size] = '\0'; } /* task can only write its own attributes */ if (current != task) return -EACCES; args = value; args = strim(args); command = strsep(&args, " "); if (!args) return -EINVAL; args = skip_spaces(args); if (!*args) return -EINVAL; arg_size = size - (args - (char *) value); if (strcmp(name, "current") == 0) { if (strcmp(command, "changehat") == 0) { error = aa_setprocattr_changehat(args, arg_size, !AA_DO_TEST); } else if (strcmp(command, "permhat") == 0) { error = aa_setprocattr_changehat(args, arg_size, AA_DO_TEST); } else if (strcmp(command, "changeprofile") == 0) { error = aa_setprocattr_changeprofile(args, !AA_ONEXEC, !AA_DO_TEST); } else if (strcmp(command, "permprofile") == 0) { error = aa_setprocattr_changeprofile(args, !AA_ONEXEC, AA_DO_TEST); } else if (strcmp(command, "permipc") == 0) { error = aa_setprocattr_permipc(args); } else { struct 
common_audit_data sa; struct apparmor_audit_data aad = {0,}; COMMON_AUDIT_DATA_INIT(&sa, NONE); sa.aad = &aad; aad.op = OP_SETPROCATTR; aad.info = name; aad.error = -EINVAL; return aa_audit(AUDIT_APPARMOR_DENIED, __aa_current_profile(), GFP_KERNEL, &sa, NULL); } } else if (strcmp(name, "exec") == 0) { error = aa_setprocattr_changeprofile(args, AA_ONEXEC, !AA_DO_TEST); } else { /* only support the "current" and "exec" process attributes */ return -EINVAL; } if (!error) error = size; return error; } static int apparmor_task_setrlimit(struct task_struct *task, unsigned int resource, struct rlimit *new_rlim) { struct aa_profile *profile = __aa_current_profile(); int error = 0; if (!unconfined(profile)) error = aa_task_setrlimit(profile, task, resource, new_rlim); return error; } static struct security_operations apparmor_ops = { .name = "apparmor", .ptrace_access_check = apparmor_ptrace_access_check, .ptrace_traceme = apparmor_ptrace_traceme, .capget = apparmor_capget, .capable = apparmor_capable, .path_link = apparmor_path_link, .path_unlink = apparmor_path_unlink, .path_symlink = apparmor_path_symlink, .path_mkdir = apparmor_path_mkdir, .path_rmdir = apparmor_path_rmdir, .path_mknod = apparmor_path_mknod, .path_rename = apparmor_path_rename, .path_chmod = apparmor_path_chmod, .path_chown = apparmor_path_chown, .path_truncate = apparmor_path_truncate, .dentry_open = apparmor_dentry_open, .inode_getattr = apparmor_inode_getattr, .file_permission = apparmor_file_permission, .file_alloc_security = apparmor_file_alloc_security, .file_free_security = apparmor_file_free_security, .file_mmap = apparmor_file_mmap, .file_mprotect = apparmor_file_mprotect, .file_lock = apparmor_file_lock, .getprocattr = apparmor_getprocattr, .setprocattr = apparmor_setprocattr, .cred_alloc_blank = apparmor_cred_alloc_blank, .cred_free = apparmor_cred_free, .cred_prepare = apparmor_cred_prepare, .cred_transfer = apparmor_cred_transfer, .bprm_set_creds = apparmor_bprm_set_creds, 
.bprm_committing_creds = apparmor_bprm_committing_creds, .bprm_committed_creds = apparmor_bprm_committed_creds, .bprm_secureexec = apparmor_bprm_secureexec, .task_setrlimit = apparmor_task_setrlimit, }; /* * AppArmor sysfs module parameters */ static int param_set_aabool(const char *val, const struct kernel_param *kp); static int param_get_aabool(char *buffer, const struct kernel_param *kp); #define param_check_aabool param_check_bool static struct kernel_param_ops param_ops_aabool = { .set = param_set_aabool, .get = param_get_aabool }; static int param_set_aauint(const char *val, const struct kernel_param *kp); static int param_get_aauint(char *buffer, const struct kernel_param *kp); #define param_check_aauint param_check_uint static struct kernel_param_ops param_ops_aauint = { .set = param_set_aauint, .get = param_get_aauint }; static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp); static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp); #define param_check_aalockpolicy param_check_bool static struct kernel_param_ops param_ops_aalockpolicy = { .set = param_set_aalockpolicy, .get = param_get_aalockpolicy }; static int param_set_audit(const char *val, struct kernel_param *kp); static int param_get_audit(char *buffer, struct kernel_param *kp); static int param_set_mode(const char *val, struct kernel_param *kp); static int param_get_mode(char *buffer, struct kernel_param *kp); /* Flag values, also controllable via /sys/module/apparmor/parameters * We define special types as we want to do additional mediation. 
*/ /* AppArmor global enforcement switch - complain, enforce, kill */ enum profile_mode aa_g_profile_mode = APPARMOR_ENFORCE; module_param_call(mode, param_set_mode, param_get_mode, &aa_g_profile_mode, S_IRUSR | S_IWUSR); /* Debug mode */ bool aa_g_debug; module_param_named(debug, aa_g_debug, aabool, S_IRUSR | S_IWUSR); /* Audit mode */ enum audit_mode aa_g_audit; module_param_call(audit, param_set_audit, param_get_audit, &aa_g_audit, S_IRUSR | S_IWUSR); /* Determines if audit header is included in audited messages. This * provides more context if the audit daemon is not running */ bool aa_g_audit_header = 1; module_param_named(audit_header, aa_g_audit_header, aabool, S_IRUSR | S_IWUSR); /* lock out loading/removal of policy * TODO: add in at boot loading of policy, which is the only way to * load policy, if lock_policy is set */ bool aa_g_lock_policy; module_param_named(lock_policy, aa_g_lock_policy, aalockpolicy, S_IRUSR | S_IWUSR); /* Syscall logging mode */ bool aa_g_logsyscall; module_param_named(logsyscall, aa_g_logsyscall, aabool, S_IRUSR | S_IWUSR); /* Maximum pathname length before accesses will start getting rejected */ unsigned int aa_g_path_max = 2 * PATH_MAX; module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR | S_IWUSR); /* Determines how paranoid loading of policy is and how much verification * on the loaded policy is done. */ bool aa_g_paranoid_load = 1; module_param_named(paranoid_load, aa_g_paranoid_load, aabool, S_IRUSR | S_IWUSR); /* Boot time disable flag */ static bool apparmor_enabled = CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE; module_param_named(enabled, apparmor_enabled, aabool, S_IRUSR); static int __init apparmor_enabled_setup(char *str) { unsigned long enabled; int error = strict_strtoul(str, 0, &enabled); if (!error) apparmor_enabled = enabled ? 
1 : 0; return 1; } __setup("apparmor=", apparmor_enabled_setup); /* set global flag turning off the ability to load policy */ static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; if (aa_g_lock_policy) return -EACCES; return param_set_bool(val, kp); } static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; return param_get_bool(buffer, kp); } static int param_set_aabool(const char *val, const struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; return param_set_bool(val, kp); } static int param_get_aabool(char *buffer, const struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; return param_get_bool(buffer, kp); } static int param_set_aauint(const char *val, const struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; return param_set_uint(val, kp); } static int param_get_aauint(char *buffer, const struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; return param_get_uint(buffer, kp); } static int param_get_audit(char *buffer, struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; if (!apparmor_enabled) return -EINVAL; return sprintf(buffer, "%s", audit_mode_names[aa_g_audit]); } static int param_set_audit(const char *val, struct kernel_param *kp) { int i; if (!capable(CAP_MAC_ADMIN)) return -EPERM; if (!apparmor_enabled) return -EINVAL; if (!val) return -EINVAL; for (i = 0; i < AUDIT_MAX_INDEX; i++) { if (strcmp(val, audit_mode_names[i]) == 0) { aa_g_audit = i; return 0; } } return -EINVAL; } static int param_get_mode(char *buffer, struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; if (!apparmor_enabled) return -EINVAL; return sprintf(buffer, "%s", profile_mode_names[aa_g_profile_mode]); } static int param_set_mode(const char *val, struct kernel_param *kp) { int i; if (!capable(CAP_MAC_ADMIN)) return -EPERM; if 
(!apparmor_enabled) return -EINVAL; if (!val) return -EINVAL; for (i = 0; i < APPARMOR_NAMES_MAX_INDEX; i++) { if (strcmp(val, profile_mode_names[i]) == 0) { aa_g_profile_mode = i; return 0; } } return -EINVAL; } /* * AppArmor init functions */ /** * set_init_cxt - set a task context and profile on the first task. * * TODO: allow setting an alternate profile than unconfined */ static int __init set_init_cxt(void) { struct cred *cred = (struct cred *)current->real_cred; struct aa_task_cxt *cxt; cxt = aa_alloc_task_context(GFP_KERNEL); if (!cxt) return -ENOMEM; cxt->profile = aa_get_profile(root_ns->unconfined); cred->security = cxt; return 0; } static int __init apparmor_init(void) { int error; if (!apparmor_enabled || !security_module_enable(&apparmor_ops)) { aa_info_message("AppArmor disabled by boot time parameter"); apparmor_enabled = 0; return 0; } error = aa_alloc_root_ns(); if (error) { AA_ERROR("Unable to allocate default profile namespace\n"); goto alloc_out; } error = set_init_cxt(); if (error) { AA_ERROR("Failed to set context on init task\n"); goto register_security_out; } error = register_security(&apparmor_ops); if (error) { AA_ERROR("Unable to register AppArmor\n"); goto set_init_cxt_out; } /* Report that AppArmor successfully initialized */ apparmor_initialized = 1; if (aa_g_profile_mode == APPARMOR_COMPLAIN) aa_info_message("AppArmor initialized: complain mode enabled"); else if (aa_g_profile_mode == APPARMOR_KILL) aa_info_message("AppArmor initialized: kill mode enabled"); else aa_info_message("AppArmor initialized"); return error; set_init_cxt_out: aa_free_task_context(current->real_cred->security); register_security_out: aa_free_root_ns(); alloc_out: aa_destroy_aafs(); apparmor_enabled = 0; return error; } security_initcall(apparmor_init);
gpl-2.0
iamroot11c/kernel_source
drivers/media/rc/keymaps/rc-dib0700-rc5.c
7313
6113
/* rc-dvb0700-big.c - Keytable for devices in dvb0700 * * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com> * * TODO: This table is a real mess, as it merges RC codes from several * devices into a big table. It also has both RC-5 and NEC codes inside. * It should be broken into small tables, and the protocols should properly * be indentificated. * * The table were imported from dib0700_devices.c. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <media/rc-map.h> #include <linux/module.h> static struct rc_map_table dib0700_rc5_table[] = { /* Key codes for the tiny Pinnacle remote*/ { 0x0700, KEY_MUTE }, { 0x0701, KEY_MENU }, /* Pinnacle logo */ { 0x0739, KEY_POWER }, { 0x0703, KEY_VOLUMEUP }, { 0x0709, KEY_VOLUMEDOWN }, { 0x0706, KEY_CHANNELUP }, { 0x070c, KEY_CHANNELDOWN }, { 0x070f, KEY_1 }, { 0x0715, KEY_2 }, { 0x0710, KEY_3 }, { 0x0718, KEY_4 }, { 0x071b, KEY_5 }, { 0x071e, KEY_6 }, { 0x0711, KEY_7 }, { 0x0721, KEY_8 }, { 0x0712, KEY_9 }, { 0x0727, KEY_0 }, { 0x0724, KEY_SCREEN }, /* 'Square' key */ { 0x072a, KEY_TEXT }, /* 'T' key */ { 0x072d, KEY_REWIND }, { 0x0730, KEY_PLAY }, { 0x0733, KEY_FASTFORWARD }, { 0x0736, KEY_RECORD }, { 0x073c, KEY_STOP }, { 0x073f, KEY_CANCEL }, /* '?' 
key */ /* Key codes for the Terratec Cinergy DT XS Diversity, similar to cinergyT2.c */ { 0xeb01, KEY_POWER }, { 0xeb02, KEY_1 }, { 0xeb03, KEY_2 }, { 0xeb04, KEY_3 }, { 0xeb05, KEY_4 }, { 0xeb06, KEY_5 }, { 0xeb07, KEY_6 }, { 0xeb08, KEY_7 }, { 0xeb09, KEY_8 }, { 0xeb0a, KEY_9 }, { 0xeb0b, KEY_VIDEO }, { 0xeb0c, KEY_0 }, { 0xeb0d, KEY_REFRESH }, { 0xeb0f, KEY_EPG }, { 0xeb10, KEY_UP }, { 0xeb11, KEY_LEFT }, { 0xeb12, KEY_OK }, { 0xeb13, KEY_RIGHT }, { 0xeb14, KEY_DOWN }, { 0xeb16, KEY_INFO }, { 0xeb17, KEY_RED }, { 0xeb18, KEY_GREEN }, { 0xeb19, KEY_YELLOW }, { 0xeb1a, KEY_BLUE }, { 0xeb1b, KEY_CHANNELUP }, { 0xeb1c, KEY_VOLUMEUP }, { 0xeb1d, KEY_MUTE }, { 0xeb1e, KEY_VOLUMEDOWN }, { 0xeb1f, KEY_CHANNELDOWN }, { 0xeb40, KEY_PAUSE }, { 0xeb41, KEY_HOME }, { 0xeb42, KEY_MENU }, /* DVD Menu */ { 0xeb43, KEY_SUBTITLE }, { 0xeb44, KEY_TEXT }, /* Teletext */ { 0xeb45, KEY_DELETE }, { 0xeb46, KEY_TV }, { 0xeb47, KEY_DVD }, { 0xeb48, KEY_STOP }, { 0xeb49, KEY_VIDEO }, { 0xeb4a, KEY_AUDIO }, /* Music */ { 0xeb4b, KEY_SCREEN }, /* Pic */ { 0xeb4c, KEY_PLAY }, { 0xeb4d, KEY_BACK }, { 0xeb4e, KEY_REWIND }, { 0xeb4f, KEY_FASTFORWARD }, { 0xeb54, KEY_PREVIOUS }, { 0xeb58, KEY_RECORD }, { 0xeb5c, KEY_NEXT }, /* Key codes for the Haupauge WinTV Nova-TD, copied from nova-t-usb2.c (Nova-T USB2) */ { 0x1e00, KEY_0 }, { 0x1e01, KEY_1 }, { 0x1e02, KEY_2 }, { 0x1e03, KEY_3 }, { 0x1e04, KEY_4 }, { 0x1e05, KEY_5 }, { 0x1e06, KEY_6 }, { 0x1e07, KEY_7 }, { 0x1e08, KEY_8 }, { 0x1e09, KEY_9 }, { 0x1e0a, KEY_KPASTERISK }, { 0x1e0b, KEY_RED }, { 0x1e0c, KEY_RADIO }, { 0x1e0d, KEY_MENU }, { 0x1e0e, KEY_GRAVE }, /* # */ { 0x1e0f, KEY_MUTE }, { 0x1e10, KEY_VOLUMEUP }, { 0x1e11, KEY_VOLUMEDOWN }, { 0x1e12, KEY_CHANNEL }, { 0x1e14, KEY_UP }, { 0x1e15, KEY_DOWN }, { 0x1e16, KEY_LEFT }, { 0x1e17, KEY_RIGHT }, { 0x1e18, KEY_VIDEO }, { 0x1e19, KEY_AUDIO }, { 0x1e1a, KEY_MEDIA }, { 0x1e1b, KEY_EPG }, { 0x1e1c, KEY_TV }, { 0x1e1e, KEY_NEXT }, { 0x1e1f, KEY_BACK }, { 0x1e20, KEY_CHANNELUP }, { 0x1e21, 
KEY_CHANNELDOWN }, { 0x1e24, KEY_LAST }, /* Skip backwards */ { 0x1e25, KEY_OK }, { 0x1e29, KEY_BLUE}, { 0x1e2e, KEY_GREEN }, { 0x1e30, KEY_PAUSE }, { 0x1e32, KEY_REWIND }, { 0x1e34, KEY_FASTFORWARD }, { 0x1e35, KEY_PLAY }, { 0x1e36, KEY_STOP }, { 0x1e37, KEY_RECORD }, { 0x1e38, KEY_YELLOW }, { 0x1e3b, KEY_GOTO }, { 0x1e3d, KEY_POWER }, /* Key codes for the Leadtek Winfast DTV Dongle */ { 0x0042, KEY_POWER }, { 0x077c, KEY_TUNER }, { 0x0f4e, KEY_PRINT }, /* PREVIEW */ { 0x0840, KEY_SCREEN }, /* full screen toggle*/ { 0x0f71, KEY_DOT }, /* frequency */ { 0x0743, KEY_0 }, { 0x0c41, KEY_1 }, { 0x0443, KEY_2 }, { 0x0b7f, KEY_3 }, { 0x0e41, KEY_4 }, { 0x0643, KEY_5 }, { 0x097f, KEY_6 }, { 0x0d7e, KEY_7 }, { 0x057c, KEY_8 }, { 0x0a40, KEY_9 }, { 0x0e4e, KEY_CLEAR }, { 0x047c, KEY_CHANNEL }, /* show channel number */ { 0x0f41, KEY_LAST }, /* recall */ { 0x0342, KEY_MUTE }, { 0x064c, KEY_RESERVED }, /* PIP button*/ { 0x0172, KEY_SHUFFLE }, /* SNAPSHOT */ { 0x0c4e, KEY_PLAYPAUSE }, /* TIMESHIFT */ { 0x0b70, KEY_RECORD }, { 0x037d, KEY_VOLUMEUP }, { 0x017d, KEY_VOLUMEDOWN }, { 0x0242, KEY_CHANNELUP }, { 0x007d, KEY_CHANNELDOWN }, /* Key codes for Nova-TD "credit card" remote control. 
*/ { 0x1d00, KEY_0 }, { 0x1d01, KEY_1 }, { 0x1d02, KEY_2 }, { 0x1d03, KEY_3 }, { 0x1d04, KEY_4 }, { 0x1d05, KEY_5 }, { 0x1d06, KEY_6 }, { 0x1d07, KEY_7 }, { 0x1d08, KEY_8 }, { 0x1d09, KEY_9 }, { 0x1d0a, KEY_TEXT }, { 0x1d0d, KEY_MENU }, { 0x1d0f, KEY_MUTE }, { 0x1d10, KEY_VOLUMEUP }, { 0x1d11, KEY_VOLUMEDOWN }, { 0x1d12, KEY_CHANNEL }, { 0x1d14, KEY_UP }, { 0x1d15, KEY_DOWN }, { 0x1d16, KEY_LEFT }, { 0x1d17, KEY_RIGHT }, { 0x1d1c, KEY_TV }, { 0x1d1e, KEY_NEXT }, { 0x1d1f, KEY_BACK }, { 0x1d20, KEY_CHANNELUP }, { 0x1d21, KEY_CHANNELDOWN }, { 0x1d24, KEY_LAST }, { 0x1d25, KEY_OK }, { 0x1d30, KEY_PAUSE }, { 0x1d32, KEY_REWIND }, { 0x1d34, KEY_FASTFORWARD }, { 0x1d35, KEY_PLAY }, { 0x1d36, KEY_STOP }, { 0x1d37, KEY_RECORD }, { 0x1d3b, KEY_GOTO }, { 0x1d3d, KEY_POWER }, }; static struct rc_map_list dib0700_rc5_map = { .map = { .scan = dib0700_rc5_table, .size = ARRAY_SIZE(dib0700_rc5_table), .rc_type = RC_TYPE_RC5, .name = RC_MAP_DIB0700_RC5_TABLE, } }; static int __init init_rc_map(void) { return rc_map_register(&dib0700_rc5_map); } static void __exit exit_rc_map(void) { rc_map_unregister(&dib0700_rc5_map); } module_init(init_rc_map) module_exit(exit_rc_map) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
ljalves/alce68k
drivers/input/input-polldev.c
7569
6355
/* * Generic implementation of a polled input device * Copyright (c) 2007 Dmitry Torokhov * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/jiffies.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/workqueue.h> #include <linux/module.h> #include <linux/input-polldev.h> MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>"); MODULE_DESCRIPTION("Generic implementation of a polled input device"); MODULE_LICENSE("GPL v2"); MODULE_VERSION("0.1"); static void input_polldev_queue_work(struct input_polled_dev *dev) { unsigned long delay; delay = msecs_to_jiffies(dev->poll_interval); if (delay >= HZ) delay = round_jiffies_relative(delay); queue_delayed_work(system_freezable_wq, &dev->work, delay); } static void input_polled_device_work(struct work_struct *work) { struct input_polled_dev *dev = container_of(work, struct input_polled_dev, work.work); dev->poll(dev); input_polldev_queue_work(dev); } static int input_open_polled_device(struct input_dev *input) { struct input_polled_dev *dev = input_get_drvdata(input); if (dev->open) dev->open(dev); /* Only start polling if polling is enabled */ if (dev->poll_interval > 0) { dev->poll(dev); input_polldev_queue_work(dev); } return 0; } static void input_close_polled_device(struct input_dev *input) { struct input_polled_dev *dev = input_get_drvdata(input); cancel_delayed_work_sync(&dev->work); if (dev->close) dev->close(dev); } /* SYSFS interface */ static ssize_t input_polldev_get_poll(struct device *dev, struct device_attribute *attr, char *buf) { struct input_polled_dev *polldev = dev_get_drvdata(dev); return sprintf(buf, "%d\n", polldev->poll_interval); } static ssize_t input_polldev_set_poll(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct input_polled_dev *polldev = 
dev_get_drvdata(dev); struct input_dev *input = polldev->input; unsigned int interval; int err; err = kstrtouint(buf, 0, &interval); if (err) return err; if (interval < polldev->poll_interval_min) return -EINVAL; if (interval > polldev->poll_interval_max) return -EINVAL; mutex_lock(&input->mutex); polldev->poll_interval = interval; if (input->users) { cancel_delayed_work_sync(&polldev->work); if (polldev->poll_interval > 0) input_polldev_queue_work(polldev); } mutex_unlock(&input->mutex); return count; } static DEVICE_ATTR(poll, S_IRUGO | S_IWUSR, input_polldev_get_poll, input_polldev_set_poll); static ssize_t input_polldev_get_max(struct device *dev, struct device_attribute *attr, char *buf) { struct input_polled_dev *polldev = dev_get_drvdata(dev); return sprintf(buf, "%d\n", polldev->poll_interval_max); } static DEVICE_ATTR(max, S_IRUGO, input_polldev_get_max, NULL); static ssize_t input_polldev_get_min(struct device *dev, struct device_attribute *attr, char *buf) { struct input_polled_dev *polldev = dev_get_drvdata(dev); return sprintf(buf, "%d\n", polldev->poll_interval_min); } static DEVICE_ATTR(min, S_IRUGO, input_polldev_get_min, NULL); static struct attribute *sysfs_attrs[] = { &dev_attr_poll.attr, &dev_attr_max.attr, &dev_attr_min.attr, NULL }; static struct attribute_group input_polldev_attribute_group = { .attrs = sysfs_attrs }; /** * input_allocate_polled_device - allocate memory for polled device * * The function allocates memory for a polled device and also * for an input device associated with this polled device. 
*/ struct input_polled_dev *input_allocate_polled_device(void) { struct input_polled_dev *dev; dev = kzalloc(sizeof(struct input_polled_dev), GFP_KERNEL); if (!dev) return NULL; dev->input = input_allocate_device(); if (!dev->input) { kfree(dev); return NULL; } return dev; } EXPORT_SYMBOL(input_allocate_polled_device); /** * input_free_polled_device - free memory allocated for polled device * @dev: device to free * * The function frees memory allocated for polling device and drops * reference to the associated input device. */ void input_free_polled_device(struct input_polled_dev *dev) { if (dev) { input_free_device(dev->input); kfree(dev); } } EXPORT_SYMBOL(input_free_polled_device); /** * input_register_polled_device - register polled device * @dev: device to register * * The function registers previously initialized polled input device * with input layer. The device should be allocated with call to * input_allocate_polled_device(). Callers should also set up poll() * method and set up capabilities (id, name, phys, bits) of the * corresponding input_dev structure. */ int input_register_polled_device(struct input_polled_dev *dev) { struct input_dev *input = dev->input; int error; input_set_drvdata(input, dev); INIT_DELAYED_WORK(&dev->work, input_polled_device_work); if (!dev->poll_interval) dev->poll_interval = 500; if (!dev->poll_interval_max) dev->poll_interval_max = dev->poll_interval; input->open = input_open_polled_device; input->close = input_close_polled_device; error = input_register_device(input); if (error) return error; error = sysfs_create_group(&input->dev.kobj, &input_polldev_attribute_group); if (error) { input_unregister_device(input); return error; } /* * Take extra reference to the underlying input device so * that it survives call to input_unregister_polled_device() * and is deleted only after input_free_polled_device() * has been invoked. This is needed to ease task of freeing * sparse keymaps. 
*/ input_get_device(input); return 0; } EXPORT_SYMBOL(input_register_polled_device); /** * input_unregister_polled_device - unregister polled device * @dev: device to unregister * * The function unregisters previously registered polled input * device from input layer. Polling is stopped and device is * ready to be freed with call to input_free_polled_device(). */ void input_unregister_polled_device(struct input_polled_dev *dev) { sysfs_remove_group(&dev->input->dev.kobj, &input_polldev_attribute_group); input_unregister_device(dev->input); } EXPORT_SYMBOL(input_unregister_polled_device);
gpl-2.0
wan5xp/android_kernel_sony_u8500
drivers/video/sunxvr500.c
9105
11532
/* sunxvr500.c: Sun 3DLABS XVR-500 Expert3D driver for sparc64 systems * * Copyright (C) 2007 David S. Miller (davem@davemloft.net) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/fb.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/of_device.h> #include <asm/io.h> /* XXX This device has a 'dev-comm' property which aparently is * XXX a pointer into the openfirmware's address space which is * XXX a shared area the kernel driver can use to keep OBP * XXX informed about the current resolution setting. The idea * XXX is that the kernel can change resolutions, and as long * XXX as the values in the 'dev-comm' area are accurate then * XXX OBP can still render text properly to the console. * XXX * XXX I'm still working out the layout of this and whether there * XXX are any signatures we need to look for etc. */ struct e3d_info { struct fb_info *info; struct pci_dev *pdev; spinlock_t lock; char __iomem *fb_base; unsigned long fb_base_phys; unsigned long fb8_buf_diff; unsigned long regs_base_phys; void __iomem *ramdac; struct device_node *of_node; unsigned int width; unsigned int height; unsigned int depth; unsigned int fb_size; u32 fb_base_reg; u32 fb8_0_off; u32 fb8_1_off; u32 pseudo_palette[16]; }; static int __devinit e3d_get_props(struct e3d_info *ep) { ep->width = of_getintprop_default(ep->of_node, "width", 0); ep->height = of_getintprop_default(ep->of_node, "height", 0); ep->depth = of_getintprop_default(ep->of_node, "depth", 8); if (!ep->width || !ep->height) { printk(KERN_ERR "e3d: Critical properties missing for %s\n", pci_name(ep->pdev)); return -EINVAL; } return 0; } /* My XVR-500 comes up, at 1280x768 and a FB base register value of * 0x04000000, the following video layout register values: * * RAMDAC_VID_WH 0x03ff04ff * RAMDAC_VID_CFG 0x1a0b0088 * RAMDAC_VID_32FB_0 0x04000000 * RAMDAC_VID_32FB_1 0x04800000 * RAMDAC_VID_8FB_0 0x05000000 * RAMDAC_VID_8FB_1 0x05200000 * RAMDAC_VID_XXXFB 0x05400000 * RAMDAC_VID_YYYFB 
0x05c00000 * RAMDAC_VID_ZZZFB 0x05e00000 */ /* Video layout registers */ #define RAMDAC_VID_WH 0x00000070UL /* (height-1)<<16 | (width-1) */ #define RAMDAC_VID_CFG 0x00000074UL /* 0x1a000088|(linesz_log2<<16) */ #define RAMDAC_VID_32FB_0 0x00000078UL /* PCI base 32bpp FB buffer 0 */ #define RAMDAC_VID_32FB_1 0x0000007cUL /* PCI base 32bpp FB buffer 1 */ #define RAMDAC_VID_8FB_0 0x00000080UL /* PCI base 8bpp FB buffer 0 */ #define RAMDAC_VID_8FB_1 0x00000084UL /* PCI base 8bpp FB buffer 1 */ #define RAMDAC_VID_XXXFB 0x00000088UL /* PCI base of XXX FB */ #define RAMDAC_VID_YYYFB 0x0000008cUL /* PCI base of YYY FB */ #define RAMDAC_VID_ZZZFB 0x00000090UL /* PCI base of ZZZ FB */ /* CLUT registers */ #define RAMDAC_INDEX 0x000000bcUL #define RAMDAC_DATA 0x000000c0UL static void e3d_clut_write(struct e3d_info *ep, int index, u32 val) { void __iomem *ramdac = ep->ramdac; unsigned long flags; spin_lock_irqsave(&ep->lock, flags); writel(index, ramdac + RAMDAC_INDEX); writel(val, ramdac + RAMDAC_DATA); spin_unlock_irqrestore(&ep->lock, flags); } static int e3d_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { struct e3d_info *ep = info->par; u32 red_8, green_8, blue_8; u32 red_10, green_10, blue_10; u32 value; if (regno >= 256) return 1; red_8 = red >> 8; green_8 = green >> 8; blue_8 = blue >> 8; value = (blue_8 << 24) | (green_8 << 16) | (red_8 << 8); if (info->fix.visual == FB_VISUAL_TRUECOLOR && regno < 16) ((u32 *)info->pseudo_palette)[regno] = value; red_10 = red >> 6; green_10 = green >> 6; blue_10 = blue >> 6; value = (blue_10 << 20) | (green_10 << 10) | (red_10 << 0); e3d_clut_write(ep, regno, value); return 0; } /* XXX This is a bit of a hack. I can't figure out exactly how the * XXX two 8bpp areas of the framebuffer work. I imagine there is * XXX a WID attribute somewhere else in the framebuffer which tells * XXX the ramdac which of the two 8bpp framebuffer regions to take * XXX the pixel from. 
So, for now, render into both regions to make * XXX sure the pixel shows up. */ static void e3d_imageblit(struct fb_info *info, const struct fb_image *image) { struct e3d_info *ep = info->par; unsigned long flags; spin_lock_irqsave(&ep->lock, flags); cfb_imageblit(info, image); info->screen_base += ep->fb8_buf_diff; cfb_imageblit(info, image); info->screen_base -= ep->fb8_buf_diff; spin_unlock_irqrestore(&ep->lock, flags); } static void e3d_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct e3d_info *ep = info->par; unsigned long flags; spin_lock_irqsave(&ep->lock, flags); cfb_fillrect(info, rect); info->screen_base += ep->fb8_buf_diff; cfb_fillrect(info, rect); info->screen_base -= ep->fb8_buf_diff; spin_unlock_irqrestore(&ep->lock, flags); } static void e3d_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct e3d_info *ep = info->par; unsigned long flags; spin_lock_irqsave(&ep->lock, flags); cfb_copyarea(info, area); info->screen_base += ep->fb8_buf_diff; cfb_copyarea(info, area); info->screen_base -= ep->fb8_buf_diff; spin_unlock_irqrestore(&ep->lock, flags); } static struct fb_ops e3d_ops = { .owner = THIS_MODULE, .fb_setcolreg = e3d_setcolreg, .fb_fillrect = e3d_fillrect, .fb_copyarea = e3d_copyarea, .fb_imageblit = e3d_imageblit, }; static int __devinit e3d_set_fbinfo(struct e3d_info *ep) { struct fb_info *info = ep->info; struct fb_var_screeninfo *var = &info->var; info->flags = FBINFO_DEFAULT; info->fbops = &e3d_ops; info->screen_base = ep->fb_base; info->screen_size = ep->fb_size; info->pseudo_palette = ep->pseudo_palette; /* Fill fix common fields */ strlcpy(info->fix.id, "e3d", sizeof(info->fix.id)); info->fix.smem_start = ep->fb_base_phys; info->fix.smem_len = ep->fb_size; info->fix.type = FB_TYPE_PACKED_PIXELS; if (ep->depth == 32 || ep->depth == 24) info->fix.visual = FB_VISUAL_TRUECOLOR; else info->fix.visual = FB_VISUAL_PSEUDOCOLOR; var->xres = ep->width; var->yres = ep->height; var->xres_virtual = var->xres; 
var->yres_virtual = var->yres; var->bits_per_pixel = ep->depth; var->red.offset = 8; var->red.length = 8; var->green.offset = 16; var->green.length = 8; var->blue.offset = 24; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; if (fb_alloc_cmap(&info->cmap, 256, 0)) { printk(KERN_ERR "e3d: Cannot allocate color map.\n"); return -ENOMEM; } return 0; } static int __devinit e3d_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device_node *of_node; const char *device_type; struct fb_info *info; struct e3d_info *ep; unsigned int line_length; int err; of_node = pci_device_to_OF_node(pdev); if (!of_node) { printk(KERN_ERR "e3d: Cannot find OF node of %s\n", pci_name(pdev)); return -ENODEV; } device_type = of_get_property(of_node, "device_type", NULL); if (!device_type) { printk(KERN_INFO "e3d: Ignoring secondary output device " "at %s\n", pci_name(pdev)); return -ENODEV; } err = pci_enable_device(pdev); if (err < 0) { printk(KERN_ERR "e3d: Cannot enable PCI device %s\n", pci_name(pdev)); goto err_out; } info = framebuffer_alloc(sizeof(struct e3d_info), &pdev->dev); if (!info) { printk(KERN_ERR "e3d: Cannot allocate fb_info\n"); err = -ENOMEM; goto err_disable; } ep = info->par; ep->info = info; ep->pdev = pdev; spin_lock_init(&ep->lock); ep->of_node = of_node; /* Read the PCI base register of the frame buffer, which we * need in order to interpret the RAMDAC_VID_*FB* values in * the ramdac correctly. 
*/ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &ep->fb_base_reg); ep->fb_base_reg &= PCI_BASE_ADDRESS_MEM_MASK; ep->regs_base_phys = pci_resource_start (pdev, 1); err = pci_request_region(pdev, 1, "e3d regs"); if (err < 0) { printk("e3d: Cannot request region 1 for %s\n", pci_name(pdev)); goto err_release_fb; } ep->ramdac = ioremap(ep->regs_base_phys + 0x8000, 0x1000); if (!ep->ramdac) goto err_release_pci1; ep->fb8_0_off = readl(ep->ramdac + RAMDAC_VID_8FB_0); ep->fb8_0_off -= ep->fb_base_reg; ep->fb8_1_off = readl(ep->ramdac + RAMDAC_VID_8FB_1); ep->fb8_1_off -= ep->fb_base_reg; ep->fb8_buf_diff = ep->fb8_1_off - ep->fb8_0_off; ep->fb_base_phys = pci_resource_start (pdev, 0); ep->fb_base_phys += ep->fb8_0_off; err = pci_request_region(pdev, 0, "e3d framebuffer"); if (err < 0) { printk("e3d: Cannot request region 0 for %s\n", pci_name(pdev)); goto err_unmap_ramdac; } err = e3d_get_props(ep); if (err) goto err_release_pci0; line_length = (readl(ep->ramdac + RAMDAC_VID_CFG) >> 16) & 0xff; line_length = 1 << line_length; switch (ep->depth) { case 8: info->fix.line_length = line_length; break; case 16: info->fix.line_length = line_length * 2; break; case 24: info->fix.line_length = line_length * 3; break; case 32: info->fix.line_length = line_length * 4; break; } ep->fb_size = info->fix.line_length * ep->height; ep->fb_base = ioremap(ep->fb_base_phys, ep->fb_size); if (!ep->fb_base) goto err_release_pci0; err = e3d_set_fbinfo(ep); if (err) goto err_unmap_fb; pci_set_drvdata(pdev, info); printk("e3d: Found device at %s\n", pci_name(pdev)); err = register_framebuffer(info); if (err < 0) { printk(KERN_ERR "e3d: Could not register framebuffer %s\n", pci_name(pdev)); goto err_free_cmap; } return 0; err_free_cmap: fb_dealloc_cmap(&info->cmap); err_unmap_fb: iounmap(ep->fb_base); err_release_pci0: pci_release_region(pdev, 0); err_unmap_ramdac: iounmap(ep->ramdac); err_release_pci1: pci_release_region(pdev, 1); err_release_fb: framebuffer_release(info); err_disable: 
pci_disable_device(pdev); err_out: return err; } static void __devexit e3d_pci_unregister(struct pci_dev *pdev) { struct fb_info *info = pci_get_drvdata(pdev); struct e3d_info *ep = info->par; unregister_framebuffer(info); iounmap(ep->ramdac); iounmap(ep->fb_base); pci_release_region(pdev, 0); pci_release_region(pdev, 1); fb_dealloc_cmap(&info->cmap); framebuffer_release(info); pci_disable_device(pdev); } static struct pci_device_id e3d_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x7a0), }, { PCI_DEVICE(0x1091, 0x7a0), }, { PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x7a2), }, { .vendor = PCI_VENDOR_ID_3DLABS, .device = PCI_ANY_ID, .subvendor = PCI_VENDOR_ID_3DLABS, .subdevice = 0x0108, }, { .vendor = PCI_VENDOR_ID_3DLABS, .device = PCI_ANY_ID, .subvendor = PCI_VENDOR_ID_3DLABS, .subdevice = 0x0140, }, { .vendor = PCI_VENDOR_ID_3DLABS, .device = PCI_ANY_ID, .subvendor = PCI_VENDOR_ID_3DLABS, .subdevice = 0x1024, }, { 0, } }; static struct pci_driver e3d_driver = { .name = "e3d", .id_table = e3d_pci_table, .probe = e3d_pci_register, .remove = __devexit_p(e3d_pci_unregister), }; static int __init e3d_init(void) { if (fb_get_options("e3d", NULL)) return -ENODEV; return pci_register_driver(&e3d_driver); } static void __exit e3d_exit(void) { pci_unregister_driver(&e3d_driver); } module_init(e3d_init); module_exit(e3d_exit); MODULE_DESCRIPTION("framebuffer driver for Sun XVR-500 graphics"); MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL");
gpl-2.0
Kali-/tf101-kernel
arch/sh/boards/mach-microdev/fdc37c93xapm.c
13969
6415
/*
 * Setup for the SMSC FDC37C93xAPM
 *
 * Copyright (C) 2003 Sean McGoogan (Sean.McGoogan@superh.com)
 * Copyright (C) 2003, 2004 SuperH, Inc.
 * Copyright (C) 2004, 2005 Paul Mundt
 *
 * SuperH SH4-202 MicroDev board support.
 *
 * May be copied or modified under the terms of the GNU General Public
 * License.  See linux/COPYING for more information.
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/err.h>
#include <mach/microdev.h>

/* I/O ports of the SuperIO chip's configuration interface.  The index
 * register shares the config port; the data register follows it.
 */
#define SMSC_CONFIG_PORT_ADDR	(0x3F0)
#define SMSC_INDEX_PORT_ADDR	SMSC_CONFIG_PORT_ADDR
#define SMSC_DATA_PORT_ADDR	(SMSC_INDEX_PORT_ADDR + 1)

#define SMSC_ENTER_CONFIG_KEY	0x55
#define SMSC_EXIT_CONFIG_KEY	0xaa

/* NOTE: "SMCS" is a historical typo in this identifier, preserved so
 * as not to change the code.
 */
#define SMCS_LOGICAL_DEV_INDEX	0x07	/* Logical Device Number */
#define SMSC_DEVICE_ID_INDEX	0x20	/* Device ID */
#define SMSC_DEVICE_REV_INDEX	0x21	/* Device Revision */
#define SMSC_ACTIVATE_INDEX	0x30	/* Activate */
#define SMSC_PRIMARY_BASE_INDEX	0x60	/* Primary Base Address */
#define SMSC_SECONDARY_BASE_INDEX 0x62	/* Secondary Base Address */
#define SMSC_PRIMARY_INT_INDEX	0x70	/* Primary Interrupt Select */
#define SMSC_SECONDARY_INT_INDEX 0x72	/* Secondary Interrupt Select */
#define SMSC_HDCS0_INDEX	0xf0	/* HDCS0 Address Decoder */
#define SMSC_HDCS1_INDEX	0xf1	/* HDCS1 Address Decoder */

#define SMSC_IDE1_DEVICE	1	/* IDE #1 logical device */
#define SMSC_IDE2_DEVICE	2	/* IDE #2 logical device */
#define SMSC_PARALLEL_DEVICE	3	/* Parallel Port logical device */
#define SMSC_SERIAL1_DEVICE	4	/* Serial #1 logical device */
#define SMSC_SERIAL2_DEVICE	5	/* Serial #2 logical device */
#define SMSC_KEYBOARD_DEVICE	7	/* Keyboard logical device */
#define SMSC_CONFIG_REGISTERS	8	/* Configuration Registers (Aux I/O) */

/* Indexed register access: write the register index to the index
 * port, then read/write the value through the data port.
 */
#define SMSC_READ_INDEXED(index) ({ \
	outb((index), SMSC_INDEX_PORT_ADDR); \
	inb(SMSC_DATA_PORT_ADDR); })
#define SMSC_WRITE_INDEXED(val, index) ({ \
	outb((index), SMSC_INDEX_PORT_ADDR); \
	outb((val), SMSC_DATA_PORT_ADDR); })

#define	IDE1_PRIMARY_BASE	0x01f0	/* Task File Register base for IDE #1 */
#define	IDE1_SECONDARY_BASE	0x03f6	/* Miscellaneous AT registers for IDE #1 */
#define	IDE2_PRIMARY_BASE	0x0170	/* Task File Register base for IDE #2 */
#define	IDE2_SECONDARY_BASE	0x0376	/* Miscellaneous AT registers for IDE #2 */

#define SERIAL1_PRIMARY_BASE	0x03f8
#define SERIAL2_PRIMARY_BASE	0x02f8

#define	MSB(x)		( (x) >> 8 )
#define	LSB(x)		( (x) & 0xff )

/* General-Purpose base address on CPU-board FPGA */
#define	MICRODEV_FPGA_GP_BASE	0xa6100000ul

/*
 * Probe for the FDC37C93xAPM SuperIO chip and program its logical
 * devices (keyboard, two serial ports, two IDE channels) with the
 * board's I/O port addresses and FPGA interrupt lines, then enable
 * the GPIO pins required for IDE operation.
 *
 * Returns 0 on success, or -ENODEV when the expected device ID (0x30)
 * and revision (0x01) are not found.
 */
static int __init smsc_superio_setup(void)
{
	unsigned char devid, devrev;

	/* Initially the chip is in run state */
	/* Put it into configuration state */
	outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);

	/* Read device ID info */
	devid = SMSC_READ_INDEXED(SMSC_DEVICE_ID_INDEX);
	devrev = SMSC_READ_INDEXED(SMSC_DEVICE_REV_INDEX);

	if ((devid == 0x30) && (devrev == 0x01))
		printk("SMSC FDC37C93xAPM SuperIO device detected\n");
	else
		return -ENODEV;

	/* Select the keyboard device */
	SMSC_WRITE_INDEXED(SMSC_KEYBOARD_DEVICE, SMCS_LOGICAL_DEV_INDEX);
	/* enable it */
	SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
	/* enable the interrupts */
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_KEYBOARD, SMSC_PRIMARY_INT_INDEX);
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_MOUSE, SMSC_SECONDARY_INT_INDEX);

	/* Select the Serial #1 device */
	SMSC_WRITE_INDEXED(SMSC_SERIAL1_DEVICE, SMCS_LOGICAL_DEV_INDEX);
	/* enable it */
	SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
	/* program with port addresses (MSB, then LSB) */
	SMSC_WRITE_INDEXED(MSB(SERIAL1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(SERIAL1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1);
	SMSC_WRITE_INDEXED(0x00, SMSC_HDCS0_INDEX);
	/* enable the interrupts */
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_SERIAL1, SMSC_PRIMARY_INT_INDEX);

	/* Select the Serial #2 device */
	SMSC_WRITE_INDEXED(SMSC_SERIAL2_DEVICE, SMCS_LOGICAL_DEV_INDEX);
	/* enable it */
	SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
	/* program with port addresses */
	SMSC_WRITE_INDEXED(MSB(SERIAL2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(SERIAL2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1);
	SMSC_WRITE_INDEXED(0x00, SMSC_HDCS0_INDEX);
	/* enable the interrupts */
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_SERIAL2, SMSC_PRIMARY_INT_INDEX);

	/* Select the IDE#1 device */
	SMSC_WRITE_INDEXED(SMSC_IDE1_DEVICE, SMCS_LOGICAL_DEV_INDEX);
	/* enable it */
	SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
	/* program with port addresses */
	SMSC_WRITE_INDEXED(MSB(IDE1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(IDE1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1);
	SMSC_WRITE_INDEXED(MSB(IDE1_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(IDE1_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+1);
	SMSC_WRITE_INDEXED(0x0c, SMSC_HDCS0_INDEX);
	SMSC_WRITE_INDEXED(0x00, SMSC_HDCS1_INDEX);
	/* select the interrupt */
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_IDE1, SMSC_PRIMARY_INT_INDEX);

	/* Select the IDE#2 device */
	SMSC_WRITE_INDEXED(SMSC_IDE2_DEVICE, SMCS_LOGICAL_DEV_INDEX);
	/* enable it */
	SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
	/* program with port addresses */
	SMSC_WRITE_INDEXED(MSB(IDE2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(IDE2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1);
	SMSC_WRITE_INDEXED(MSB(IDE2_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(IDE2_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+1);
	/* select the interrupt */
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_IDE2, SMSC_PRIMARY_INT_INDEX);

	/* Select the configuration registers */
	SMSC_WRITE_INDEXED(SMSC_CONFIG_REGISTERS, SMCS_LOGICAL_DEV_INDEX);
	/* enable the appropriate GPIO pins for IDE functionality:
	 * bit[0]   In/Out		1==input;  0==output
	 * bit[1]   Polarity		1==invert; 0==no invert
	 * bit[2]   Int Enb #1		1==Enable Combined IRQ #1; 0==disable
	 * bit[3:4] Function Select	00==original; 01==Alternate Function #1
	 */
	SMSC_WRITE_INDEXED(0x00, 0xc2);	/* GP42 = nIDE1_OE */
	SMSC_WRITE_INDEXED(0x01, 0xc5);	/* GP45 = IDE1_IRQ */
	SMSC_WRITE_INDEXED(0x00, 0xc6);	/* GP46 = nIOROP */
	SMSC_WRITE_INDEXED(0x00, 0xc7);	/* GP47 = nIOWOP */
	SMSC_WRITE_INDEXED(0x08, 0xe8);	/* GP20 = nIDE2_OE */

	/* Exit the configuration state */
	outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);

	return 0;
}
device_initcall(smsc_superio_setup);
gpl-2.0
penhoi/linux-3.14.56
arch/sh/boards/mach-microdev/fdc37c93xapm.c
13969
6415
/*
 * Setup for the SMSC FDC37C93xAPM
 *
 * Copyright (C) 2003 Sean McGoogan (Sean.McGoogan@superh.com)
 * Copyright (C) 2003, 2004 SuperH, Inc.
 * Copyright (C) 2004, 2005 Paul Mundt
 *
 * SuperH SH4-202 MicroDev board support.
 *
 * May be copied or modified under the terms of the GNU General Public
 * License.  See linux/COPYING for more information.
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/err.h>
#include <mach/microdev.h>

/* I/O ports of the SuperIO chip's configuration interface.  The index
 * register shares the config port; the data register follows it.
 */
#define SMSC_CONFIG_PORT_ADDR	(0x3F0)
#define SMSC_INDEX_PORT_ADDR	SMSC_CONFIG_PORT_ADDR
#define SMSC_DATA_PORT_ADDR	(SMSC_INDEX_PORT_ADDR + 1)

#define SMSC_ENTER_CONFIG_KEY	0x55
#define SMSC_EXIT_CONFIG_KEY	0xaa

/* NOTE: "SMCS" is a historical typo in this identifier, preserved so
 * as not to change the code.
 */
#define SMCS_LOGICAL_DEV_INDEX	0x07	/* Logical Device Number */
#define SMSC_DEVICE_ID_INDEX	0x20	/* Device ID */
#define SMSC_DEVICE_REV_INDEX	0x21	/* Device Revision */
#define SMSC_ACTIVATE_INDEX	0x30	/* Activate */
#define SMSC_PRIMARY_BASE_INDEX	0x60	/* Primary Base Address */
#define SMSC_SECONDARY_BASE_INDEX 0x62	/* Secondary Base Address */
#define SMSC_PRIMARY_INT_INDEX	0x70	/* Primary Interrupt Select */
#define SMSC_SECONDARY_INT_INDEX 0x72	/* Secondary Interrupt Select */
#define SMSC_HDCS0_INDEX	0xf0	/* HDCS0 Address Decoder */
#define SMSC_HDCS1_INDEX	0xf1	/* HDCS1 Address Decoder */

#define SMSC_IDE1_DEVICE	1	/* IDE #1 logical device */
#define SMSC_IDE2_DEVICE	2	/* IDE #2 logical device */
#define SMSC_PARALLEL_DEVICE	3	/* Parallel Port logical device */
#define SMSC_SERIAL1_DEVICE	4	/* Serial #1 logical device */
#define SMSC_SERIAL2_DEVICE	5	/* Serial #2 logical device */
#define SMSC_KEYBOARD_DEVICE	7	/* Keyboard logical device */
#define SMSC_CONFIG_REGISTERS	8	/* Configuration Registers (Aux I/O) */

/* Indexed register access: write the register index to the index
 * port, then read/write the value through the data port.
 */
#define SMSC_READ_INDEXED(index) ({ \
	outb((index), SMSC_INDEX_PORT_ADDR); \
	inb(SMSC_DATA_PORT_ADDR); })
#define SMSC_WRITE_INDEXED(val, index) ({ \
	outb((index), SMSC_INDEX_PORT_ADDR); \
	outb((val), SMSC_DATA_PORT_ADDR); })

#define	IDE1_PRIMARY_BASE	0x01f0	/* Task File Register base for IDE #1 */
#define	IDE1_SECONDARY_BASE	0x03f6	/* Miscellaneous AT registers for IDE #1 */
#define	IDE2_PRIMARY_BASE	0x0170	/* Task File Register base for IDE #2 */
#define	IDE2_SECONDARY_BASE	0x0376	/* Miscellaneous AT registers for IDE #2 */

#define SERIAL1_PRIMARY_BASE	0x03f8
#define SERIAL2_PRIMARY_BASE	0x02f8

#define	MSB(x)		( (x) >> 8 )
#define	LSB(x)		( (x) & 0xff )

/* General-Purpose base address on CPU-board FPGA */
#define	MICRODEV_FPGA_GP_BASE	0xa6100000ul

/*
 * Probe for the FDC37C93xAPM SuperIO chip and program its logical
 * devices (keyboard, two serial ports, two IDE channels) with the
 * board's I/O port addresses and FPGA interrupt lines, then enable
 * the GPIO pins required for IDE operation.
 *
 * Returns 0 on success, or -ENODEV when the expected device ID (0x30)
 * and revision (0x01) are not found.
 */
static int __init smsc_superio_setup(void)
{
	unsigned char devid, devrev;

	/* Initially the chip is in run state */
	/* Put it into configuration state */
	outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);

	/* Read device ID info */
	devid = SMSC_READ_INDEXED(SMSC_DEVICE_ID_INDEX);
	devrev = SMSC_READ_INDEXED(SMSC_DEVICE_REV_INDEX);

	if ((devid == 0x30) && (devrev == 0x01))
		printk("SMSC FDC37C93xAPM SuperIO device detected\n");
	else
		return -ENODEV;

	/* Select the keyboard device */
	SMSC_WRITE_INDEXED(SMSC_KEYBOARD_DEVICE, SMCS_LOGICAL_DEV_INDEX);
	/* enable it */
	SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
	/* enable the interrupts */
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_KEYBOARD, SMSC_PRIMARY_INT_INDEX);
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_MOUSE, SMSC_SECONDARY_INT_INDEX);

	/* Select the Serial #1 device */
	SMSC_WRITE_INDEXED(SMSC_SERIAL1_DEVICE, SMCS_LOGICAL_DEV_INDEX);
	/* enable it */
	SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
	/* program with port addresses (MSB, then LSB) */
	SMSC_WRITE_INDEXED(MSB(SERIAL1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(SERIAL1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1);
	SMSC_WRITE_INDEXED(0x00, SMSC_HDCS0_INDEX);
	/* enable the interrupts */
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_SERIAL1, SMSC_PRIMARY_INT_INDEX);

	/* Select the Serial #2 device */
	SMSC_WRITE_INDEXED(SMSC_SERIAL2_DEVICE, SMCS_LOGICAL_DEV_INDEX);
	/* enable it */
	SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
	/* program with port addresses */
	SMSC_WRITE_INDEXED(MSB(SERIAL2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(SERIAL2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1);
	SMSC_WRITE_INDEXED(0x00, SMSC_HDCS0_INDEX);
	/* enable the interrupts */
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_SERIAL2, SMSC_PRIMARY_INT_INDEX);

	/* Select the IDE#1 device */
	SMSC_WRITE_INDEXED(SMSC_IDE1_DEVICE, SMCS_LOGICAL_DEV_INDEX);
	/* enable it */
	SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
	/* program with port addresses */
	SMSC_WRITE_INDEXED(MSB(IDE1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(IDE1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1);
	SMSC_WRITE_INDEXED(MSB(IDE1_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(IDE1_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+1);
	SMSC_WRITE_INDEXED(0x0c, SMSC_HDCS0_INDEX);
	SMSC_WRITE_INDEXED(0x00, SMSC_HDCS1_INDEX);
	/* select the interrupt */
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_IDE1, SMSC_PRIMARY_INT_INDEX);

	/* Select the IDE#2 device */
	SMSC_WRITE_INDEXED(SMSC_IDE2_DEVICE, SMCS_LOGICAL_DEV_INDEX);
	/* enable it */
	SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
	/* program with port addresses */
	SMSC_WRITE_INDEXED(MSB(IDE2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(IDE2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1);
	SMSC_WRITE_INDEXED(MSB(IDE2_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(IDE2_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+1);
	/* select the interrupt */
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_IDE2, SMSC_PRIMARY_INT_INDEX);

	/* Select the configuration registers */
	SMSC_WRITE_INDEXED(SMSC_CONFIG_REGISTERS, SMCS_LOGICAL_DEV_INDEX);
	/* enable the appropriate GPIO pins for IDE functionality:
	 * bit[0]   In/Out		1==input;  0==output
	 * bit[1]   Polarity		1==invert; 0==no invert
	 * bit[2]   Int Enb #1		1==Enable Combined IRQ #1; 0==disable
	 * bit[3:4] Function Select	00==original; 01==Alternate Function #1
	 */
	SMSC_WRITE_INDEXED(0x00, 0xc2);	/* GP42 = nIDE1_OE */
	SMSC_WRITE_INDEXED(0x01, 0xc5);	/* GP45 = IDE1_IRQ */
	SMSC_WRITE_INDEXED(0x00, 0xc6);	/* GP46 = nIOROP */
	SMSC_WRITE_INDEXED(0x00, 0xc7);	/* GP47 = nIOWOP */
	SMSC_WRITE_INDEXED(0x08, 0xe8);	/* GP20 = nIDE2_OE */

	/* Exit the configuration state */
	outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);

	return 0;
}
device_initcall(smsc_superio_setup);
gpl-2.0
androidbftab1/bf-kernel-4.2
drivers/pinctrl/pinctrl-single.c
146
50264
/* * Generic device tree based pinctrl driver for one register per pin * type pinmux controllers * * Copyright (C) 2012 Texas Instruments, Inc. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/init.h> #include <linux/module.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/pinctrl/pinctrl.h> #include <linux/pinctrl/pinmux.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/platform_data/pinctrl-single.h> #include "core.h" #include "pinconf.h" #define DRIVER_NAME "pinctrl-single" #define PCS_MUX_PINS_NAME "pinctrl-single,pins" #define PCS_MUX_BITS_NAME "pinctrl-single,bits" #define PCS_REG_NAME_LEN ((sizeof(unsigned long) * 2) + 3) #define PCS_OFF_DISABLED ~0U /** * struct pcs_pingroup - pingroups for a function * @np: pingroup device node pointer * @name: pingroup name * @gpins: array of the pins in the group * @ngpins: number of pins in the group * @node: list node */ struct pcs_pingroup { struct device_node *np; const char *name; int *gpins; int ngpins; struct list_head node; }; /** * struct pcs_func_vals - mux function register offset and value pair * @reg: register virtual address * @val: register value */ struct pcs_func_vals { void __iomem *reg; unsigned val; unsigned mask; }; /** * struct pcs_conf_vals - pinconf parameter, pinconf register offset * and value, enable, disable, mask * @param: config parameter * @val: user input bits in the pinconf register * @enable: enable bits in the pinconf register * @disable: disable bits in the pinconf register * @mask: mask bits in the register value */ struct pcs_conf_vals { enum pin_config_param param; unsigned val; 
/*
 * Tail of struct pcs_conf_vals (the declaration opens earlier in this
 * file): enable/disable hold the register values that switch this
 * config on or off, and mask selects the bits belonging to the config.
 */
	unsigned enable;
	unsigned disable;
	unsigned mask;
};

/**
 * struct pcs_conf_type - pinconf property name, pinconf param pair
 * @name: property name in DTS file
 * @param: config parameter
 */
struct pcs_conf_type {
	const char *name;
	enum pin_config_param param;
};

/**
 * struct pcs_function - pinctrl function
 * @name: pinctrl function name
 * @vals: register and vals array
 * @nvals: number of entries in vals array
 * @pgnames: array of pingroup names the function uses
 * @npgnames: number of pingroup names the function uses
 * @conf: array of pinconf settings used by the function (optional)
 * @nconfs: number of entries in conf array
 * @node: list node
 */
struct pcs_function {
	const char *name;
	struct pcs_func_vals *vals;
	unsigned nvals;
	const char **pgnames;
	int npgnames;
	struct pcs_conf_vals *conf;
	int nconfs;
	struct list_head node;
};

/**
 * struct pcs_gpiofunc_range - pin ranges with same mux value of gpio function
 * @offset: offset base of pins
 * @npins: number pins with the same mux value of gpio function
 * @gpiofunc: mux value of gpio function
 * @node: list node
 */
struct pcs_gpiofunc_range {
	unsigned offset;
	unsigned npins;
	unsigned gpiofunc;
	struct list_head node;
};

/**
 * struct pcs_data - wrapper for data needed by pinctrl framework
 * @pa: pindesc array
 * @cur: index to current element
 *
 * REVISIT: We should be able to drop this eventually by adding
 * support for registering pins individually in the pinctrl
 * framework for those drivers that don't need a static array.
 */
struct pcs_data {
	struct pinctrl_pin_desc *pa;
	int cur;
};

/**
 * struct pcs_name - register name for a pin
 * @name: name of the pinctrl register
 *
 * REVISIT: We may want to make names optional in the pinctrl
 * framework as some drivers may not care about pin names to
 * avoid kernel bloat. The pin names can be deciphered by user
 * space tools using debugfs based on the register address and
 * SoC packaging information.
 */
struct pcs_name {
	char name[PCS_REG_NAME_LEN];
};

/**
 * struct pcs_soc_data - SoC specific settings
 * @flags: initial SoC specific PCS_FEAT_xxx values
 * @irq: optional interrupt for the controller
 * @irq_enable_mask: optional SoC specific interrupt enable mask
 * @irq_status_mask: optional SoC specific interrupt status mask
 * @rearm: optional SoC specific wake-up rearm function
 */
struct pcs_soc_data {
	unsigned flags;
	int irq;
	unsigned irq_enable_mask;
	unsigned irq_status_mask;
	void (*rearm)(void);
};

/**
 * struct pcs_device - pinctrl device instance
 * @res: resources
 * @base: virtual address of the controller
 * @size: size of the ioremapped area
 * @dev: device entry
 * @pctl: pin controller device
 * @flags: mask of PCS_FEAT_xxx values
 * @socdata: SoC specific settings (IRQ masks, rearm hook)
 * @lock: spinlock for register access
 * @mutex: mutex protecting the lists
 * @width: bits per mux register
 * @fmask: function register mask
 * @fshift: function register shift
 * @foff: value to turn mux off
 * @fmax: max number of functions in fmask
 * @bits_per_mux: true if one register controls more than one pin
 * @bits_per_pin: number of bits per pin
 * @names: array of register names for pins
 * @pins: physical pins on the SoC
 * @pgtree: pingroup index radix tree
 * @ftree: function index radix tree
 * @pingroups: list of pingroups
 * @functions: list of functions
 * @gpiofuncs: list of gpio functions
 * @irqs: list of interrupt registers
 * @chip: chip container for this instance
 * @domain: IRQ domain for this instance
 * @ngroups: number of pingroups
 * @nfuncs: number of functions
 * @desc: pin controller descriptor
 * @read: register read function to use
 * @write: register write function to use
 */
struct pcs_device {
	struct resource *res;
	void __iomem *base;
	unsigned size;
	struct device *dev;
	struct pinctrl_dev *pctl;
	unsigned flags;
#define PCS_QUIRK_SHARED_IRQ	(1 << 2)
#define PCS_FEAT_IRQ		(1 << 1)
#define PCS_FEAT_PINCONF	(1 << 0)
	struct pcs_soc_data socdata;
	raw_spinlock_t lock;
	struct mutex mutex;
	unsigned width;
	unsigned fmask;
	unsigned fshift;
	unsigned foff;
	unsigned fmax;
	bool bits_per_mux;
	unsigned bits_per_pin;
	struct pcs_name *names;
	struct pcs_data pins;
	struct radix_tree_root pgtree;
	struct radix_tree_root ftree;
	struct list_head pingroups;
	struct list_head functions;
	struct list_head gpiofuncs;
	struct list_head irqs;
	struct irq_chip chip;
	struct irq_domain *domain;
	unsigned ngroups;
	unsigned nfuncs;
	struct pinctrl_desc desc;
	unsigned (*read)(void __iomem *reg);
	void (*write)(unsigned val, void __iomem *reg);
};

/* Feature tests; all three expect a local variable named "pcs" in scope. */
#define PCS_QUIRK_HAS_SHARED_IRQ	(pcs->flags & PCS_QUIRK_SHARED_IRQ)
#define PCS_HAS_IRQ		(pcs->flags & PCS_FEAT_IRQ)
#define PCS_HAS_PINCONF		(pcs->flags & PCS_FEAT_PINCONF)

static int pcs_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin,
			   unsigned long *config);
static int pcs_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
			   unsigned long *configs, unsigned num_configs);

/* Bias parameters cycled through by the pull-up/pull-down helpers below */
static enum pin_config_param pcs_bias[] = {
	PIN_CONFIG_BIAS_PULL_DOWN,
	PIN_CONFIG_BIAS_PULL_UP,
};

/*
 * REVISIT: Reads and writes could eventually use regmap or something
 * generic. But at least on omaps, some mux registers are performance
 * critical as they may need to be remuxed every time before and after
 * idle. Adding tests for register access width for every read and
 * write like regmap is doing is not desired, and caching the registers
 * does not help in this case.
*/

/* Width-specific MMIO accessors; one read/write pair is selected at probe
 * time based on pinctrl-single,register-width and stored in pcs->read/write.
 */
static unsigned __maybe_unused pcs_readb(void __iomem *reg)
{
	return readb(reg);
}

static unsigned __maybe_unused pcs_readw(void __iomem *reg)
{
	return readw(reg);
}

static unsigned __maybe_unused pcs_readl(void __iomem *reg)
{
	return readl(reg);
}

static void __maybe_unused pcs_writeb(unsigned val, void __iomem *reg)
{
	writeb(val, reg);
}

static void __maybe_unused pcs_writew(unsigned val, void __iomem *reg)
{
	writew(val, reg);
}

static void __maybe_unused pcs_writel(unsigned val, void __iomem *reg)
{
	writel(val, reg);
}

static int pcs_get_groups_count(struct pinctrl_dev *pctldev)
{
	struct pcs_device *pcs;

	pcs = pinctrl_dev_get_drvdata(pctldev);

	return pcs->ngroups;
}

static const char *pcs_get_group_name(struct pinctrl_dev *pctldev,
					unsigned gselector)
{
	struct pcs_device *pcs;
	struct pcs_pingroup *group;

	pcs = pinctrl_dev_get_drvdata(pctldev);
	group = radix_tree_lookup(&pcs->pgtree, gselector);
	if (!group) {
		dev_err(pcs->dev, "%s could not find pingroup%i\n",
			__func__, gselector);
		return NULL;
	}

	return group->name;
}

static int pcs_get_group_pins(struct pinctrl_dev *pctldev,
					unsigned gselector,
					const unsigned **pins,
					unsigned *npins)
{
	struct pcs_device *pcs;
	struct pcs_pingroup *group;

	pcs = pinctrl_dev_get_drvdata(pctldev);
	group = radix_tree_lookup(&pcs->pgtree, gselector);
	if (!group) {
		dev_err(pcs->dev, "%s could not find pingroup%i\n",
			__func__, gselector);
		return -EINVAL;
	}

	*pins = group->gpins;
	*npins = group->ngpins;

	return 0;
}

static void pcs_pin_dbg_show(struct pinctrl_dev *pctldev,
					struct seq_file *s,
					unsigned pin)
{
	struct pcs_device *pcs;
	unsigned val, mux_bytes;

	pcs = pinctrl_dev_get_drvdata(pctldev);

	/* One mux register per pin in this mode; dump its raw value */
	mux_bytes = pcs->width / BITS_PER_BYTE;
	val = pcs->read(pcs->base + pin * mux_bytes);

	seq_printf(s, "%08x %s " , val, DRIVER_NAME);
}

static void pcs_dt_free_map(struct pinctrl_dev *pctldev,
				struct pinctrl_map *map, unsigned num_maps)
{
	struct pcs_device *pcs;

	pcs = pinctrl_dev_get_drvdata(pctldev);
	devm_kfree(pcs->dev, map);
}

static int pcs_dt_node_to_map(struct pinctrl_dev *pctldev,
				struct device_node *np_config,
				struct pinctrl_map **map, unsigned *num_maps);

static const struct pinctrl_ops pcs_pinctrl_ops = {
	.get_groups_count = pcs_get_groups_count,
	.get_group_name = pcs_get_group_name,
	.get_group_pins = pcs_get_group_pins,
	.pin_dbg_show = pcs_pin_dbg_show,
	.dt_node_to_map = pcs_dt_node_to_map,
	.dt_free_map = pcs_dt_free_map,
};

static int pcs_get_functions_count(struct pinctrl_dev *pctldev)
{
	struct pcs_device *pcs;

	pcs = pinctrl_dev_get_drvdata(pctldev);

	return pcs->nfuncs;
}

static const char *pcs_get_function_name(struct pinctrl_dev *pctldev,
						unsigned fselector)
{
	struct pcs_device *pcs;
	struct pcs_function *func;

	pcs = pinctrl_dev_get_drvdata(pctldev);
	func = radix_tree_lookup(&pcs->ftree, fselector);
	if (!func) {
		dev_err(pcs->dev, "%s could not find function%i\n",
			__func__, fselector);
		return NULL;
	}

	return func->name;
}

static int pcs_get_function_groups(struct pinctrl_dev *pctldev,
					unsigned fselector,
					const char * const **groups,
					unsigned * const ngroups)
{
	struct pcs_device *pcs;
	struct pcs_function *func;

	pcs = pinctrl_dev_get_drvdata(pctldev);
	func = radix_tree_lookup(&pcs->ftree, fselector);
	if (!func) {
		dev_err(pcs->dev, "%s could not find function%i\n",
			__func__, fselector);
		return -EINVAL;
	}
	*groups = func->pgnames;
	*ngroups = func->npgnames;

	return 0;
}

/*
 * Look up the pcs_function currently muxed on @pin via the pinctrl core's
 * per-pin mux_setting. Returns -ENOTSUPP when the pin has no active mux.
 */
static int pcs_get_function(struct pinctrl_dev *pctldev, unsigned pin,
			    struct pcs_function **func)
{
	struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
	struct pin_desc *pdesc = pin_desc_get(pctldev, pin);
	const struct pinctrl_setting_mux *setting;
	unsigned fselector;

	/* If pin is not described in DTS & enabled, mux_setting is NULL. */
	setting = pdesc->mux_setting;
	if (!setting)
		return -ENOTSUPP;
	fselector = setting->func;
	*func = radix_tree_lookup(&pcs->ftree, fselector);
	if (!(*func)) {
		dev_err(pcs->dev, "%s could not find function%i\n",
			__func__, fselector);
		return -ENOTSUPP;
	}
	return 0;
}

static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned fselector,
	unsigned group)
{
	struct pcs_device *pcs;
	struct pcs_function *func;
	int i;

	pcs = pinctrl_dev_get_drvdata(pctldev);
	/* If function mask is null, needn't enable it. */
	if (!pcs->fmask)
		return 0;
	func = radix_tree_lookup(&pcs->ftree, fselector);
	if (!func)
		return -EINVAL;

	dev_dbg(pcs->dev, "enabling %s function%i\n",
		func->name, fselector);

	/* Read-modify-write each mux register under the raw spinlock */
	for (i = 0; i < func->nvals; i++) {
		struct pcs_func_vals *vals;
		unsigned long flags;
		unsigned val, mask;

		vals = &func->vals[i];
		raw_spin_lock_irqsave(&pcs->lock, flags);
		val = pcs->read(vals->reg);

		if (pcs->bits_per_mux)
			mask = vals->mask;
		else
			mask = pcs->fmask;

		val &= ~mask;
		val |= (vals->val & mask);
		pcs->write(val, vals->reg);
		raw_spin_unlock_irqrestore(&pcs->lock, flags);
	}

	return 0;
}

static int pcs_request_gpio(struct pinctrl_dev *pctldev,
			    struct pinctrl_gpio_range *range, unsigned pin)
{
	struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
	struct pcs_gpiofunc_range *frange = NULL;
	struct list_head *pos, *tmp;
	int mux_bytes = 0;
	unsigned data;

	/* If function mask is null, return directly. */
	if (!pcs->fmask)
		return -ENOTSUPP;

	/* Find the gpio range containing @pin and mux it to its gpio value.
	 * NOTE(review): this read-modify-write is done without pcs->lock,
	 * unlike pcs_set_mux() above — looks racy, confirm upstream intent.
	 */
	list_for_each_safe(pos, tmp, &pcs->gpiofuncs) {
		frange = list_entry(pos, struct pcs_gpiofunc_range, node);
		if (pin >= frange->offset + frange->npins
			|| pin < frange->offset)
			continue;
		mux_bytes = pcs->width / BITS_PER_BYTE;
		data = pcs->read(pcs->base + pin * mux_bytes) & ~pcs->fmask;
		data |= frange->gpiofunc;
		pcs->write(data, pcs->base + pin * mux_bytes);
		break;
	}
	return 0;
}

static const struct pinmux_ops pcs_pinmux_ops = {
	.get_functions_count = pcs_get_functions_count,
	.get_function_name = pcs_get_function_name,
	.get_function_groups = pcs_get_function_groups,
	.set_mux = pcs_set_mux,
	.gpio_request_enable = pcs_request_gpio,
};

/* Clear BIAS value */
static void pcs_pinconf_clear_bias(struct pinctrl_dev *pctldev, unsigned pin)
{
	unsigned long config;
	int i;

	for (i = 0; i < ARRAY_SIZE(pcs_bias); i++) {
		config = pinconf_to_config_packed(pcs_bias[i], 0);
		pcs_pinconf_set(pctldev, pin, &config, 1);
	}
}

/*
 * Check whether PIN_CONFIG_BIAS_DISABLE is valid.
 * It depends on the PULL_DOWN & PULL_UP configs both being invalid.
*/ static bool pcs_pinconf_bias_disable(struct pinctrl_dev *pctldev, unsigned pin) { unsigned long config; int i; for (i = 0; i < ARRAY_SIZE(pcs_bias); i++) { config = pinconf_to_config_packed(pcs_bias[i], 0); if (!pcs_pinconf_get(pctldev, pin, &config)) goto out; } return true; out: return false; } static int pcs_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin, unsigned long *config) { struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev); struct pcs_function *func; enum pin_config_param param; unsigned offset = 0, data = 0, i, j, ret; ret = pcs_get_function(pctldev, pin, &func); if (ret) return ret; for (i = 0; i < func->nconfs; i++) { param = pinconf_to_config_param(*config); if (param == PIN_CONFIG_BIAS_DISABLE) { if (pcs_pinconf_bias_disable(pctldev, pin)) { *config = 0; return 0; } else { return -ENOTSUPP; } } else if (param != func->conf[i].param) { continue; } offset = pin * (pcs->width / BITS_PER_BYTE); data = pcs->read(pcs->base + offset) & func->conf[i].mask; switch (func->conf[i].param) { /* 4 parameters */ case PIN_CONFIG_BIAS_PULL_DOWN: case PIN_CONFIG_BIAS_PULL_UP: case PIN_CONFIG_INPUT_SCHMITT_ENABLE: if ((data != func->conf[i].enable) || (data == func->conf[i].disable)) return -ENOTSUPP; *config = 0; break; /* 2 parameters */ case PIN_CONFIG_INPUT_SCHMITT: for (j = 0; j < func->nconfs; j++) { switch (func->conf[j].param) { case PIN_CONFIG_INPUT_SCHMITT_ENABLE: if (data != func->conf[j].enable) return -ENOTSUPP; break; default: break; } } *config = data; break; case PIN_CONFIG_DRIVE_STRENGTH: case PIN_CONFIG_SLEW_RATE: case PIN_CONFIG_LOW_POWER_MODE: default: *config = data; break; } return 0; } return -ENOTSUPP; } static int pcs_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin, unsigned long *configs, unsigned num_configs) { struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev); struct pcs_function *func; unsigned offset = 0, shift = 0, i, data, ret; u16 arg; int j; ret = pcs_get_function(pctldev, pin, &func); if (ret) return 
ret; for (j = 0; j < num_configs; j++) { for (i = 0; i < func->nconfs; i++) { if (pinconf_to_config_param(configs[j]) != func->conf[i].param) continue; offset = pin * (pcs->width / BITS_PER_BYTE); data = pcs->read(pcs->base + offset); arg = pinconf_to_config_argument(configs[j]); switch (func->conf[i].param) { /* 2 parameters */ case PIN_CONFIG_INPUT_SCHMITT: case PIN_CONFIG_DRIVE_STRENGTH: case PIN_CONFIG_SLEW_RATE: case PIN_CONFIG_LOW_POWER_MODE: shift = ffs(func->conf[i].mask) - 1; data &= ~func->conf[i].mask; data |= (arg << shift) & func->conf[i].mask; break; /* 4 parameters */ case PIN_CONFIG_BIAS_DISABLE: pcs_pinconf_clear_bias(pctldev, pin); break; case PIN_CONFIG_BIAS_PULL_DOWN: case PIN_CONFIG_BIAS_PULL_UP: if (arg) pcs_pinconf_clear_bias(pctldev, pin); /* fall through */ case PIN_CONFIG_INPUT_SCHMITT_ENABLE: data &= ~func->conf[i].mask; if (arg) data |= func->conf[i].enable; else data |= func->conf[i].disable; break; default: return -ENOTSUPP; } pcs->write(data, pcs->base + offset); break; } if (i >= func->nconfs) return -ENOTSUPP; } /* for each config */ return 0; } static int pcs_pinconf_group_get(struct pinctrl_dev *pctldev, unsigned group, unsigned long *config) { const unsigned *pins; unsigned npins, old = 0; int i, ret; ret = pcs_get_group_pins(pctldev, group, &pins, &npins); if (ret) return ret; for (i = 0; i < npins; i++) { if (pcs_pinconf_get(pctldev, pins[i], config)) return -ENOTSUPP; /* configs do not match between two pins */ if (i && (old != *config)) return -ENOTSUPP; old = *config; } return 0; } static int pcs_pinconf_group_set(struct pinctrl_dev *pctldev, unsigned group, unsigned long *configs, unsigned num_configs) { const unsigned *pins; unsigned npins; int i, ret; ret = pcs_get_group_pins(pctldev, group, &pins, &npins); if (ret) return ret; for (i = 0; i < npins; i++) { if (pcs_pinconf_set(pctldev, pins[i], configs, num_configs)) return -ENOTSUPP; } return 0; } static void pcs_pinconf_dbg_show(struct pinctrl_dev *pctldev, struct 
seq_file *s, unsigned pin) { } static void pcs_pinconf_group_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned selector) { } static void pcs_pinconf_config_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned long config) { pinconf_generic_dump_config(pctldev, s, config); } static const struct pinconf_ops pcs_pinconf_ops = { .pin_config_get = pcs_pinconf_get, .pin_config_set = pcs_pinconf_set, .pin_config_group_get = pcs_pinconf_group_get, .pin_config_group_set = pcs_pinconf_group_set, .pin_config_dbg_show = pcs_pinconf_dbg_show, .pin_config_group_dbg_show = pcs_pinconf_group_dbg_show, .pin_config_config_dbg_show = pcs_pinconf_config_dbg_show, .is_generic = true, }; /** * pcs_add_pin() - add a pin to the static per controller pin array * @pcs: pcs driver instance * @offset: register offset from base */ static int pcs_add_pin(struct pcs_device *pcs, unsigned offset, unsigned pin_pos) { struct pcs_soc_data *pcs_soc = &pcs->socdata; struct pinctrl_pin_desc *pin; struct pcs_name *pn; int i; i = pcs->pins.cur; if (i >= pcs->desc.npins) { dev_err(pcs->dev, "too many pins, max %i\n", pcs->desc.npins); return -ENOMEM; } if (pcs_soc->irq_enable_mask) { unsigned val; val = pcs->read(pcs->base + offset); if (val & pcs_soc->irq_enable_mask) { dev_dbg(pcs->dev, "irq enabled at boot for pin at %lx (%x), clearing\n", (unsigned long)pcs->res->start + offset, val); val &= ~pcs_soc->irq_enable_mask; pcs->write(val, pcs->base + offset); } } pin = &pcs->pins.pa[i]; pn = &pcs->names[i]; sprintf(pn->name, "%lx.%u", (unsigned long)pcs->res->start + offset, pin_pos); pin->name = pn->name; pin->number = i; pcs->pins.cur++; return i; } /** * pcs_allocate_pin_table() - adds all the pins for the pinctrl driver * @pcs: pcs driver instance * * In case of errors, resources are freed in pcs_free_resources. * * If your hardware needs holes in the address space, then just set * up multiple driver instances. 
*/
static int pcs_allocate_pin_table(struct pcs_device *pcs)
{
	int mux_bytes, nr_pins, i;
	int num_pins_in_register = 0;

	mux_bytes = pcs->width / BITS_PER_BYTE;

	if (pcs->bits_per_mux) {
		/* Several pins share one register; derive bits per pin
		 * from the function mask width.
		 */
		pcs->bits_per_pin = fls(pcs->fmask);
		nr_pins = (pcs->size * BITS_PER_BYTE) / pcs->bits_per_pin;
		num_pins_in_register = pcs->width / pcs->bits_per_pin;
	} else {
		nr_pins = pcs->size / mux_bytes;
	}

	dev_dbg(pcs->dev, "allocating %i pins\n", nr_pins);
	pcs->pins.pa = devm_kzalloc(pcs->dev,
				sizeof(*pcs->pins.pa) * nr_pins,
				GFP_KERNEL);
	if (!pcs->pins.pa)
		return -ENOMEM;

	pcs->names = devm_kzalloc(pcs->dev,
				sizeof(struct pcs_name) * nr_pins,
				GFP_KERNEL);
	if (!pcs->names)
		return -ENOMEM;

	pcs->desc.pins = pcs->pins.pa;
	pcs->desc.npins = nr_pins;

	for (i = 0; i < pcs->desc.npins; i++) {
		unsigned offset;
		int res;
		int byte_num;
		int pin_pos = 0;

		if (pcs->bits_per_mux) {
			/* Map pin index i to its register offset and its
			 * position within that register.
			 */
			byte_num = (pcs->bits_per_pin * i) / BITS_PER_BYTE;
			offset = (byte_num / mux_bytes) * mux_bytes;
			pin_pos = i % num_pins_in_register;
		} else {
			offset = i * mux_bytes;
		}
		res = pcs_add_pin(pcs, offset, pin_pos);
		if (res < 0) {
			dev_err(pcs->dev, "error adding pins: %i\n", res);
			return res;
		}
	}

	return 0;
}

/**
 * pcs_add_function() - adds a new function to the function list
 * @pcs: pcs driver instance
 * @np: device node of the mux entry
 * @name: name of the function
 * @vals: array of mux register value pairs used by the function
 * @nvals: number of mux register value pairs
 * @pgnames: array of pingroup names for the function
 * @npgnames: number of pingroup names
 *
 * Returns the new function, registered in the ftree radix tree under
 * the next free function index, or NULL on allocation failure.
 */
static struct pcs_function *pcs_add_function(struct pcs_device *pcs,
					struct device_node *np,
					const char *name,
					struct pcs_func_vals *vals,
					unsigned nvals,
					const char **pgnames,
					unsigned npgnames)
{
	struct pcs_function *function;

	function = devm_kzalloc(pcs->dev, sizeof(*function), GFP_KERNEL);
	if (!function)
		return NULL;

	function->name = name;
	function->vals = vals;
	function->nvals = nvals;
	function->pgnames = pgnames;
	function->npgnames = npgnames;

	mutex_lock(&pcs->mutex);
	list_add_tail(&function->node, &pcs->functions);
	radix_tree_insert(&pcs->ftree, pcs->nfuncs, function);
	pcs->nfuncs++;
	mutex_unlock(&pcs->mutex);

	return function;
}

/* Unregister @function from the ftree and the functions list */
static void pcs_remove_function(struct pcs_device *pcs,
				struct pcs_function *function)
{
	int i;

	mutex_lock(&pcs->mutex);
	for (i = 0; i < pcs->nfuncs; i++) {
		struct pcs_function *found;

		found = radix_tree_lookup(&pcs->ftree, i);
		if (found == function)
			radix_tree_delete(&pcs->ftree, i);
	}
	list_del(&function->node);
	mutex_unlock(&pcs->mutex);
}

/**
 * pcs_add_pingroup() - add a pingroup to the pingroup list
 * @pcs: pcs driver instance
 * @np: device node of the mux entry
 * @name: name of the pingroup
 * @gpins: array of the pins that belong to the group
 * @ngpins: number of pins in the group
 */
static int pcs_add_pingroup(struct pcs_device *pcs,
					struct device_node *np,
					const char *name,
					int *gpins,
					int ngpins)
{
	struct pcs_pingroup *pingroup;

	pingroup = devm_kzalloc(pcs->dev, sizeof(*pingroup), GFP_KERNEL);
	if (!pingroup)
		return -ENOMEM;

	pingroup->name = name;
	pingroup->np = np;
	pingroup->gpins = gpins;
	pingroup->ngpins = ngpins;

	mutex_lock(&pcs->mutex);
	list_add_tail(&pingroup->node, &pcs->pingroups);
	radix_tree_insert(&pcs->pgtree, pcs->ngroups, pingroup);
	pcs->ngroups++;
	mutex_unlock(&pcs->mutex);

	return 0;
}

/**
 * pcs_get_pin_by_offset() - get a pin index based on the register offset
 * @pcs: pcs driver instance
 * @offset: register offset from the base
 *
 * Note that this is OK as long as the pins are in a static array.
 */
static int pcs_get_pin_by_offset(struct pcs_device *pcs, unsigned offset)
{
	unsigned index;

	if (offset >= pcs->size) {
		dev_err(pcs->dev, "mux offset out of range: 0x%x (0x%x)\n",
			offset, pcs->size);
		return -EINVAL;
	}

	if (pcs->bits_per_mux)
		index = (offset * BITS_PER_BYTE) / pcs->bits_per_pin;
	else
		index = offset / (pcs->width / BITS_PER_BYTE);

	return index;
}

/*
 * check whether data matches enable bits or disable bits
 * Return value: 1 for matching enable bits, 0 for matching disable bits,
 *               and negative value for matching failure.
 */
static int pcs_config_match(unsigned data, unsigned enable, unsigned disable)
{
	int ret = -EINVAL;

	if (data == enable)
		ret = 1;
	else if (data == disable)
		ret = 0;
	return ret;
}

/* Append one pcs_conf_vals entry and advance the cursor */
static void add_config(struct pcs_conf_vals **conf, enum pin_config_param param,
		       unsigned value, unsigned enable, unsigned disable,
		       unsigned mask)
{
	(*conf)->param = param;
	(*conf)->val = value;
	(*conf)->enable = enable;
	(*conf)->disable = disable;
	(*conf)->mask = mask;
	(*conf)++;
}

/* Append one packed pinconf setting and advance the cursor */
static void add_setting(unsigned long **setting, enum pin_config_param param,
			unsigned arg)
{
	**setting = pinconf_to_config_packed(param, arg);
	(*setting)++;
}

/* add pinconf setting with 2 parameters */
static void pcs_add_conf2(struct pcs_device *pcs, struct device_node *np,
			  const char *name, enum pin_config_param param,
			  struct pcs_conf_vals **conf, unsigned long **settings)
{
	unsigned value[2], shift;
	int ret;

	ret = of_property_read_u32_array(np, name, value, 2);
	if (ret)
		return;
	/* set value & mask */
	value[0] &= value[1];
	shift = ffs(value[1]) - 1;
	/* skip enable & disable */
	add_config(conf, param, value[0], 0, 0, value[1]);
	add_setting(settings, param, value[0] >> shift);
}

/* add pinconf setting with 4 parameters */
static void pcs_add_conf4(struct pcs_device *pcs, struct device_node *np,
			  const char *name, enum pin_config_param param,
			  struct pcs_conf_vals **conf, unsigned long **settings)
{
	unsigned value[4];
	int ret;

	/* value to set, enable, disable, mask */
	ret =
of_property_read_u32_array(np, name, value, 4);
	if (ret)
		return;
	if (!value[3]) {
		dev_err(pcs->dev, "mask field of the property can't be 0\n");
		return;
	}
	value[0] &= value[3];
	value[1] &= value[3];
	value[2] &= value[3];
	ret = pcs_config_match(value[0], value[1], value[2]);
	if (ret < 0)
		dev_dbg(pcs->dev, "failed to match enable or disable bits\n");
	/* NOTE(review): on mismatch a negative ret is still packed into the
	 * unsigned setting argument below — confirm this is intentional.
	 */
	add_config(conf, param, value[0], value[1], value[2], value[3]);
	add_setting(settings, param, ret);
}

/*
 * Parse the optional pinconf properties of a mux node into @func and
 * fill in the second (configs) map entry. Returns 0 also when pinconf
 * is unsupported or no properties are present.
 */
static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
			     struct pcs_function *func,
			     struct pinctrl_map **map)

{
	struct pinctrl_map *m = *map;
	int i = 0, nconfs = 0;
	unsigned long *settings = NULL, *s = NULL;
	struct pcs_conf_vals *conf = NULL;
	/* DT properties that carry a value/mask pair */
	struct pcs_conf_type prop2[] = {
		{ "pinctrl-single,drive-strength", PIN_CONFIG_DRIVE_STRENGTH, },
		{ "pinctrl-single,slew-rate", PIN_CONFIG_SLEW_RATE, },
		{ "pinctrl-single,input-schmitt", PIN_CONFIG_INPUT_SCHMITT, },
		{ "pinctrl-single,low-power-mode", PIN_CONFIG_LOW_POWER_MODE, },
	};
	/* DT properties that carry value/enable/disable/mask */
	struct pcs_conf_type prop4[] = {
		{ "pinctrl-single,bias-pullup", PIN_CONFIG_BIAS_PULL_UP, },
		{ "pinctrl-single,bias-pulldown", PIN_CONFIG_BIAS_PULL_DOWN, },
		{ "pinctrl-single,input-schmitt-enable",
			PIN_CONFIG_INPUT_SCHMITT_ENABLE, },
	};

	/* If pinconf isn't supported, don't parse properties in below. */
	if (!PCS_HAS_PINCONF)
		return 0;

	/* calculate how many properties are supported in current node */
	for (i = 0; i < ARRAY_SIZE(prop2); i++) {
		if (of_find_property(np, prop2[i].name, NULL))
			nconfs++;
	}
	for (i = 0; i < ARRAY_SIZE(prop4); i++) {
		if (of_find_property(np, prop4[i].name, NULL))
			nconfs++;
	}
	if (!nconfs)
		return 0;

	func->conf = devm_kzalloc(pcs->dev,
				  sizeof(struct pcs_conf_vals) * nconfs,
				  GFP_KERNEL);
	if (!func->conf)
		return -ENOMEM;
	func->nconfs = nconfs;
	conf = &(func->conf[0]);
	/* the configs map entry follows the mux entry allocated by caller */
	m++;
	settings = devm_kzalloc(pcs->dev, sizeof(unsigned long) * nconfs,
				GFP_KERNEL);
	if (!settings)
		return -ENOMEM;
	s = &settings[0];

	for (i = 0; i < ARRAY_SIZE(prop2); i++)
		pcs_add_conf2(pcs, np, prop2[i].name, prop2[i].param,
			      &conf, &s);
	for (i = 0; i < ARRAY_SIZE(prop4); i++)
		pcs_add_conf4(pcs, np, prop4[i].name, prop4[i].param,
			      &conf, &s);

	m->type = PIN_MAP_TYPE_CONFIGS_GROUP;
	m->data.configs.group_or_pin = np->name;
	m->data.configs.configs = settings;
	m->data.configs.num_configs = nconfs;
	return 0;
}

static void pcs_free_pingroups(struct pcs_device *pcs);

/**
 * smux_parse_one_pinctrl_entry() - parses a device tree mux entry
 * @pcs: pinctrl driver instance
 * @np: device node of the mux entry
 * @map: map entry
 * @num_maps: number of map
 * @pgnames: pingroup names
 *
 * Note that this binding currently supports only sets of one register + value.
 *
 * Also note that this driver tries to avoid understanding pin and function
 * names because of the extra bloat they would cause especially in the case of
 * a large number of pins. This driver just sets what is specified for the board
 * in the .dts file. Further user space debugging tools can be developed to
 * decipher the pin and function names using debugfs.
 *
 * If you are concerned about the boot time, set up the static pins in
 * the bootloader, and only set up selected pins as device tree entries.
*/ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs, struct device_node *np, struct pinctrl_map **map, unsigned *num_maps, const char **pgnames) { struct pcs_func_vals *vals; const __be32 *mux; int size, rows, *pins, index = 0, found = 0, res = -ENOMEM; struct pcs_function *function; mux = of_get_property(np, PCS_MUX_PINS_NAME, &size); if ((!mux) || (size < sizeof(*mux) * 2)) { dev_err(pcs->dev, "bad data for mux %s\n", np->name); return -EINVAL; } size /= sizeof(*mux); /* Number of elements in array */ rows = size / 2; vals = devm_kzalloc(pcs->dev, sizeof(*vals) * rows, GFP_KERNEL); if (!vals) return -ENOMEM; pins = devm_kzalloc(pcs->dev, sizeof(*pins) * rows, GFP_KERNEL); if (!pins) goto free_vals; while (index < size) { unsigned offset, val; int pin; offset = be32_to_cpup(mux + index++); val = be32_to_cpup(mux + index++); vals[found].reg = pcs->base + offset; vals[found].val = val; pin = pcs_get_pin_by_offset(pcs, offset); if (pin < 0) { dev_err(pcs->dev, "could not add functions for %s %ux\n", np->name, offset); break; } pins[found++] = pin; } pgnames[0] = np->name; function = pcs_add_function(pcs, np, np->name, vals, found, pgnames, 1); if (!function) goto free_pins; res = pcs_add_pingroup(pcs, np, np->name, pins, found); if (res < 0) goto free_function; (*map)->type = PIN_MAP_TYPE_MUX_GROUP; (*map)->data.mux.group = np->name; (*map)->data.mux.function = np->name; if (PCS_HAS_PINCONF) { res = pcs_parse_pinconf(pcs, np, function, map); if (res) goto free_pingroups; *num_maps = 2; } else { *num_maps = 1; } return 0; free_pingroups: pcs_free_pingroups(pcs); *num_maps = 1; free_function: pcs_remove_function(pcs, function); free_pins: devm_kfree(pcs->dev, pins); free_vals: devm_kfree(pcs->dev, vals); return res; } #define PARAMS_FOR_BITS_PER_MUX 3 static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs, struct device_node *np, struct pinctrl_map **map, unsigned *num_maps, const char **pgnames) { struct pcs_func_vals *vals; const __be32 *mux; 
int size, rows, *pins, index = 0, found = 0, res = -ENOMEM; int npins_in_row; struct pcs_function *function; mux = of_get_property(np, PCS_MUX_BITS_NAME, &size); if (!mux) { dev_err(pcs->dev, "no valid property for %s\n", np->name); return -EINVAL; } if (size < (sizeof(*mux) * PARAMS_FOR_BITS_PER_MUX)) { dev_err(pcs->dev, "bad data for %s\n", np->name); return -EINVAL; } /* Number of elements in array */ size /= sizeof(*mux); rows = size / PARAMS_FOR_BITS_PER_MUX; npins_in_row = pcs->width / pcs->bits_per_pin; vals = devm_kzalloc(pcs->dev, sizeof(*vals) * rows * npins_in_row, GFP_KERNEL); if (!vals) return -ENOMEM; pins = devm_kzalloc(pcs->dev, sizeof(*pins) * rows * npins_in_row, GFP_KERNEL); if (!pins) goto free_vals; while (index < size) { unsigned offset, val; unsigned mask, bit_pos, val_pos, mask_pos, submask; unsigned pin_num_from_lsb; int pin; offset = be32_to_cpup(mux + index++); val = be32_to_cpup(mux + index++); mask = be32_to_cpup(mux + index++); /* Parse pins in each row from LSB */ while (mask) { bit_pos = ffs(mask); pin_num_from_lsb = bit_pos / pcs->bits_per_pin; mask_pos = ((pcs->fmask) << (bit_pos - 1)); val_pos = val & mask_pos; submask = mask & mask_pos; if ((mask & mask_pos) == 0) { dev_err(pcs->dev, "Invalid mask for %s at 0x%x\n", np->name, offset); break; } mask &= ~mask_pos; if (submask != mask_pos) { dev_warn(pcs->dev, "Invalid submask 0x%x for %s at 0x%x\n", submask, np->name, offset); continue; } vals[found].mask = submask; vals[found].reg = pcs->base + offset; vals[found].val = val_pos; pin = pcs_get_pin_by_offset(pcs, offset); if (pin < 0) { dev_err(pcs->dev, "could not add functions for %s %ux\n", np->name, offset); break; } pins[found++] = pin + pin_num_from_lsb; } } pgnames[0] = np->name; function = pcs_add_function(pcs, np, np->name, vals, found, pgnames, 1); if (!function) goto free_pins; res = pcs_add_pingroup(pcs, np, np->name, pins, found); if (res < 0) goto free_function; (*map)->type = PIN_MAP_TYPE_MUX_GROUP; 
(*map)->data.mux.group = np->name; (*map)->data.mux.function = np->name; if (PCS_HAS_PINCONF) { dev_err(pcs->dev, "pinconf not supported\n"); goto free_pingroups; } *num_maps = 1; return 0; free_pingroups: pcs_free_pingroups(pcs); *num_maps = 1; free_function: pcs_remove_function(pcs, function); free_pins: devm_kfree(pcs->dev, pins); free_vals: devm_kfree(pcs->dev, vals); return res; } /** * pcs_dt_node_to_map() - allocates and parses pinctrl maps * @pctldev: pinctrl instance * @np_config: device tree pinmux entry * @map: array of map entries * @num_maps: number of maps */ static int pcs_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node *np_config, struct pinctrl_map **map, unsigned *num_maps) { struct pcs_device *pcs; const char **pgnames; int ret; pcs = pinctrl_dev_get_drvdata(pctldev); /* create 2 maps. One is for pinmux, and the other is for pinconf. */ *map = devm_kzalloc(pcs->dev, sizeof(**map) * 2, GFP_KERNEL); if (!*map) return -ENOMEM; *num_maps = 0; pgnames = devm_kzalloc(pcs->dev, sizeof(*pgnames), GFP_KERNEL); if (!pgnames) { ret = -ENOMEM; goto free_map; } if (pcs->bits_per_mux) { ret = pcs_parse_bits_in_pinctrl_entry(pcs, np_config, map, num_maps, pgnames); if (ret < 0) { dev_err(pcs->dev, "no pins entries for %s\n", np_config->name); goto free_pgnames; } } else { ret = pcs_parse_one_pinctrl_entry(pcs, np_config, map, num_maps, pgnames); if (ret < 0) { dev_err(pcs->dev, "no pins entries for %s\n", np_config->name); goto free_pgnames; } } return 0; free_pgnames: devm_kfree(pcs->dev, pgnames); free_map: devm_kfree(pcs->dev, *map); return ret; } /** * pcs_free_funcs() - free memory used by functions * @pcs: pcs driver instance */ static void pcs_free_funcs(struct pcs_device *pcs) { struct list_head *pos, *tmp; int i; mutex_lock(&pcs->mutex); for (i = 0; i < pcs->nfuncs; i++) { struct pcs_function *func; func = radix_tree_lookup(&pcs->ftree, i); if (!func) continue; radix_tree_delete(&pcs->ftree, i); } list_for_each_safe(pos, tmp, 
&pcs->functions) { struct pcs_function *function; function = list_entry(pos, struct pcs_function, node); list_del(&function->node); } mutex_unlock(&pcs->mutex); } /** * pcs_free_pingroups() - free memory used by pingroups * @pcs: pcs driver instance */ static void pcs_free_pingroups(struct pcs_device *pcs) { struct list_head *pos, *tmp; int i; mutex_lock(&pcs->mutex); for (i = 0; i < pcs->ngroups; i++) { struct pcs_pingroup *pingroup; pingroup = radix_tree_lookup(&pcs->pgtree, i); if (!pingroup) continue; radix_tree_delete(&pcs->pgtree, i); } list_for_each_safe(pos, tmp, &pcs->pingroups) { struct pcs_pingroup *pingroup; pingroup = list_entry(pos, struct pcs_pingroup, node); list_del(&pingroup->node); } mutex_unlock(&pcs->mutex); } /** * pcs_irq_free() - free interrupt * @pcs: pcs driver instance */ static void pcs_irq_free(struct pcs_device *pcs) { struct pcs_soc_data *pcs_soc = &pcs->socdata; if (pcs_soc->irq < 0) return; if (pcs->domain) irq_domain_remove(pcs->domain); if (PCS_QUIRK_HAS_SHARED_IRQ) free_irq(pcs_soc->irq, pcs_soc); else irq_set_chained_handler(pcs_soc->irq, NULL); } /** * pcs_free_resources() - free memory used by this driver * @pcs: pcs driver instance */ static void pcs_free_resources(struct pcs_device *pcs) { pcs_irq_free(pcs); if (pcs->pctl) pinctrl_unregister(pcs->pctl); pcs_free_funcs(pcs); pcs_free_pingroups(pcs); } #define PCS_GET_PROP_U32(name, reg, err) \ do { \ ret = of_property_read_u32(np, name, reg); \ if (ret) { \ dev_err(pcs->dev, err); \ return ret; \ } \ } while (0); static const struct of_device_id pcs_of_match[]; static int pcs_add_gpio_func(struct device_node *node, struct pcs_device *pcs) { const char *propname = "pinctrl-single,gpio-range"; const char *cellname = "#pinctrl-single,gpio-range-cells"; struct of_phandle_args gpiospec; struct pcs_gpiofunc_range *range; int ret, i; for (i = 0; ; i++) { ret = of_parse_phandle_with_args(node, propname, cellname, i, &gpiospec); /* Do not treat it as error. 
Only treat it as end condition. */ if (ret) { ret = 0; break; } range = devm_kzalloc(pcs->dev, sizeof(*range), GFP_KERNEL); if (!range) { ret = -ENOMEM; break; } range->offset = gpiospec.args[0]; range->npins = gpiospec.args[1]; range->gpiofunc = gpiospec.args[2]; mutex_lock(&pcs->mutex); list_add_tail(&range->node, &pcs->gpiofuncs); mutex_unlock(&pcs->mutex); } return ret; } /** * @reg: virtual address of interrupt register * @hwirq: hardware irq number * @irq: virtual irq number * @node: list node */ struct pcs_interrupt { void __iomem *reg; irq_hw_number_t hwirq; unsigned int irq; struct list_head node; }; /** * pcs_irq_set() - enables or disables an interrupt * * Note that this currently assumes one interrupt per pinctrl * register that is typically used for wake-up events. */ static inline void pcs_irq_set(struct pcs_soc_data *pcs_soc, int irq, const bool enable) { struct pcs_device *pcs; struct list_head *pos; unsigned mask; pcs = container_of(pcs_soc, struct pcs_device, socdata); list_for_each(pos, &pcs->irqs) { struct pcs_interrupt *pcswi; unsigned soc_mask; pcswi = list_entry(pos, struct pcs_interrupt, node); if (irq != pcswi->irq) continue; soc_mask = pcs_soc->irq_enable_mask; raw_spin_lock(&pcs->lock); mask = pcs->read(pcswi->reg); if (enable) mask |= soc_mask; else mask &= ~soc_mask; pcs->write(mask, pcswi->reg); raw_spin_unlock(&pcs->lock); } if (pcs_soc->rearm) pcs_soc->rearm(); } /** * pcs_irq_mask() - mask pinctrl interrupt * @d: interrupt data */ static void pcs_irq_mask(struct irq_data *d) { struct pcs_soc_data *pcs_soc = irq_data_get_irq_chip_data(d); pcs_irq_set(pcs_soc, d->irq, false); } /** * pcs_irq_unmask() - unmask pinctrl interrupt * @d: interrupt data */ static void pcs_irq_unmask(struct irq_data *d) { struct pcs_soc_data *pcs_soc = irq_data_get_irq_chip_data(d); pcs_irq_set(pcs_soc, d->irq, true); } /** * pcs_irq_set_wake() - toggle the suspend and resume wake up * @d: interrupt data * @state: wake-up state * * Note that this should be 
called only for suspend and resume. * For runtime PM, the wake-up events should be enabled by default. */ static int pcs_irq_set_wake(struct irq_data *d, unsigned int state) { if (state) pcs_irq_unmask(d); else pcs_irq_mask(d); return 0; } /** * pcs_irq_handle() - common interrupt handler * @pcs_irq: interrupt data * * Note that this currently assumes we have one interrupt bit per * mux register. This interrupt is typically used for wake-up events. * For more complex interrupts different handlers can be specified. */ static int pcs_irq_handle(struct pcs_soc_data *pcs_soc) { struct pcs_device *pcs; struct list_head *pos; int count = 0; pcs = container_of(pcs_soc, struct pcs_device, socdata); list_for_each(pos, &pcs->irqs) { struct pcs_interrupt *pcswi; unsigned mask; pcswi = list_entry(pos, struct pcs_interrupt, node); raw_spin_lock(&pcs->lock); mask = pcs->read(pcswi->reg); raw_spin_unlock(&pcs->lock); if (mask & pcs_soc->irq_status_mask) { generic_handle_irq(irq_find_mapping(pcs->domain, pcswi->hwirq)); count++; } } return count; } /** * pcs_irq_handler() - handler for the shared interrupt case * @irq: interrupt * @d: data * * Use this for cases where multiple instances of * pinctrl-single share a single interrupt like on omaps. */ static irqreturn_t pcs_irq_handler(int irq, void *d) { struct pcs_soc_data *pcs_soc = d; return pcs_irq_handle(pcs_soc) ? IRQ_HANDLED : IRQ_NONE; } /** * pcs_irq_handle() - handler for the dedicated chained interrupt case * @irq: interrupt * @desc: interrupt descriptor * * Use this if you have a separate interrupt for each * pinctrl-single instance. */ static void pcs_irq_chain_handler(unsigned int irq, struct irq_desc *desc) { struct pcs_soc_data *pcs_soc = irq_desc_get_handler_data(desc); struct irq_chip *chip; chip = irq_get_chip(irq); chained_irq_enter(chip, desc); pcs_irq_handle(pcs_soc); /* REVISIT: export and add handle_bad_irq(irq, desc)? 
*/ chained_irq_exit(chip, desc); return; } static int pcs_irqdomain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { struct pcs_soc_data *pcs_soc = d->host_data; struct pcs_device *pcs; struct pcs_interrupt *pcswi; pcs = container_of(pcs_soc, struct pcs_device, socdata); pcswi = devm_kzalloc(pcs->dev, sizeof(*pcswi), GFP_KERNEL); if (!pcswi) return -ENOMEM; pcswi->reg = pcs->base + hwirq; pcswi->hwirq = hwirq; pcswi->irq = irq; mutex_lock(&pcs->mutex); list_add_tail(&pcswi->node, &pcs->irqs); mutex_unlock(&pcs->mutex); irq_set_chip_data(irq, pcs_soc); irq_set_chip_and_handler(irq, &pcs->chip, handle_level_irq); #ifdef CONFIG_ARM set_irq_flags(irq, IRQF_VALID); #else irq_set_noprobe(irq); #endif return 0; } static const struct irq_domain_ops pcs_irqdomain_ops = { .map = pcs_irqdomain_map, .xlate = irq_domain_xlate_onecell, }; /** * pcs_irq_init_chained_handler() - set up a chained interrupt handler * @pcs: pcs driver instance * @np: device node pointer */ static int pcs_irq_init_chained_handler(struct pcs_device *pcs, struct device_node *np) { struct pcs_soc_data *pcs_soc = &pcs->socdata; const char *name = "pinctrl"; int num_irqs; if (!pcs_soc->irq_enable_mask || !pcs_soc->irq_status_mask) { pcs_soc->irq = -1; return -EINVAL; } INIT_LIST_HEAD(&pcs->irqs); pcs->chip.name = name; pcs->chip.irq_ack = pcs_irq_mask; pcs->chip.irq_mask = pcs_irq_mask; pcs->chip.irq_unmask = pcs_irq_unmask; pcs->chip.irq_set_wake = pcs_irq_set_wake; if (PCS_QUIRK_HAS_SHARED_IRQ) { int res; res = request_irq(pcs_soc->irq, pcs_irq_handler, IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_NO_THREAD, name, pcs_soc); if (res) { pcs_soc->irq = -1; return res; } } else { irq_set_handler_data(pcs_soc->irq, pcs_soc); irq_set_chained_handler(pcs_soc->irq, pcs_irq_chain_handler); } /* * We can use the register offset as the hardirq * number as irq_domain_add_simple maps them lazily. * This way we can easily support more than one * interrupt per function if needed. 
*/ num_irqs = pcs->size; pcs->domain = irq_domain_add_simple(np, num_irqs, 0, &pcs_irqdomain_ops, pcs_soc); if (!pcs->domain) { irq_set_chained_handler(pcs_soc->irq, NULL); return -EINVAL; } return 0; } #ifdef CONFIG_PM static int pinctrl_single_suspend(struct platform_device *pdev, pm_message_t state) { struct pcs_device *pcs; pcs = platform_get_drvdata(pdev); if (!pcs) return -EINVAL; return pinctrl_force_sleep(pcs->pctl); } static int pinctrl_single_resume(struct platform_device *pdev) { struct pcs_device *pcs; pcs = platform_get_drvdata(pdev); if (!pcs) return -EINVAL; return pinctrl_force_default(pcs->pctl); } #endif static int pcs_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; const struct of_device_id *match; struct pcs_pdata *pdata; struct resource *res; struct pcs_device *pcs; const struct pcs_soc_data *soc; int ret; match = of_match_device(pcs_of_match, &pdev->dev); if (!match) return -EINVAL; pcs = devm_kzalloc(&pdev->dev, sizeof(*pcs), GFP_KERNEL); if (!pcs) { dev_err(&pdev->dev, "could not allocate\n"); return -ENOMEM; } pcs->dev = &pdev->dev; raw_spin_lock_init(&pcs->lock); mutex_init(&pcs->mutex); INIT_LIST_HEAD(&pcs->pingroups); INIT_LIST_HEAD(&pcs->functions); INIT_LIST_HEAD(&pcs->gpiofuncs); soc = match->data; pcs->flags = soc->flags; memcpy(&pcs->socdata, soc, sizeof(*soc)); PCS_GET_PROP_U32("pinctrl-single,register-width", &pcs->width, "register width not specified\n"); ret = of_property_read_u32(np, "pinctrl-single,function-mask", &pcs->fmask); if (!ret) { pcs->fshift = ffs(pcs->fmask) - 1; pcs->fmax = pcs->fmask >> pcs->fshift; } else { /* If mask property doesn't exist, function mux is invalid. 
*/ pcs->fmask = 0; pcs->fshift = 0; pcs->fmax = 0; } ret = of_property_read_u32(np, "pinctrl-single,function-off", &pcs->foff); if (ret) pcs->foff = PCS_OFF_DISABLED; pcs->bits_per_mux = of_property_read_bool(np, "pinctrl-single,bit-per-mux"); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(pcs->dev, "could not get resource\n"); return -ENODEV; } pcs->res = devm_request_mem_region(pcs->dev, res->start, resource_size(res), DRIVER_NAME); if (!pcs->res) { dev_err(pcs->dev, "could not get mem_region\n"); return -EBUSY; } pcs->size = resource_size(pcs->res); pcs->base = devm_ioremap(pcs->dev, pcs->res->start, pcs->size); if (!pcs->base) { dev_err(pcs->dev, "could not ioremap\n"); return -ENODEV; } INIT_RADIX_TREE(&pcs->pgtree, GFP_KERNEL); INIT_RADIX_TREE(&pcs->ftree, GFP_KERNEL); platform_set_drvdata(pdev, pcs); switch (pcs->width) { case 8: pcs->read = pcs_readb; pcs->write = pcs_writeb; break; case 16: pcs->read = pcs_readw; pcs->write = pcs_writew; break; case 32: pcs->read = pcs_readl; pcs->write = pcs_writel; break; default: break; } pcs->desc.name = DRIVER_NAME; pcs->desc.pctlops = &pcs_pinctrl_ops; pcs->desc.pmxops = &pcs_pinmux_ops; if (PCS_HAS_PINCONF) pcs->desc.confops = &pcs_pinconf_ops; pcs->desc.owner = THIS_MODULE; ret = pcs_allocate_pin_table(pcs); if (ret < 0) goto free; pcs->pctl = pinctrl_register(&pcs->desc, pcs->dev, pcs); if (IS_ERR(pcs->pctl)) { dev_err(pcs->dev, "could not register single pinctrl driver\n"); ret = PTR_ERR(pcs->pctl); goto free; } ret = pcs_add_gpio_func(np, pcs); if (ret < 0) goto free; pcs->socdata.irq = irq_of_parse_and_map(np, 0); if (pcs->socdata.irq) pcs->flags |= PCS_FEAT_IRQ; /* We still need auxdata for some omaps for PRM interrupts */ pdata = dev_get_platdata(&pdev->dev); if (pdata) { if (pdata->rearm) pcs->socdata.rearm = pdata->rearm; if (pdata->irq) { pcs->socdata.irq = pdata->irq; pcs->flags |= PCS_FEAT_IRQ; } } if (PCS_HAS_IRQ) { ret = pcs_irq_init_chained_handler(pcs, np); if (ret < 0) 
dev_warn(pcs->dev, "initialized with no interrupts\n"); } dev_info(pcs->dev, "%i pins at pa %p size %u\n", pcs->desc.npins, pcs->base, pcs->size); return 0; free: pcs_free_resources(pcs); return ret; } static int pcs_remove(struct platform_device *pdev) { struct pcs_device *pcs = platform_get_drvdata(pdev); if (!pcs) return 0; pcs_free_resources(pcs); return 0; } static const struct pcs_soc_data pinctrl_single_omap_wkup = { .flags = PCS_QUIRK_SHARED_IRQ, .irq_enable_mask = (1 << 14), /* OMAP_WAKEUP_EN */ .irq_status_mask = (1 << 15), /* OMAP_WAKEUP_EVENT */ }; static const struct pcs_soc_data pinctrl_single_dra7 = { .flags = PCS_QUIRK_SHARED_IRQ, .irq_enable_mask = (1 << 24), /* WAKEUPENABLE */ .irq_status_mask = (1 << 25), /* WAKEUPEVENT */ }; static const struct pcs_soc_data pinctrl_single_am437x = { .flags = PCS_QUIRK_SHARED_IRQ, .irq_enable_mask = (1 << 29), /* OMAP_WAKEUP_EN */ .irq_status_mask = (1 << 30), /* OMAP_WAKEUP_EVENT */ }; static const struct pcs_soc_data pinctrl_single = { }; static const struct pcs_soc_data pinconf_single = { .flags = PCS_FEAT_PINCONF, }; static const struct of_device_id pcs_of_match[] = { { .compatible = "ti,omap3-padconf", .data = &pinctrl_single_omap_wkup }, { .compatible = "ti,omap4-padconf", .data = &pinctrl_single_omap_wkup }, { .compatible = "ti,omap5-padconf", .data = &pinctrl_single_omap_wkup }, { .compatible = "ti,dra7-padconf", .data = &pinctrl_single_dra7 }, { .compatible = "ti,am437-padconf", .data = &pinctrl_single_am437x }, { .compatible = "pinctrl-single", .data = &pinctrl_single }, { .compatible = "pinconf-single", .data = &pinconf_single }, { }, }; MODULE_DEVICE_TABLE(of, pcs_of_match); static struct platform_driver pcs_driver = { .probe = pcs_probe, .remove = pcs_remove, .driver = { .name = DRIVER_NAME, .of_match_table = pcs_of_match, }, #ifdef CONFIG_PM .suspend = pinctrl_single_suspend, .resume = pinctrl_single_resume, #endif }; module_platform_driver(pcs_driver); MODULE_AUTHOR("Tony Lindgren 
<tony@atomide.com>"); MODULE_DESCRIPTION("One-register-per-pin type device tree based pinctrl driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
arm-embedded/newlib.debian
newlib/libc/string/wmemmove.c
146
2957
/*
FUNCTION
	<<wmemmove>>---copy wide characters in memory with overlapping areas

ANSI_SYNOPSIS
	#include <wchar.h>
	wchar_t *wmemmove(wchar_t *<[d]>, const wchar_t *<[s]>, size_t <[n]>);

TRAD_SYNOPSIS
	wchar_t *wmemmove(<[d]>, <[s]>, <[n]>
	wchar_t *<[d]>;
	const wchar_t *<[s]>;
	size_t <[n]>;

DESCRIPTION
	The <<wmemmove>> function copies <[n]> wide characters from the object
	pointed to by <[s]> to the object pointed to by <[d]>. Copying takes
	place as if the <[n]> wide characters from the object pointed to by
	<[s]> are first copied into a temporary array of <[n]> wide characters
	that does not overlap the objects pointed to by <[d]> or <[s]>, and
	then the <[n]> wide characters from the temporary array are copied
	into the object pointed to by <[d]>.

	This function is not affected by locale and all wchar_t values are
	treated identically. The null wide character and wchar_t values not
	corresponding to valid characters are not treated specially.

	If <[n]> is zero, <[d]> and <[s]> must be a valid pointers, and the
	function copies zero wide characters.

RETURNS
	The <<wmemmove>> function returns the value of <[d]>.

PORTABILITY
<<wmemmove>> is ISO/IEC 9899/AMD1:1995 (ISO C).

No supporting OS subroutines are required.
*/

/*	$NetBSD: wmemmove.c,v 1.1 2000/12/23 23:14:37 itojun Exp $	*/

/*-
 * Copyright (c)1999 Citrus Project,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * citrus Id: wmemmove.c,v 1.2 2000/12/20 14:08:31 itojun Exp
 */

#include <_ansi.h>
#include <string.h>
#include <wchar.h>

/*
 * Overlap-safe copy of n wide characters from s to d.
 *
 * Delegates to the byte-oriented memmove(), which already implements the
 * as-if-through-a-temporary-buffer semantics required here; the element
 * count is scaled to a byte count first.  Returns d per the ISO C contract.
 */
wchar_t *
_DEFUN (wmemmove, (d, s, n),
	wchar_t * d _AND
	_CONST wchar_t * s _AND
	size_t n)
{
  /* Width of the transfer in bytes, not wide characters.  */
  size_t nbytes = n * sizeof (wchar_t);

  /* memmove() returns its first argument, so the return value of the
     call and d are interchangeable; return d explicitly for clarity.  */
  memmove (d, s, nbytes);
  return d;
}
gpl-2.0
TeamExodus/kernel_lge_hammerhead
fs/exec.c
146
54019
/* * linux/fs/exec.c * * Copyright (C) 1991, 1992 Linus Torvalds */ /* * #!-checking implemented by tytso. */ /* * Demand-loading implemented 01.12.91 - no need to read anything but * the header into memory. The inode of the executable is put into * "current->executable", and page faults do the actual loading. Clean. * * Once more I can proudly say that linux stood up to being changed: it * was less than 2 hours work to get demand-loading completely implemented. * * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead, * current->executable is only used by the procfs. This allows a dispatch * table to check for several different types of binary formats. We keep * trying until we recognize the file or we run out of supported binary * formats. */ #include <linux/slab.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/mm.h> #include <linux/stat.h> #include <linux/fcntl.h> #include <linux/swap.h> #include <linux/string.h> #include <linux/init.h> #include <linux/pagemap.h> #include <linux/perf_event.h> #include <linux/highmem.h> #include <linux/spinlock.h> #include <linux/key.h> #include <linux/personality.h> #include <linux/binfmts.h> #include <linux/utsname.h> #include <linux/pid_namespace.h> #include <linux/module.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/tsacct_kern.h> #include <linux/cn_proc.h> #include <linux/audit.h> #include <linux/tracehook.h> #include <linux/kmod.h> #include <linux/fsnotify.h> #include <linux/fs_struct.h> #include <linux/pipe_fs_i.h> #include <linux/oom.h> #include <linux/compat.h> #include <asm/uaccess.h> #include <asm/mmu_context.h> #include <asm/tlb.h> #include <asm/exec.h> #include <trace/events/task.h> #include "internal.h" #include <trace/events/sched.h> int core_uses_pid; char core_pattern[CORENAME_MAX_SIZE] = "core"; unsigned int core_pipe_limit; int suid_dumpable = 0; struct core_name { char *corename; int used, size; 
}; static atomic_t call_count = ATOMIC_INIT(1); /* The maximal length of core_pattern is also specified in sysctl.c */ static LIST_HEAD(formats); static DEFINE_RWLOCK(binfmt_lock); void __register_binfmt(struct linux_binfmt * fmt, int insert) { BUG_ON(!fmt); write_lock(&binfmt_lock); insert ? list_add(&fmt->lh, &formats) : list_add_tail(&fmt->lh, &formats); write_unlock(&binfmt_lock); } EXPORT_SYMBOL(__register_binfmt); void unregister_binfmt(struct linux_binfmt * fmt) { write_lock(&binfmt_lock); list_del(&fmt->lh); write_unlock(&binfmt_lock); } EXPORT_SYMBOL(unregister_binfmt); static inline void put_binfmt(struct linux_binfmt * fmt) { module_put(fmt->module); } /* * Note that a shared library must be both readable and executable due to * security reasons. * * Also note that we take the address to load from from the file itself. */ SYSCALL_DEFINE1(uselib, const char __user *, library) { struct file *file; char *tmp = getname(library); int error = PTR_ERR(tmp); static const struct open_flags uselib_flags = { .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC, .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN, .intent = LOOKUP_OPEN }; if (IS_ERR(tmp)) goto out; file = do_filp_open(AT_FDCWD, tmp, &uselib_flags, LOOKUP_FOLLOW); putname(tmp); error = PTR_ERR(file); if (IS_ERR(file)) goto out; error = -EINVAL; if (!S_ISREG(file->f_path.dentry->d_inode->i_mode)) goto exit; error = -EACCES; if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) goto exit; fsnotify_open(file); error = -ENOEXEC; if(file->f_op) { struct linux_binfmt * fmt; read_lock(&binfmt_lock); list_for_each_entry(fmt, &formats, lh) { if (!fmt->load_shlib) continue; if (!try_module_get(fmt->module)) continue; read_unlock(&binfmt_lock); error = fmt->load_shlib(file); read_lock(&binfmt_lock); put_binfmt(fmt); if (error != -ENOEXEC) break; } read_unlock(&binfmt_lock); } exit: fput(file); out: return error; } #ifdef CONFIG_MMU /* * The nascent bprm->mm is not visible until exec_mmap() but it can * use a lot of memory, 
account these pages in current->mm temporary * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we * change the counter back via acct_arg_size(0). */ static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages) { struct mm_struct *mm = current->mm; long diff = (long)(pages - bprm->vma_pages); if (!mm || !diff) return; bprm->vma_pages = pages; add_mm_counter(mm, MM_ANONPAGES, diff); } static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, int write) { struct page *page; int ret; #ifdef CONFIG_STACK_GROWSUP if (write) { ret = expand_downwards(bprm->vma, pos); if (ret < 0) return NULL; } #endif ret = get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL); if (ret <= 0) return NULL; if (write) { unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; struct rlimit *rlim; acct_arg_size(bprm, size / PAGE_SIZE); /* * We've historically supported up to 32 pages (ARG_MAX) * of argument strings even with small stacks */ if (size <= ARG_MAX) return page; /* * Limit to 1/4-th the stack size for the argv+env strings. * This ensures that: * - the remaining binfmt code will not run out of stack space, * - the program will have a reasonable amount of stack left * to work from. 
*/ rlim = current->signal->rlim; if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) { put_page(page); return NULL; } } return page; } static void put_arg_page(struct page *page) { put_page(page); } static void free_arg_page(struct linux_binprm *bprm, int i) { } static void free_arg_pages(struct linux_binprm *bprm) { } static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos, struct page *page) { flush_cache_page(bprm->vma, pos, page_to_pfn(page)); } static int __bprm_mm_init(struct linux_binprm *bprm) { int err; struct vm_area_struct *vma = NULL; struct mm_struct *mm = bprm->mm; bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (!vma) return -ENOMEM; down_write(&mm->mmap_sem); vma->vm_mm = mm; /* * Place the stack at the largest stack address the architecture * supports. Later, we'll move this to an appropriate place. We don't * use STACK_TOP because that can depend on attributes which aren't * configured yet. */ BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP); vma->vm_end = STACK_TOP_MAX; vma->vm_start = vma->vm_end - PAGE_SIZE; vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); INIT_LIST_HEAD(&vma->anon_vma_chain); err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1); if (err) goto err; err = insert_vm_struct(mm, vma); if (err) goto err; mm->stack_vm = mm->total_vm = 1; up_write(&mm->mmap_sem); bprm->p = vma->vm_end - sizeof(void *); return 0; err: up_write(&mm->mmap_sem); bprm->vma = NULL; kmem_cache_free(vm_area_cachep, vma); return err; } static bool valid_arg_len(struct linux_binprm *bprm, long len) { return len <= MAX_ARG_STRLEN; } #else static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages) { } static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, int write) { struct page *page; page = bprm->page[pos / PAGE_SIZE]; if (!page && write) { page = alloc_page(GFP_HIGHUSER|__GFP_ZERO); if (!page) 
return NULL; bprm->page[pos / PAGE_SIZE] = page; } return page; } static void put_arg_page(struct page *page) { } static void free_arg_page(struct linux_binprm *bprm, int i) { if (bprm->page[i]) { __free_page(bprm->page[i]); bprm->page[i] = NULL; } } static void free_arg_pages(struct linux_binprm *bprm) { int i; for (i = 0; i < MAX_ARG_PAGES; i++) free_arg_page(bprm, i); } static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos, struct page *page) { } static int __bprm_mm_init(struct linux_binprm *bprm) { bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *); return 0; } static bool valid_arg_len(struct linux_binprm *bprm, long len) { return len <= bprm->p; } #endif /* CONFIG_MMU */ /* * Create a new mm_struct and populate it with a temporary stack * vm_area_struct. We don't have enough context at this point to set the stack * flags, permissions, and offset, so we use temporary values. We'll update * them later in setup_arg_pages(). */ int bprm_mm_init(struct linux_binprm *bprm) { int err; struct mm_struct *mm = NULL; bprm->mm = mm = mm_alloc(); err = -ENOMEM; if (!mm) goto err; err = init_new_context(current, mm); if (err) goto err; err = __bprm_mm_init(bprm); if (err) goto err; return 0; err: if (mm) { bprm->mm = NULL; mmdrop(mm); } return err; } struct user_arg_ptr { #ifdef CONFIG_COMPAT bool is_compat; #endif union { const char __user *const __user *native; #ifdef CONFIG_COMPAT compat_uptr_t __user *compat; #endif } ptr; }; static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr) { const char __user *native; #ifdef CONFIG_COMPAT if (unlikely(argv.is_compat)) { compat_uptr_t compat; if (get_user(compat, argv.ptr.compat + nr)) return ERR_PTR(-EFAULT); return compat_ptr(compat); } #endif if (get_user(native, argv.ptr.native + nr)) return ERR_PTR(-EFAULT); return native; } /* * count() counts the number of strings in array ARGV. 
*/ static int count(struct user_arg_ptr argv, int max) { int i = 0; if (argv.ptr.native != NULL) { for (;;) { const char __user *p = get_user_arg_ptr(argv, i); if (!p) break; if (IS_ERR(p)) return -EFAULT; if (i++ >= max) return -E2BIG; if (fatal_signal_pending(current)) return -ERESTARTNOHAND; cond_resched(); } } return i; } /* * 'copy_strings()' copies argument/environment strings from the old * processes's memory to the new process's stack. The call to get_user_pages() * ensures the destination page is created and not swapped out. */ static int copy_strings(int argc, struct user_arg_ptr argv, struct linux_binprm *bprm) { struct page *kmapped_page = NULL; char *kaddr = NULL; unsigned long kpos = 0; int ret; while (argc-- > 0) { const char __user *str; int len; unsigned long pos; ret = -EFAULT; str = get_user_arg_ptr(argv, argc); if (IS_ERR(str)) goto out; len = strnlen_user(str, MAX_ARG_STRLEN); if (!len) goto out; ret = -E2BIG; if (!valid_arg_len(bprm, len)) goto out; /* We're going to work our way backwords. 
*/ pos = bprm->p; str += len; bprm->p -= len; while (len > 0) { int offset, bytes_to_copy; if (fatal_signal_pending(current)) { ret = -ERESTARTNOHAND; goto out; } cond_resched(); offset = pos % PAGE_SIZE; if (offset == 0) offset = PAGE_SIZE; bytes_to_copy = offset; if (bytes_to_copy > len) bytes_to_copy = len; offset -= bytes_to_copy; pos -= bytes_to_copy; str -= bytes_to_copy; len -= bytes_to_copy; if (!kmapped_page || kpos != (pos & PAGE_MASK)) { struct page *page; page = get_arg_page(bprm, pos, 1); if (!page) { ret = -E2BIG; goto out; } if (kmapped_page) { flush_kernel_dcache_page(kmapped_page); kunmap(kmapped_page); put_arg_page(kmapped_page); } kmapped_page = page; kaddr = kmap(kmapped_page); kpos = pos & PAGE_MASK; flush_arg_page(bprm, kpos, kmapped_page); } if (copy_from_user(kaddr+offset, str, bytes_to_copy)) { ret = -EFAULT; goto out; } } } ret = 0; out: if (kmapped_page) { flush_kernel_dcache_page(kmapped_page); kunmap(kmapped_page); put_arg_page(kmapped_page); } return ret; } /* * Like copy_strings, but get argv and its values from kernel memory. */ int copy_strings_kernel(int argc, const char *const *__argv, struct linux_binprm *bprm) { int r; mm_segment_t oldfs = get_fs(); struct user_arg_ptr argv = { .ptr.native = (const char __user *const __user *)__argv, }; set_fs(KERNEL_DS); r = copy_strings(argc, argv, bprm); set_fs(oldfs); return r; } EXPORT_SYMBOL(copy_strings_kernel); #ifdef CONFIG_MMU /* * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once * the binfmt code determines where the new stack should reside, we shift it to * its final location. The process proceeds as follows: * * 1) Use shift to calculate the new vma endpoints. * 2) Extend vma to cover both the old and new ranges. This ensures the * arguments passed to subsequent functions are consistent. * 3) Move vma's page tables to the new range. * 4) Free up any cleared pgd range. * 5) Shrink the vma to cover only the new range. 
*/ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) { struct mm_struct *mm = vma->vm_mm; unsigned long old_start = vma->vm_start; unsigned long old_end = vma->vm_end; unsigned long length = old_end - old_start; unsigned long new_start = old_start - shift; unsigned long new_end = old_end - shift; struct mmu_gather tlb; BUG_ON(new_start > new_end); /* * ensure there are no vmas between where we want to go * and where we are */ if (vma != find_vma(mm, new_start)) return -EFAULT; /* * cover the whole range: [new_start, old_end) */ if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL)) return -ENOMEM; /* * move the page tables downwards, on failure we rely on * process cleanup to remove whatever mess we made. */ if (length != move_page_tables(vma, old_start, vma, new_start, length)) return -ENOMEM; lru_add_drain(); tlb_gather_mmu(&tlb, mm, 0); if (new_end > old_start) { /* * when the old and new regions overlap clear from new_end. */ free_pgd_range(&tlb, new_end, old_end, new_end, vma->vm_next ? vma->vm_next->vm_start : 0); } else { /* * otherwise, clean from old_start; this is done to not touch * the address space in [new_end, old_start) some architectures * have constraints on va-space that make this illegal (IA64) - * for the others its just a little faster. */ free_pgd_range(&tlb, old_start, old_end, new_end, vma->vm_next ? vma->vm_next->vm_start : 0); } tlb_finish_mmu(&tlb, new_end, old_end); /* * Shrink the vma to just the new range. Always succeeds. */ vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL); return 0; } /* * Finalizes the stack vm_area_struct. The flags and permissions are updated, * the stack is optionally relocated, and some extra space is added. 
 */
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long ret;
	unsigned long stack_shift;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = bprm->vma;
	struct vm_area_struct *prev = NULL;
	unsigned long vm_flags;
	unsigned long stack_base;
	unsigned long stack_size;
	unsigned long stack_expand;
	unsigned long rlim_stack;

#ifdef CONFIG_STACK_GROWSUP
	/* Limit stack size to 1GB */
	stack_base = rlimit_max(RLIMIT_STACK);
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;

	/* Make sure we didn't let the argument array grow too large. */
	if (vma->vm_end - vma->vm_start > stack_base)
		return -ENOMEM;

	stack_base = PAGE_ALIGN(stack_top - stack_base);

	stack_shift = vma->vm_start - stack_base;
	mm->arg_start = bprm->p - stack_shift;
	bprm->p = vma->vm_end - stack_shift;
#else
	stack_top = arch_align_stack(stack_top);
	stack_top = PAGE_ALIGN(stack_top);

	if (unlikely(stack_top < mmap_min_addr) ||
	    unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
		return -ENOMEM;

	stack_shift = vma->vm_end - stack_top;

	bprm->p -= stack_shift;
	mm->arg_start = bprm->p;
#endif

	/* keep the loader/exec markers consistent with the relocated stack */
	if (bprm->loader)
		bprm->loader -= stack_shift;
	bprm->exec -= stack_shift;

	down_write(&mm->mmap_sem);
	vm_flags = VM_STACK_FLAGS;

	/*
	 * Adjust stack execute permissions; explicitly enable for
	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
	 * (arch default) otherwise.
	 */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		vm_flags |= VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		vm_flags &= ~VM_EXEC;
	vm_flags |= mm->def_flags;
	vm_flags |= VM_STACK_INCOMPLETE_SETUP;

	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
			vm_flags);
	if (ret)
		goto out_unlock;
	BUG_ON(prev != vma);

	/* Move stack pages down in memory. */
	if (stack_shift) {
		ret = shift_arg_pages(vma, stack_shift);
		if (ret)
			goto out_unlock;
	}

	/* mprotect_fixup is overkill to remove the temporary stack flags */
	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;

	stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
	stack_size = vma->vm_end - vma->vm_start;
	/*
	 * Align this down to a page boundary as expand_stack
	 * will align it up.
	 */
	rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_start + rlim_stack;
	else
		stack_base = vma->vm_end + stack_expand;
#else
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_end - rlim_stack;
	else
		stack_base = vma->vm_start - stack_expand;
#endif
	current->mm->start_stack = bprm->p;
	ret = expand_stack(vma, stack_base);
	if (ret)
		ret = -EFAULT;

out_unlock:
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(setup_arg_pages);

#endif /* CONFIG_MMU */

/*
 * Open an executable for exec: O_RDONLY with the __FMODE_EXEC hint,
 * requires a regular file on a mount without MNT_NOEXEC, and takes
 * deny_write_access() so the file cannot be modified while it runs.
 * Returns the file or an ERR_PTR.
 */
struct file *open_exec(const char *name)
{
	struct file *file;
	int err;
	static const struct open_flags open_exec_flags = {
		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
		.acc_mode = MAY_EXEC | MAY_OPEN,
		.intent = LOOKUP_OPEN
	};

	file = do_filp_open(AT_FDCWD, name, &open_exec_flags, LOOKUP_FOLLOW);
	if (IS_ERR(file))
		goto out;

	err = -EACCES;
	if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
		goto exit;

	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	fsnotify_open(file);

	err = deny_write_access(file);
	if (err)
		goto exit;

out:
	return file;

exit:
	fput(file);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(open_exec);

/*
 * Read 'count' bytes at 'offset' from 'file' into the kernel buffer
 * 'addr'.  Returns the number of bytes read or a negative error.
 */
int kernel_read(struct file *file, loff_t offset,
		char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}
EXPORT_SYMBOL(kernel_read);

/*
 * Install the new mm as current's address space and drop the old one.
 * Fails with -EINTR if a coredump is in progress on the old mm.
 */
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	sync_mm_rss(old_mm);
	mm_release(tsk, old_mm);

	if (old_mm) {
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec.  We must hold mmap_sem around
		 * checking core_state and changing tsk->mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_state)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
		mm_update_next_owner(old_mm);
		mmput(old_mm);
		return 0;
	}
	/* old mm was NULL (kernel thread): drop the borrowed active_mm */
	mmdrop(active_mm);
	return 0;
}

/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 */
	spin_lock_irq(lock);
	if (signal_group_exit(sig)) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		return -EAGAIN;
	}

	sig->group_exit_task = tsk;
	sig->notify_count = zap_other_threads(tsk);
	if (!thread_group_leader(tsk))
		sig->notify_count--;

	/* sleep until every other thread has reported its exit */
	while (sig->notify_count) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		struct task_struct *leader = tsk->group_leader;

		sig->notify_count = -1;	/* for exit_notify() */
		for (;;) {
			write_lock_irq(&tasklist_lock);
			if (likely(leader->exit_state))
				break;
			__set_current_state(TASK_UNINTERRUPTIBLE);
			write_unlock_irq(&tasklist_lock);
			schedule();
		}

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead.  But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;

		BUG_ON(!same_thread_group(leader, tsk));
		BUG_ON(has_group_leader_pid(tsk));
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 * Note: The old leader also uses this pid until release_task
		 *       is called.  Odd but simple and correct.
		 */
		detach_pid(tsk, PIDTYPE_PID);
		tsk->pid = leader->pid;
		attach_pid(tsk, PIDTYPE_PID,  task_pid(leader));
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);

		list_replace_rcu(&leader->tasks, &tsk->tasks);
		list_replace_init(&leader->sibling, &tsk->sibling);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;
		leader->exit_signal = -1;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;

		/*
		 * We are going to release_task()->ptrace_unlink() silently,
		 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
		 * the tracer won't block again waiting for this thread.
		 */
		if (unlikely(leader->ptrace))
			__wake_up_parent(leader, leader->parent);
		write_unlock_irq(&tasklist_lock);

		release_task(leader);
	}

	sig->group_exit_task = NULL;
	sig->notify_count = 0;

no_thread_group:
	/* we have changed execution domain */
	tsk->exit_signal = SIGCHLD;

	exit_itimers(sig);
	flush_itimer_signals();

	if (atomic_read(&oldsighand->count) != 1) {
		struct sighand_struct *newsighand;
		/*
		 * This ->sighand is shared with the CLONE_SIGHAND
		 * but not CLONE_THREAD task, switch to the new one.
		 */
		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
		if (!newsighand)
			return -ENOMEM;

		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		rcu_assign_pointer(tsk->sighand, newsighand);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}

	BUG_ON(!thread_group_leader(tsk));
	return 0;
}

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */
static void flush_old_files(struct files_struct * files)
{
	long j = -1;
	struct fdtable *fdt;

	/* close every fd marked close-on-exec, one bitmap word at a time;
	 * the lock is dropped around sys_close() and the fdtable refetched
	 * afterwards since it may have been resized meanwhile */
	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		fdt = files_fdtable(files);
		if (i >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[j];
		if (!set)
			continue;
		fdt->close_on_exec[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++,set >>= 1) {
			if (set & 1) {
				sys_close(i);
			}
		}
		spin_lock(&files->file_lock);

	}
	spin_unlock(&files->file_lock);
}

/*
 * Copy the task's comm into 'buf' under the task lock.
 */
char *get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	strncpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
	return buf;
}
EXPORT_SYMBOL_GPL(get_task_comm);

/*
 * Set the task's comm from 'buf' and emit the rename tracepoint and
 * perf comm event.
 */
void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);
	trace_task_rename(tsk, buf);
	/*
	 * Threads may access current->comm without holding
	 * the task lock, so write the string carefully.
	 * Readers without a lock may see incomplete new
	 * names but are safe from non-terminating string reads.
	 */
	memset(tsk->comm, 0, TASK_COMM_LEN);
	wmb();	/* zero-fill must be visible before the new name is written */
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
	perf_event_comm(tsk);
}

/*
 * Derive a task name from a path: keep only the component after the
 * last '/', truncated to at most len-1 characters plus a NUL.
 */
static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
{
	int i, ch;

	/* Copies the binary name from after last slash */
	for (i = 0; (ch = *(fn++)) != '\0';) {
		if (ch == '/')
			i = 0; /* overwrite what we wrote */
		else
			if (i < len - 1)
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
}

/*
 * Tear down the old program state: become a single-threaded process,
 * switch to the new mm, and reset thread state.  After the exec_mmap()
 * succeeds there is no way back to the old image.
 */
int flush_old_exec(struct linux_binprm * bprm)
{
	int retval;

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	set_mm_exe_file(bprm->mm, bprm->file);

	filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
	/*
	 * Release all of the old mmap stuff
	 */
	acct_arg_size(bprm, 0);
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto out;

	bprm->mm = NULL;		/* We're using it now */

	set_fs(USER_DS);
	current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD);
	flush_thread();
	current->personality &= ~bprm->per_clear;

	return 0;

out:
	return retval;
}
EXPORT_SYMBOL(flush_old_exec);

/*
 * If the interpreter's real uid cannot read the file, forbid dumping
 * its contents in a core file.
 */
void would_dump(struct linux_binprm *bprm, struct file *file)
{
	if (inode_permission(file->f_path.dentry->d_inode, MAY_READ) < 0)
		bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
}
EXPORT_SYMBOL(would_dump);

/*
 * Finish setting up the new image: dumpability, comm, task size,
 * pdeath_signal and signal/file cleanups.  Called by binfmt handlers
 * after flush_old_exec() — past the point of no return.
 */
void setup_new_exec(struct linux_binprm * bprm)
{
	arch_pick_mmap_layout(current->mm);

	/* This is the point of no return */
	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current_euid() == current_uid() && current_egid() == current_gid())
		set_dumpable(current->mm, 1);
	else
		set_dumpable(current->mm, suid_dumpable);

	set_task_comm(current, bprm->tcomm);

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc
	 */
	current->mm->task_size = TASK_SIZE;

	/* install the new credentials */
	if (bprm->cred->uid != current_euid() ||
	    bprm->cred->gid != current_egid()) {
		/* credentials change across exec: don't leak a parent-chosen
		 * death signal into the more-privileged image */
		current->pdeath_signal = 0;
	} else {
		would_dump(bprm, bprm->file);
		if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)
			set_dumpable(current->mm, suid_dumpable);
	}

	/*
	 * Flush performance counters when crossing a
	 * security domain:
	 */
	if (!get_dumpable(current->mm))
		perf_event_exit_task(current);

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;
			
	flush_signal_handlers(current, 0);
	flush_old_files(current->files);
}
EXPORT_SYMBOL(setup_new_exec);

/*
 * Prepare credentials and lock ->cred_guard_mutex.
 * install_exec_creds() commits the new creds and drops the lock.
 * Or, if exec fails before, free_bprm() should release ->cred and
 * unlock.
 */
int prepare_bprm_creds(struct linux_binprm *bprm)
{
	if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
		return -ERESTARTNOINTR;

	bprm->cred = prepare_exec_creds();
	if (likely(bprm->cred))
		return 0;

	mutex_unlock(&current->signal->cred_guard_mutex);
	return -ENOMEM;
}

/*
 * Release a linux_binprm.  A non-NULL ->cred means the exec failed
 * before install_exec_creds(), so the creds are aborted and the
 * cred_guard_mutex taken in prepare_bprm_creds() is dropped here.
 */
void free_bprm(struct linux_binprm *bprm)
{
	free_arg_pages(bprm);
	if (bprm->cred) {
		mutex_unlock(&current->signal->cred_guard_mutex);
		abort_creds(bprm->cred);
	}
	kfree(bprm);
}

/*
 * install the new credentials for this executable
 */
void install_exec_creds(struct linux_binprm *bprm)
{
	security_bprm_committing_creds(bprm);

	commit_creds(bprm->cred);
	bprm->cred = NULL;	/* ownership passed to commit_creds() */
	/*
	 * cred_guard_mutex must be held at least to this point to prevent
	 * ptrace_attach() from altering our determination of the task's
	 * credentials; any time after this it may be unlocked.
	 */
	security_bprm_committed_creds(bprm);
	mutex_unlock(&current->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(install_exec_creds);

/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold ->cred_guard_mutex to protect against
 *   PTRACE_ATTACH or seccomp thread-sync
 *
 * Returns < 0 on error, 1 when this exec claimed fs->in_exec (the
 * caller must clear it on failure), 0 when fs is shared outside the
 * thread group (LSM_UNSAFE_SHARE set instead).
 */
static int check_unsafe_exec(struct linux_binprm *bprm)
{
	struct task_struct *p = current, *t;
	unsigned n_fs;
	int res = 0;

	if (p->ptrace) {
		if (p->ptrace & PT_PTRACE_CAP)
			bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
		else
			bprm->unsafe |= LSM_UNSAFE_PTRACE;
	}

	/*
	 * This isn't strictly necessary, but it makes it harder for LSMs to
	 * mess up.
	 */
	if (task_no_new_privs(current))
		bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;

	/* count how many of our own threads share our fs_struct */
	n_fs = 1;
	spin_lock(&p->fs->lock);
	rcu_read_lock();
	for (t = next_thread(p); t != p; t = next_thread(t)) {
		if (t->fs == p->fs)
			n_fs++;
	}
	rcu_read_unlock();

	if (p->fs->users > n_fs) {
		/* fs is shared with someone outside this thread group */
		bprm->unsafe |= LSM_UNSAFE_SHARE;
	} else {
		res = -EAGAIN;
		if (!p->fs->in_exec) {
			p->fs->in_exec = 1;
			res = 1;
		}
	}
	spin_unlock(&p->fs->lock);

	return res;
}

/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	umode_t mode;
	struct inode * inode = bprm->file->f_path.dentry->d_inode;
	int retval;

	mode = inode->i_mode;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	/* clear any previous set[ug]id data from a previous binary */
	bprm->cred->euid = current_euid();
	bprm->cred->egid = current_egid();

	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
	    !task_no_new_privs(current)) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->euid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->egid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set_creds(bprm);
	if (retval)
		return retval;
	bprm->cred_prepared = 1;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}
EXPORT_SYMBOL(prepare_binprm);

/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
	int ret = 0;
	unsigned long offset;
	char *kaddr;
	struct page *page;

	if (!bprm->argc)
		return 0;

	do {
		offset = bprm->p & ~PAGE_MASK;
		page = get_arg_page(bprm, bprm->p, 0);
		if (!page) {
			ret = -EFAULT;
			goto out;
		}
		kaddr = kmap_atomic(page);

		/* scan forward within this page for the NUL terminator */
		for (; offset < PAGE_SIZE && kaddr[offset];
				offset++, bprm->p++)
			;

		kunmap_atomic(kaddr);
		put_arg_page(page);

		/* string spanned the whole page: the previous page is done */
		if (offset == PAGE_SIZE)
			free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
	} while (offset == PAGE_SIZE);

	/* skip over the '\0' itself */
	bprm->p++;
	bprm->argc--;
	ret = 0;

out:
	return ret;
}
EXPORT_SYMBOL(remove_arg_zero);

/*
 * cycle the list of binary formats handler, until one recognizes the image
 */
int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
{
	unsigned int depth = bprm->recursion_depth;
	int try,retval;
	struct linux_binfmt *fmt;
	pid_t old_pid, old_vpid;

	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	retval = audit_bprm(bprm);
	if (retval)
		return retval;

	/* Need to fetch pid before load_binary changes it */
	old_pid = current->pid;
	rcu_read_lock();
	old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
	rcu_read_unlock();

	retval = -ENOENT;
	/* second pass (try==1) runs after a binfmt module load attempt */
	for (try=0; try<2; try++) {
		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			/* pin the module; the lock is dropped across fn() */
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			/*
			 * Restore the depth counter to its starting value
			 * in this call, so we don't have to rely on every
			 * load_binary function to restore it on return.
			 */
			bprm->recursion_depth = depth;
			if (retval >= 0) {
				if (depth == 0) {
					trace_sched_process_exec(current, old_pid, bprm);
					ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
				}
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				proc_exec_connector(current);
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
#ifdef CONFIG_MODULES
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			if (try)
				break; /* -ENOEXEC */
			/* ask modprobe for a binfmt keyed on bytes 2-3 */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
		}
#else
		break;
#endif
	}
	return retval;
}
EXPORT_SYMBOL(search_binary_handler);

/*
 * sys_execve() executes a new program.
 */
static int do_execve_common(const char *filename,
				struct user_arg_ptr argv,
				struct user_arg_ptr envp,
				struct pt_regs *regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	struct files_struct *displaced;
	bool clear_in_exec;
	int retval;
	const struct cred *cred = current_cred();

	/*
	 * We move the actual failure in case of RLIMIT_NPROC excess from
	 * set*uid() to execve() because too many poorly written programs
	 * don't check setuid() return code.  Here we additionally recheck
	 * whether NPROC limit is still exceeded.
	 */
	if ((current->flags & PF_NPROC_EXCEEDED) &&
	    atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
		retval = -EAGAIN;
		goto out_ret;
	}

	/* We're below the limit (still or again), so we don't want to make
	 * further execve() calls fail. */
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = unshare_files(&displaced);
	if (retval)
		goto out_ret;

	retval = -ENOMEM;
	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_files;

	retval = prepare_bprm_creds(bprm);
	if (retval)
		goto out_free;

	retval = check_unsafe_exec(bprm);
	if (retval < 0)
		goto out_free;
	/* 1 means we own fs->in_exec and must clear it on failure */
	clear_in_exec = retval;
	current->in_execve = 1;

	file = open_exec(filename);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_unmark;

	sched_exec();

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;

	retval = bprm_mm_init(bprm);
	if (retval)
		goto out_file;

	bprm->argc = count(argv, MAX_ARG_STRINGS);
	if ((retval = bprm->argc) < 0)
		goto out;

	bprm->envc = count(envp, MAX_ARG_STRINGS);
	if ((retval = bprm->envc) < 0)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	/* filename goes on the stack first (deepest), then env, then argv */
	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;

	retval = search_binary_handler(bprm,regs);
	if (retval < 0)
		goto out;

	/* execve succeeded */
	current->fs->in_exec = 0;
	current->in_execve = 0;
	acct_update_integrals(current);
	free_bprm(bprm);
	if (displaced)
		put_files_struct(displaced);
	return retval;

out:
	if (bprm->mm) {
		acct_arg_size(bprm, 0);
		mmput(bprm->mm);
	}

out_file:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}

out_unmark:
	if (clear_in_exec)
		current->fs->in_exec = 0;
	current->in_execve = 0;

out_free:
	free_bprm(bprm);

out_files:
	if (displaced)
		reset_files_struct(displaced);
out_ret:
	return retval;
}

/* native-pointer entry point for execve */
int do_execve(const char *filename,
	const char __user *const __user *__argv,
	const char __user *const __user *__envp,
	struct pt_regs *regs)
{
	struct user_arg_ptr argv = { .ptr.native = __argv };
	struct user_arg_ptr envp = { .ptr.native = __envp };
	return do_execve_common(filename, argv, envp, regs);
}

#ifdef CONFIG_COMPAT
/* 32-bit compat entry point: argv/envp hold compat_uptr_t entries */
int compat_do_execve(char *filename,
	compat_uptr_t __user *__argv,
	compat_uptr_t __user *__envp,
	struct pt_regs *regs)
{
	struct user_arg_ptr argv = {
		.is_compat = true,
		.ptr.compat = __argv,
	};
	struct user_arg_ptr envp = {
		.is_compat = true,
		.ptr.compat = __envp,
	};
	return do_execve_common(filename, argv, envp, regs);
}
#endif

/*
 * Swap the mm's binfmt, moving the module reference from the old
 * handler to the new one.
 */
void set_binfmt(struct linux_binfmt *new)
{
	struct mm_struct *mm = current->mm;

	if (mm->binfmt)
		module_put(mm->binfmt->module);

	mm->binfmt = new;
	if (new)
		__module_get(new->module);
}
EXPORT_SYMBOL(set_binfmt);

/*
 * Grow the corename buffer by another CORENAME_MAX_SIZE step.
 * On krealloc failure the old buffer is freed and -ENOMEM returned.
 */
static int expand_corename(struct core_name *cn)
{
	char *old_corename = cn->corename;

	cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
	cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);

	if (!cn->corename) {
		kfree(old_corename);
		return -ENOMEM;
	}

	return 0;
}

/*
 * printf-append onto cn->corename, expanding the buffer when the
 * formatted length would not fit.
 *
 * NOTE(review): after a single expand_corename() there is no re-check
 * that 'need' now fits before vsnprintf(cur, need + 1, ...) — for a very
 * large 'need' this looks like it could still overflow the buffer.
 * Upstream later reworked this into cn_vprintf() with a retry loop;
 * confirm against the stable tree before relying on this path.
 */
static int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	char *cur;
	int need;
	int ret;
	va_list arg;

	/* first pass: measure only (NULL buffer, size 0) */
	va_start(arg, fmt);
	need = vsnprintf(NULL, 0, fmt, arg);
	va_end(arg);

	if (likely(need < cn->size - cn->used - 1))
		goto out_printf;

	ret = expand_corename(cn);
	if (ret)
		goto expand_fail;

out_printf:
	cur = cn->corename + cn->used;
	va_start(arg, fmt);
	vsnprintf(cur, need + 1, fmt, arg);
	va_end(arg);
	cn->used += need;
	return 0;

expand_fail:
	return ret;
}

/* replace '/' with '!' so a formatted field cannot escape the dump dir */
static void cn_escape(char *str)
{
	for (; *str; str++)
		if (*str == '/')
			*str = '!';
}

/*
 * Append the dumping task's executable path (escaped) to the corename;
 * falls back to "comm (path unknown)" when the mm has no exe_file.
 */
static int cn_print_exe_file(struct core_name *cn)
{
	struct file *exe_file;
	char *pathbuf, *path;
	int ret;

	exe_file = get_mm_exe_file(current->mm);
	if (!exe_file) {
		char *commstart = cn->corename + cn->used;
		ret = cn_printf(cn, "%s (path unknown)", current->comm);
		cn_escape(commstart);
		return ret;
	}

	pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
	if (!pathbuf) {
		ret = -ENOMEM;
		goto put_exe_file;
	}

	path = d_path(&exe_file->f_path, pathbuf, PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		goto free_buf;
	}

	cn_escape(path);

	ret = cn_printf(cn, "%s", path);

free_buf:
	kfree(pathbuf);
put_exe_file:
	fput(exe_file);
	return ret;
}

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(struct core_name *cn, long signr)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');	/* leading '|' = pipe to a helper */
	int pid_in_pattern = 0;
	int err = 0;

	cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
	cn->corename = kmalloc(cn->size, GFP_KERNEL);
	cn->used = 0;

	if (!cn->corename)
		return -ENOMEM;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (*pat_ptr == 0)
				goto out;
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
					      task_tgid_vnr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%d", cred->uid);
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%d", cred->gid);
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%ld", signr);
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				err = cn_printf(cn, "%lu", tv.tv_sec);
				break;
			}
			/* hostname */
			case 'h': {
				char *namestart = cn->corename + cn->used;
				down_read(&uts_sem);
				err = cn_printf(cn, "%s",
					      utsname()->nodename);
				up_read(&uts_sem);
				cn_escape(namestart);
				break;
			}
			/* executable */
			case 'e': {
				char *commstart = cn->corename + cn->used;
				err = cn_printf(cn, "%s", current->comm);
				cn_escape(commstart);
				break;
			}
			case 'E':
				err = cn_print_exe_file(cn);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
					      rlimit(RLIMIT_CORE));
				break;
			default:
				break;
			}
			++pat_ptr;
		}

		/* NOTE(review): on error, cn->corename allocated above is
		 * not freed here — presumably the caller is responsible;
		 * confirm do_coredump()'s error paths free it. */
		if (err)
			return err;
	}

	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
		if (err)
			return err;
	}
out:
	return ispipe;
}

/*
 * Mark 'start's thread group as exiting with 'exit_code' and queue
 * SIGKILL for every other thread that still has an mm.  Caller holds
 * the appropriate siglock.  Returns the number of threads signalled.
 */
static int zap_process(struct task_struct *start, int exit_code)
{
	struct task_struct *t;
	int nr = 0;

	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_exit_code = exit_code;
	start->signal->group_stop_count = 0;

	t = start;
	do {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		if (t != current && t->mm) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	} while_each_thread(start, t);

	return nr;
}

/*
 * Kill every task sharing 'mm' (not just our own thread group) so the
 * dump sees a stable address space, and record the count of dumpers in
 * core_state->nr_threads.  Returns that count, or -EAGAIN if a group
 * exit is already in progress.
 */
static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
				struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		nr = zap_process(tsk, exit_code);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	/* fast path: nobody outside our group uses this mm */
	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
	/*
	 * We should find and kill all tasks which use this mm, and we should
	 * count them correctly into ->nr_threads. We don't take tasklist
	 * lock, but this is safe wrt:
	 *
	 * fork:
	 *	None of sub-threads can fork after zap_process(leader). All
	 *	processes which were created before this point should be
	 *	visible to zap_threads() because copy_process() adds the new
	 *	process to the tail of init_task.tasks list, and lock/unlock
	 *	of ->siglock provides a memory barrier.
	 *
	 * do_exit:
	 *	The caller holds mm->mmap_sem. This means that the task which
	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
	 *	its ->mm.
	 *
	 * de_thread:
	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
	 *	we must see either old or new leader, this does not matter.
	 *	However, it can change p->sighand, so lock_task_sighand(p)
	 *	must be used. Since p->mm != NULL and we hold ->mmap_sem
	 *	it can't fail.
	 *
	 *	Note also that "g" can be the old leader with ->mm == NULL
	 *	and already unhashed and thus removed from ->thread_group.
	 *	This is OK, __unhash_process()->list_del_rcu() does not
	 *	clear the ->next pointer, we will find the new leader via
	 *	next_thread().
	 */
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;
		p = g;
		do {
			if (p->mm) {
				if (unlikely(p->mm == mm)) {
					lock_task_sighand(p, &flags);
					nr += zap_process(p, exit_code);
					unlock_task_sighand(p, &flags);
				}
				/* one thread with ->mm decides for the group */
				break;
			}
		} while_each_thread(g, p);
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}

/*
 * Freeze the address space for dumping: zap all users of the mm and
 * wait until they have all parked in exit_mm().  Returns the number of
 * waiting threads, or a negative error (-EBUSY if a dump is already
 * running on this mm).
 */
static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	down_write(&mm->mmap_sem);
	if (!mm->core_state)
		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	up_write(&mm->mmap_sem);

	if (core_waiters > 0)
		wait_for_completion(&core_state->startup);

	return core_waiters;
}

/*
 * Release every thread parked in exit_mm() waiting for the dump to
 * complete, then clear mm->core_state.
 */
static void coredump_finish(struct mm_struct *mm)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see exit_mm(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}

	mm->core_state = NULL;
}

/*
 * set_dumpable converts traditional three-value dumpable to two flags and
 * stores them into mm->flags.  It modifies lower two bits of mm->flags, but
 * these bits are not changed atomically.  So get_dumpable can observe the
 * intermediate state.  To avoid doing unexpected behavior, let get_dumpable
 * return either old dumpable or new one by paying attention to the order of
 * modifying the bits.
* * dumpable | mm->flags (binary) * old new | initial interim final * ---------+----------------------- * 0 1 | 00 01 01 * 0 2 | 00 10(*) 11 * 1 0 | 01 00 00 * 1 2 | 01 11 11 * 2 0 | 11 10(*) 00 * 2 1 | 11 11 01 * * (*) get_dumpable regards interim value of 10 as 11. */ void set_dumpable(struct mm_struct *mm, int value) { switch (value) { case 0: clear_bit(MMF_DUMPABLE, &mm->flags); smp_wmb(); clear_bit(MMF_DUMP_SECURELY, &mm->flags); break; case 1: set_bit(MMF_DUMPABLE, &mm->flags); smp_wmb(); clear_bit(MMF_DUMP_SECURELY, &mm->flags); break; case 2: set_bit(MMF_DUMP_SECURELY, &mm->flags); smp_wmb(); set_bit(MMF_DUMPABLE, &mm->flags); break; } } static int __get_dumpable(unsigned long mm_flags) { int ret; ret = mm_flags & MMF_DUMPABLE_MASK; return (ret >= 2) ? 2 : ret; } int get_dumpable(struct mm_struct *mm) { return __get_dumpable(mm->flags); } static void wait_for_dump_helpers(struct file *file) { struct pipe_inode_info *pipe; pipe = file->f_path.dentry->d_inode->i_pipe; pipe_lock(pipe); pipe->readers++; pipe->writers--; while ((pipe->readers > 1) && (!signal_pending(current))) { wake_up_interruptible_sync(&pipe->wait); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); pipe_wait(pipe); } pipe->readers--; pipe->writers++; pipe_unlock(pipe); } /* * umh_pipe_setup * helper function to customize the process used * to collect the core in userspace. Specifically * it sets up a pipe and installs it as fd 0 (stdin) * for the process. Returns 0 on success, or * PTR_ERR on failure. * Note that it also sets the core limit to 1. 
This * is a special value that we use to trap recursive * core dumps */ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new) { struct file *rp, *wp; struct fdtable *fdt; struct coredump_params *cp = (struct coredump_params *)info->data; struct files_struct *cf = current->files; wp = create_write_pipe(0); if (IS_ERR(wp)) return PTR_ERR(wp); rp = create_read_pipe(wp, 0); if (IS_ERR(rp)) { free_write_pipe(wp); return PTR_ERR(rp); } cp->file = wp; sys_close(0); fd_install(0, rp); spin_lock(&cf->file_lock); fdt = files_fdtable(cf); __set_open_fd(0, fdt); __clear_close_on_exec(0, fdt); spin_unlock(&cf->file_lock); /* and disallow core files too */ current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1}; return 0; } void do_coredump(long signr, int exit_code, struct pt_regs *regs) { struct core_state core_state; struct core_name cn; struct mm_struct *mm = current->mm; struct linux_binfmt * binfmt; const struct cred *old_cred; struct cred *cred; int retval = 0; int flag = 0; int ispipe; static atomic_t core_dump_count = ATOMIC_INIT(0); struct coredump_params cprm = { .signr = signr, .regs = regs, .limit = rlimit(RLIMIT_CORE), /* * We must use the same mm->flags while dumping core to avoid * inconsistency of bit flags, since this flag is not protected * by any locks. */ .mm_flags = mm->flags, }; audit_core_dumps(signr); binfmt = mm->binfmt; if (!binfmt || !binfmt->core_dump) goto fail; if (!__get_dumpable(cprm.mm_flags)) goto fail; cred = prepare_creds(); if (!cred) goto fail; /* * We cannot trust fsuid as being the "true" uid of the * process nor do we know its entire history. We only know it * was tainted so we dump it as root in mode 2. 
*/ if (__get_dumpable(cprm.mm_flags) == 2) { /* Setuid core dump mode */ flag = O_EXCL; /* Stop rewrite attacks */ cred->fsuid = 0; /* Dump root private */ } retval = coredump_wait(exit_code, &core_state); if (retval < 0) goto fail_creds; old_cred = override_creds(cred); /* * Clear any false indication of pending signals that might * be seen by the filesystem code called to write the core file. */ clear_thread_flag(TIF_SIGPENDING); ispipe = format_corename(&cn, signr); if (ispipe) { int dump_count; char **helper_argv; if (ispipe < 0) { printk(KERN_WARNING "format_corename failed\n"); printk(KERN_WARNING "Aborting core\n"); goto fail_corename; } if (cprm.limit == 1) { /* * Normally core limits are irrelevant to pipes, since * we're not writing to the file system, but we use * cprm.limit of 1 here as a speacial value. Any * non-1 limit gets set to RLIM_INFINITY below, but * a limit of 0 skips the dump. This is a consistent * way to catch recursive crashes. We can still crash * if the core_pattern binary sets RLIM_CORE = !1 * but it runs as root, and can do lots of stupid things * Note that we use task_tgid_vnr here to grab the pid * of the process group leader. That way we get the * right pid if a thread in a multi-threaded * core_pattern process dies. 
*/ printk(KERN_WARNING "Process %d(%s) has RLIMIT_CORE set to 1\n", task_tgid_vnr(current), current->comm); printk(KERN_WARNING "Aborting core\n"); goto fail_unlock; } cprm.limit = RLIM_INFINITY; dump_count = atomic_inc_return(&core_dump_count); if (core_pipe_limit && (core_pipe_limit < dump_count)) { printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n", task_tgid_vnr(current), current->comm); printk(KERN_WARNING "Skipping core dump\n"); goto fail_dropcount; } helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL); if (!helper_argv) { printk(KERN_WARNING "%s failed to allocate memory\n", __func__); goto fail_dropcount; } retval = call_usermodehelper_fns(helper_argv[0], helper_argv, NULL, UMH_WAIT_EXEC, umh_pipe_setup, NULL, &cprm); argv_free(helper_argv); if (retval) { printk(KERN_INFO "Core dump to %s pipe failed\n", cn.corename); goto close_fail; } } else { struct inode *inode; if (cprm.limit < binfmt->min_coredump) goto fail_unlock; cprm.file = filp_open(cn.corename, O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, 0600); if (IS_ERR(cprm.file)) goto fail_unlock; inode = cprm.file->f_path.dentry->d_inode; if (inode->i_nlink > 1) goto close_fail; if (d_unhashed(cprm.file->f_path.dentry)) goto close_fail; /* * AK: actually i see no reason to not allow this for named * pipes etc, but keep the previous behaviour for now. */ if (!S_ISREG(inode->i_mode)) goto close_fail; /* * Dont allow local users get cute and trick others to coredump * into their pre-created files. 
*/ if (inode->i_uid != current_fsuid()) goto close_fail; if (!cprm.file->f_op || !cprm.file->f_op->write) goto close_fail; if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file)) goto close_fail; } retval = binfmt->core_dump(&cprm); if (retval) current->signal->group_exit_code |= 0x80; if (ispipe && core_pipe_limit) wait_for_dump_helpers(cprm.file); close_fail: if (cprm.file) filp_close(cprm.file, NULL); fail_dropcount: if (ispipe) atomic_dec(&core_dump_count); fail_unlock: kfree(cn.corename); fail_corename: coredump_finish(mm); revert_creds(old_cred); fail_creds: put_cred(cred); fail: return; } /* * Core dumping helper functions. These are the only things you should * do on a core-file: use only these functions to write out all the * necessary info. */ int dump_write(struct file *file, const void *addr, int nr) { return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr; } EXPORT_SYMBOL(dump_write); int dump_seek(struct file *file, loff_t off) { int ret = 1; if (file->f_op->llseek && file->f_op->llseek != no_llseek) { if (file->f_op->llseek(file, off, SEEK_CUR) < 0) return 0; } else { char *buf = (char *)get_zeroed_page(GFP_KERNEL); if (!buf) return 0; while (off > 0) { unsigned long n = off; if (n > PAGE_SIZE) n = PAGE_SIZE; if (!dump_write(file, buf, n)) { ret = 0; break; } off -= n; } free_page((unsigned long)buf); } return ret; } EXPORT_SYMBOL(dump_seek);
gpl-2.0
osmc/vero-linux
drivers/media/platform/exynos4-is/fimc-capture.c
146
51076
/* * Samsung S5P/EXYNOS4 SoC series camera interface (camera capture) driver * * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd. * Sylwester Nawrocki <s.nawrocki@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/bug.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/pm_runtime.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-mem2mem.h> #include <media/videobuf2-v4l2.h> #include <media/videobuf2-dma-contig.h> #include "common.h" #include "fimc-core.h" #include "fimc-reg.h" #include "media-dev.h" static int fimc_capture_hw_init(struct fimc_dev *fimc) { struct fimc_source_info *si = &fimc->vid_cap.source_config; struct fimc_ctx *ctx = fimc->vid_cap.ctx; int ret; unsigned long flags; if (ctx == NULL || ctx->s_frame.fmt == NULL) return -EINVAL; if (si->fimc_bus_type == FIMC_BUS_TYPE_ISP_WRITEBACK) { ret = fimc_hw_camblk_cfg_writeback(fimc); if (ret < 0) return ret; } spin_lock_irqsave(&fimc->slock, flags); fimc_prepare_dma_offset(ctx, &ctx->d_frame); fimc_set_yuv_order(ctx); fimc_hw_set_camera_polarity(fimc, si); fimc_hw_set_camera_type(fimc, si); fimc_hw_set_camera_source(fimc, si); fimc_hw_set_camera_offset(fimc, &ctx->s_frame); ret = fimc_set_scaler_info(ctx); if (!ret) { fimc_hw_set_input_path(ctx); fimc_hw_set_prescaler(ctx); fimc_hw_set_mainscaler(ctx); fimc_hw_set_target_format(ctx); fimc_hw_set_rotation(ctx); fimc_hw_set_effect(ctx); fimc_hw_set_output_path(ctx); fimc_hw_set_out_dma(ctx); if (fimc->drv_data->alpha_color) fimc_hw_set_rgb_alpha(ctx); clear_bit(ST_CAPT_APPLY_CFG, &fimc->state); } spin_unlock_irqrestore(&fimc->slock, flags); return ret; } /* * 
Reinitialize the driver so it is ready to start the streaming again. * Set fimc->state to indicate stream off and the hardware shut down state. * If not suspending (@suspend is false), return any buffers to videobuf2. * Otherwise put any owned buffers onto the pending buffers queue, so they * can be re-spun when the device is being resumed. Also perform FIMC * software reset and disable streaming on the whole pipeline if required. */ static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend) { struct fimc_vid_cap *cap = &fimc->vid_cap; struct fimc_vid_buffer *buf; unsigned long flags; bool streaming; spin_lock_irqsave(&fimc->slock, flags); streaming = fimc->state & (1 << ST_CAPT_ISP_STREAM); fimc->state &= ~(1 << ST_CAPT_RUN | 1 << ST_CAPT_SHUT | 1 << ST_CAPT_STREAM | 1 << ST_CAPT_ISP_STREAM); if (suspend) fimc->state |= (1 << ST_CAPT_SUSPENDED); else fimc->state &= ~(1 << ST_CAPT_PEND | 1 << ST_CAPT_SUSPENDED); /* Release unused buffers */ while (!suspend && !list_empty(&cap->pending_buf_q)) { buf = fimc_pending_queue_pop(cap); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); } /* If suspending put unused buffers onto pending queue */ while (!list_empty(&cap->active_buf_q)) { buf = fimc_active_queue_pop(cap); if (suspend) fimc_pending_queue_add(cap, buf); else vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); } fimc_hw_reset(fimc); cap->buf_index = 0; spin_unlock_irqrestore(&fimc->slock, flags); if (streaming) return fimc_pipeline_call(&cap->ve, set_stream, 0); else return 0; } static int fimc_stop_capture(struct fimc_dev *fimc, bool suspend) { unsigned long flags; if (!fimc_capture_active(fimc)) return 0; spin_lock_irqsave(&fimc->slock, flags); set_bit(ST_CAPT_SHUT, &fimc->state); fimc_deactivate_capture(fimc); spin_unlock_irqrestore(&fimc->slock, flags); wait_event_timeout(fimc->irq_queue, !test_bit(ST_CAPT_SHUT, &fimc->state), (2*HZ/10)); /* 200 ms */ return fimc_capture_state_cleanup(fimc, suspend); } /** * 
fimc_capture_config_update - apply the camera interface configuration * * To be called from within the interrupt handler with fimc.slock * spinlock held. It updates the camera pixel crop, rotation and * image flip in H/W. */ static int fimc_capture_config_update(struct fimc_ctx *ctx) { struct fimc_dev *fimc = ctx->fimc_dev; int ret; fimc_hw_set_camera_offset(fimc, &ctx->s_frame); ret = fimc_set_scaler_info(ctx); if (ret) return ret; fimc_hw_set_prescaler(ctx); fimc_hw_set_mainscaler(ctx); fimc_hw_set_target_format(ctx); fimc_hw_set_rotation(ctx); fimc_hw_set_effect(ctx); fimc_prepare_dma_offset(ctx, &ctx->d_frame); fimc_hw_set_out_dma(ctx); if (fimc->drv_data->alpha_color) fimc_hw_set_rgb_alpha(ctx); clear_bit(ST_CAPT_APPLY_CFG, &fimc->state); return ret; } void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf) { struct fimc_vid_cap *cap = &fimc->vid_cap; struct fimc_pipeline *p = to_fimc_pipeline(cap->ve.pipe); struct v4l2_subdev *csis = p->subdevs[IDX_CSIS]; struct fimc_frame *f = &cap->ctx->d_frame; struct fimc_vid_buffer *v_buf; if (test_and_clear_bit(ST_CAPT_SHUT, &fimc->state)) { wake_up(&fimc->irq_queue); goto done; } if (!list_empty(&cap->active_buf_q) && test_bit(ST_CAPT_RUN, &fimc->state) && deq_buf) { v_buf = fimc_active_queue_pop(cap); v4l2_get_timestamp(&v_buf->vb.timestamp); v_buf->vb.sequence = cap->frame_count++; vb2_buffer_done(&v_buf->vb.vb2_buf, VB2_BUF_STATE_DONE); } if (!list_empty(&cap->pending_buf_q)) { v_buf = fimc_pending_queue_pop(cap); fimc_hw_set_output_addr(fimc, &v_buf->paddr, cap->buf_index); v_buf->index = cap->buf_index; /* Move the buffer to the capture active queue */ fimc_active_queue_add(cap, v_buf); dbg("next frame: %d, done frame: %d", fimc_hw_get_frame_index(fimc), v_buf->index); if (++cap->buf_index >= FIMC_MAX_OUT_BUFS) cap->buf_index = 0; } /* * Set up a buffer at MIPI-CSIS if current image format * requires the frame embedded data capture. 
*/ if (f->fmt->mdataplanes && !list_empty(&cap->active_buf_q)) { unsigned int plane = ffs(f->fmt->mdataplanes) - 1; unsigned int size = f->payload[plane]; s32 index = fimc_hw_get_frame_index(fimc); void *vaddr; list_for_each_entry(v_buf, &cap->active_buf_q, list) { if (v_buf->index != index) continue; vaddr = vb2_plane_vaddr(&v_buf->vb.vb2_buf, plane); v4l2_subdev_call(csis, video, s_rx_buffer, vaddr, &size); break; } } if (cap->active_buf_cnt == 0) { if (deq_buf) clear_bit(ST_CAPT_RUN, &fimc->state); if (++cap->buf_index >= FIMC_MAX_OUT_BUFS) cap->buf_index = 0; } else { set_bit(ST_CAPT_RUN, &fimc->state); } if (test_bit(ST_CAPT_APPLY_CFG, &fimc->state)) fimc_capture_config_update(cap->ctx); done: if (cap->active_buf_cnt == 1) { fimc_deactivate_capture(fimc); clear_bit(ST_CAPT_STREAM, &fimc->state); } dbg("frame: %d, active_buf_cnt: %d", fimc_hw_get_frame_index(fimc), cap->active_buf_cnt); } static int start_streaming(struct vb2_queue *q, unsigned int count) { struct fimc_ctx *ctx = q->drv_priv; struct fimc_dev *fimc = ctx->fimc_dev; struct fimc_vid_cap *vid_cap = &fimc->vid_cap; int min_bufs; int ret; vid_cap->frame_count = 0; ret = fimc_capture_hw_init(fimc); if (ret) { fimc_capture_state_cleanup(fimc, false); return ret; } set_bit(ST_CAPT_PEND, &fimc->state); min_bufs = fimc->vid_cap.reqbufs_count > 1 ? 
2 : 1; if (vid_cap->active_buf_cnt >= min_bufs && !test_and_set_bit(ST_CAPT_STREAM, &fimc->state)) { fimc_activate_capture(ctx); if (!test_and_set_bit(ST_CAPT_ISP_STREAM, &fimc->state)) return fimc_pipeline_call(&vid_cap->ve, set_stream, 1); } return 0; } static void stop_streaming(struct vb2_queue *q) { struct fimc_ctx *ctx = q->drv_priv; struct fimc_dev *fimc = ctx->fimc_dev; if (!fimc_capture_active(fimc)) return; fimc_stop_capture(fimc, false); } int fimc_capture_suspend(struct fimc_dev *fimc) { bool suspend = fimc_capture_busy(fimc); int ret = fimc_stop_capture(fimc, suspend); if (ret) return ret; return fimc_pipeline_call(&fimc->vid_cap.ve, close); } static void buffer_queue(struct vb2_buffer *vb); int fimc_capture_resume(struct fimc_dev *fimc) { struct fimc_vid_cap *vid_cap = &fimc->vid_cap; struct exynos_video_entity *ve = &vid_cap->ve; struct fimc_vid_buffer *buf; int i; if (!test_and_clear_bit(ST_CAPT_SUSPENDED, &fimc->state)) return 0; INIT_LIST_HEAD(&fimc->vid_cap.active_buf_q); vid_cap->buf_index = 0; fimc_pipeline_call(ve, open, &ve->vdev.entity, false); fimc_capture_hw_init(fimc); clear_bit(ST_CAPT_SUSPENDED, &fimc->state); for (i = 0; i < vid_cap->reqbufs_count; i++) { if (list_empty(&vid_cap->pending_buf_q)) break; buf = fimc_pending_queue_pop(vid_cap); buffer_queue(&buf->vb.vb2_buf); } return 0; } static int queue_setup(struct vb2_queue *vq, const void *parg, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *allocators[]) { const struct v4l2_format *pfmt = parg; const struct v4l2_pix_format_mplane *pixm = NULL; struct fimc_ctx *ctx = vq->drv_priv; struct fimc_frame *frame = &ctx->d_frame; struct fimc_fmt *fmt = frame->fmt; unsigned long wh; int i; if (pfmt) { pixm = &pfmt->fmt.pix_mp; fmt = fimc_find_format(&pixm->pixelformat, NULL, FMT_FLAGS_CAM | FMT_FLAGS_M2M, -1); wh = pixm->width * pixm->height; } else { wh = frame->f_width * frame->f_height; } if (fmt == NULL) return -EINVAL; *num_planes = fmt->memplanes; for 
(i = 0; i < fmt->memplanes; i++) { unsigned int size = (wh * fmt->depth[i]) / 8; if (pixm) sizes[i] = max(size, pixm->plane_fmt[i].sizeimage); else if (fimc_fmt_is_user_defined(fmt->color)) sizes[i] = frame->payload[i]; else sizes[i] = max_t(u32, size, frame->payload[i]); allocators[i] = ctx->fimc_dev->alloc_ctx; } return 0; } static int buffer_prepare(struct vb2_buffer *vb) { struct vb2_queue *vq = vb->vb2_queue; struct fimc_ctx *ctx = vq->drv_priv; int i; if (ctx->d_frame.fmt == NULL) return -EINVAL; for (i = 0; i < ctx->d_frame.fmt->memplanes; i++) { unsigned long size = ctx->d_frame.payload[i]; if (vb2_plane_size(vb, i) < size) { v4l2_err(&ctx->fimc_dev->vid_cap.ve.vdev, "User buffer too small (%ld < %ld)\n", vb2_plane_size(vb, i), size); return -EINVAL; } vb2_set_plane_payload(vb, i, size); } return 0; } static void buffer_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct fimc_vid_buffer *buf = container_of(vbuf, struct fimc_vid_buffer, vb); struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); struct fimc_dev *fimc = ctx->fimc_dev; struct fimc_vid_cap *vid_cap = &fimc->vid_cap; struct exynos_video_entity *ve = &vid_cap->ve; unsigned long flags; int min_bufs; spin_lock_irqsave(&fimc->slock, flags); fimc_prepare_addr(ctx, &buf->vb.vb2_buf, &ctx->d_frame, &buf->paddr); if (!test_bit(ST_CAPT_SUSPENDED, &fimc->state) && !test_bit(ST_CAPT_STREAM, &fimc->state) && vid_cap->active_buf_cnt < FIMC_MAX_OUT_BUFS) { /* Setup the buffer directly for processing. */ int buf_id = (vid_cap->reqbufs_count == 1) ? -1 : vid_cap->buf_index; fimc_hw_set_output_addr(fimc, &buf->paddr, buf_id); buf->index = vid_cap->buf_index; fimc_active_queue_add(vid_cap, buf); if (++vid_cap->buf_index >= FIMC_MAX_OUT_BUFS) vid_cap->buf_index = 0; } else { fimc_pending_queue_add(vid_cap, buf); } min_bufs = vid_cap->reqbufs_count > 1 ? 
2 : 1; if (vb2_is_streaming(&vid_cap->vbq) && vid_cap->active_buf_cnt >= min_bufs && !test_and_set_bit(ST_CAPT_STREAM, &fimc->state)) { int ret; fimc_activate_capture(ctx); spin_unlock_irqrestore(&fimc->slock, flags); if (test_and_set_bit(ST_CAPT_ISP_STREAM, &fimc->state)) return; ret = fimc_pipeline_call(ve, set_stream, 1); if (ret < 0) v4l2_err(&ve->vdev, "stream on failed: %d\n", ret); return; } spin_unlock_irqrestore(&fimc->slock, flags); } static struct vb2_ops fimc_capture_qops = { .queue_setup = queue_setup, .buf_prepare = buffer_prepare, .buf_queue = buffer_queue, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, .start_streaming = start_streaming, .stop_streaming = stop_streaming, }; static int fimc_capture_set_default_format(struct fimc_dev *fimc); static int fimc_capture_open(struct file *file) { struct fimc_dev *fimc = video_drvdata(file); struct fimc_vid_cap *vc = &fimc->vid_cap; struct exynos_video_entity *ve = &vc->ve; int ret = -EBUSY; dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state); mutex_lock(&fimc->lock); if (fimc_m2m_active(fimc)) goto unlock; set_bit(ST_CAPT_BUSY, &fimc->state); ret = pm_runtime_get_sync(&fimc->pdev->dev); if (ret < 0) goto unlock; ret = v4l2_fh_open(file); if (ret) { pm_runtime_put_sync(&fimc->pdev->dev); goto unlock; } if (v4l2_fh_is_singular_file(file)) { fimc_md_graph_lock(ve); ret = fimc_pipeline_call(ve, open, &ve->vdev.entity, true); if (ret == 0 && vc->user_subdev_api && vc->inh_sensor_ctrls) { /* * Recreate controls of the the video node to drop * any controls inherited from the sensor subdev. 
*/ fimc_ctrls_delete(vc->ctx); ret = fimc_ctrls_create(vc->ctx); if (ret == 0) vc->inh_sensor_ctrls = false; } if (ret == 0) ve->vdev.entity.use_count++; fimc_md_graph_unlock(ve); if (ret == 0) ret = fimc_capture_set_default_format(fimc); if (ret < 0) { clear_bit(ST_CAPT_BUSY, &fimc->state); pm_runtime_put_sync(&fimc->pdev->dev); v4l2_fh_release(file); } } unlock: mutex_unlock(&fimc->lock); return ret; } static int fimc_capture_release(struct file *file) { struct fimc_dev *fimc = video_drvdata(file); struct fimc_vid_cap *vc = &fimc->vid_cap; bool close = v4l2_fh_is_singular_file(file); int ret; dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state); mutex_lock(&fimc->lock); if (close && vc->streaming) { media_entity_pipeline_stop(&vc->ve.vdev.entity); vc->streaming = false; } ret = _vb2_fop_release(file, NULL); if (close) { clear_bit(ST_CAPT_BUSY, &fimc->state); fimc_pipeline_call(&vc->ve, close); clear_bit(ST_CAPT_SUSPENDED, &fimc->state); fimc_md_graph_lock(&vc->ve); vc->ve.vdev.entity.use_count--; fimc_md_graph_unlock(&vc->ve); } pm_runtime_put_sync(&fimc->pdev->dev); mutex_unlock(&fimc->lock); return ret; } static const struct v4l2_file_operations fimc_capture_fops = { .owner = THIS_MODULE, .open = fimc_capture_open, .release = fimc_capture_release, .poll = vb2_fop_poll, .unlocked_ioctl = video_ioctl2, .mmap = vb2_fop_mmap, }; /* * Format and crop negotiation helpers */ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx, u32 *width, u32 *height, u32 *code, u32 *fourcc, int pad) { bool rotation = ctx->rotation == 90 || ctx->rotation == 270; struct fimc_dev *fimc = ctx->fimc_dev; const struct fimc_variant *var = fimc->variant; const struct fimc_pix_limit *pl = var->pix_limit; struct fimc_frame *dst = &ctx->d_frame; u32 depth, min_w, max_w, min_h, align_h = 3; u32 mask = FMT_FLAGS_CAM; struct fimc_fmt *ffmt; /* Conversion from/to JPEG or User Defined format is not supported */ if (code && ctx->s_frame.fmt && pad == FIMC_SD_PAD_SOURCE && 
fimc_fmt_is_user_defined(ctx->s_frame.fmt->color)) *code = ctx->s_frame.fmt->mbus_code; if (fourcc && *fourcc != V4L2_PIX_FMT_JPEG && pad == FIMC_SD_PAD_SOURCE) mask |= FMT_FLAGS_M2M; if (pad == FIMC_SD_PAD_SINK_FIFO) mask = FMT_FLAGS_WRITEBACK; ffmt = fimc_find_format(fourcc, code, mask, 0); if (WARN_ON(!ffmt)) return NULL; if (code) *code = ffmt->mbus_code; if (fourcc) *fourcc = ffmt->fourcc; if (pad != FIMC_SD_PAD_SOURCE) { max_w = fimc_fmt_is_user_defined(ffmt->color) ? pl->scaler_dis_w : pl->scaler_en_w; /* Apply the camera input interface pixel constraints */ v4l_bound_align_image(width, max_t(u32, *width, 32), max_w, 4, height, max_t(u32, *height, 32), FIMC_CAMIF_MAX_HEIGHT, fimc_fmt_is_user_defined(ffmt->color) ? 3 : 1, 0); return ffmt; } /* Can't scale or crop in transparent (JPEG) transfer mode */ if (fimc_fmt_is_user_defined(ffmt->color)) { *width = ctx->s_frame.f_width; *height = ctx->s_frame.f_height; return ffmt; } /* Apply the scaler and the output DMA constraints */ max_w = rotation ? pl->out_rot_en_w : pl->out_rot_dis_w; if (ctx->state & FIMC_COMPOSE) { min_w = dst->offs_h + dst->width; min_h = dst->offs_v + dst->height; } else { min_w = var->min_out_pixsize; min_h = var->min_out_pixsize; } if (var->min_vsize_align == 1 && !rotation) align_h = fimc_fmt_is_rgb(ffmt->color) ? 0 : 1; depth = fimc_get_format_depth(ffmt); v4l_bound_align_image(width, min_w, max_w, ffs(var->min_out_pixsize) - 1, height, min_h, FIMC_CAMIF_MAX_HEIGHT, align_h, 64/(ALIGN(depth, 8))); dbg("pad%d: code: 0x%x, %dx%d. dst fmt: %dx%d", pad, code ? 
*code : 0, *width, *height, dst->f_width, dst->f_height); return ffmt; } static void fimc_capture_try_selection(struct fimc_ctx *ctx, struct v4l2_rect *r, int target) { bool rotate = ctx->rotation == 90 || ctx->rotation == 270; struct fimc_dev *fimc = ctx->fimc_dev; const struct fimc_variant *var = fimc->variant; const struct fimc_pix_limit *pl = var->pix_limit; struct fimc_frame *sink = &ctx->s_frame; u32 max_w, max_h, min_w = 0, min_h = 0, min_sz; u32 align_sz = 0, align_h = 4; u32 max_sc_h, max_sc_v; /* In JPEG transparent transfer mode cropping is not supported */ if (fimc_fmt_is_user_defined(ctx->d_frame.fmt->color)) { r->width = sink->f_width; r->height = sink->f_height; r->left = r->top = 0; return; } if (target == V4L2_SEL_TGT_COMPOSE) { if (ctx->rotation != 90 && ctx->rotation != 270) align_h = 1; max_sc_h = min(SCALER_MAX_HRATIO, 1 << (ffs(sink->width) - 3)); max_sc_v = min(SCALER_MAX_VRATIO, 1 << (ffs(sink->height) - 1)); min_sz = var->min_out_pixsize; } else { u32 depth = fimc_get_format_depth(sink->fmt); align_sz = 64/ALIGN(depth, 8); min_sz = var->min_inp_pixsize; min_w = min_h = min_sz; max_sc_h = max_sc_v = 1; } /* * For the compose rectangle the following constraints must be met: * - it must fit in the sink pad format rectangle (f_width/f_height); * - maximum downscaling ratio is 64; * - maximum crop size depends if the rotator is used or not; * - the sink pad format width/height must be 4 multiple of the * prescaler ratios determined by sink pad size and source pad crop, * the prescaler ratio is returned by fimc_get_scaler_factor(). */ max_w = min_t(u32, rotate ? pl->out_rot_en_w : pl->out_rot_dis_w, rotate ? 
sink->f_height : sink->f_width); max_h = min_t(u32, FIMC_CAMIF_MAX_HEIGHT, sink->f_height); if (target == V4L2_SEL_TGT_COMPOSE) { min_w = min_t(u32, max_w, sink->f_width / max_sc_h); min_h = min_t(u32, max_h, sink->f_height / max_sc_v); if (rotate) { swap(max_sc_h, max_sc_v); swap(min_w, min_h); } } v4l_bound_align_image(&r->width, min_w, max_w, ffs(min_sz) - 1, &r->height, min_h, max_h, align_h, align_sz); /* Adjust left/top if crop/compose rectangle is out of bounds */ r->left = clamp_t(u32, r->left, 0, sink->f_width - r->width); r->top = clamp_t(u32, r->top, 0, sink->f_height - r->height); r->left = round_down(r->left, var->hor_offs_align); dbg("target %#x: (%d,%d)/%dx%d, sink fmt: %dx%d", target, r->left, r->top, r->width, r->height, sink->f_width, sink->f_height); } /* * The video node ioctl operations */ static int fimc_cap_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct fimc_dev *fimc = video_drvdata(file); __fimc_vidioc_querycap(&fimc->pdev->dev, cap, V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE_MPLANE); return 0; } static int fimc_cap_enum_fmt_mplane(struct file *file, void *priv, struct v4l2_fmtdesc *f) { struct fimc_fmt *fmt; fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM | FMT_FLAGS_M2M, f->index); if (!fmt) return -EINVAL; strncpy(f->description, fmt->name, sizeof(f->description) - 1); f->pixelformat = fmt->fourcc; if (fmt->fourcc == MEDIA_BUS_FMT_JPEG_1X8) f->flags |= V4L2_FMT_FLAG_COMPRESSED; return 0; } static struct media_entity *fimc_pipeline_get_head(struct media_entity *me) { struct media_pad *pad = &me->pads[0]; while (!(pad->flags & MEDIA_PAD_FL_SOURCE)) { pad = media_entity_remote_pad(pad); if (!pad) break; me = pad->entity; pad = &me->pads[0]; } return me; } /** * fimc_pipeline_try_format - negotiate and/or set formats at pipeline * elements * @ctx: FIMC capture context * @tfmt: media bus format to try/set on subdevs * @fmt_id: fimc pixel format id corresponding to returned @tfmt (output) * @set: true to set 
format on subdevs, false to try only */ static int fimc_pipeline_try_format(struct fimc_ctx *ctx, struct v4l2_mbus_framefmt *tfmt, struct fimc_fmt **fmt_id, bool set) { struct fimc_dev *fimc = ctx->fimc_dev; struct fimc_pipeline *p = to_fimc_pipeline(fimc->vid_cap.ve.pipe); struct v4l2_subdev *sd = p->subdevs[IDX_SENSOR]; struct v4l2_subdev_format sfmt; struct v4l2_mbus_framefmt *mf = &sfmt.format; struct media_entity *me; struct fimc_fmt *ffmt; struct media_pad *pad; int ret, i = 1; u32 fcc; if (WARN_ON(!sd || !tfmt)) return -EINVAL; memset(&sfmt, 0, sizeof(sfmt)); sfmt.format = *tfmt; sfmt.which = set ? V4L2_SUBDEV_FORMAT_ACTIVE : V4L2_SUBDEV_FORMAT_TRY; me = fimc_pipeline_get_head(&sd->entity); while (1) { ffmt = fimc_find_format(NULL, mf->code != 0 ? &mf->code : NULL, FMT_FLAGS_CAM, i++); if (ffmt == NULL) { /* * Notify user-space if common pixel code for * host and sensor does not exist. */ return -EINVAL; } mf->code = tfmt->code = ffmt->mbus_code; /* set format on all pipeline subdevs */ while (me != &fimc->vid_cap.subdev.entity) { sd = media_entity_to_v4l2_subdev(me); sfmt.pad = 0; ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &sfmt); if (ret) return ret; if (me->pads[0].flags & MEDIA_PAD_FL_SINK) { sfmt.pad = me->num_pads - 1; mf->code = tfmt->code; ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &sfmt); if (ret) return ret; } pad = media_entity_remote_pad(&me->pads[sfmt.pad]); if (!pad) return -EINVAL; me = pad->entity; } if (mf->code != tfmt->code) continue; fcc = ffmt->fourcc; tfmt->width = mf->width; tfmt->height = mf->height; ffmt = fimc_capture_try_format(ctx, &tfmt->width, &tfmt->height, NULL, &fcc, FIMC_SD_PAD_SINK_CAM); ffmt = fimc_capture_try_format(ctx, &tfmt->width, &tfmt->height, NULL, &fcc, FIMC_SD_PAD_SOURCE); if (ffmt && ffmt->mbus_code) mf->code = ffmt->mbus_code; if (mf->width != tfmt->width || mf->height != tfmt->height) continue; tfmt->code = mf->code; break; } if (fmt_id && ffmt) *fmt_id = ffmt; *tfmt = *mf; return 0; } /** * 
fimc_get_sensor_frame_desc - query the sensor for media bus frame parameters * @sensor: pointer to the sensor subdev * @plane_fmt: provides plane sizes corresponding to the frame layout entries * @try: true to set the frame parameters, false to query only * * This function is used by this driver only for compressed/blob data formats. */ static int fimc_get_sensor_frame_desc(struct v4l2_subdev *sensor, struct v4l2_plane_pix_format *plane_fmt, unsigned int num_planes, bool try) { struct v4l2_mbus_frame_desc fd; int i, ret; int pad; for (i = 0; i < num_planes; i++) fd.entry[i].length = plane_fmt[i].sizeimage; pad = sensor->entity.num_pads - 1; if (try) ret = v4l2_subdev_call(sensor, pad, set_frame_desc, pad, &fd); else ret = v4l2_subdev_call(sensor, pad, get_frame_desc, pad, &fd); if (ret < 0) return ret; if (num_planes != fd.num_entries) return -EINVAL; for (i = 0; i < num_planes; i++) plane_fmt[i].sizeimage = fd.entry[i].length; if (fd.entry[0].length > FIMC_MAX_JPEG_BUF_SIZE) { v4l2_err(sensor->v4l2_dev, "Unsupported buffer size: %u\n", fd.entry[0].length); return -EINVAL; } return 0; } static int fimc_cap_g_fmt_mplane(struct file *file, void *fh, struct v4l2_format *f) { struct fimc_dev *fimc = video_drvdata(file); __fimc_get_format(&fimc->vid_cap.ctx->d_frame, f); return 0; } /* * Try or set format on the fimc.X.capture video node and additionally * on the whole pipeline if @try is false. * Locking: the caller must _not_ hold the graph mutex. 
*/ static int __video_try_or_set_format(struct fimc_dev *fimc, struct v4l2_format *f, bool try, struct fimc_fmt **inp_fmt, struct fimc_fmt **out_fmt) { struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp; struct fimc_vid_cap *vc = &fimc->vid_cap; struct exynos_video_entity *ve = &vc->ve; struct fimc_ctx *ctx = vc->ctx; unsigned int width = 0, height = 0; int ret = 0; /* Pre-configure format at the camera input interface, for JPEG only */ if (fimc_jpeg_fourcc(pix->pixelformat)) { fimc_capture_try_format(ctx, &pix->width, &pix->height, NULL, &pix->pixelformat, FIMC_SD_PAD_SINK_CAM); if (try) { width = pix->width; height = pix->height; } else { ctx->s_frame.f_width = pix->width; ctx->s_frame.f_height = pix->height; } } /* Try the format at the scaler and the DMA output */ *out_fmt = fimc_capture_try_format(ctx, &pix->width, &pix->height, NULL, &pix->pixelformat, FIMC_SD_PAD_SOURCE); if (*out_fmt == NULL) return -EINVAL; /* Restore image width/height for JPEG (no resizing supported). */ if (try && fimc_jpeg_fourcc(pix->pixelformat)) { pix->width = width; pix->height = height; } /* Try to match format at the host and the sensor */ if (!vc->user_subdev_api) { struct v4l2_mbus_framefmt mbus_fmt; struct v4l2_mbus_framefmt *mf; mf = try ? 
&mbus_fmt : &fimc->vid_cap.ci_fmt; mf->code = (*out_fmt)->mbus_code; mf->width = pix->width; mf->height = pix->height; fimc_md_graph_lock(ve); ret = fimc_pipeline_try_format(ctx, mf, inp_fmt, try); fimc_md_graph_unlock(ve); if (ret < 0) return ret; pix->width = mf->width; pix->height = mf->height; } fimc_adjust_mplane_format(*out_fmt, pix->width, pix->height, pix); if ((*out_fmt)->flags & FMT_FLAGS_COMPRESSED) { struct v4l2_subdev *sensor; fimc_md_graph_lock(ve); sensor = __fimc_md_get_subdev(ve->pipe, IDX_SENSOR); if (sensor) fimc_get_sensor_frame_desc(sensor, pix->plane_fmt, (*out_fmt)->memplanes, try); else ret = -EPIPE; fimc_md_graph_unlock(ve); } return ret; } static int fimc_cap_try_fmt_mplane(struct file *file, void *fh, struct v4l2_format *f) { struct fimc_dev *fimc = video_drvdata(file); struct fimc_fmt *out_fmt = NULL, *inp_fmt = NULL; return __video_try_or_set_format(fimc, f, true, &inp_fmt, &out_fmt); } static void fimc_capture_mark_jpeg_xfer(struct fimc_ctx *ctx, enum fimc_color_fmt color) { bool jpeg = fimc_fmt_is_user_defined(color); ctx->scaler.enabled = !jpeg; fimc_ctrls_activate(ctx, !jpeg); if (jpeg) set_bit(ST_CAPT_JPEG, &ctx->fimc_dev->state); else clear_bit(ST_CAPT_JPEG, &ctx->fimc_dev->state); } static int __fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f) { struct fimc_vid_cap *vc = &fimc->vid_cap; struct fimc_ctx *ctx = vc->ctx; struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp; struct fimc_frame *ff = &ctx->d_frame; struct fimc_fmt *inp_fmt = NULL; int ret, i; if (vb2_is_busy(&fimc->vid_cap.vbq)) return -EBUSY; ret = __video_try_or_set_format(fimc, f, false, &inp_fmt, &ff->fmt); if (ret < 0) return ret; /* Update RGB Alpha control state and value range */ fimc_alpha_ctrl_update(ctx); for (i = 0; i < ff->fmt->memplanes; i++) { ff->bytesperline[i] = pix->plane_fmt[i].bytesperline; ff->payload[i] = pix->plane_fmt[i].sizeimage; } set_frame_bounds(ff, pix->width, pix->height); /* Reset the composition rectangle if not yet 
configured */ if (!(ctx->state & FIMC_COMPOSE)) set_frame_crop(ff, 0, 0, pix->width, pix->height); fimc_capture_mark_jpeg_xfer(ctx, ff->fmt->color); /* Reset cropping and set format at the camera interface input */ if (!vc->user_subdev_api) { ctx->s_frame.fmt = inp_fmt; set_frame_bounds(&ctx->s_frame, pix->width, pix->height); set_frame_crop(&ctx->s_frame, 0, 0, pix->width, pix->height); } return ret; } static int fimc_cap_s_fmt_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct fimc_dev *fimc = video_drvdata(file); return __fimc_capture_set_format(fimc, f); } static int fimc_cap_enum_input(struct file *file, void *priv, struct v4l2_input *i) { struct fimc_dev *fimc = video_drvdata(file); struct exynos_video_entity *ve = &fimc->vid_cap.ve; struct v4l2_subdev *sd; if (i->index != 0) return -EINVAL; i->type = V4L2_INPUT_TYPE_CAMERA; fimc_md_graph_lock(ve); sd = __fimc_md_get_subdev(ve->pipe, IDX_SENSOR); fimc_md_graph_unlock(ve); if (sd) strlcpy(i->name, sd->name, sizeof(i->name)); return 0; } static int fimc_cap_s_input(struct file *file, void *priv, unsigned int i) { return i == 0 ? i : -EINVAL; } static int fimc_cap_g_input(struct file *file, void *priv, unsigned int *i) { *i = 0; return 0; } /** * fimc_pipeline_validate - check for formats inconsistencies * between source and sink pad of each link * * Return 0 if all formats match or -EPIPE otherwise. */ static int fimc_pipeline_validate(struct fimc_dev *fimc) { struct v4l2_subdev_format sink_fmt, src_fmt; struct fimc_vid_cap *vc = &fimc->vid_cap; struct v4l2_subdev *sd = &vc->subdev; struct fimc_pipeline *p = to_fimc_pipeline(vc->ve.pipe); struct media_pad *sink_pad, *src_pad; int i, ret; while (1) { /* * Find current entity sink pad and any remote sink pad linked * to it. We stop if there is no sink pad in current entity or * it is not linked to any other remote entity. 
*/ src_pad = NULL; for (i = 0; i < sd->entity.num_pads; i++) { struct media_pad *p = &sd->entity.pads[i]; if (p->flags & MEDIA_PAD_FL_SINK) { sink_pad = p; src_pad = media_entity_remote_pad(sink_pad); if (src_pad) break; } } if (src_pad == NULL || media_entity_type(src_pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV) break; /* Don't call FIMC subdev operation to avoid nested locking */ if (sd == &vc->subdev) { struct fimc_frame *ff = &vc->ctx->s_frame; sink_fmt.format.width = ff->f_width; sink_fmt.format.height = ff->f_height; sink_fmt.format.code = ff->fmt ? ff->fmt->mbus_code : 0; } else { sink_fmt.pad = sink_pad->index; sink_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sink_fmt); if (ret < 0 && ret != -ENOIOCTLCMD) return -EPIPE; } /* Retrieve format at the source pad */ sd = media_entity_to_v4l2_subdev(src_pad->entity); src_fmt.pad = src_pad->index; src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &src_fmt); if (ret < 0 && ret != -ENOIOCTLCMD) return -EPIPE; if (src_fmt.format.width != sink_fmt.format.width || src_fmt.format.height != sink_fmt.format.height || src_fmt.format.code != sink_fmt.format.code) return -EPIPE; if (sd == p->subdevs[IDX_SENSOR] && fimc_user_defined_mbus_fmt(src_fmt.format.code)) { struct v4l2_plane_pix_format plane_fmt[FIMC_MAX_PLANES]; struct fimc_frame *frame = &vc->ctx->d_frame; unsigned int i; ret = fimc_get_sensor_frame_desc(sd, plane_fmt, frame->fmt->memplanes, false); if (ret < 0) return -EPIPE; for (i = 0; i < frame->fmt->memplanes; i++) if (frame->payload[i] < plane_fmt[i].sizeimage) return -EPIPE; } } return 0; } static int fimc_cap_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct fimc_dev *fimc = video_drvdata(file); struct fimc_vid_cap *vc = &fimc->vid_cap; struct media_entity *entity = &vc->ve.vdev.entity; struct fimc_source_info *si = NULL; struct v4l2_subdev *sd; int ret; if (fimc_capture_active(fimc)) return -EBUSY; ret = 
media_entity_pipeline_start(entity, &vc->ve.pipe->mp); if (ret < 0) return ret; sd = __fimc_md_get_subdev(vc->ve.pipe, IDX_SENSOR); if (sd) si = v4l2_get_subdev_hostdata(sd); if (si == NULL) { ret = -EPIPE; goto err_p_stop; } /* * Save configuration data related to currently attached image * sensor or other data source, e.g. FIMC-IS. */ vc->source_config = *si; if (vc->input == GRP_ID_FIMC_IS) vc->source_config.fimc_bus_type = FIMC_BUS_TYPE_ISP_WRITEBACK; if (vc->user_subdev_api) { ret = fimc_pipeline_validate(fimc); if (ret < 0) goto err_p_stop; } ret = vb2_ioctl_streamon(file, priv, type); if (!ret) { vc->streaming = true; return ret; } err_p_stop: media_entity_pipeline_stop(entity); return ret; } static int fimc_cap_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) { struct fimc_dev *fimc = video_drvdata(file); struct fimc_vid_cap *vc = &fimc->vid_cap; int ret; ret = vb2_ioctl_streamoff(file, priv, type); if (ret < 0) return ret; media_entity_pipeline_stop(&vc->ve.vdev.entity); vc->streaming = false; return 0; } static int fimc_cap_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *reqbufs) { struct fimc_dev *fimc = video_drvdata(file); int ret; ret = vb2_ioctl_reqbufs(file, priv, reqbufs); if (!ret) fimc->vid_cap.reqbufs_count = reqbufs->count; return ret; } static int fimc_cap_g_selection(struct file *file, void *fh, struct v4l2_selection *s) { struct fimc_dev *fimc = video_drvdata(file); struct fimc_ctx *ctx = fimc->vid_cap.ctx; struct fimc_frame *f = &ctx->s_frame; if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) return -EINVAL; switch (s->target) { case V4L2_SEL_TGT_COMPOSE_DEFAULT: case V4L2_SEL_TGT_COMPOSE_BOUNDS: f = &ctx->d_frame; case V4L2_SEL_TGT_CROP_BOUNDS: case V4L2_SEL_TGT_CROP_DEFAULT: s->r.left = 0; s->r.top = 0; s->r.width = f->o_width; s->r.height = f->o_height; return 0; case V4L2_SEL_TGT_COMPOSE: f = &ctx->d_frame; case V4L2_SEL_TGT_CROP: s->r.left = f->offs_h; s->r.top = f->offs_v; s->r.width = f->width; 
s->r.height = f->height; return 0; } return -EINVAL; } /* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */ static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b) { if (a->left < b->left || a->top < b->top) return 0; if (a->left + a->width > b->left + b->width) return 0; if (a->top + a->height > b->top + b->height) return 0; return 1; } static int fimc_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s) { struct fimc_dev *fimc = video_drvdata(file); struct fimc_ctx *ctx = fimc->vid_cap.ctx; struct v4l2_rect rect = s->r; struct fimc_frame *f; unsigned long flags; if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) return -EINVAL; if (s->target == V4L2_SEL_TGT_COMPOSE) f = &ctx->d_frame; else if (s->target == V4L2_SEL_TGT_CROP) f = &ctx->s_frame; else return -EINVAL; fimc_capture_try_selection(ctx, &rect, s->target); if (s->flags & V4L2_SEL_FLAG_LE && !enclosed_rectangle(&rect, &s->r)) return -ERANGE; if (s->flags & V4L2_SEL_FLAG_GE && !enclosed_rectangle(&s->r, &rect)) return -ERANGE; s->r = rect; spin_lock_irqsave(&fimc->slock, flags); set_frame_crop(f, s->r.left, s->r.top, s->r.width, s->r.height); spin_unlock_irqrestore(&fimc->slock, flags); set_bit(ST_CAPT_APPLY_CFG, &fimc->state); return 0; } static const struct v4l2_ioctl_ops fimc_capture_ioctl_ops = { .vidioc_querycap = fimc_cap_querycap, .vidioc_enum_fmt_vid_cap_mplane = fimc_cap_enum_fmt_mplane, .vidioc_try_fmt_vid_cap_mplane = fimc_cap_try_fmt_mplane, .vidioc_s_fmt_vid_cap_mplane = fimc_cap_s_fmt_mplane, .vidioc_g_fmt_vid_cap_mplane = fimc_cap_g_fmt_mplane, .vidioc_reqbufs = fimc_cap_reqbufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_streamon = fimc_cap_streamon, .vidioc_streamoff = fimc_cap_streamoff, .vidioc_g_selection = fimc_cap_g_selection, 
.vidioc_s_selection = fimc_cap_s_selection, .vidioc_enum_input = fimc_cap_enum_input, .vidioc_s_input = fimc_cap_s_input, .vidioc_g_input = fimc_cap_g_input, }; /* Capture subdev media entity operations */ static int fimc_link_setup(struct media_entity *entity, const struct media_pad *local, const struct media_pad *remote, u32 flags) { struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); struct fimc_dev *fimc = v4l2_get_subdevdata(sd); struct fimc_vid_cap *vc = &fimc->vid_cap; struct v4l2_subdev *sensor; if (media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV) return -EINVAL; if (WARN_ON(fimc == NULL)) return 0; dbg("%s --> %s, flags: 0x%x. input: 0x%x", local->entity->name, remote->entity->name, flags, fimc->vid_cap.input); if (!(flags & MEDIA_LNK_FL_ENABLED)) { fimc->vid_cap.input = 0; return 0; } if (vc->input != 0) return -EBUSY; vc->input = sd->grp_id; if (vc->user_subdev_api || vc->inh_sensor_ctrls) return 0; /* Inherit V4L2 controls from the image sensor subdev. */ sensor = fimc_find_remote_sensor(&vc->subdev.entity); if (sensor == NULL) return 0; return v4l2_ctrl_add_handler(&vc->ctx->ctrls.handler, sensor->ctrl_handler, NULL); } static const struct media_entity_operations fimc_sd_media_ops = { .link_setup = fimc_link_setup, }; /** * fimc_sensor_notify - v4l2_device notification from a sensor subdev * @sd: pointer to a subdev generating the notification * @notification: the notification type, must be S5P_FIMC_TX_END_NOTIFY * @arg: pointer to an u32 type integer that stores the frame payload value * * The End Of Frame notification sent by sensor subdev in its still capture * mode. If there is only a single VSYNC generated by the sensor at the * beginning of a frame transmission, FIMC does not issue the LastIrq * (end of frame) interrupt. And this notification is used to complete the * frame capture and returning a buffer to user-space. Subdev drivers should * call this notification from their last 'End of frame capture' interrupt. 
*/ void fimc_sensor_notify(struct v4l2_subdev *sd, unsigned int notification, void *arg) { struct fimc_source_info *si; struct fimc_vid_buffer *buf; struct fimc_md *fmd; struct fimc_dev *fimc; unsigned long flags; if (sd == NULL) return; si = v4l2_get_subdev_hostdata(sd); fmd = entity_to_fimc_mdev(&sd->entity); spin_lock_irqsave(&fmd->slock, flags); fimc = si ? source_to_sensor_info(si)->host : NULL; if (fimc && arg && notification == S5P_FIMC_TX_END_NOTIFY && test_bit(ST_CAPT_PEND, &fimc->state)) { unsigned long irq_flags; spin_lock_irqsave(&fimc->slock, irq_flags); if (!list_empty(&fimc->vid_cap.active_buf_q)) { buf = list_entry(fimc->vid_cap.active_buf_q.next, struct fimc_vid_buffer, list); vb2_set_plane_payload(&buf->vb.vb2_buf, 0, *((u32 *)arg)); } fimc_capture_irq_handler(fimc, 1); fimc_deactivate_capture(fimc); spin_unlock_irqrestore(&fimc->slock, irq_flags); } spin_unlock_irqrestore(&fmd->slock, flags); } static int fimc_subdev_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_mbus_code_enum *code) { struct fimc_fmt *fmt; fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, code->index); if (!fmt) return -EINVAL; code->code = fmt->mbus_code; return 0; } static int fimc_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *fmt) { struct fimc_dev *fimc = v4l2_get_subdevdata(sd); struct fimc_ctx *ctx = fimc->vid_cap.ctx; struct fimc_frame *ff = &ctx->s_frame; struct v4l2_mbus_framefmt *mf; if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad); fmt->format = *mf; return 0; } mf = &fmt->format; mutex_lock(&fimc->lock); switch (fmt->pad) { case FIMC_SD_PAD_SOURCE: if (!WARN_ON(ff->fmt == NULL)) mf->code = ff->fmt->mbus_code; /* Sink pads crop rectangle size */ mf->width = ff->width; mf->height = ff->height; break; case FIMC_SD_PAD_SINK_FIFO: *mf = fimc->vid_cap.wb_fmt; break; case FIMC_SD_PAD_SINK_CAM: default: *mf = 
fimc->vid_cap.ci_fmt; break; } mutex_unlock(&fimc->lock); mf->colorspace = V4L2_COLORSPACE_JPEG; return 0; } static int fimc_subdev_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *fmt) { struct fimc_dev *fimc = v4l2_get_subdevdata(sd); struct v4l2_mbus_framefmt *mf = &fmt->format; struct fimc_vid_cap *vc = &fimc->vid_cap; struct fimc_ctx *ctx = vc->ctx; struct fimc_frame *ff; struct fimc_fmt *ffmt; dbg("pad%d: code: 0x%x, %dx%d", fmt->pad, mf->code, mf->width, mf->height); if (fmt->pad == FIMC_SD_PAD_SOURCE && vb2_is_busy(&vc->vbq)) return -EBUSY; mutex_lock(&fimc->lock); ffmt = fimc_capture_try_format(ctx, &mf->width, &mf->height, &mf->code, NULL, fmt->pad); mutex_unlock(&fimc->lock); mf->colorspace = V4L2_COLORSPACE_JPEG; if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad); *mf = fmt->format; return 0; } /* There must be a bug in the driver if this happens */ if (WARN_ON(ffmt == NULL)) return -EINVAL; /* Update RGB Alpha control state and value range */ fimc_alpha_ctrl_update(ctx); fimc_capture_mark_jpeg_xfer(ctx, ffmt->color); if (fmt->pad == FIMC_SD_PAD_SOURCE) { ff = &ctx->d_frame; /* Sink pads crop rectangle size */ mf->width = ctx->s_frame.width; mf->height = ctx->s_frame.height; } else { ff = &ctx->s_frame; } mutex_lock(&fimc->lock); set_frame_bounds(ff, mf->width, mf->height); if (fmt->pad == FIMC_SD_PAD_SINK_FIFO) vc->wb_fmt = *mf; else if (fmt->pad == FIMC_SD_PAD_SINK_CAM) vc->ci_fmt = *mf; ff->fmt = ffmt; /* Reset the crop rectangle if required. 
*/ if (!(fmt->pad == FIMC_SD_PAD_SOURCE && (ctx->state & FIMC_COMPOSE))) set_frame_crop(ff, 0, 0, mf->width, mf->height); if (fmt->pad != FIMC_SD_PAD_SOURCE) ctx->state &= ~FIMC_COMPOSE; mutex_unlock(&fimc->lock); return 0; } static int fimc_subdev_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_selection *sel) { struct fimc_dev *fimc = v4l2_get_subdevdata(sd); struct fimc_ctx *ctx = fimc->vid_cap.ctx; struct fimc_frame *f = &ctx->s_frame; struct v4l2_rect *r = &sel->r; struct v4l2_rect *try_sel; if (sel->pad == FIMC_SD_PAD_SOURCE) return -EINVAL; mutex_lock(&fimc->lock); switch (sel->target) { case V4L2_SEL_TGT_COMPOSE_BOUNDS: f = &ctx->d_frame; case V4L2_SEL_TGT_CROP_BOUNDS: r->width = f->o_width; r->height = f->o_height; r->left = 0; r->top = 0; mutex_unlock(&fimc->lock); return 0; case V4L2_SEL_TGT_CROP: try_sel = v4l2_subdev_get_try_crop(sd, cfg, sel->pad); break; case V4L2_SEL_TGT_COMPOSE: try_sel = v4l2_subdev_get_try_compose(sd, cfg, sel->pad); f = &ctx->d_frame; break; default: mutex_unlock(&fimc->lock); return -EINVAL; } if (sel->which == V4L2_SUBDEV_FORMAT_TRY) { sel->r = *try_sel; } else { r->left = f->offs_h; r->top = f->offs_v; r->width = f->width; r->height = f->height; } dbg("target %#x: l:%d, t:%d, %dx%d, f_w: %d, f_h: %d", sel->pad, r->left, r->top, r->width, r->height, f->f_width, f->f_height); mutex_unlock(&fimc->lock); return 0; } static int fimc_subdev_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_selection *sel) { struct fimc_dev *fimc = v4l2_get_subdevdata(sd); struct fimc_ctx *ctx = fimc->vid_cap.ctx; struct fimc_frame *f = &ctx->s_frame; struct v4l2_rect *r = &sel->r; struct v4l2_rect *try_sel; unsigned long flags; if (sel->pad == FIMC_SD_PAD_SOURCE) return -EINVAL; mutex_lock(&fimc->lock); fimc_capture_try_selection(ctx, r, V4L2_SEL_TGT_CROP); switch (sel->target) { case V4L2_SEL_TGT_CROP: try_sel = v4l2_subdev_get_try_crop(sd, cfg, sel->pad); 
break; case V4L2_SEL_TGT_COMPOSE: try_sel = v4l2_subdev_get_try_compose(sd, cfg, sel->pad); f = &ctx->d_frame; break; default: mutex_unlock(&fimc->lock); return -EINVAL; } if (sel->which == V4L2_SUBDEV_FORMAT_TRY) { *try_sel = sel->r; } else { spin_lock_irqsave(&fimc->slock, flags); set_frame_crop(f, r->left, r->top, r->width, r->height); set_bit(ST_CAPT_APPLY_CFG, &fimc->state); if (sel->target == V4L2_SEL_TGT_COMPOSE) ctx->state |= FIMC_COMPOSE; spin_unlock_irqrestore(&fimc->slock, flags); } dbg("target %#x: (%d,%d)/%dx%d", sel->target, r->left, r->top, r->width, r->height); mutex_unlock(&fimc->lock); return 0; } static struct v4l2_subdev_pad_ops fimc_subdev_pad_ops = { .enum_mbus_code = fimc_subdev_enum_mbus_code, .get_selection = fimc_subdev_get_selection, .set_selection = fimc_subdev_set_selection, .get_fmt = fimc_subdev_get_fmt, .set_fmt = fimc_subdev_set_fmt, }; static struct v4l2_subdev_ops fimc_subdev_ops = { .pad = &fimc_subdev_pad_ops, }; /* Set default format at the sensor and host interface */ static int fimc_capture_set_default_format(struct fimc_dev *fimc) { struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, .fmt.pix_mp = { .width = FIMC_DEFAULT_WIDTH, .height = FIMC_DEFAULT_HEIGHT, .pixelformat = V4L2_PIX_FMT_YUYV, .field = V4L2_FIELD_NONE, .colorspace = V4L2_COLORSPACE_JPEG, }, }; return __fimc_capture_set_format(fimc, &fmt); } /* fimc->lock must be already initialized */ static int fimc_register_capture_device(struct fimc_dev *fimc, struct v4l2_device *v4l2_dev) { struct video_device *vfd = &fimc->vid_cap.ve.vdev; struct vb2_queue *q = &fimc->vid_cap.vbq; struct fimc_ctx *ctx; struct fimc_vid_cap *vid_cap; struct fimc_fmt *fmt; int ret = -ENOMEM; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->fimc_dev = fimc; ctx->in_path = FIMC_IO_CAMERA; ctx->out_path = FIMC_IO_DMA; ctx->state = FIMC_CTX_CAP; ctx->s_frame.fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, 0); ctx->d_frame.fmt = ctx->s_frame.fmt; 
memset(vfd, 0, sizeof(*vfd)); snprintf(vfd->name, sizeof(vfd->name), "fimc.%d.capture", fimc->id); vfd->fops = &fimc_capture_fops; vfd->ioctl_ops = &fimc_capture_ioctl_ops; vfd->v4l2_dev = v4l2_dev; vfd->minor = -1; vfd->release = video_device_release_empty; vfd->queue = q; vfd->lock = &fimc->lock; video_set_drvdata(vfd, fimc); vid_cap = &fimc->vid_cap; vid_cap->active_buf_cnt = 0; vid_cap->reqbufs_count = 0; vid_cap->ctx = ctx; INIT_LIST_HEAD(&vid_cap->pending_buf_q); INIT_LIST_HEAD(&vid_cap->active_buf_q); memset(q, 0, sizeof(*q)); q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; q->drv_priv = ctx; q->ops = &fimc_capture_qops; q->mem_ops = &vb2_dma_contig_memops; q->buf_struct_size = sizeof(struct fimc_vid_buffer); q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q->lock = &fimc->lock; ret = vb2_queue_init(q); if (ret) goto err_free_ctx; /* Default format configuration */ fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, 0); vid_cap->ci_fmt.width = FIMC_DEFAULT_WIDTH; vid_cap->ci_fmt.height = FIMC_DEFAULT_HEIGHT; vid_cap->ci_fmt.code = fmt->mbus_code; ctx->s_frame.width = FIMC_DEFAULT_WIDTH; ctx->s_frame.height = FIMC_DEFAULT_HEIGHT; ctx->s_frame.fmt = fmt; fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_WRITEBACK, 0); vid_cap->wb_fmt = vid_cap->ci_fmt; vid_cap->wb_fmt.code = fmt->mbus_code; vid_cap->vd_pad.flags = MEDIA_PAD_FL_SINK; ret = media_entity_init(&vfd->entity, 1, &vid_cap->vd_pad, 0); if (ret) goto err_free_ctx; ret = fimc_ctrls_create(ctx); if (ret) goto err_me_cleanup; ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1); if (ret) goto err_ctrl_free; v4l2_info(v4l2_dev, "Registered %s as /dev/%s\n", vfd->name, video_device_node_name(vfd)); vfd->ctrl_handler = &ctx->ctrls.handler; return 0; err_ctrl_free: fimc_ctrls_delete(ctx); err_me_cleanup: media_entity_cleanup(&vfd->entity); err_free_ctx: kfree(ctx); return ret; } static int fimc_capture_subdev_registered(struct v4l2_subdev *sd) { struct 
fimc_dev *fimc = v4l2_get_subdevdata(sd); int ret; if (fimc == NULL) return -ENXIO; ret = fimc_register_m2m_device(fimc, sd->v4l2_dev); if (ret) return ret; fimc->vid_cap.ve.pipe = v4l2_get_subdev_hostdata(sd); ret = fimc_register_capture_device(fimc, sd->v4l2_dev); if (ret) { fimc_unregister_m2m_device(fimc); fimc->vid_cap.ve.pipe = NULL; } return ret; } static void fimc_capture_subdev_unregistered(struct v4l2_subdev *sd) { struct fimc_dev *fimc = v4l2_get_subdevdata(sd); struct video_device *vdev; if (fimc == NULL) return; mutex_lock(&fimc->lock); fimc_unregister_m2m_device(fimc); vdev = &fimc->vid_cap.ve.vdev; if (video_is_registered(vdev)) { video_unregister_device(vdev); media_entity_cleanup(&vdev->entity); fimc_ctrls_delete(fimc->vid_cap.ctx); fimc->vid_cap.ve.pipe = NULL; } kfree(fimc->vid_cap.ctx); fimc->vid_cap.ctx = NULL; mutex_unlock(&fimc->lock); } static const struct v4l2_subdev_internal_ops fimc_capture_sd_internal_ops = { .registered = fimc_capture_subdev_registered, .unregistered = fimc_capture_subdev_unregistered, }; int fimc_initialize_capture_subdev(struct fimc_dev *fimc) { struct v4l2_subdev *sd = &fimc->vid_cap.subdev; int ret; v4l2_subdev_init(sd, &fimc_subdev_ops); sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; snprintf(sd->name, sizeof(sd->name), "FIMC.%d", fimc->id); fimc->vid_cap.sd_pads[FIMC_SD_PAD_SINK_CAM].flags = MEDIA_PAD_FL_SINK; fimc->vid_cap.sd_pads[FIMC_SD_PAD_SINK_FIFO].flags = MEDIA_PAD_FL_SINK; fimc->vid_cap.sd_pads[FIMC_SD_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_init(&sd->entity, FIMC_SD_PADS_NUM, fimc->vid_cap.sd_pads, 0); if (ret) return ret; sd->entity.ops = &fimc_sd_media_ops; sd->internal_ops = &fimc_capture_sd_internal_ops; v4l2_set_subdevdata(sd, fimc); return 0; } void fimc_unregister_capture_subdev(struct fimc_dev *fimc) { struct v4l2_subdev *sd = &fimc->vid_cap.subdev; v4l2_device_unregister_subdev(sd); media_entity_cleanup(&sd->entity); v4l2_set_subdevdata(sd, NULL); }
gpl-2.0
tjwei/acer-stream-minimal-kernel
sound/soc/s3c24xx/neo1973_wm8753.c
146
18745
/* * neo1973_wm8753.c -- SoC audio for Neo1973 * * Copyright 2007 Wolfson Microelectronics PLC. * Author: Graeme Gregory * graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/tlv.h> #include <asm/mach-types.h> #include <asm/hardware/scoop.h> #include <mach/regs-clock.h> #include <mach/regs-gpio.h> #include <mach/hardware.h> #include <mach/audio.h> #include <linux/io.h> #include <mach/spi-gpio.h> #include <asm/plat-s3c24xx/regs-iis.h> #include "../codecs/wm8753.h" #include "lm4857.h" #include "s3c24xx-pcm.h" #include "s3c24xx-i2s.h" /* Debugging stuff */ #define S3C24XX_SOC_NEO1973_WM8753_DEBUG 0 #if S3C24XX_SOC_NEO1973_WM8753_DEBUG #define DBG(x...) printk(KERN_DEBUG "s3c24xx-soc-neo1973-wm8753: " x) #else #define DBG(x...) 
#endif /* define the scenarios */ #define NEO_AUDIO_OFF 0 #define NEO_GSM_CALL_AUDIO_HANDSET 1 #define NEO_GSM_CALL_AUDIO_HEADSET 2 #define NEO_GSM_CALL_AUDIO_BLUETOOTH 3 #define NEO_STEREO_TO_SPEAKERS 4 #define NEO_STEREO_TO_HEADPHONES 5 #define NEO_CAPTURE_HANDSET 6 #define NEO_CAPTURE_HEADSET 7 #define NEO_CAPTURE_BLUETOOTH 8 static struct snd_soc_card neo1973; static struct i2c_client *i2c; static int neo1973_hifi_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->dai->codec_dai; struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; unsigned int pll_out = 0, bclk = 0; int ret = 0; unsigned long iis_clkrate; DBG("Entered %s\n", __func__); iis_clkrate = s3c24xx_i2s_get_clockrate(); switch (params_rate(params)) { case 8000: case 16000: pll_out = 12288000; break; case 48000: bclk = WM8753_BCLK_DIV_4; pll_out = 12288000; break; case 96000: bclk = WM8753_BCLK_DIV_2; pll_out = 12288000; break; case 11025: bclk = WM8753_BCLK_DIV_16; pll_out = 11289600; break; case 22050: bclk = WM8753_BCLK_DIV_8; pll_out = 11289600; break; case 44100: bclk = WM8753_BCLK_DIV_4; pll_out = 11289600; break; case 88200: bclk = WM8753_BCLK_DIV_2; pll_out = 11289600; break; } /* set codec DAI configuration */ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM); if (ret < 0) return ret; /* set cpu DAI configuration */ ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM); if (ret < 0) return ret; /* set the codec system clock for DAC and ADC */ ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_MCLK, pll_out, SND_SOC_CLOCK_IN); if (ret < 0) return ret; /* set MCLK division for sample rate */ ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK, S3C2410_IISMOD_32FS); if (ret < 0) return ret; /* set codec BCLK division for sample rate */ ret = snd_soc_dai_set_clkdiv(codec_dai, 
WM8753_BCLKDIV, bclk); if (ret < 0) return ret; /* set prescaler division for sample rate */ ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER, S3C24XX_PRESCALE(4, 4)); if (ret < 0) return ret; /* codec PLL input is PCLK/4 */ ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL1, iis_clkrate / 4, pll_out); if (ret < 0) return ret; return 0; } static int neo1973_hifi_hw_free(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->dai->codec_dai; DBG("Entered %s\n", __func__); /* disable the PLL */ return snd_soc_dai_set_pll(codec_dai, WM8753_PLL1, 0, 0); } /* * Neo1973 WM8753 HiFi DAI opserations. */ static struct snd_soc_ops neo1973_hifi_ops = { .hw_params = neo1973_hifi_hw_params, .hw_free = neo1973_hifi_hw_free, }; static int neo1973_voice_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->dai->codec_dai; unsigned int pcmdiv = 0; int ret = 0; unsigned long iis_clkrate; DBG("Entered %s\n", __func__); iis_clkrate = s3c24xx_i2s_get_clockrate(); if (params_rate(params) != 8000) return -EINVAL; if (params_channels(params) != 1) return -EINVAL; pcmdiv = WM8753_PCM_DIV_6; /* 2.048 MHz */ /* todo: gg check mode (DSP_B) against CSR datasheet */ /* set codec DAI configuration */ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS); if (ret < 0) return ret; /* set the codec system clock for DAC and ADC */ ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_PCMCLK, 12288000, SND_SOC_CLOCK_IN); if (ret < 0) return ret; /* set codec PCM division for sample rate */ ret = snd_soc_dai_set_clkdiv(codec_dai, WM8753_PCMDIV, pcmdiv); if (ret < 0) return ret; /* configue and enable PLL for 12.288MHz output */ ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, iis_clkrate / 4, 12288000); if (ret < 0) return ret; return 0; } static 
int neo1973_voice_hw_free(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->dai->codec_dai; DBG("Entered %s\n", __func__); /* disable the PLL */ return snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0, 0); } static struct snd_soc_ops neo1973_voice_ops = { .hw_params = neo1973_voice_hw_params, .hw_free = neo1973_voice_hw_free, }; static int neo1973_scenario; static int neo1973_get_scenario(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = neo1973_scenario; return 0; } static int set_scenario_endpoints(struct snd_soc_codec *codec, int scenario) { DBG("Entered %s\n", __func__); switch (neo1973_scenario) { case NEO_AUDIO_OFF: snd_soc_dapm_disable_pin(codec, "Audio Out"); snd_soc_dapm_disable_pin(codec, "GSM Line Out"); snd_soc_dapm_disable_pin(codec, "GSM Line In"); snd_soc_dapm_disable_pin(codec, "Headset Mic"); snd_soc_dapm_disable_pin(codec, "Call Mic"); break; case NEO_GSM_CALL_AUDIO_HANDSET: snd_soc_dapm_enable_pin(codec, "Audio Out"); snd_soc_dapm_enable_pin(codec, "GSM Line Out"); snd_soc_dapm_enable_pin(codec, "GSM Line In"); snd_soc_dapm_disable_pin(codec, "Headset Mic"); snd_soc_dapm_enable_pin(codec, "Call Mic"); break; case NEO_GSM_CALL_AUDIO_HEADSET: snd_soc_dapm_enable_pin(codec, "Audio Out"); snd_soc_dapm_enable_pin(codec, "GSM Line Out"); snd_soc_dapm_enable_pin(codec, "GSM Line In"); snd_soc_dapm_enable_pin(codec, "Headset Mic"); snd_soc_dapm_disable_pin(codec, "Call Mic"); break; case NEO_GSM_CALL_AUDIO_BLUETOOTH: snd_soc_dapm_disable_pin(codec, "Audio Out"); snd_soc_dapm_enable_pin(codec, "GSM Line Out"); snd_soc_dapm_enable_pin(codec, "GSM Line In"); snd_soc_dapm_disable_pin(codec, "Headset Mic"); snd_soc_dapm_disable_pin(codec, "Call Mic"); break; case NEO_STEREO_TO_SPEAKERS: snd_soc_dapm_enable_pin(codec, "Audio Out"); snd_soc_dapm_disable_pin(codec, "GSM Line Out"); snd_soc_dapm_disable_pin(codec, "GSM Line 
In"); snd_soc_dapm_disable_pin(codec, "Headset Mic"); snd_soc_dapm_disable_pin(codec, "Call Mic"); break; case NEO_STEREO_TO_HEADPHONES: snd_soc_dapm_enable_pin(codec, "Audio Out"); snd_soc_dapm_disable_pin(codec, "GSM Line Out"); snd_soc_dapm_disable_pin(codec, "GSM Line In"); snd_soc_dapm_disable_pin(codec, "Headset Mic"); snd_soc_dapm_disable_pin(codec, "Call Mic"); break; case NEO_CAPTURE_HANDSET: snd_soc_dapm_disable_pin(codec, "Audio Out"); snd_soc_dapm_disable_pin(codec, "GSM Line Out"); snd_soc_dapm_disable_pin(codec, "GSM Line In"); snd_soc_dapm_disable_pin(codec, "Headset Mic"); snd_soc_dapm_enable_pin(codec, "Call Mic"); break; case NEO_CAPTURE_HEADSET: snd_soc_dapm_disable_pin(codec, "Audio Out"); snd_soc_dapm_disable_pin(codec, "GSM Line Out"); snd_soc_dapm_disable_pin(codec, "GSM Line In"); snd_soc_dapm_enable_pin(codec, "Headset Mic"); snd_soc_dapm_disable_pin(codec, "Call Mic"); break; case NEO_CAPTURE_BLUETOOTH: snd_soc_dapm_disable_pin(codec, "Audio Out"); snd_soc_dapm_disable_pin(codec, "GSM Line Out"); snd_soc_dapm_disable_pin(codec, "GSM Line In"); snd_soc_dapm_disable_pin(codec, "Headset Mic"); snd_soc_dapm_disable_pin(codec, "Call Mic"); break; default: snd_soc_dapm_disable_pin(codec, "Audio Out"); snd_soc_dapm_disable_pin(codec, "GSM Line Out"); snd_soc_dapm_disable_pin(codec, "GSM Line In"); snd_soc_dapm_disable_pin(codec, "Headset Mic"); snd_soc_dapm_disable_pin(codec, "Call Mic"); } snd_soc_dapm_sync(codec); return 0; } static int neo1973_set_scenario(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); DBG("Entered %s\n", __func__); if (neo1973_scenario == ucontrol->value.integer.value[0]) return 0; neo1973_scenario = ucontrol->value.integer.value[0]; set_scenario_endpoints(codec, neo1973_scenario); return 1; } static u8 lm4857_regs[4] = {0x00, 0x40, 0x80, 0xC0}; static void lm4857_write_regs(void) { DBG("Entered %s\n", __func__); if (i2c_master_send(i2c, 
lm4857_regs, 4) != 4) printk(KERN_ERR "lm4857: i2c write failed\n"); } static int lm4857_get_reg(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int reg = kcontrol->private_value & 0xFF; int shift = (kcontrol->private_value >> 8) & 0x0F; int mask = (kcontrol->private_value >> 16) & 0xFF; DBG("Entered %s\n", __func__); ucontrol->value.integer.value[0] = (lm4857_regs[reg] >> shift) & mask; return 0; } static int lm4857_set_reg(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int reg = kcontrol->private_value & 0xFF; int shift = (kcontrol->private_value >> 8) & 0x0F; int mask = (kcontrol->private_value >> 16) & 0xFF; if (((lm4857_regs[reg] >> shift) & mask) == ucontrol->value.integer.value[0]) return 0; lm4857_regs[reg] &= ~(mask << shift); lm4857_regs[reg] |= ucontrol->value.integer.value[0] << shift; lm4857_write_regs(); return 1; } static int lm4857_get_mode(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { u8 value = lm4857_regs[LM4857_CTRL] & 0x0F; DBG("Entered %s\n", __func__); if (value) value -= 5; ucontrol->value.integer.value[0] = value; return 0; } static int lm4857_set_mode(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { u8 value = ucontrol->value.integer.value[0]; DBG("Entered %s\n", __func__); if (value) value += 5; if ((lm4857_regs[LM4857_CTRL] & 0x0F) == value) return 0; lm4857_regs[LM4857_CTRL] &= 0xF0; lm4857_regs[LM4857_CTRL] |= value; lm4857_write_regs(); return 1; } static const struct snd_soc_dapm_widget wm8753_dapm_widgets[] = { SND_SOC_DAPM_LINE("Audio Out", NULL), SND_SOC_DAPM_LINE("GSM Line Out", NULL), SND_SOC_DAPM_LINE("GSM Line In", NULL), SND_SOC_DAPM_MIC("Headset Mic", NULL), SND_SOC_DAPM_MIC("Call Mic", NULL), }; static const struct snd_soc_dapm_route dapm_routes[] = { /* Connections to the lm4857 amp */ {"Audio Out", NULL, "LOUT1"}, {"Audio Out", NULL, "ROUT1"}, /* Connections to the GSM Module */ {"GSM Line Out", NULL, "MONO1"}, {"GSM Line Out", NULL, 
"MONO2"}, {"RXP", NULL, "GSM Line In"}, {"RXN", NULL, "GSM Line In"}, /* Connections to Headset */ {"MIC1", NULL, "Mic Bias"}, {"Mic Bias", NULL, "Headset Mic"}, /* Call Mic */ {"MIC2", NULL, "Mic Bias"}, {"MIC2N", NULL, "Mic Bias"}, {"Mic Bias", NULL, "Call Mic"}, /* Connect the ALC pins */ {"ACIN", NULL, "ACOP"}, }; static const char *lm4857_mode[] = { "Off", "Call Speaker", "Stereo Speakers", "Stereo Speakers + Headphones", "Headphones" }; static const struct soc_enum lm4857_mode_enum[] = { SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(lm4857_mode), lm4857_mode), }; static const char *neo_scenarios[] = { "Off", "GSM Handset", "GSM Headset", "GSM Bluetooth", "Speakers", "Headphones", "Capture Handset", "Capture Headset", "Capture Bluetooth" }; static const struct soc_enum neo_scenario_enum[] = { SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(neo_scenarios), neo_scenarios), }; static const DECLARE_TLV_DB_SCALE(stereo_tlv, -4050, 150, 0); static const DECLARE_TLV_DB_SCALE(mono_tlv, -3450, 150, 0); static const struct snd_kcontrol_new wm8753_neo1973_controls[] = { SOC_SINGLE_EXT_TLV("Amp Left Playback Volume", LM4857_LVOL, 0, 31, 0, lm4857_get_reg, lm4857_set_reg, stereo_tlv), SOC_SINGLE_EXT_TLV("Amp Right Playback Volume", LM4857_RVOL, 0, 31, 0, lm4857_get_reg, lm4857_set_reg, stereo_tlv), SOC_SINGLE_EXT_TLV("Amp Mono Playback Volume", LM4857_MVOL, 0, 31, 0, lm4857_get_reg, lm4857_set_reg, mono_tlv), SOC_ENUM_EXT("Amp Mode", lm4857_mode_enum[0], lm4857_get_mode, lm4857_set_mode), SOC_ENUM_EXT("Neo Mode", neo_scenario_enum[0], neo1973_get_scenario, neo1973_set_scenario), SOC_SINGLE_EXT("Amp Spk 3D Playback Switch", LM4857_LVOL, 5, 1, 0, lm4857_get_reg, lm4857_set_reg), SOC_SINGLE_EXT("Amp HP 3d Playback Switch", LM4857_RVOL, 5, 1, 0, lm4857_get_reg, lm4857_set_reg), SOC_SINGLE_EXT("Amp Fast Wakeup Playback Switch", LM4857_CTRL, 5, 1, 0, lm4857_get_reg, lm4857_set_reg), SOC_SINGLE_EXT("Amp Earpiece 6dB Playback Switch", LM4857_CTRL, 4, 1, 0, lm4857_get_reg, lm4857_set_reg), }; /* * This is an 
example machine initialisation for a wm8753 connected to a * neo1973 II. It is missing logic to detect hp/mic insertions and logic * to re-route the audio in such an event. */ static int neo1973_wm8753_init(struct snd_soc_codec *codec) { int i, err; DBG("Entered %s\n", __func__); /* set up NC codec pins */ snd_soc_dapm_nc_pin(codec, "LOUT2"); snd_soc_dapm_nc_pin(codec, "ROUT2"); snd_soc_dapm_nc_pin(codec, "OUT3"); snd_soc_dapm_nc_pin(codec, "OUT4"); snd_soc_dapm_nc_pin(codec, "LINE1"); snd_soc_dapm_nc_pin(codec, "LINE2"); /* Add neo1973 specific widgets */ snd_soc_dapm_new_controls(codec, wm8753_dapm_widgets, ARRAY_SIZE(wm8753_dapm_widgets)); /* set endpoints to default mode */ set_scenario_endpoints(codec, NEO_AUDIO_OFF); /* add neo1973 specific controls */ for (i = 0; i < ARRAY_SIZE(wm8753_neo1973_controls); i++) { err = snd_ctl_add(codec->card, snd_soc_cnew(&wm8753_neo1973_controls[i], codec, NULL)); if (err < 0) return err; } /* set up neo1973 specific audio routes */ err = snd_soc_dapm_add_routes(codec, dapm_routes, ARRAY_SIZE(dapm_routes)); snd_soc_dapm_sync(codec); return 0; } /* * BT Codec DAI */ static struct snd_soc_dai bt_dai = { .name = "Bluetooth", .id = 0, .playback = { .channels_min = 1, .channels_max = 1, .rates = SNDRV_PCM_RATE_8000, .formats = SNDRV_PCM_FMTBIT_S16_LE,}, .capture = { .channels_min = 1, .channels_max = 1, .rates = SNDRV_PCM_RATE_8000, .formats = SNDRV_PCM_FMTBIT_S16_LE,}, }; static struct snd_soc_dai_link neo1973_dai[] = { { /* Hifi Playback - for similatious use with voice below */ .name = "WM8753", .stream_name = "WM8753 HiFi", .cpu_dai = &s3c24xx_i2s_dai, .codec_dai = &wm8753_dai[WM8753_DAI_HIFI], .init = neo1973_wm8753_init, .ops = &neo1973_hifi_ops, }, { /* Voice via BT */ .name = "Bluetooth", .stream_name = "Voice", .cpu_dai = &bt_dai, .codec_dai = &wm8753_dai[WM8753_DAI_VOICE], .ops = &neo1973_voice_ops, }, }; static struct snd_soc_card neo1973 = { .name = "neo1973", .platform = &s3c24xx_soc_platform, .dai_link = neo1973_dai, 
.num_links = ARRAY_SIZE(neo1973_dai), }; static struct wm8753_setup_data neo1973_wm8753_setup = { .i2c_bus = 0, .i2c_address = 0x1a, }; static struct snd_soc_device neo1973_snd_devdata = { .card = &neo1973, .codec_dev = &soc_codec_dev_wm8753, .codec_data = &neo1973_wm8753_setup, }; static int lm4857_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { DBG("Entered %s\n", __func__); i2c = client; lm4857_write_regs(); return 0; } static int lm4857_i2c_remove(struct i2c_client *client) { DBG("Entered %s\n", __func__); i2c = NULL; return 0; } static u8 lm4857_state; static int lm4857_suspend(struct i2c_client *dev, pm_message_t state) { DBG("Entered %s\n", __func__); dev_dbg(&dev->dev, "lm4857_suspend\n"); lm4857_state = lm4857_regs[LM4857_CTRL] & 0xf; if (lm4857_state) { lm4857_regs[LM4857_CTRL] &= 0xf0; lm4857_write_regs(); } return 0; } static int lm4857_resume(struct i2c_client *dev) { DBG("Entered %s\n", __func__); if (lm4857_state) { lm4857_regs[LM4857_CTRL] |= (lm4857_state & 0x0f); lm4857_write_regs(); } return 0; } static void lm4857_shutdown(struct i2c_client *dev) { DBG("Entered %s\n", __func__); dev_dbg(&dev->dev, "lm4857_shutdown\n"); lm4857_regs[LM4857_CTRL] &= 0xf0; lm4857_write_regs(); } static const struct i2c_device_id lm4857_i2c_id[] = { { "neo1973_lm4857", 0 }, { } }; static struct i2c_driver lm4857_i2c_driver = { .driver = { .name = "LM4857 I2C Amp", .owner = THIS_MODULE, }, .suspend = lm4857_suspend, .resume = lm4857_resume, .shutdown = lm4857_shutdown, .probe = lm4857_i2c_probe, .remove = lm4857_i2c_remove, .id_table = lm4857_i2c_id, }; static struct platform_device *neo1973_snd_device; static int __init neo1973_init(void) { int ret; DBG("Entered %s\n", __func__); if (!machine_is_neo1973_gta01()) { printk(KERN_INFO "Only GTA01 hardware supported by ASoC driver\n"); return -ENODEV; } neo1973_snd_device = platform_device_alloc("soc-audio", -1); if (!neo1973_snd_device) return -ENOMEM; platform_set_drvdata(neo1973_snd_device, 
&neo1973_snd_devdata); neo1973_snd_devdata.dev = &neo1973_snd_device->dev; ret = platform_device_add(neo1973_snd_device); if (ret) { platform_device_put(neo1973_snd_device); return ret; } ret = i2c_add_driver(&lm4857_i2c_driver); if (ret != 0) platform_device_unregister(neo1973_snd_device); return ret; } static void __exit neo1973_exit(void) { DBG("Entered %s\n", __func__); i2c_del_driver(&lm4857_i2c_driver); platform_device_unregister(neo1973_snd_device); } module_init(neo1973_init); module_exit(neo1973_exit); /* Module information */ MODULE_AUTHOR("Graeme Gregory, graeme@openmoko.org, www.openmoko.org"); MODULE_DESCRIPTION("ALSA SoC WM8753 Neo1973"); MODULE_LICENSE("GPL");
gpl-2.0
SubhrajyotiSen/HelioxKernelOnyx
net/x25/af_x25.c
658
41783
/* * X.25 Packet Layer release 002 * * This is ALPHA test software. This code may break your machine, * randomly fail to work with new releases, misbehave and/or generally * screw up. It might even work. * * This code REQUIRES 2.1.15 or higher * * This module: * This module is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * History * X.25 001 Jonathan Naylor Started coding. * X.25 002 Jonathan Naylor Centralised disconnect handling. * New timer architecture. * 2000-03-11 Henner Eisen MSG_EOR handling more POSIX compliant. * 2000-03-22 Daniela Squassoni Allowed disabling/enabling of * facilities negotiation and increased * the throughput upper limit. * 2000-08-27 Arnaldo C. Melo s/suser/capable/ + micro cleanups * 2000-09-04 Henner Eisen Set sock->state in x25_accept(). * Fixed x25_output() related skb leakage. * 2000-10-02 Henner Eisen Made x25_kick() single threaded per socket. * 2000-10-27 Henner Eisen MSG_DONTWAIT for fragment allocation. * 2000-11-14 Henner Eisen Closing datalink from NETDEV_GOING_DOWN * 2002-10-06 Arnaldo C. 
Melo Get rid of cli/sti, move proc stuff to * x25_proc.c, using seq_file * 2005-04-02 Shaun Pereira Selective sub address matching * with call user data * 2005-04-15 Shaun Pereira Fast select with no restriction on * response */ #include <linux/module.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/sock.h> #include <net/tcp_states.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/termios.h> /* For TIOCINQ/OUTQ */ #include <linux/notifier.h> #include <linux/init.h> #include <linux/compat.h> #include <linux/ctype.h> #include <net/x25.h> #include <net/compat.h> #ifdef KW_TAINT_ANALYSIS extern void * get_tainted_stuff(); #endif int sysctl_x25_restart_request_timeout = X25_DEFAULT_T20; int sysctl_x25_call_request_timeout = X25_DEFAULT_T21; int sysctl_x25_reset_request_timeout = X25_DEFAULT_T22; int sysctl_x25_clear_request_timeout = X25_DEFAULT_T23; int sysctl_x25_ack_holdback_timeout = X25_DEFAULT_T2; int sysctl_x25_forward = 0; HLIST_HEAD(x25_list); DEFINE_RWLOCK(x25_list_lock); static const struct proto_ops x25_proto_ops; static struct x25_address null_x25_address = {" "}; #ifdef CONFIG_COMPAT struct compat_x25_subscrip_struct { char device[200-sizeof(compat_ulong_t)]; compat_ulong_t global_facil_mask; compat_uint_t extended; }; #endif int x25_parse_address_block(struct sk_buff *skb, struct x25_address *called_addr, struct x25_address *calling_addr) { unsigned char len; int needed; int rc; if (!pskb_may_pull(skb, 1)) { /* packet has no address block */ rc = 0; goto empty; } len = *skb->data; needed = 1 + (len >> 4) + (len & 0x0f); if (!pskb_may_pull(skb, needed)) { /* packet is too short to hold the addresses it claims to hold */ rc = -1; goto empty; } return x25_addr_ntoa(skb->data, called_addr, 
calling_addr); empty: *called_addr->x25_addr = 0; *calling_addr->x25_addr = 0; return rc; } int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr, struct x25_address *calling_addr) { unsigned int called_len, calling_len; char *called, *calling; unsigned int i; called_len = (*p >> 0) & 0x0F; calling_len = (*p >> 4) & 0x0F; called = called_addr->x25_addr; calling = calling_addr->x25_addr; p++; for (i = 0; i < (called_len + calling_len); i++) { if (i < called_len) { if (i % 2 != 0) { *called++ = ((*p >> 0) & 0x0F) + '0'; p++; } else { *called++ = ((*p >> 4) & 0x0F) + '0'; } } else { if (i % 2 != 0) { *calling++ = ((*p >> 0) & 0x0F) + '0'; p++; } else { *calling++ = ((*p >> 4) & 0x0F) + '0'; } } } *called = *calling = '\0'; return 1 + (called_len + calling_len + 1) / 2; } int x25_addr_aton(unsigned char *p, struct x25_address *called_addr, struct x25_address *calling_addr) { unsigned int called_len, calling_len; char *called, *calling; int i; called = called_addr->x25_addr; calling = calling_addr->x25_addr; called_len = strlen(called); calling_len = strlen(calling); *p++ = (calling_len << 4) | (called_len << 0); for (i = 0; i < (called_len + calling_len); i++) { if (i < called_len) { if (i % 2 != 0) { *p |= (*called++ - '0') << 0; p++; } else { *p = 0x00; *p |= (*called++ - '0') << 4; } } else { if (i % 2 != 0) { *p |= (*calling++ - '0') << 0; p++; } else { *p = 0x00; *p |= (*calling++ - '0') << 4; } } } return 1 + (called_len + calling_len + 1) / 2; } /* * Socket removal during an interrupt is now safe. */ static void x25_remove_socket(struct sock *sk) { write_lock_bh(&x25_list_lock); sk_del_node_init(sk); write_unlock_bh(&x25_list_lock); } /* * Kill all bound sockets on a dropped device. 
*/ static void x25_kill_by_device(struct net_device *dev) { struct sock *s; struct hlist_node *node; write_lock_bh(&x25_list_lock); sk_for_each(s, node, &x25_list) if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev) x25_disconnect(s, ENETUNREACH, 0, 0); write_unlock_bh(&x25_list_lock); } /* * Handle device status changes. */ static int x25_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = ptr; struct x25_neigh *nb; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (dev->type == ARPHRD_X25 #if IS_ENABLED(CONFIG_LLC) || dev->type == ARPHRD_ETHER #endif ) { switch (event) { case NETDEV_UP: x25_link_device_up(dev); break; case NETDEV_GOING_DOWN: nb = x25_get_neigh(dev); if (nb) { x25_terminate_link(nb); x25_neigh_put(nb); } break; case NETDEV_DOWN: x25_kill_by_device(dev); x25_route_device_down(dev); x25_link_device_down(dev); break; } } return NOTIFY_DONE; } /* * Add a socket to the bound sockets list. */ static void x25_insert_socket(struct sock *sk) { write_lock_bh(&x25_list_lock); sk_add_node(sk, &x25_list); write_unlock_bh(&x25_list_lock); } /* * Find a socket that wants to accept the Call Request we just * received. Check the full list for an address/cud match. * If no cuds match return the next_best thing, an address match. * Note: if a listening socket has cud set it must only get calls * with matching cud. 
*/ static struct sock *x25_find_listener(struct x25_address *addr, struct sk_buff *skb) { struct sock *s; struct sock *next_best; struct hlist_node *node; read_lock_bh(&x25_list_lock); next_best = NULL; sk_for_each(s, node, &x25_list) if ((!strcmp(addr->x25_addr, x25_sk(s)->source_addr.x25_addr) || !strcmp(addr->x25_addr, null_x25_address.x25_addr)) && s->sk_state == TCP_LISTEN) { /* * Found a listening socket, now check the incoming * call user data vs this sockets call user data */ if (x25_sk(s)->cudmatchlength > 0 && skb->len >= x25_sk(s)->cudmatchlength) { if((memcmp(x25_sk(s)->calluserdata.cuddata, skb->data, x25_sk(s)->cudmatchlength)) == 0) { sock_hold(s); goto found; } } else next_best = s; } if (next_best) { s = next_best; sock_hold(s); goto found; } s = NULL; found: read_unlock_bh(&x25_list_lock); return s; } /* * Find a connected X.25 socket given my LCI and neighbour. */ static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb) { struct sock *s; struct hlist_node *node; sk_for_each(s, node, &x25_list) if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) { sock_hold(s); goto found; } s = NULL; found: return s; } struct sock *x25_find_socket(unsigned int lci, struct x25_neigh *nb) { struct sock *s; read_lock_bh(&x25_list_lock); s = __x25_find_socket(lci, nb); read_unlock_bh(&x25_list_lock); return s; } /* * Find a unique LCI for a given device. */ static unsigned int x25_new_lci(struct x25_neigh *nb) { unsigned int lci = 1; struct sock *sk; read_lock_bh(&x25_list_lock); while ((sk = __x25_find_socket(lci, nb)) != NULL) { sock_put(sk); if (++lci == 4096) { lci = 0; break; } } read_unlock_bh(&x25_list_lock); return lci; } /* * Deferred destroy. */ static void __x25_destroy_socket(struct sock *); /* * handler for deferred kills. */ static void x25_destroy_timer(unsigned long data) { x25_destroy_socket_from_timer((struct sock *)data); } /* * This is called from user mode and the timers. 
Thus it protects itself * against interrupt users but doesn't worry about being called during * work. Once it is removed from the queue no interrupt or bottom half * will touch it and we are (fairly 8-) ) safe. * Not static as it's used by the timer */ static void __x25_destroy_socket(struct sock *sk) { struct sk_buff *skb; x25_stop_heartbeat(sk); x25_stop_timer(sk); x25_remove_socket(sk); x25_clear_queues(sk); /* Flush the queues */ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { if (skb->sk != sk) { /* A pending connection */ /* * Queue the unaccepted socket for death */ skb->sk->sk_state = TCP_LISTEN; sock_set_flag(skb->sk, SOCK_DEAD); x25_start_heartbeat(skb->sk); x25_sk(skb->sk)->state = X25_STATE_0; } kfree_skb(skb); } if (sk_has_allocations(sk)) { /* Defer: outstanding buffers */ sk->sk_timer.expires = jiffies + 10 * HZ; sk->sk_timer.function = x25_destroy_timer; sk->sk_timer.data = (unsigned long)sk; add_timer(&sk->sk_timer); } else { /* drop last reference so sock_put will free */ __sock_put(sk); } } void x25_destroy_socket_from_timer(struct sock *sk) { sock_hold(sk); bh_lock_sock(sk); __x25_destroy_socket(sk); bh_unlock_sock(sk); sock_put(sk); } /* * Handling for system calls applied via the various interfaces to a * X.25 socket object. 
*/ static int x25_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { int opt; struct sock *sk = sock->sk; int rc = -ENOPROTOOPT; if (level != SOL_X25 || optname != X25_QBITINCL) goto out; rc = -EINVAL; if (optlen < sizeof(int)) goto out; rc = -EFAULT; if (get_user(opt, (int __user *)optval)) goto out; if (opt) set_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags); else clear_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags); rc = 0; out: return rc; } static int x25_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int val, len, rc = -ENOPROTOOPT; if (level != SOL_X25 || optname != X25_QBITINCL) goto out; rc = -EFAULT; if (get_user(len, optlen)) goto out; len = min_t(unsigned int, len, sizeof(int)); rc = -EINVAL; if (len < 0) goto out; rc = -EFAULT; if (put_user(len, optlen)) goto out; val = test_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags); rc = copy_to_user(optval, &val, len) ? -EFAULT : 0; out: return rc; } static int x25_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int rc = -EOPNOTSUPP; lock_sock(sk); if (sk->sk_state != TCP_LISTEN) { memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN); sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; rc = 0; } release_sock(sk); return rc; } static struct proto x25_proto = { .name = "X25", .owner = THIS_MODULE, .obj_size = sizeof(struct x25_sock), }; static struct sock *x25_alloc_socket(struct net *net) { struct x25_sock *x25; struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto); if (!sk) goto out; sock_init_data(NULL, sk); x25 = x25_sk(sk); skb_queue_head_init(&x25->ack_queue); skb_queue_head_init(&x25->fragment_queue); skb_queue_head_init(&x25->interrupt_in_queue); skb_queue_head_init(&x25->interrupt_out_queue); out: return sk; } static int x25_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct x25_sock *x25; int rc = -EAFNOSUPPORT; if 
(!net_eq(net, &init_net)) goto out; rc = -ESOCKTNOSUPPORT; if (sock->type != SOCK_SEQPACKET) goto out; rc = -EINVAL; if (protocol) goto out; rc = -ENOBUFS; if ((sk = x25_alloc_socket(net)) == NULL) goto out; x25 = x25_sk(sk); sock_init_data(sock, sk); x25_init_timers(sk); sock->ops = &x25_proto_ops; sk->sk_protocol = protocol; sk->sk_backlog_rcv = x25_backlog_rcv; x25->t21 = sysctl_x25_call_request_timeout; x25->t22 = sysctl_x25_reset_request_timeout; x25->t23 = sysctl_x25_clear_request_timeout; x25->t2 = sysctl_x25_ack_holdback_timeout; x25->state = X25_STATE_0; x25->cudmatchlength = 0; set_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); /* normally no cud */ /* on call accept */ x25->facilities.winsize_in = X25_DEFAULT_WINDOW_SIZE; x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE; x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE; x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE; x25->facilities.throughput = 0; /* by default don't negotiate throughput */ x25->facilities.reverse = X25_DEFAULT_REVERSE; x25->dte_facilities.calling_len = 0; x25->dte_facilities.called_len = 0; memset(x25->dte_facilities.called_ae, '\0', sizeof(x25->dte_facilities.called_ae)); memset(x25->dte_facilities.calling_ae, '\0', sizeof(x25->dte_facilities.calling_ae)); rc = 0; out: return rc; } static struct sock *x25_make_new(struct sock *osk) { struct sock *sk = NULL; struct x25_sock *x25, *ox25; if (osk->sk_type != SOCK_SEQPACKET) goto out; if ((sk = x25_alloc_socket(sock_net(osk))) == NULL) goto out; x25 = x25_sk(sk); sk->sk_type = osk->sk_type; sk->sk_priority = osk->sk_priority; sk->sk_protocol = osk->sk_protocol; sk->sk_rcvbuf = osk->sk_rcvbuf; sk->sk_sndbuf = osk->sk_sndbuf; sk->sk_state = TCP_ESTABLISHED; sk->sk_backlog_rcv = osk->sk_backlog_rcv; sock_copy_flags(sk, osk); ox25 = x25_sk(osk); x25->t21 = ox25->t21; x25->t22 = ox25->t22; x25->t23 = ox25->t23; x25->t2 = ox25->t2; x25->flags = ox25->flags; x25->facilities = ox25->facilities; x25->dte_facilities = ox25->dte_facilities; 
x25->cudmatchlength = ox25->cudmatchlength; clear_bit(X25_INTERRUPT_FLAG, &x25->flags); x25_init_timers(sk); out: return sk; } static int x25_release(struct socket *sock) { struct sock *sk = sock->sk; struct x25_sock *x25; if (!sk) return 0; x25 = x25_sk(sk); sock_hold(sk); lock_sock(sk); switch (x25->state) { case X25_STATE_0: case X25_STATE_2: x25_disconnect(sk, 0, 0, 0); __x25_destroy_socket(sk); goto out; case X25_STATE_1: case X25_STATE_3: case X25_STATE_4: x25_clear_queues(sk); x25_write_internal(sk, X25_CLEAR_REQUEST); x25_start_t23timer(sk); x25->state = X25_STATE_2; sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); sock_set_flag(sk, SOCK_DESTROY); break; } sock_orphan(sk); out: release_sock(sk); sock_put(sk); return 0; } static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; int len, i, rc = 0; if (!sock_flag(sk, SOCK_ZAPPED) || addr_len != sizeof(struct sockaddr_x25) || addr->sx25_family != AF_X25) { rc = -EINVAL; goto out; } len = strlen(addr->sx25_addr.x25_addr); for (i = 0; i < len; i++) { if (!isdigit(addr->sx25_addr.x25_addr[i])) { rc = -EINVAL; goto out; } } lock_sock(sk); x25_sk(sk)->source_addr = addr->sx25_addr; x25_insert_socket(sk); sock_reset_flag(sk, SOCK_ZAPPED); release_sock(sk); SOCK_DEBUG(sk, "x25_bind: socket is bound\n"); out: return rc; } static int x25_wait_for_connection_establishment(struct sock *sk) { DECLARE_WAITQUEUE(wait, current); int rc; add_wait_queue_exclusive(sk_sleep(sk), &wait); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); rc = -ERESTARTSYS; if (signal_pending(current)) break; rc = sock_error(sk); if (rc) { sk->sk_socket->state = SS_UNCONNECTED; break; } rc = 0; if (sk->sk_state != TCP_ESTABLISHED) { release_sock(sk); schedule(); lock_sock(sk); } else break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return 
rc; } static int x25_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; struct x25_route *rt; int rc = 0; lock_sock(sk); if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { sock->state = SS_CONNECTED; goto out; /* Connect completed during a ERESTARTSYS event */ } rc = -ECONNREFUSED; if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { sock->state = SS_UNCONNECTED; goto out; } rc = -EISCONN; /* No reconnect on a seqpacket socket */ if (sk->sk_state == TCP_ESTABLISHED) goto out; sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; rc = -EINVAL; if (addr_len != sizeof(struct sockaddr_x25) || addr->sx25_family != AF_X25) goto out; rc = -ENETUNREACH; rt = x25_get_route(&addr->sx25_addr); if (!rt) goto out; x25->neighbour = x25_get_neigh(rt->dev); if (!x25->neighbour) goto out_put_route; x25_limit_facilities(&x25->facilities, x25->neighbour); x25->lci = x25_new_lci(x25->neighbour); if (!x25->lci) goto out_put_neigh; rc = -EINVAL; if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */ goto out_put_neigh; if (!strcmp(x25->source_addr.x25_addr, null_x25_address.x25_addr)) memset(&x25->source_addr, '\0', X25_ADDR_LEN); x25->dest_addr = addr->sx25_addr; /* Move to connecting socket, start sending Connect Requests */ sock->state = SS_CONNECTING; sk->sk_state = TCP_SYN_SENT; x25->state = X25_STATE_1; x25_write_internal(sk, X25_CALL_REQUEST); x25_start_heartbeat(sk); x25_start_t21timer(sk); /* Now the loop */ rc = -EINPROGRESS; if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) goto out_put_neigh; rc = x25_wait_for_connection_establishment(sk); if (rc) goto out_put_neigh; sock->state = SS_CONNECTED; rc = 0; out_put_neigh: if (rc) x25_neigh_put(x25->neighbour); out_put_route: x25_route_put(rt); out: release_sock(sk); return rc; } static int 
x25_wait_for_data(struct sock *sk, long timeout) { DECLARE_WAITQUEUE(wait, current); int rc = 0; add_wait_queue_exclusive(sk_sleep(sk), &wait); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); if (sk->sk_shutdown & RCV_SHUTDOWN) break; rc = -ERESTARTSYS; if (signal_pending(current)) break; rc = -EAGAIN; if (!timeout) break; rc = 0; if (skb_queue_empty(&sk->sk_receive_queue)) { release_sock(sk); timeout = schedule_timeout(timeout); lock_sock(sk); } else break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return rc; } static int x25_accept(struct socket *sock, struct socket *newsock, int flags) { struct sock *sk = sock->sk; struct sock *newsk; struct sk_buff *skb; int rc = -EINVAL; if (!sk) goto out; rc = -EOPNOTSUPP; if (sk->sk_type != SOCK_SEQPACKET) goto out; lock_sock(sk); rc = -EINVAL; if (sk->sk_state != TCP_LISTEN) goto out2; rc = x25_wait_for_data(sk, sk->sk_rcvtimeo); if (rc) goto out2; skb = skb_dequeue(&sk->sk_receive_queue); rc = -EINVAL; if (!skb->sk) goto out2; newsk = skb->sk; sock_graft(newsk, newsock); /* Now attach up the new socket */ skb->sk = NULL; kfree_skb(skb); sk->sk_ack_backlog--; newsock->state = SS_CONNECTED; rc = 0; out2: release_sock(sk); out: return rc; } static int x25_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)uaddr; struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); int rc = 0; if (peer) { if (sk->sk_state != TCP_ESTABLISHED) { rc = -ENOTCONN; goto out; } sx25->sx25_addr = x25->dest_addr; } else sx25->sx25_addr = x25->source_addr; sx25->sx25_family = AF_X25; *uaddr_len = sizeof(*sx25); out: return rc; } int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, unsigned int lci) { struct sock *sk; struct sock *make; struct x25_sock *makex25; struct x25_address source_addr, dest_addr; struct x25_facilities facilities; struct x25_dte_facilities dte_facilities; int len, addr_len, rc; /* * Remove 
the LCI and frame type. */ skb_pull(skb, X25_STD_MIN_LEN); /* * Extract the X.25 addresses and convert them to ASCII strings, * and remove them. * * Address block is mandatory in call request packets */ addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr); if (addr_len <= 0) goto out_clear_request; skb_pull(skb, addr_len); /* * Get the length of the facilities, skip past them for the moment * get the call user data because this is needed to determine * the correct listener * * Facilities length is mandatory in call request packets */ if (!pskb_may_pull(skb, 1)) goto out_clear_request; len = skb->data[0] + 1; if (!pskb_may_pull(skb, len)) goto out_clear_request; skb_pull(skb,len); /* * Ensure that the amount of call user data is valid. */ if (skb->len > X25_MAX_CUD_LEN) goto out_clear_request; /* * Get all the call user data so it can be used in * x25_find_listener and skb_copy_from_linear_data up ahead. */ if (!pskb_may_pull(skb, skb->len)) goto out_clear_request; /* * Find a listener for the particular address/cud pair. */ sk = x25_find_listener(&source_addr,skb); skb_push(skb,len); if (sk != NULL && sk_acceptq_is_full(sk)) { goto out_sock_put; } /* * We dont have any listeners for this incoming call. * Try forwarding it. */ if (sk == NULL) { skb_push(skb, addr_len + X25_STD_MIN_LEN); if (sysctl_x25_forward && x25_forward_call(&dest_addr, nb, skb, lci) > 0) { /* Call was forwarded, dont process it any more */ kfree_skb(skb); rc = 1; goto out; } else { /* No listeners, can't forward, clear the call */ goto out_clear_request; } } /* * Try to reach a compromise on the requested facilities. */ len = x25_negotiate_facilities(skb, sk, &facilities, &dte_facilities); if (len == -1) goto out_sock_put; /* * current neighbour/link might impose additional limits * on certain facilties */ x25_limit_facilities(&facilities, nb); /* * Try to create a new socket. 
*/ make = x25_make_new(sk); if (!make) goto out_sock_put; /* * Remove the facilities */ skb_pull(skb, len); skb->sk = make; make->sk_state = TCP_ESTABLISHED; makex25 = x25_sk(make); makex25->lci = lci; makex25->dest_addr = dest_addr; makex25->source_addr = source_addr; makex25->neighbour = nb; makex25->facilities = facilities; makex25->dte_facilities= dte_facilities; makex25->vc_facil_mask = x25_sk(sk)->vc_facil_mask; /* ensure no reverse facil on accept */ makex25->vc_facil_mask &= ~X25_MASK_REVERSE; /* ensure no calling address extension on accept */ makex25->vc_facil_mask &= ~X25_MASK_CALLING_AE; makex25->cudmatchlength = x25_sk(sk)->cudmatchlength; /* Normally all calls are accepted immediately */ if (test_bit(X25_ACCPT_APPRV_FLAG, &makex25->flags)) { x25_write_internal(make, X25_CALL_ACCEPTED); makex25->state = X25_STATE_3; } /* * Incoming Call User Data. */ skb_copy_from_linear_data(skb, makex25->calluserdata.cuddata, skb->len); makex25->calluserdata.cudlength = skb->len; sk->sk_ack_backlog++; x25_insert_socket(make); skb_queue_head(&sk->sk_receive_queue, skb); x25_start_heartbeat(make); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, skb->len); rc = 1; sock_put(sk); out: return rc; out_sock_put: sock_put(sk); out_clear_request: rc = 0; x25_transmit_clear_request(nb, lci, 0x01); goto out; } static int x25_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); struct sockaddr_x25 *usx25 = (struct sockaddr_x25 *)msg->msg_name; struct sockaddr_x25 sx25; struct sk_buff *skb; unsigned char *asmptr; int noblock = msg->msg_flags & MSG_DONTWAIT; size_t size; int qbit = 0, rc = -EINVAL; lock_sock(sk); if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR|MSG_CMSG_COMPAT)) goto out; /* we currently don't support segmented records at the user interface */ if (!(msg->msg_flags & (MSG_EOR|MSG_OOB))) goto out; rc = -EADDRNOTAVAIL; if (sock_flag(sk, SOCK_ZAPPED)) goto out; rc = 
-EPIPE; if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, current, 0); goto out; } rc = -ENETUNREACH; if (!x25->neighbour) goto out; if (usx25) { rc = -EINVAL; if (msg->msg_namelen < sizeof(sx25)) goto out; memcpy(&sx25, usx25, sizeof(sx25)); rc = -EISCONN; if (strcmp(x25->dest_addr.x25_addr, sx25.sx25_addr.x25_addr)) goto out; rc = -EINVAL; if (sx25.sx25_family != AF_X25) goto out; } else { /* * FIXME 1003.1g - if the socket is like this because * it has become closed (not started closed) we ought * to SIGPIPE, EPIPE; */ rc = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) goto out; sx25.sx25_family = AF_X25; sx25.sx25_addr = x25->dest_addr; } /* Sanity check the packet size */ if (len > 65535) { rc = -EMSGSIZE; goto out; } SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n"); /* Build a packet */ SOCK_DEBUG(sk, "x25_sendmsg: sendto: building packet.\n"); if ((msg->msg_flags & MSG_OOB) && len > 32) len = 32; size = len + X25_MAX_L2_LEN + X25_EXT_MIN_LEN; release_sock(sk); skb = sock_alloc_send_skb(sk, size, noblock, &rc); lock_sock(sk); if (!skb) goto out; X25_SKB_CB(skb)->flags = msg->msg_flags; skb_reserve(skb, X25_MAX_L2_LEN + X25_EXT_MIN_LEN); /* * Put the data on the end */ SOCK_DEBUG(sk, "x25_sendmsg: Copying user data\n"); skb_reset_transport_header(skb); skb_put(skb, len); rc = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); if (rc) goto out_kfree_skb; /* * If the Q BIT Include socket option is in force, the first * byte of the user data is the logical value of the Q Bit. 
*/ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { if (!pskb_may_pull(skb, 1)) goto out_kfree_skb; qbit = skb->data[0]; skb_pull(skb, 1); } /* * Push down the X.25 header */ SOCK_DEBUG(sk, "x25_sendmsg: Building X.25 Header.\n"); if (msg->msg_flags & MSG_OOB) { if (x25->neighbour->extended) { asmptr = skb_push(skb, X25_STD_MIN_LEN); *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ; *asmptr++ = (x25->lci >> 0) & 0xFF; *asmptr++ = X25_INTERRUPT; } else { asmptr = skb_push(skb, X25_STD_MIN_LEN); *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ; *asmptr++ = (x25->lci >> 0) & 0xFF; *asmptr++ = X25_INTERRUPT; } } else { if (x25->neighbour->extended) { /* Build an Extended X.25 header */ asmptr = skb_push(skb, X25_EXT_MIN_LEN); *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ; *asmptr++ = (x25->lci >> 0) & 0xFF; *asmptr++ = X25_DATA; *asmptr++ = X25_DATA; } else { /* Build an Standard X.25 header */ asmptr = skb_push(skb, X25_STD_MIN_LEN); *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ; *asmptr++ = (x25->lci >> 0) & 0xFF; *asmptr++ = X25_DATA; } if (qbit) skb->data[0] |= X25_Q_BIT; } SOCK_DEBUG(sk, "x25_sendmsg: Built header.\n"); SOCK_DEBUG(sk, "x25_sendmsg: Transmitting buffer\n"); rc = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) goto out_kfree_skb; if (msg->msg_flags & MSG_OOB) skb_queue_tail(&x25->interrupt_out_queue, skb); else { rc = x25_output(sk, skb); len = rc; if (rc < 0) kfree_skb(skb); else if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) len++; } x25_kick(sk); rc = len; out: release_sock(sk); return rc; out_kfree_skb: kfree_skb(skb); goto out; } static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name; size_t copied; int qbit, header_len; struct sk_buff *skb; unsigned char *asmptr; int rc = -ENOTCONN; lock_sock(sk); if (x25->neighbour == NULL) goto out; header_len 
= x25->neighbour->extended ? X25_EXT_MIN_LEN : X25_STD_MIN_LEN; /* * This works for seqpacket too. The receiver has ordered the queue for * us! We do one quick check first though */ if (sk->sk_state != TCP_ESTABLISHED) goto out; if (flags & MSG_OOB) { rc = -EINVAL; if (sock_flag(sk, SOCK_URGINLINE) || !skb_peek(&x25->interrupt_in_queue)) goto out; skb = skb_dequeue(&x25->interrupt_in_queue); if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) goto out_free_dgram; skb_pull(skb, X25_STD_MIN_LEN); /* * No Q bit information on Interrupt data. */ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { asmptr = skb_push(skb, 1); *asmptr = 0x00; } msg->msg_flags |= MSG_OOB; } else { /* Now we can treat all alike */ release_sock(sk); skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &rc); lock_sock(sk); if (!skb) goto out; if (!pskb_may_pull(skb, header_len)) goto out_free_dgram; qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT; skb_pull(skb, header_len); if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { asmptr = skb_push(skb, 1); *asmptr = qbit; } } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } /* Currently, each datagram always contains a complete record */ msg->msg_flags |= MSG_EOR; rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (rc) goto out_free_dgram; if (sx25) { sx25->sx25_family = AF_X25; sx25->sx25_addr = x25->dest_addr; } msg->msg_namelen = sizeof(struct sockaddr_x25); x25_check_rbuf(sk); rc = copied; out_free_dgram: skb_free_datagram(sk, skb); out: release_sock(sk); return rc; } static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); void __user *argp = (void __user *)arg; int rc; switch (cmd) { case TIOCOUTQ: { int amount; amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; rc = put_user(amount, (unsigned int __user *)argp); break; } case TIOCINQ: { struct sk_buff 
*skb; int amount = 0; /* * These two are safe on a single CPU system as * only user tasks fiddle here */ lock_sock(sk); if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) amount = skb->len; release_sock(sk); rc = put_user(amount, (unsigned int __user *)argp); break; } case SIOCGSTAMP: rc = -EINVAL; if (sk) rc = sock_get_timestamp(sk, (struct timeval __user *)argp); break; case SIOCGSTAMPNS: rc = -EINVAL; if (sk) rc = sock_get_timestampns(sk, (struct timespec __user *)argp); break; case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: rc = -EINVAL; break; case SIOCADDRT: case SIOCDELRT: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; rc = x25_route_ioctl(cmd, argp); break; case SIOCX25GSUBSCRIP: rc = x25_subscr_ioctl(cmd, argp); break; case SIOCX25SSUBSCRIP: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; rc = x25_subscr_ioctl(cmd, argp); break; case SIOCX25GFACILITIES: { lock_sock(sk); rc = copy_to_user(argp, &x25->facilities, sizeof(x25->facilities)) ? 
-EFAULT : 0; release_sock(sk); break; } case SIOCX25SFACILITIES: { struct x25_facilities facilities; rc = -EFAULT; if (copy_from_user(&facilities, argp, sizeof(facilities))) break; rc = -EINVAL; lock_sock(sk); if (sk->sk_state != TCP_LISTEN && sk->sk_state != TCP_CLOSE) goto out_fac_release; if (facilities.pacsize_in < X25_PS16 || facilities.pacsize_in > X25_PS4096) goto out_fac_release; if (facilities.pacsize_out < X25_PS16 || facilities.pacsize_out > X25_PS4096) goto out_fac_release; if (facilities.winsize_in < 1 || facilities.winsize_in > 127) goto out_fac_release; if (facilities.throughput) { int out = facilities.throughput & 0xf0; int in = facilities.throughput & 0x0f; if (!out) facilities.throughput |= X25_DEFAULT_THROUGHPUT << 4; else if (out < 0x30 || out > 0xD0) goto out_fac_release; if (!in) facilities.throughput |= X25_DEFAULT_THROUGHPUT; else if (in < 0x03 || in > 0x0D) goto out_fac_release; } if (facilities.reverse && (facilities.reverse & 0x81) != 0x81) goto out_fac_release; x25->facilities = facilities; rc = 0; out_fac_release: release_sock(sk); break; } case SIOCX25GDTEFACILITIES: { lock_sock(sk); rc = copy_to_user(argp, &x25->dte_facilities, sizeof(x25->dte_facilities)); release_sock(sk); if (rc) rc = -EFAULT; break; } case SIOCX25SDTEFACILITIES: { struct x25_dte_facilities dtefacs; rc = -EFAULT; if (copy_from_user(&dtefacs, argp, sizeof(dtefacs))) break; rc = -EINVAL; lock_sock(sk); if (sk->sk_state != TCP_LISTEN && sk->sk_state != TCP_CLOSE) goto out_dtefac_release; if (dtefacs.calling_len > X25_MAX_AE_LEN) goto out_dtefac_release; if (dtefacs.calling_ae == NULL) goto out_dtefac_release; if (dtefacs.called_len > X25_MAX_AE_LEN) goto out_dtefac_release; if (dtefacs.called_ae == NULL) goto out_dtefac_release; x25->dte_facilities = dtefacs; rc = 0; out_dtefac_release: release_sock(sk); break; } case SIOCX25GCALLUSERDATA: { lock_sock(sk); rc = copy_to_user(argp, &x25->calluserdata, sizeof(x25->calluserdata)) ? 
-EFAULT : 0; release_sock(sk); break; } case SIOCX25SCALLUSERDATA: { struct x25_calluserdata calluserdata; rc = -EFAULT; if (copy_from_user(&calluserdata, argp, sizeof(calluserdata))) break; rc = -EINVAL; if (calluserdata.cudlength > X25_MAX_CUD_LEN) break; lock_sock(sk); x25->calluserdata = calluserdata; release_sock(sk); rc = 0; break; } case SIOCX25GCAUSEDIAG: { lock_sock(sk); rc = copy_to_user(argp, &x25->causediag, sizeof(x25->causediag)) ? -EFAULT : 0; release_sock(sk); break; } case SIOCX25SCAUSEDIAG: { struct x25_causediag causediag; rc = -EFAULT; if (copy_from_user(&causediag, argp, sizeof(causediag))) break; lock_sock(sk); x25->causediag = causediag; release_sock(sk); rc = 0; break; } case SIOCX25SCUDMATCHLEN: { struct x25_subaddr sub_addr; rc = -EINVAL; lock_sock(sk); if(sk->sk_state != TCP_CLOSE) goto out_cud_release; rc = -EFAULT; if (copy_from_user(&sub_addr, argp, sizeof(sub_addr))) goto out_cud_release; rc = -EINVAL; if (sub_addr.cudmatchlength > X25_MAX_CUD_LEN) goto out_cud_release; x25->cudmatchlength = sub_addr.cudmatchlength; rc = 0; out_cud_release: release_sock(sk); break; } case SIOCX25CALLACCPTAPPRV: { rc = -EINVAL; lock_sock(sk); if (sk->sk_state != TCP_CLOSE) break; clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); release_sock(sk); rc = 0; break; } case SIOCX25SENDCALLACCPT: { rc = -EINVAL; lock_sock(sk); if (sk->sk_state != TCP_ESTABLISHED) break; /* must call accptapprv above */ if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags)) break; x25_write_internal(sk, X25_CALL_ACCEPTED); x25->state = X25_STATE_3; release_sock(sk); rc = 0; break; } default: rc = -ENOIOCTLCMD; break; } return rc; } static const struct net_proto_family x25_family_ops = { .family = AF_X25, .create = x25_create, .owner = THIS_MODULE, }; #ifdef CONFIG_COMPAT static int compat_x25_subscr_ioctl(unsigned int cmd, struct compat_x25_subscrip_struct __user *x25_subscr32_actual) { struct compat_x25_subscrip_struct x25_subscr; struct x25_neigh *nb; struct net_device *dev; int rc = 
-EINVAL; rc = -EFAULT; #ifdef KW_TAINT_ANALYSIS struct compat_x25_subscrip_struct __user *x25_subscr32 = (struct compat_x25_subscrip_struct __user *)get_tainted_stuff(); #else struct compat_x25_subscrip_struct __user *x25_subscr32 = x25_subscr32_actual; #endif if (copy_from_user(&x25_subscr, x25_subscr32, sizeof(*x25_subscr32))) goto out; rc = -EINVAL; dev = x25_dev_get(x25_subscr.device); if (dev == NULL) goto out; nb = x25_get_neigh(dev); if (nb == NULL) goto out_dev_put; dev_put(dev); if (cmd == SIOCX25GSUBSCRIP) { read_lock_bh(&x25_neigh_list_lock); x25_subscr.extended = nb->extended; x25_subscr.global_facil_mask = nb->global_facil_mask; read_unlock_bh(&x25_neigh_list_lock); rc = copy_to_user(x25_subscr32, &x25_subscr, sizeof(*x25_subscr32)) ? -EFAULT : 0; } else { rc = -EINVAL; if (x25_subscr.extended == 0 || x25_subscr.extended == 1) { rc = 0; write_lock_bh(&x25_neigh_list_lock); nb->extended = x25_subscr.extended; nb->global_facil_mask = x25_subscr.global_facil_mask; write_unlock_bh(&x25_neigh_list_lock); } } x25_neigh_put(nb); out: return rc; out_dev_put: dev_put(dev); goto out; } static int compat_x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { #ifdef KW_TAINT_ANALYSIS void __user *argp = (void __user *)get_tainted_stuff(); #else void __user *argp = compat_ptr(arg); #endif struct sock *sk = sock->sk; int rc = -ENOIOCTLCMD; switch(cmd) { case TIOCOUTQ: case TIOCINQ: rc = x25_ioctl(sock, cmd, (unsigned long)argp); break; case SIOCGSTAMP: rc = -EINVAL; if (sk) rc = compat_sock_get_timestamp(sk, (struct timeval __user*)argp); break; case SIOCGSTAMPNS: rc = -EINVAL; if (sk) rc = compat_sock_get_timestampns(sk, (struct timespec __user*)argp); break; case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: rc = -EINVAL; break; case SIOCADDRT: case SIOCDELRT: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) 
break; rc = x25_route_ioctl(cmd, argp); break; case SIOCX25GSUBSCRIP: rc = compat_x25_subscr_ioctl(cmd, argp); break; case SIOCX25SSUBSCRIP: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; rc = compat_x25_subscr_ioctl(cmd, argp); break; case SIOCX25GFACILITIES: case SIOCX25SFACILITIES: case SIOCX25GDTEFACILITIES: case SIOCX25SDTEFACILITIES: case SIOCX25GCALLUSERDATA: case SIOCX25SCALLUSERDATA: case SIOCX25GCAUSEDIAG: case SIOCX25SCAUSEDIAG: case SIOCX25SCUDMATCHLEN: case SIOCX25CALLACCPTAPPRV: case SIOCX25SENDCALLACCPT: rc = x25_ioctl(sock, cmd, (unsigned long)argp); break; default: rc = -ENOIOCTLCMD; break; } return rc; } #endif static const struct proto_ops x25_proto_ops = { .family = AF_X25, .owner = THIS_MODULE, .release = x25_release, .bind = x25_bind, .connect = x25_connect, .socketpair = sock_no_socketpair, .accept = x25_accept, .getname = x25_getname, .poll = datagram_poll, .ioctl = x25_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_x25_ioctl, #endif .listen = x25_listen, .shutdown = sock_no_shutdown, .setsockopt = x25_setsockopt, .getsockopt = x25_getsockopt, .sendmsg = x25_sendmsg, .recvmsg = x25_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static struct packet_type x25_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_X25), .func = x25_lapb_receive_frame, }; static struct notifier_block x25_dev_notifier = { .notifier_call = x25_device_event, }; void x25_kill_by_neigh(struct x25_neigh *nb) { struct sock *s; struct hlist_node *node; write_lock_bh(&x25_list_lock); sk_for_each(s, node, &x25_list) if (x25_sk(s)->neighbour == nb) x25_disconnect(s, ENETUNREACH, 0, 0); write_unlock_bh(&x25_list_lock); /* Remove any related forwards */ x25_clear_forward_by_dev(nb->dev); } static int __init x25_init(void) { int rc = proto_register(&x25_proto, 0); if (rc != 0) goto out; rc = sock_register(&x25_family_ops); if (rc != 0) goto out_proto; dev_add_pack(&x25_packet_type); rc = register_netdevice_notifier(&x25_dev_notifier); if (rc != 0) goto 
out_sock; printk(KERN_INFO "X.25 for Linux Version 0.2\n"); x25_register_sysctl(); rc = x25_proc_init(); if (rc != 0) goto out_dev; out: return rc; out_dev: unregister_netdevice_notifier(&x25_dev_notifier); out_sock: sock_unregister(AF_X25); out_proto: proto_unregister(&x25_proto); goto out; } module_init(x25_init); static void __exit x25_exit(void) { x25_proc_exit(); x25_link_free(); x25_route_free(); x25_unregister_sysctl(); unregister_netdevice_notifier(&x25_dev_notifier); dev_remove_pack(&x25_packet_type); sock_unregister(AF_X25); proto_unregister(&x25_proto); } module_exit(x25_exit); MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>"); MODULE_DESCRIPTION("The X.25 Packet Layer network layer protocol"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_X25);
gpl-2.0
zzicewind/linux
net/irda/irlan/irlan_client_event.c
1426
12991
/*********************************************************************
 *
 * Filename:      irlan_client_event.c
 * Version:       0.9
 * Description:   IrLAN client state machine
 * Status:        Experimental.
 * Author:        Dag Brattli <dagb@cs.uit.no>
 * Created at:    Sun Aug 31 20:14:37 1997
 * Modified at:   Sun Dec 26 21:52:24 1999
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 *
 *     Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
 *     All Rights Reserved.
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     Neither Dag Brattli nor University of Tromsø admit liability nor
 *     provide warranty for any of this software. This material is
 *     provided "AS-IS" and at no charge.
 *
 ********************************************************************/

#include <linux/skbuff.h>

#include <net/irda/irda.h>
#include <net/irda/timer.h>
#include <net/irda/irmod.h>
#include <net/irda/iriap.h>
#include <net/irda/irlmp.h>
#include <net/irda/irttp.h>

#include <net/irda/irlan_common.h>
#include <net/irda/irlan_client.h>
#include <net/irda/irlan_event.h>

/* One handler per IRLAN client state (see the dispatch table below). */
static int irlan_client_state_idle (struct irlan_cb *self, IRLAN_EVENT event,
				    struct sk_buff *skb);
static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
				    struct sk_buff *skb);
static int irlan_client_state_conn (struct irlan_cb *self, IRLAN_EVENT event,
				    struct sk_buff *skb);
static int irlan_client_state_info (struct irlan_cb *self, IRLAN_EVENT event,
				    struct sk_buff *skb);
static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event,
				    struct sk_buff *skb);
static int irlan_client_state_open (struct irlan_cb *self, IRLAN_EVENT event,
				    struct sk_buff *skb);
static int irlan_client_state_wait (struct irlan_cb *self, IRLAN_EVENT event,
				    struct sk_buff *skb);
static int irlan_client_state_arb  (struct irlan_cb *self, IRLAN_EVENT event,
				    struct sk_buff *skb);
static int irlan_client_state_data (struct irlan_cb *self, IRLAN_EVENT event,
				    struct sk_buff *skb);
static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event,
				    struct sk_buff *skb);
static int irlan_client_state_sync (struct irlan_cb *self, IRLAN_EVENT event,
				    struct sk_buff *skb);

/* State dispatch table, indexed by self->client.state (IRLAN_IDLE..SYNC);
 * table order must match the IRLAN state enum. */
static int (*state[])(struct irlan_cb *, IRLAN_EVENT event, struct sk_buff *) =
{
	irlan_client_state_idle,
	irlan_client_state_query,
	irlan_client_state_conn,
	irlan_client_state_info,
	irlan_client_state_media,
	irlan_client_state_open,
	irlan_client_state_wait,
	irlan_client_state_arb,
	irlan_client_state_data,
	irlan_client_state_close,
	irlan_client_state_sync
};

/*
 * Feed @event (with optional @skb) into the client state machine.
 * The invoked handler is responsible for freeing @skb.
 */
void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event,
			   struct sk_buff *skb)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);

	(*state[ self->client.state]) (self, event, skb);
}

/*
 * Function irlan_client_state_idle (event, skb, info)
 *
 *    IDLE, We are waiting for an indication that there is a provider
 *    available.
 */
static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
				   struct sk_buff *skb)
{
	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);

	switch (event) {
	case IRLAN_DISCOVERY_INDICATION:
		/* NOTE(review): this early return skips the dev_kfree_skb()
		 * at the bottom — presumably skb is always NULL for discovery
		 * indications, otherwise it leaks; verify against callers. */
		if (self->client.iriap) {
			net_warn_ratelimited("%s(), busy with a previous query\n",
					     __func__);
			return -EBUSY;
		}

		self->client.iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
						irlan_client_get_value_confirm);
		/* Get some values from peer IAS */
		/* NOTE(review): iriap_open() result is not NULL-checked
		 * before use — TODO confirm iriap_getvaluebyclass_request()
		 * tolerates a NULL handle on allocation failure. */
		irlan_next_client_state(self, IRLAN_QUERY);
		iriap_getvaluebyclass_request(self->client.iriap,
					      self->saddr, self->daddr,
					      "IrLAN", "IrDA:TinyTP:LsapSel");
		break;
	case IRLAN_WATCHDOG_TIMEOUT:
		pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
		break;
	default:
		pr_debug("%s(), Unknown event %d\n", __func__ , event);
		break;
	}
	if (skb)
		dev_kfree_skb(skb);

	return 0;
}

/*
 * Function irlan_client_state_query (event, skb, info)
 *
 *    QUERY, We have queryed the remote IAS and is ready to connect
 *    to provider, just waiting for the confirm.
 *
 */
static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
				    struct sk_buff *skb)
{
	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);

	switch(event) {
	case IRLAN_IAS_PROVIDER_AVAIL:
		IRDA_ASSERT(self->dtsap_sel_ctrl != 0, return -1;);

		self->client.open_retries = 0;

		irttp_connect_request(self->client.tsap_ctrl,
				      self->dtsap_sel_ctrl,
				      self->saddr, self->daddr, NULL,
				      IRLAN_MTU, NULL);
		irlan_next_client_state(self, IRLAN_CONN);
		break;
	case IRLAN_IAS_PROVIDER_NOT_AVAIL:
		pr_debug("%s(), IAS_PROVIDER_NOT_AVAIL\n", __func__);
		irlan_next_client_state(self, IRLAN_IDLE);

		/* Give the client a kick! */
		if ((self->provider.access_type == ACCESS_PEER) &&
		    (self->provider.state != IRLAN_IDLE))
			irlan_client_wakeup(self, self->saddr, self->daddr);
		break;
	case IRLAN_LMP_DISCONNECT:
	case IRLAN_LAP_DISCONNECT:
		irlan_next_client_state(self, IRLAN_IDLE);
		break;
	case IRLAN_WATCHDOG_TIMEOUT:
		pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
		break;
	default:
		pr_debug("%s(), Unknown event %d\n", __func__ , event);
		break;
	}
	if (skb)
		dev_kfree_skb(skb);

	return 0;
}

/*
 * Function irlan_client_state_conn (event, skb, info)
 *
 *    CONN, We have connected to a provider but has not issued any
 *    commands yet.
 *
 */
static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event,
				   struct sk_buff *skb)
{
	IRDA_ASSERT(self != NULL, return -1;);

	switch (event) {
	case IRLAN_CONNECT_COMPLETE:
		/* Send getinfo cmd */
		irlan_get_provider_info(self);
		irlan_next_client_state(self, IRLAN_INFO);
		break;
	case IRLAN_LMP_DISCONNECT:
	case IRLAN_LAP_DISCONNECT:
		irlan_next_client_state(self, IRLAN_IDLE);
		break;
	case IRLAN_WATCHDOG_TIMEOUT:
		pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
		break;
	default:
		pr_debug("%s(), Unknown event %d\n", __func__ , event);
		break;
	}
	if (skb)
		dev_kfree_skb(skb);

	return 0;
}

/*
 * Function irlan_client_state_info (self, event, skb, info)
 *
 *    INFO, We have issued a GetInfo command and is awaiting a reply.
 */
static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event,
				   struct sk_buff *skb)
{
	IRDA_ASSERT(self != NULL, return -1;);

	switch (event) {
	case IRLAN_DATA_INDICATION:
		IRDA_ASSERT(skb != NULL, return -1;);

		irlan_client_parse_response(self, skb);

		irlan_next_client_state(self, IRLAN_MEDIA);

		irlan_get_media_char(self);
		break;

	case IRLAN_LMP_DISCONNECT:
	case IRLAN_LAP_DISCONNECT:
		irlan_next_client_state(self, IRLAN_IDLE);
		break;
	case IRLAN_WATCHDOG_TIMEOUT:
		pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
		break;
	default:
		pr_debug("%s(), Unknown event %d\n", __func__ , event);
		break;
	}
	if (skb)
		dev_kfree_skb(skb);

	return 0;
}

/*
 * Function irlan_client_state_media (self, event, skb, info)
 *
 *    MEDIA, The irlan_client has issued a GetMedia command and is awaiting a
 *    reply.
 *
 */
static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event,
				    struct sk_buff *skb)
{
	IRDA_ASSERT(self != NULL, return -1;);

	switch(event) {
	case IRLAN_DATA_INDICATION:
		irlan_client_parse_response(self, skb);
		irlan_open_data_channel(self);
		irlan_next_client_state(self, IRLAN_OPEN);
		break;
	case IRLAN_LMP_DISCONNECT:
	case IRLAN_LAP_DISCONNECT:
		irlan_next_client_state(self, IRLAN_IDLE);
		break;
	case IRLAN_WATCHDOG_TIMEOUT:
		pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
		break;
	default:
		pr_debug("%s(), Unknown event %d\n", __func__ , event);
		break;
	}
	if (skb)
		dev_kfree_skb(skb);

	return 0;
}

/*
 * Function irlan_client_state_open (self, event, skb, info)
 *
 *    OPEN, The irlan_client has issued a OpenData command and is awaiting a
 *    reply
 *
 */
static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event,
				   struct sk_buff *skb)
{
	struct qos_info qos;

	IRDA_ASSERT(self != NULL, return -1;);

	switch(event) {
	case IRLAN_DATA_INDICATION:
		irlan_client_parse_response(self, skb);

		/*
		 *  Check if we have got the remote TSAP for data
		 *  communications
		 */
		IRDA_ASSERT(self->dtsap_sel_data != 0, return -1;);

		/* Check which access type we are dealing with */
		switch (self->client.access_type) {
		case ACCESS_PEER:
		    if (self->provider.state == IRLAN_OPEN) {
			    irlan_next_client_state(self, IRLAN_ARB);
			    irlan_do_client_event(self, IRLAN_CHECK_CON_ARB,
						  NULL);
		    } else {
			    irlan_next_client_state(self, IRLAN_WAIT);
		    }
		    break;
		case ACCESS_DIRECT:
		case ACCESS_HOSTED:
			qos.link_disc_time.bits = 0x01; /* 3 secs */

			irttp_connect_request(self->tsap_data,
					      self->dtsap_sel_data,
					      self->saddr, self->daddr, &qos,
					      IRLAN_MTU, NULL);

			irlan_next_client_state(self, IRLAN_DATA);
			break;
		default:
			pr_debug("%s(), unknown access type!\n", __func__);
			break;
		}
		break;
	case IRLAN_LMP_DISCONNECT:
	case IRLAN_LAP_DISCONNECT:
		irlan_next_client_state(self, IRLAN_IDLE);
		break;
	case IRLAN_WATCHDOG_TIMEOUT:
		pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
		break;
	default:
		pr_debug("%s(), Unknown event %d\n", __func__ , event);
		break;
	}

	if (skb)
		dev_kfree_skb(skb);

	return 0;
}

/*
 * Function irlan_client_state_wait (self, event, skb, info)
 *
 *    WAIT, The irlan_client is waiting for the local provider to enter the
 *    provider OPEN state.
 *
 */
static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event,
				   struct sk_buff *skb)
{
	IRDA_ASSERT(self != NULL, return -1;);

	switch(event) {
	case IRLAN_PROVIDER_SIGNAL:
		irlan_next_client_state(self, IRLAN_ARB);
		irlan_do_client_event(self, IRLAN_CHECK_CON_ARB, NULL);
		break;
	case IRLAN_LMP_DISCONNECT:
	case IRLAN_LAP_DISCONNECT:
		irlan_next_client_state(self, IRLAN_IDLE);
		break;
	case IRLAN_WATCHDOG_TIMEOUT:
		pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
		break;
	default:
		pr_debug("%s(), Unknown event %d\n", __func__ , event);
		break;
	}
	if (skb)
		dev_kfree_skb(skb);

	return 0;
}

/*
 * ARB: peer-access arbitration — compare the received and sent arbitration
 * values to decide which side opens the data channel; equal values mean a
 * collision, so close and retry.
 */
static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event,
				  struct sk_buff *skb)
{
	struct qos_info qos;

	IRDA_ASSERT(self != NULL, return -1;);

	switch(event) {
	case IRLAN_CHECK_CON_ARB:
		if (self->client.recv_arb_val == self->provider.send_arb_val) {
			irlan_next_client_state(self, IRLAN_CLOSE);
			irlan_close_data_channel(self);
		} else if (self->client.recv_arb_val <
			   self->provider.send_arb_val) {
			qos.link_disc_time.bits = 0x01; /* 3 secs */

			irlan_next_client_state(self, IRLAN_DATA);
			irttp_connect_request(self->tsap_data,
					      self->dtsap_sel_data,
					      self->saddr, self->daddr, &qos,
					      IRLAN_MTU, NULL);
		} else if (self->client.recv_arb_val >
			   self->provider.send_arb_val) {
			pr_debug("%s(), lost the battle :-(\n", __func__);
		}
		break;
	case IRLAN_DATA_CONNECT_INDICATION:
		irlan_next_client_state(self, IRLAN_DATA);
		break;
	case IRLAN_LMP_DISCONNECT:
	case IRLAN_LAP_DISCONNECT:
		irlan_next_client_state(self, IRLAN_IDLE);
		break;
	case IRLAN_WATCHDOG_TIMEOUT:
		pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
		break;
	default:
		pr_debug("%s(), Unknown event %d\n", __func__ , event);
		break;
	}
	if (skb)
		dev_kfree_skb(skb);

	return 0;
}

/*
 * Function irlan_client_state_data (self, event, skb, info)
 *
 *    DATA, The data channel is connected, allowing data transfers between
 *    the local and remote machines.
 *
 */
static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event,
				   struct sk_buff *skb)
{
	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);

	switch(event) {
	case IRLAN_DATA_INDICATION:
		irlan_client_parse_response(self, skb);
		break;
	case IRLAN_LMP_DISCONNECT: /* FALLTHROUGH */
	case IRLAN_LAP_DISCONNECT:
		irlan_next_client_state(self, IRLAN_IDLE);
		break;
	default:
		pr_debug("%s(), Unknown event %d\n", __func__ , event);
		break;
	}
	if (skb)
		dev_kfree_skb(skb);

	return 0;
}

/*
 * Function irlan_client_state_close (self, event, skb, info)
 *
 *    CLOSE: terminal state — all events are ignored, only the skb is freed.
 *
 */
static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event,
				    struct sk_buff *skb)
{
	if (skb)
		dev_kfree_skb(skb);

	return 0;
}

/*
 * Function irlan_client_state_sync (self, event, skb, info)
 *
 *    SYNC: placeholder state — all events are ignored, only the skb is freed.
 *
 */
static int irlan_client_state_sync(struct irlan_cb *self, IRLAN_EVENT event,
				   struct sk_buff *skb)
{
	if (skb)
		dev_kfree_skb(skb);

	return 0;
}
gpl-2.0
Krabappel2548/kernel_msm8x60
arch/arm/plat-samsung/dev-keypad.c
2706
1294
/*
 * linux/arch/arm/plat-samsung/dev-keypad.c
 *
 * Copyright (C) 2010 Samsung Electronics Co.Ltd
 * Author: Joonyoung Shim <jy0922.shim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/platform_device.h>

#include <mach/irqs.h>
#include <mach/map.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/keypad.h>

/* MMIO window and interrupt line of the keypad controller. */
static struct resource samsung_keypad_resources[] = {
	[0] = {
		.start	= SAMSUNG_PA_KEYPAD,
		.end	= SAMSUNG_PA_KEYPAD + 0x20 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_KEYPAD,
		.end	= IRQ_KEYPAD,
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device samsung_device_keypad = {
	.name		= "samsung-keypad",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(samsung_keypad_resources),
	.resource	= samsung_keypad_resources,
};

/*
 * samsung_keypad_set_platdata - install board keypad platform data
 * @pd: board-supplied platform data (copied, may live in __initdata)
 *
 * Copies @pd into the keypad platform device and fills in the default
 * GPIO configuration callback when the board did not provide one.
 *
 * FIX: s3c_set_platdata() returns NULL when its allocation fails; the
 * original code dereferenced the result unconditionally, oopsing on
 * early-boot OOM.  Bail out instead (s3c_set_platdata already logs).
 */
void __init samsung_keypad_set_platdata(struct samsung_keypad_platdata *pd)
{
	struct samsung_keypad_platdata *npd;

	npd = s3c_set_platdata(pd, sizeof(struct samsung_keypad_platdata),
			&samsung_device_keypad);
	if (!npd)
		return;

	if (!npd->cfg_gpio)
		npd->cfg_gpio = samsung_keypad_cfg_gpio;
}
gpl-2.0
LorDClockaN/shooter-ics
arch/m68k/platform/coldfire/gpio.c
4498
3317
/*
 * Coldfire generic GPIO support.
 *
 * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sysdev.h>

#include <asm/gpio.h>
#include <asm/pinmux.h>
#include <asm/mcfgpio.h>

/* Recover our mcf_gpio_chip from the embedded generic gpio_chip. */
#define MCF_CHIP(chip) container_of(chip, struct mcf_gpio_chip, gpio_chip)

/*
 * Switch a GPIO line to input by clearing its bit in the data-direction
 * register (PDDR).  The read-modify-write must be IRQ-safe since other
 * lines share the same register.
 */
int mcf_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	unsigned long flags;
	MCFGPIO_PORTTYPE dir;
	struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);

	local_irq_save(flags);
	dir = mcfgpio_read(mcf_chip->pddr);
	dir &= ~mcfgpio_bit(chip->base + offset);
	mcfgpio_write(dir, mcf_chip->pddr);
	local_irq_restore(flags);

	return 0;
}

/* Read the current pin level from the pin-data register (PPDR). */
int mcf_gpio_get_value(struct gpio_chip *chip, unsigned offset)
{
	struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);

	return mcfgpio_read(mcf_chip->ppdr) & mcfgpio_bit(chip->base + offset);
}

/*
 * Switch a GPIO line to output.  The output latch (PODR) is written
 * before the direction bit (PDDR) so the pin never drives a stale value.
 */
int mcf_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
		int value)
{
	unsigned long flags;
	MCFGPIO_PORTTYPE data;
	struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);

	local_irq_save(flags);
	/* write the value to the output latch */
	data = mcfgpio_read(mcf_chip->podr);
	if (value)
		data |= mcfgpio_bit(chip->base + offset);
	else
		data &= ~mcfgpio_bit(chip->base + offset);
	mcfgpio_write(data, mcf_chip->podr);

	/* now set the direction to output */
	data = mcfgpio_read(mcf_chip->pddr);
	data |= mcfgpio_bit(chip->base + offset);
	mcfgpio_write(data, mcf_chip->pddr);
	local_irq_restore(flags);

	return 0;
}

/*
 * Set the output level via a read-modify-write of PODR; used on parts
 * without dedicated set/clear registers.
 */
void mcf_gpio_set_value(struct gpio_chip *chip, unsigned offset, int value)
{
	struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);
	unsigned long flags;
	MCFGPIO_PORTTYPE data;

	local_irq_save(flags);
	data = mcfgpio_read(mcf_chip->podr);
	if (value)
		data |= mcfgpio_bit(chip->base + offset);
	else
		data &= ~mcfgpio_bit(chip->base + offset);
	mcfgpio_write(data, mcf_chip->podr);
	local_irq_restore(flags);
}

/*
 * Lock-free set/clear using the dedicated SETR/CLRR registers:
 * SETR sets bits written as 1, CLRR clears bits written as 0, so
 * only the targeted line is affected and no IRQ masking is needed.
 */
void mcf_gpio_set_value_fast(struct gpio_chip *chip, unsigned offset,
		int value)
{
	struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);

	if (value)
		mcfgpio_write(mcfgpio_bit(chip->base + offset), mcf_chip->setr);
	else
		mcfgpio_write(~mcfgpio_bit(chip->base + offset), mcf_chip->clrr);
}

/*
 * Claim a line: route the pin to GPIO mode through the pinmux table,
 * if this chip has one.
 */
int mcf_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);

	return mcf_chip->gpio_to_pinmux ?
		mcf_pinmux_request(mcf_chip->gpio_to_pinmux[offset], 0) : 0;
}

/*
 * Release a line: park it as an input and hand the pin back to the
 * pinmux layer.
 */
void mcf_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);

	mcf_gpio_direction_input(chip, offset);

	if (mcf_chip->gpio_to_pinmux)
		mcf_pinmux_release(mcf_chip->gpio_to_pinmux[offset], 0);
}

/* sysdev class so the GPIO banks show up under /sys/devices/system/gpio. */
struct sysdev_class mcf_gpio_sysclass = {
	.name = "gpio",
};

static int __init mcf_gpio_sysinit(void)
{
	return sysdev_class_register(&mcf_gpio_sysclass);
}

core_initcall(mcf_gpio_sysinit);
gpl-2.0
coolbho3k/Xoom-OC
arch/mips/bcm63xx/setup.c
4498
2842
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/pm.h>
#include <asm/bootinfo.h>
#include <asm/time.h>
#include <asm/reboot.h>
#include <asm/cacheflush.h>
#include <bcm63xx_board.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>

/* Halt: announce and spin forever; there is no hardware power-off. */
void bcm63xx_machine_halt(void)
{
	printk(KERN_INFO "System halted\n");
	while (1)
		;
}

/*
 * Workaround reboot for BCM6348 rev A1, whose watchdog soft-reset is
 * unusable: soft-reset every peripheral block, then jump back to the
 * boot vector by hand with caches flushed and the TLB cleared.
 */
static void bcm6348_a1_reboot(void)
{
	u32 reg;

	/* soft reset all blocks */
	printk(KERN_INFO "soft-reseting all blocks ...\n");
	reg = bcm_perf_readl(PERF_SOFTRESET_REG);
	reg &= ~SOFTRESET_6348_ALL;
	bcm_perf_writel(reg, PERF_SOFTRESET_REG);
	mdelay(10);

	reg = bcm_perf_readl(PERF_SOFTRESET_REG);
	reg |= SOFTRESET_6348_ALL;
	bcm_perf_writel(reg, PERF_SOFTRESET_REG);
	mdelay(10);

	/* Jump to the power on address. */
	printk(KERN_INFO "jumping to reset vector.\n");
	/* set high vectors (base at 0xbfc00000) */
	set_c0_status(ST0_BEV | ST0_ERL);
	/* run uncached in kseg0 */
	change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
	__flush_cache_all();
	/* remove all wired TLB entries */
	write_c0_wired(0);
	__asm__ __volatile__(
		"jr\t%0"
		:
		: "r" (0xbfc00000));
	while (1)
		;
}

/*
 * Normal reboot path: quiesce external interrupts, then trigger a
 * watchdog soft-reset through the system PLL control register.
 */
void bcm63xx_machine_reboot(void)
{
	u32 reg;

	/* mask and clear all external irq */
	reg = bcm_perf_readl(PERF_EXTIRQ_CFG_REG);
	reg &= ~EXTIRQ_CFG_MASK_ALL;
	reg |= EXTIRQ_CFG_CLEAR_ALL;
	bcm_perf_writel(reg, PERF_EXTIRQ_CFG_REG);

	if (BCMCPU_IS_6348() && (bcm63xx_get_cpu_rev() == 0xa1))
		bcm6348_a1_reboot();

	printk(KERN_INFO "triggering watchdog soft-reset...\n");
	reg = bcm_perf_readl(PERF_SYS_PLL_CTL_REG);
	reg |= SYS_PLL_SOFT_RESET;
	bcm_perf_writel(reg, PERF_SYS_PLL_CTL_REG);
	while (1)
		;
}

/* Adapter matching the _machine_restart callback signature. */
static void __bcm63xx_machine_reboot(char *p)
{
	bcm63xx_machine_reboot();
}

/*
 * return system type in /proc/cpuinfo
 */
const char *get_system_type(void)
{
	static char buf[128];
	snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%04X)",
		 board_get_name(),
		 bcm63xx_get_cpu_id(), bcm63xx_get_cpu_rev());
	return buf;
}

/* The MIPS count register ticks at half the CPU clock. */
void __init plat_time_init(void)
{
	mips_hpt_frequency = bcm63xx_get_cpu_freq() / 2;
}

/* Register memory, hook reboot/halt callbacks and probe the board. */
void __init plat_mem_setup(void)
{
	add_memory_region(0, bcm63xx_get_memory_size(), BOOT_MEM_RAM);

	_machine_halt = bcm63xx_machine_halt;
	_machine_restart = __bcm63xx_machine_reboot;
	pm_power_off = bcm63xx_machine_halt;

	set_io_port_base(0);
	ioport_resource.start = 0;
	ioport_resource.end = ~0;

	board_setup();
}

int __init bcm63xx_register_devices(void)
{
	return board_register_devices();
}

arch_initcall(bcm63xx_register_devices);
gpl-2.0
Droid-Concepts/DC-Elite_kernel_jf
drivers/video/backlight/atmel-pwm-bl.c
7058
6308
/*
 * Copyright (C) 2008 Atmel Corporation
 *
 * Backlight driver using Atmel PWM peripheral.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/fb.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/backlight.h>
#include <linux/atmel_pwm.h>
#include <linux/atmel-pwm-bl.h>
#include <linux/slab.h>

/* Per-device driver state. */
struct atmel_pwm_bl {
	const struct atmel_pwm_bl_platform_data	*pdata;
	struct backlight_device			*bldev;
	struct platform_device			*pdev;
	struct pwm_channel			pwmc;
	int					gpio_on;	/* -1 when unused */
};

/*
 * backlight_ops.update_status: translate the requested brightness into a
 * PWM duty cycle, clamp it to the platform limits and drive the optional
 * enable GPIO.  Intensity 0 also stops the PWM channel entirely.
 */
static int atmel_pwm_bl_set_intensity(struct backlight_device *bd)
{
	struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
	int intensity = bd->props.brightness;
	int pwm_duty;

	/* A blanked or powered-down framebuffer forces the backlight off. */
	if (bd->props.power != FB_BLANK_UNBLANK)
		intensity = 0;
	if (bd->props.fb_blank != FB_BLANK_UNBLANK)
		intensity = 0;

	if (pwmbl->pdata->pwm_active_low)
		pwm_duty = pwmbl->pdata->pwm_duty_min + intensity;
	else
		pwm_duty = pwmbl->pdata->pwm_duty_max - intensity;

	if (pwm_duty > pwmbl->pdata->pwm_duty_max)
		pwm_duty = pwmbl->pdata->pwm_duty_max;
	if (pwm_duty < pwmbl->pdata->pwm_duty_min)
		pwm_duty = pwmbl->pdata->pwm_duty_min;

	if (!intensity) {
		if (pwmbl->gpio_on != -1) {
			gpio_set_value(pwmbl->gpio_on,
					0 ^ pwmbl->pdata->on_active_low);
		}
		pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty);
		pwm_channel_disable(&pwmbl->pwmc);
	} else {
		/* Enable the channel before programming the new duty. */
		pwm_channel_enable(&pwmbl->pwmc);
		pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty);
		if (pwmbl->gpio_on != -1) {
			gpio_set_value(pwmbl->gpio_on,
					1 ^ pwmbl->pdata->on_active_low);
		}
	}

	return 0;
}

/*
 * backlight_ops.get_brightness: recover the brightness from the current
 * PWM duty register, inverting the active-low mapping used above.
 *
 * NOTE(review): the u8 accumulator truncates if pwm_duty_max -
 * pwm_duty_min exceeds 255 — confirm platform data keeps the range small.
 */
static int atmel_pwm_bl_get_intensity(struct backlight_device *bd)
{
	struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
	u8 intensity;

	if (pwmbl->pdata->pwm_active_low) {
		intensity = pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY) -
			pwmbl->pdata->pwm_duty_min;
	} else {
		intensity = pwmbl->pdata->pwm_duty_max -
			pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY);
	}

	return intensity;
}

/*
 * One-time PWM channel setup: compute the clock prescaler from the
 * requested PWM frequency, program mode/duty/period and start the channel.
 */
static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
{
	unsigned long pwm_rate = pwmbl->pwmc.mck;
	unsigned long prescale = DIV_ROUND_UP(pwm_rate,
			(pwmbl->pdata->pwm_frequency *
			 pwmbl->pdata->pwm_compare_max)) - 1;

	/*
	 * Prescale must be power of two and maximum 0xf in size because of
	 * hardware limit. PWM speed will be:
	 *	PWM module clock speed / (2 ^ prescale).
	 */
	prescale = fls(prescale);
	if (prescale > 0xf)
		prescale = 0xf;

	pwm_channel_writel(&pwmbl->pwmc, PWM_CMR, prescale);
	pwm_channel_writel(&pwmbl->pwmc, PWM_CDTY,
			pwmbl->pdata->pwm_duty_min +
			pwmbl->bldev->props.brightness);
	pwm_channel_writel(&pwmbl->pwmc, PWM_CPRD,
			pwmbl->pdata->pwm_compare_max);

	dev_info(&pwmbl->pdev->dev, "Atmel PWM backlight driver "
			"(%lu Hz)\n", pwmbl->pwmc.mck /
			pwmbl->pdata->pwm_compare_max /
			(1 << prescale));

	return pwm_channel_enable(&pwmbl->pwmc);
}

static const struct backlight_ops atmel_pwm_bl_ops = {
	.get_brightness = atmel_pwm_bl_get_intensity,
	.update_status  = atmel_pwm_bl_set_intensity,
};

/*
 * Probe: validate platform data, claim the PWM channel and optional
 * enable GPIO, register the backlight device and light it at half
 * intensity.  Error paths unwind in reverse acquisition order.
 */
static int atmel_pwm_bl_probe(struct platform_device *pdev)
{
	struct backlight_properties props;
	const struct atmel_pwm_bl_platform_data *pdata;
	struct backlight_device *bldev;
	struct atmel_pwm_bl *pwmbl;
	int retval;

	pwmbl = kzalloc(sizeof(struct atmel_pwm_bl), GFP_KERNEL);
	if (!pwmbl)
		return -ENOMEM;

	pwmbl->pdev = pdev;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		retval = -ENODEV;
		goto err_free_mem;
	}

	/* Sanity-check the duty/period limits before touching hardware. */
	if (pdata->pwm_compare_max < pdata->pwm_duty_max ||
			pdata->pwm_duty_min > pdata->pwm_duty_max ||
			pdata->pwm_frequency == 0) {
		retval = -EINVAL;
		goto err_free_mem;
	}

	pwmbl->pdata = pdata;
	pwmbl->gpio_on = pdata->gpio_on;

	retval = pwm_channel_alloc(pdata->pwm_channel, &pwmbl->pwmc);
	if (retval)
		goto err_free_mem;

	if (pwmbl->gpio_on != -1) {
		retval = gpio_request(pwmbl->gpio_on, "gpio_atmel_pwm_bl");
		if (retval) {
			/* Mark unused so the error path skips gpio_free(). */
			pwmbl->gpio_on = -1;
			goto err_free_pwm;
		}

		/* Turn display off by default. */
		retval = gpio_direction_output(pwmbl->gpio_on,
				0 ^ pdata->on_active_low);
		if (retval)
			goto err_free_gpio;
	}

	memset(&props, 0, sizeof(struct backlight_properties));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min;
	bldev = backlight_device_register("atmel-pwm-bl", &pdev->dev, pwmbl,
					  &atmel_pwm_bl_ops, &props);
	if (IS_ERR(bldev)) {
		retval = PTR_ERR(bldev);
		goto err_free_gpio;
	}

	pwmbl->bldev = bldev;

	platform_set_drvdata(pdev, pwmbl);

	/* Power up the backlight by default at middle intesity. */
	bldev->props.power = FB_BLANK_UNBLANK;
	bldev->props.brightness = bldev->props.max_brightness / 2;

	retval = atmel_pwm_bl_init_pwm(pwmbl);
	if (retval)
		goto err_free_bl_dev;

	atmel_pwm_bl_set_intensity(bldev);

	return 0;

err_free_bl_dev:
	platform_set_drvdata(pdev, NULL);
	backlight_device_unregister(bldev);
err_free_gpio:
	if (pwmbl->gpio_on != -1)
		gpio_free(pwmbl->gpio_on);
err_free_pwm:
	pwm_channel_free(&pwmbl->pwmc);
err_free_mem:
	kfree(pwmbl);
	return retval;
}

/* Remove: force the backlight off, then release GPIO, PWM and memory. */
static int __exit atmel_pwm_bl_remove(struct platform_device *pdev)
{
	struct atmel_pwm_bl *pwmbl = platform_get_drvdata(pdev);

	if (pwmbl->gpio_on != -1) {
		gpio_set_value(pwmbl->gpio_on, 0);
		gpio_free(pwmbl->gpio_on);
	}
	pwm_channel_disable(&pwmbl->pwmc);
	pwm_channel_free(&pwmbl->pwmc);
	backlight_device_unregister(pwmbl->bldev);
	platform_set_drvdata(pdev, NULL);
	kfree(pwmbl);

	return 0;
}

/* No .probe here: registered via platform_driver_probe() below. */
static struct platform_driver atmel_pwm_bl_driver = {
	.driver = {
		.name = "atmel-pwm-bl",
	},
	/* REVISIT add suspend() and resume() */
	.remove = __exit_p(atmel_pwm_bl_remove),
};

static int __init atmel_pwm_bl_init(void)
{
	return platform_driver_probe(&atmel_pwm_bl_driver, atmel_pwm_bl_probe);
}
module_init(atmel_pwm_bl_init);

static void __exit atmel_pwm_bl_exit(void)
{
	platform_driver_unregister(&atmel_pwm_bl_driver);
}
module_exit(atmel_pwm_bl_exit);

MODULE_AUTHOR("Hans-Christian egtvedt <hans-christian.egtvedt@atmel.com>");
MODULE_DESCRIPTION("Atmel PWM backlight driver");
MODULE_LICENSE("GPL");
gpl-2.0
croniccorey/cronmod-kernel
arch/powerpc/platforms/cell/pervasive.c
7314
3256
/* * CBE Pervasive Monitor and Debug * * (C) Copyright IBM Corporation 2005 * * Authors: Maximino Aguilar (maguilar@us.ibm.com) * Michael N. Day (mnday@us.ibm.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #undef DEBUG #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/percpu.h> #include <linux/types.h> #include <linux/kallsyms.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/prom.h> #include <asm/pgtable.h> #include <asm/reg.h> #include <asm/cell-regs.h> #include "pervasive.h" static void cbe_power_save(void) { unsigned long ctrl, thread_switch_control; /* * We need to hard disable interrupts, the local_irq_enable() done by * our caller upon return will hard re-enable. */ hard_irq_disable(); ctrl = mfspr(SPRN_CTRLF); /* Enable DEC and EE interrupt request */ thread_switch_control = mfspr(SPRN_TSC_CELL); thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST; switch (ctrl & CTRL_CT) { case CTRL_CT0: thread_switch_control |= TSC_CELL_DEC_ENABLE_0; break; case CTRL_CT1: thread_switch_control |= TSC_CELL_DEC_ENABLE_1; break; default: printk(KERN_WARNING "%s: unknown configuration\n", __func__); break; } mtspr(SPRN_TSC_CELL, thread_switch_control); /* * go into low thread priority, medium priority will be * restored for us after wake-up. */ HMT_low(); /* * atomically disable thread execution and runlatch. 
* External and Decrementer exceptions are still handled when the * thread is disabled but now enter in cbe_system_reset_exception() */ ctrl &= ~(CTRL_RUNLATCH | CTRL_TE); mtspr(SPRN_CTRLT, ctrl); } static int cbe_system_reset_exception(struct pt_regs *regs) { switch (regs->msr & SRR1_WAKEMASK) { case SRR1_WAKEEE: do_IRQ(regs); break; case SRR1_WAKEDEC: timer_interrupt(regs); break; case SRR1_WAKEMT: return cbe_sysreset_hack(); #ifdef CONFIG_CBE_RAS case SRR1_WAKESYSERR: cbe_system_error_exception(regs); break; case SRR1_WAKETHERM: cbe_thermal_exception(regs); break; #endif /* CONFIG_CBE_RAS */ default: /* do system reset */ return 0; } /* everything handled */ return 1; } void __init cbe_pervasive_init(void) { int cpu; if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO)) return; for_each_possible_cpu(cpu) { struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu); if (!regs) continue; /* Enable Pause(0) control bit */ out_be64(&regs->pmcr, in_be64(&regs->pmcr) | CBE_PMD_PAUSE_ZERO_CONTROL); } ppc_md.power_save = cbe_power_save; ppc_md.system_reset_exception = cbe_system_reset_exception; }
gpl-2.0
AOSPA-L/android_kernel_oneplus_msm8974
arch/powerpc/platforms/40x/walnut.c
8082
1687
/*
 * Architecture- / platform-specific boot-time initialization code for
 * IBM PowerPC 4xx based boards. Adapted from original
 * code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek
 * <dan@net4x.com>.
 *
 * Copyright(c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
 *
 * Rewritten and ported to the merged powerpc tree:
 * Copyright 2007 IBM Corporation
 * Josh Boyer <jwboyer@linux.vnet.ibm.com>
 *
 * 2002 (c) MontaVista, Software, Inc.  This file is licensed under
 * the terms of the GNU General Public License version 2.  This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */

#include <linux/init.h>
#include <linux/of_platform.h>
#include <linux/rtc.h>

#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/time.h>
#include <asm/uic.h>
#include <asm/pci-bridge.h>
#include <asm/ppc4xx.h>

/* Device-tree bus types whose children become platform devices. */
static __initdata struct of_device_id walnut_of_bus[] = {
	{ .compatible = "ibm,plb3", },
	{ .compatible = "ibm,opb", },
	{ .compatible = "ibm,ebc", },
	{},
};

/* Populate platform devices from the device tree and set up the RTC. */
static int __init walnut_device_probe(void)
{
	of_platform_bus_probe(NULL, walnut_of_bus, NULL);
	of_instantiate_rtc();

	return 0;
}
machine_device_initcall(walnut, walnut_device_probe);

/*
 * Board-match hook: claim the machine only when the flattened device
 * tree root is compatible with "ibm,walnut"; ask the PCI core to
 * reassign all resources while we are at it.
 */
static int __init walnut_probe(void)
{
	unsigned long root = of_get_flat_dt_root();

	if (!of_flat_dt_is_compatible(root, "ibm,walnut"))
		return 0;

	pci_set_flags(PCI_REASSIGN_ALL_RSRC);

	return 1;
}

define_machine(walnut) {
	.name			= "Walnut",
	.probe			= walnut_probe,
	.progress		= udbg_progress,
	.init_IRQ		= uic_init_tree,
	.get_irq		= uic_get_irq,
	.restart		= ppc4xx_reset_system,
	.calibrate_decr		= generic_calibrate_decr,
};
gpl-2.0
Luquidtester/DirtyKernel-3.4.76
drivers/staging/go7007/wis-ov7640.c
8338
2462
/*
 * Copyright (C) 2005-2006 Micronas USA Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>

#include "wis-i2c.h"

/* Cached control values (not yet wired to V4L2 controls). */
struct wis_ov7640 {
	int brightness;
	int contrast;
	int saturation;
	int hue;
};

/*
 * Register/value pairs written at probe time; the list is terminated by
 * a 0xFF register address (register 0xFF is unused on the OV7640).
 */
static u8 initial_registers[] =
{
	0x12, 0x80,
	0x12, 0x54,
	0x14, 0x24,
	0x15, 0x01,
	0x28, 0x20,
	0x75, 0x82,
	0xFF, 0xFF, /* Terminator (reg 0xFF is unused) */
};

/* Write consecutive reg/value pairs until the 0xFF terminator. */
static int write_regs(struct i2c_client *client, u8 *regs)
{
	int i;

	for (i = 0; regs[i] != 0xFF; i += 2)
		if (i2c_smbus_write_byte_data(client, regs[i], regs[i + 1]) < 0)
			return -1;
	return 0;
}

/*
 * Probe: verify SMBus byte-data support, switch the client to the SCCB
 * protocol variant the sensor speaks, and load the initial register set.
 */
static int wis_ov7640_probe(struct i2c_client *client,
			    const struct i2c_device_id *id)
{
	struct i2c_adapter *adapter = client->adapter;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;

	client->flags = I2C_CLIENT_SCCB;

	printk(KERN_DEBUG
		"wis-ov7640: initializing OV7640 at address %d on %s\n",
		client->addr, adapter->name);

	if (write_regs(client, initial_registers) < 0) {
		printk(KERN_ERR "wis-ov7640: error initializing OV7640\n");
		return -ENODEV;
	}

	return 0;
}

/* Nothing to release: probe allocates no resources. */
static int wis_ov7640_remove(struct i2c_client *client)
{
	return 0;
}

static const struct i2c_device_id wis_ov7640_id[] = {
	{ "wis_ov7640", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, wis_ov7640_id);

static struct i2c_driver wis_ov7640_driver = {
	.driver = {
		.name	= "WIS OV7640 I2C driver",
	},
	.probe		= wis_ov7640_probe,
	.remove		= wis_ov7640_remove,
	.id_table	= wis_ov7640_id,
};

static int __init wis_ov7640_init(void)
{
	return i2c_add_driver(&wis_ov7640_driver);
}

static void __exit wis_ov7640_cleanup(void)
{
	i2c_del_driver(&wis_ov7640_driver);
}

module_init(wis_ov7640_init);
module_exit(wis_ov7640_cleanup);

MODULE_LICENSE("GPL v2");
gpl-2.0
arasilinux/arasievm-kernel
arch/x86/kernel/cpu/perf_event_intel_ds.c
147
17640
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>

#include <asm/perf_event.h>

#include "perf_event.h"

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE	PAGE_SIZE

/*
 * pebs_record_32 for p4 and core not supported

struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bc, cx, dx;
	u32 si, di, bp, sp;
};

 */

/* PEBS record layout for Core 2 (pebs_format 0). */
struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8, r9, r10, r11;
	u64 r12, r13, r14, r15;
};

/* PEBS record layout for Nehalem+ (pebs_format 1): adds status/latency. */
struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8, r9, r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};

/* Point the CPU's DS_AREA MSR at its per-cpu debug store. */
void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

/* Detach the CPU from its debug store by zeroing DS_AREA. */
void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

/*
 * Allocate the per-cpu PEBS buffer and set the interrupt threshold at
 * one record, so every PEBS event raises a PMI immediately.
 */
static int alloc_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh = 1; /* always use a single PEBS record */
	void *buffer;

	if (!x86_pmu.pebs)
		return 0;

	buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;

	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
	ds->pebs_index = ds->pebs_buffer_base;
	ds->pebs_absolute_maximum = ds->pebs_buffer_base +
		max * x86_pmu.pebs_record_size;

	ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
		thresh * x86_pmu.pebs_record_size;

	return 0;
}

static void release_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.pebs)
		return;

	kfree((void *)(unsigned long)ds->pebs_buffer_base);
	ds->pebs_buffer_base = 0;
}

/*
 * Allocate the per-cpu BTS buffer; the interrupt threshold is set
 * 1/16th of the buffer before the end so the PMI has room to drain it.
 */
static int alloc_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh;
	void *buffer;

	if (!x86_pmu.bts)
		return 0;

	buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	thresh = max / 16;

	ds->bts_buffer_base = (u64)(unsigned long)buffer;
	ds->bts_index = ds->bts_buffer_base;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
		max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
		thresh * BTS_RECORD_SIZE;

	return 0;
}

static void release_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.bts)
		return;

	kfree((void *)(unsigned long)ds->bts_buffer_base);
	ds->bts_buffer_base = 0;
}

/* Allocate the debug_store descriptor itself for one CPU. */
static int alloc_ds_buffer(int cpu)
{
	int node = cpu_to_node(cpu);
	struct debug_store *ds;

	ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!ds))
		return -ENOMEM;

	per_cpu(cpu_hw_events, cpu).ds = ds;

	return 0;
}

static void release_ds_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	per_cpu(cpu_hw_events, cpu).ds = NULL;
	kfree(ds);
}

/* Tear down BTS/PEBS/DS buffers on every possible CPU. */
void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	get_online_cpus();

	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
		release_ds_buffer(cpu);
	}
	put_online_cpus();
}

/*
 * Allocate DS, BTS and PEBS buffers for all possible CPUs.  BTS and
 * PEBS degrade independently: a failure in one disables only that
 * facility; only when both fail is everything released.
 */
void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();
}

/*
 * BTS
 */

/* BTS can only live on the fake fixed counter reserved for it. */
struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);

/*
 * Turn on branch trace store via DEBUGCTL, honouring the OS/USR
 * filtering requested in the event config.
 */
void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

/*
 * Drain pending BTS records into the perf ring buffer as one sample
 * per branch.  Returns 1 if records were emitted, 0 otherwise.
 */
int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= at)
		return 0;

	/* Reset the hardware write cursor before emitting samples. */
	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;
	regs.ip     = 0;

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size * (top - at)))
		return 1;

	for (; at < top; at++) {
		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
	return 1;
}

/*
 * PEBS
 */

struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_atom_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_nehalem_pebs_event_constraints[] = {
	INTEL_EVENT_CONSTRAINT(0x0b, 0xf),    /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
	INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_westmere_pebs_event_constraints[] = {
	INTEL_EVENT_CONSTRAINT(0x0b, 0xf),    /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_snb_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8),    /* MEM_TRANS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
	INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
	INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
	INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
	EVENT_CONSTRAINT_END
};

/*
 * Find the PEBS constraint matching this event's config, or the empty
 * constraint when the event is not PEBS-capable (precise_ip unset).
 */
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &emptyconstraint;
}

/*
 * Route the counter's overflows through PEBS instead of a normal PMI;
 * precise_ip > 1 additionally enables LBR for off-by-one fixup.
 */
void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;
	WARN_ON_ONCE(cpuc->enabled);

	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		intel_pmu_lbr_enable(event);
}

void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;

	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		intel_pmu_lbr_disable(event);
}

void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}

void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}

#include <asm/insn.h>

/* True when @ip is a kernel address for this kernel's address layout. */
static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}

/*
 * Rewind the reported IP by one instruction using the top LBR entry,
 * compensating for the trap-like (off-by-one) PEBS assist.  Returns 1
 * when regs->ip was fixed up to the precise instruction, 0 otherwise.
 */
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;
	int is_64bit = 0;

	/*
	 * We don't need to fixup if the PEBS assist is fault like
	 */
	if (!x86_pmu.intel_cap.pebs_trap)
		return 1;

	/*
	 * No LBR entry, no basic block, no rewinding
	 */
	if (!cpuc->lbr_stack.nr || !from || !to)
		return 0;

	/*
	 * Basic blocks should never cross user/kernel boundaries
	 */
	if (kernel_ip(ip) != kernel_ip(to))
		return 0;

	/*
	 * unsigned math, either ip is before the start (impossible) or
	 * the basic block is larger than 1 page (sanity)
	 */
	if ((ip - to) > PAGE_SIZE)
		return 0;

	/*
	 * We sampled a branch insn, rewind using the LBR stack
	 */
	if (ip == to) {
		regs->ip = from;
		return 1;
	}

	/* Decode forward from the branch target until we reach ip. */
	do {
		struct insn insn;
		u8 buf[MAX_INSN_SIZE];
		void *kaddr;

		old_to = to;
		if (!kernel_ip(ip)) {
			int bytes, size = MAX_INSN_SIZE;

			bytes = copy_from_user_nmi(buf, (void __user *)to, size);
			if (bytes != size)
				return 0;

			kaddr = buf;
		} else
			kaddr = (void *)to;

#ifdef CONFIG_X86_64
		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
#endif
		insn_init(&insn, kaddr, is_64bit);
		insn_get_length(&insn);
		to += insn.length;
	} while (to < ip);

	if (to == ip) {
		regs->ip = old_to;
		return 1;
	}

	/*
	 * Even though we decoded the basic block, the instruction stream
	 * never matched the given IP, either the TO or the IP got corrupted.
	 */
	return 0;
}

/*
 * Emit one perf sample from a PEBS record, fixing up IP/BP/SP from the
 * record and attempting the precise-IP rewind above.
 */
static void __intel_pmu_pebs_event(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs)
{
	/*
	 * We cast to pebs_record_core since that is a subset of
	 * both formats and we don't use the other fields in this
	 * routine.
	 */
	struct pebs_record_core *pebs = __pebs;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!intel_pmu_save_and_restart(event))
		return;

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;

	/*
	 * We use the interrupt regs as a base because the PEBS record
	 * does not contain a full regs set, specifically it seems to
	 * lack segment descriptors, which get used by things like
	 * user_mode().
	 *
	 * In the simple case fix up only the IP and BP,SP regs, for
	 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
	 */
	regs = *iregs;
	regs.ip = pebs->ip;
	regs.bp = pebs->bp;
	regs.sp = pebs->sp;

	if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
		regs.flags |= PERF_EFLAGS_EXACT;
	else
		regs.flags &= ~PERF_EFLAGS_EXACT;

	if (perf_event_overflow(event, &data, &regs))
		x86_pmu_stop(event, 0);
}

/*
 * Drain PEBS records on Core 2 (format 0): a single counter (PMC0)
 * can be PEBS-enabled, so only the newest record is consumed.
 */
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
	struct pebs_record_core *at, *top;
	int n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

	/*
	 * Whatever else happens, drain the thing
	 */
	ds->pebs_index = ds->pebs_buffer_base;

	if (!test_bit(0, cpuc->active_mask))
		return;

	WARN_ON_ONCE(!event);

	if (!event->attr.precise_ip)
		return;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ON_ONCE(n > 1);
	at += n - 1;

	__intel_pmu_pebs_event(event, iregs, at);
}

/*
 * Drain PEBS records on Nehalem+ (format 1): each record carries a
 * status bitmask naming the counter(s) that produced it; each event
 * gets at most one sample per drain.
 */
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct pebs_record_nhm *at, *top;
	struct perf_event *event = NULL;
	u64 status = 0;
	int bit, n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ON_ONCE(n > MAX_PEBS_EVENTS);

	for ( ; at < top; at++) {
		for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
			event = cpuc->events[bit];
			if (!test_bit(bit, cpuc->active_mask))
				continue;

			WARN_ON_ONCE(!event);

			if (!event->attr.precise_ip)
				continue;

			/* Only one sample per event per drain. */
			if (__test_and_set_bit(bit, (unsigned long *)&status))
				continue;

			break;
		}

		if (!event || bit >= MAX_PEBS_EVENTS)
			continue;

		__intel_pmu_pebs_event(event, iregs, at);
	}
}

/*
 * BTS, PEBS probe and setup
 */

/* Detect BTS/PEBS support and wire up the format-specific drain hook. */
void intel_ds_init(void)
{
	/*
	 * No support for 32bit formats
	 */
	if (!boot_cpu_has(X86_FEATURE_DTES64))
		return;

	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
	if (x86_pmu.pebs) {
		char pebs_type = x86_pmu.intel_cap.pebs_trap ?  '+' : '-';
		int format = x86_pmu.intel_cap.pebs_format;

		switch (format) {
		case 0:
			printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
			break;

		case 1:
			printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		default:
			printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
			x86_pmu.pebs = 0;
		}
	}
}
gpl-2.0
ps06756/linux-3.17.2
arch/arm/mach-at91/board-stamp9g20.c
147
6806
/* * Copyright (C) 2010 Christian Glindkamp <christian.glindkamp@taskit.de> * taskit GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/mm.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/w1-gpio.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/at91sam9_smc.h> #include <mach/hardware.h> #include "at91_aic.h" #include "board.h" #include "sam9_smc.h" #include "generic.h" #include "gpio.h" void __init stamp9g20_init_early(void) { /* Initialize processor: 18.432 MHz crystal */ at91_initialize(18432000); } /* * NAND flash */ static struct atmel_nand_data __initdata nand_data = { .ale = 21, .cle = 22, .rdy_pin = AT91_PIN_PC13, .enable_pin = AT91_PIN_PC14, .bus_width_16 = 0, .det_pin = -EINVAL, .ecc_mode = NAND_ECC_SOFT, }; static struct sam9_smc_config __initdata nand_smc_config = { .ncs_read_setup = 0, .nrd_setup = 2, .ncs_write_setup = 0, .nwe_setup = 2, .ncs_read_pulse = 4, .nrd_pulse = 4, .ncs_write_pulse = 4, .nwe_pulse = 4, .read_cycle = 7, .write_cycle = 7, .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8, .tdf_cycles = 3, }; static void __init add_device_nand(void) { /* configure chip-select 3 (NAND) */ sam9_smc_configure(0, 3, &nand_smc_config); at91_add_device_nand(&nand_data); } /* * MCI (SD/MMC) * det_pin, 
wp_pin and vcc_pin are not connected */ static struct mci_platform_data __initdata mmc_data = { .slot[0] = { .bus_width = 4, .detect_pin = -1, .wp_pin = -1, }, }; /* * USB Host port */ static struct at91_usbh_data __initdata usbh_data = { .ports = 2, .vbus_pin = {-EINVAL, -EINVAL}, .overcurrent_pin= {-EINVAL, -EINVAL}, }; /* * USB Device port */ static struct at91_udc_data __initdata portuxg20_udc_data = { .vbus_pin = AT91_PIN_PC7, .pullup_pin = -EINVAL, /* pull-up driven by UDC */ }; static struct at91_udc_data __initdata stamp9g20evb_udc_data = { .vbus_pin = AT91_PIN_PA22, .pullup_pin = -EINVAL, /* pull-up driven by UDC */ }; /* * MACB Ethernet device */ static struct macb_platform_data __initdata macb_data = { .phy_irq_pin = AT91_PIN_PA28, .is_rmii = 1, }; /* * LEDs */ static struct gpio_led portuxg20_leds[] = { { .name = "LED2", .gpio = AT91_PIN_PC5, .default_trigger = "none", }, { .name = "LED3", .gpio = AT91_PIN_PC4, .default_trigger = "none", }, { .name = "LED4", .gpio = AT91_PIN_PC10, .default_trigger = "heartbeat", } }; static struct gpio_led stamp9g20evb_leds[] = { { .name = "D8", .gpio = AT91_PIN_PB18, .active_low = 1, .default_trigger = "none", }, { .name = "D9", .gpio = AT91_PIN_PB19, .active_low = 1, .default_trigger = "none", }, { .name = "D10", .gpio = AT91_PIN_PB20, .active_low = 1, .default_trigger = "heartbeat", } }; /* * SPI devices */ static struct spi_board_info portuxg20_spi_devices[] = { { .modalias = "spidev", .chip_select = 0, .max_speed_hz = 1 * 1000 * 1000, .bus_num = 0, }, { .modalias = "spidev", .chip_select = 0, .max_speed_hz = 1 * 1000 * 1000, .bus_num = 1, }, }; /* * Dallas 1-Wire */ static struct w1_gpio_platform_data w1_gpio_pdata = { .pin = AT91_PIN_PA29, .is_open_drain = 1, .ext_pullup_enable_pin = -EINVAL, }; static struct platform_device w1_device = { .name = "w1-gpio", .id = -1, .dev.platform_data = &w1_gpio_pdata, }; void add_w1(void) { at91_set_GPIO_periph(w1_gpio_pdata.pin, 1); at91_set_multi_drive(w1_gpio_pdata.pin, 1); 
platform_device_register(&w1_device); } void __init stamp9g20_board_init(void) { /* Serial */ /* DGBU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); at91_add_device_serial(); /* NAND */ add_device_nand(); /* MMC */ at91_add_device_mci(0, &mmc_data); /* W1 */ add_w1(); } static void __init portuxg20_board_init(void) { /* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD | ATMEL_UART_RI); /* USART1 on ttyS2. (Rx, Tx, CTS, RTS) */ at91_register_uart(AT91SAM9260_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS); /* USART2 on ttyS3. (Rx, Tx, CTS, RTS) */ at91_register_uart(AT91SAM9260_ID_US2, 3, ATMEL_UART_CTS | ATMEL_UART_RTS); /* USART4 on ttyS5. (Rx, Tx only) */ at91_register_uart(AT91SAM9260_ID_US4, 5, 0); /* USART5 on ttyS6. (Rx, Tx only) */ at91_register_uart(AT91SAM9260_ID_US5, 6, 0); stamp9g20_board_init(); /* USB Host */ at91_add_device_usbh(&usbh_data); /* USB Device */ at91_add_device_udc(&portuxg20_udc_data); /* Ethernet */ at91_add_device_eth(&macb_data); /* I2C */ at91_add_device_i2c(NULL, 0); /* SPI */ at91_add_device_spi(portuxg20_spi_devices, ARRAY_SIZE(portuxg20_spi_devices)); /* LEDs */ at91_gpio_leds(portuxg20_leds, ARRAY_SIZE(portuxg20_leds)); } static void __init stamp9g20evb_board_init(void) { /* USART0 on ttyS1. 
(Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD | ATMEL_UART_RI); stamp9g20_board_init(); /* USB Host */ at91_add_device_usbh(&usbh_data); /* USB Device */ at91_add_device_udc(&stamp9g20evb_udc_data); /* Ethernet */ at91_add_device_eth(&macb_data); /* I2C */ at91_add_device_i2c(NULL, 0); /* LEDs */ at91_gpio_leds(stamp9g20evb_leds, ARRAY_SIZE(stamp9g20evb_leds)); } MACHINE_START(PORTUXG20, "taskit PortuxG20") /* Maintainer: taskit GmbH */ .init_time = at91sam926x_pit_init, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = stamp9g20_init_early, .init_irq = at91_init_irq_default, .init_machine = portuxg20_board_init, MACHINE_END MACHINE_START(STAMP9G20, "taskit Stamp9G20") /* Maintainer: taskit GmbH */ .init_time = at91sam926x_pit_init, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = stamp9g20_init_early, .init_irq = at91_init_irq_default, .init_machine = stamp9g20evb_board_init, MACHINE_END
gpl-2.0
FreeScienceCommunity/rt-thread
components/external/freetype/src/gxvalid/gxvkern.c
147
28381
/***************************************************************************/ /* */ /* gxvkern.c */ /* */ /* TrueTypeGX/AAT kern table validation (body). */ /* */ /* Copyright 2004-2007, 2013 */ /* by suzuki toshiya, Masatake YAMATO, Red Hat K.K., */ /* David Turner, Robert Wilhelm, and Werner Lemberg. */ /* */ /* This file is part of the FreeType project, and may only be used, */ /* modified, and distributed under the terms of the FreeType project */ /* license, LICENSE.TXT. By continuing to use, modify, or distribute */ /* this file you indicate that you have read the license and */ /* understand and accept it fully. */ /* */ /***************************************************************************/ /***************************************************************************/ /* */ /* gxvalid is derived from both gxlayout module and otvalid module. */ /* Development of gxlayout is supported by the Information-technology */ /* Promotion Agency(IPA), Japan. */ /* */ /***************************************************************************/ #include "gxvalid.h" #include "gxvcommn.h" #include FT_SFNT_NAMES_H #include FT_SERVICE_GX_VALIDATE_H /*************************************************************************/ /* */ /* The macro FT_COMPONENT is used in trace mode. It is an implicit */ /* parameter of the FT_TRACE() and FT_ERROR() macros, used to print/log */ /* messages during execution. 
*/ /* */ #undef FT_COMPONENT #define FT_COMPONENT trace_gxvkern /*************************************************************************/ /*************************************************************************/ /***** *****/ /***** Data and Types *****/ /***** *****/ /*************************************************************************/ /*************************************************************************/ typedef enum GXV_kern_Version_ { KERN_VERSION_CLASSIC = 0x0000, KERN_VERSION_NEW = 0x0001 } GXV_kern_Version; typedef enum GXV_kern_Dialect_ { KERN_DIALECT_UNKNOWN = 0, KERN_DIALECT_MS = FT_VALIDATE_MS, KERN_DIALECT_APPLE = FT_VALIDATE_APPLE, KERN_DIALECT_ANY = FT_VALIDATE_CKERN } GXV_kern_Dialect; typedef struct GXV_kern_DataRec_ { GXV_kern_Version version; void *subtable_data; GXV_kern_Dialect dialect_request; } GXV_kern_DataRec, *GXV_kern_Data; #define GXV_KERN_DATA( field ) GXV_TABLE_DATA( kern, field ) #define KERN_IS_CLASSIC( gxvalid ) \ ( KERN_VERSION_CLASSIC == GXV_KERN_DATA( version ) ) #define KERN_IS_NEW( gxvalid ) \ ( KERN_VERSION_NEW == GXV_KERN_DATA( version ) ) #define KERN_DIALECT( gxvalid ) \ GXV_KERN_DATA( dialect_request ) #define KERN_ALLOWS_MS( gxvalid ) \ ( KERN_DIALECT( gxvalid ) & KERN_DIALECT_MS ) #define KERN_ALLOWS_APPLE( gxvalid ) \ ( KERN_DIALECT( gxvalid ) & KERN_DIALECT_APPLE ) #define GXV_KERN_HEADER_SIZE ( KERN_IS_NEW( gxvalid ) ? 8 : 4 ) #define GXV_KERN_SUBTABLE_HEADER_SIZE ( KERN_IS_NEW( gxvalid ) ? 
8 : 6 ) /*************************************************************************/ /*************************************************************************/ /***** *****/ /***** SUBTABLE VALIDATORS *****/ /***** *****/ /*************************************************************************/ /*************************************************************************/ /* ============================= format 0 ============================== */ static void gxv_kern_subtable_fmt0_pairs_validate( FT_Bytes table, FT_Bytes limit, FT_UShort nPairs, GXV_Validator gxvalid ) { FT_Bytes p = table; FT_UShort i; FT_UShort last_gid_left = 0; FT_UShort last_gid_right = 0; FT_UNUSED( limit ); GXV_NAME_ENTER( "kern format 0 pairs" ); for ( i = 0; i < nPairs; i++ ) { FT_UShort gid_left; FT_UShort gid_right; #ifdef GXV_LOAD_UNUSED_VARS FT_Short kernValue; #endif /* left */ gid_left = FT_NEXT_USHORT( p ); gxv_glyphid_validate( gid_left, gxvalid ); /* right */ gid_right = FT_NEXT_USHORT( p ); gxv_glyphid_validate( gid_right, gxvalid ); /* Pairs of left and right GIDs must be unique and sorted. 
*/ GXV_TRACE(( "left gid = %u, right gid = %u\n", gid_left, gid_right )); if ( gid_left == last_gid_left ) { if ( last_gid_right < gid_right ) last_gid_right = gid_right; else FT_INVALID_DATA; } else if ( last_gid_left < gid_left ) { last_gid_left = gid_left; last_gid_right = gid_right; } else FT_INVALID_DATA; /* skip the kern value */ #ifdef GXV_LOAD_UNUSED_VARS kernValue = FT_NEXT_SHORT( p ); #else p += 2; #endif } GXV_EXIT; } static void gxv_kern_subtable_fmt0_validate( FT_Bytes table, FT_Bytes limit, GXV_Validator gxvalid ) { FT_Bytes p = table + GXV_KERN_SUBTABLE_HEADER_SIZE; FT_UShort nPairs; FT_UShort unitSize; GXV_NAME_ENTER( "kern subtable format 0" ); unitSize = 2 + 2 + 2; nPairs = 0; /* nPairs, searchRange, entrySelector, rangeShift */ GXV_LIMIT_CHECK( 2 + 2 + 2 + 2 ); gxv_BinSrchHeader_validate( p, limit, &unitSize, &nPairs, gxvalid ); p += 2 + 2 + 2 + 2; gxv_kern_subtable_fmt0_pairs_validate( p, limit, nPairs, gxvalid ); GXV_EXIT; } /* ============================= format 1 ============================== */ typedef struct GXV_kern_fmt1_StateOptRec_ { FT_UShort valueTable; FT_UShort valueTable_length; } GXV_kern_fmt1_StateOptRec, *GXV_kern_fmt1_StateOptRecData; static void gxv_kern_subtable_fmt1_valueTable_load( FT_Bytes table, FT_Bytes limit, GXV_Validator gxvalid ) { FT_Bytes p = table; GXV_kern_fmt1_StateOptRecData optdata = (GXV_kern_fmt1_StateOptRecData)gxvalid->statetable.optdata; GXV_LIMIT_CHECK( 2 ); optdata->valueTable = FT_NEXT_USHORT( p ); } /* * passed tables_size covers whole StateTable, including kern fmt1 header */ static void gxv_kern_subtable_fmt1_subtable_setup( FT_UShort table_size, FT_UShort classTable, FT_UShort stateArray, FT_UShort entryTable, FT_UShort* classTable_length_p, FT_UShort* stateArray_length_p, FT_UShort* entryTable_length_p, GXV_Validator gxvalid ) { FT_UShort o[4]; FT_UShort *l[4]; FT_UShort buff[5]; GXV_kern_fmt1_StateOptRecData optdata = (GXV_kern_fmt1_StateOptRecData)gxvalid->statetable.optdata; o[0] = classTable; 
o[1] = stateArray; o[2] = entryTable; o[3] = optdata->valueTable; l[0] = classTable_length_p; l[1] = stateArray_length_p; l[2] = entryTable_length_p; l[3] = &(optdata->valueTable_length); gxv_set_length_by_ushort_offset( o, l, buff, 4, table_size, gxvalid ); } /* * passed table & limit are of whole StateTable, not including subtables */ static void gxv_kern_subtable_fmt1_entry_validate( FT_Byte state, FT_UShort flags, GXV_StateTable_GlyphOffsetCPtr glyphOffset_p, FT_Bytes table, FT_Bytes limit, GXV_Validator gxvalid ) { #ifdef GXV_LOAD_UNUSED_VARS FT_UShort push; FT_UShort dontAdvance; #endif FT_UShort valueOffset; #ifdef GXV_LOAD_UNUSED_VARS FT_UShort kernAction; FT_UShort kernValue; #endif FT_UNUSED( state ); FT_UNUSED( glyphOffset_p ); #ifdef GXV_LOAD_UNUSED_VARS push = (FT_UShort)( ( flags >> 15 ) & 1 ); dontAdvance = (FT_UShort)( ( flags >> 14 ) & 1 ); #endif valueOffset = (FT_UShort)( flags & 0x3FFF ); { GXV_kern_fmt1_StateOptRecData vt_rec = (GXV_kern_fmt1_StateOptRecData)gxvalid->statetable.optdata; FT_Bytes p; if ( valueOffset < vt_rec->valueTable ) FT_INVALID_OFFSET; p = table + valueOffset; limit = table + vt_rec->valueTable + vt_rec->valueTable_length; GXV_LIMIT_CHECK( 2 + 2 ); #ifdef GXV_LOAD_UNUSED_VARS kernAction = FT_NEXT_USHORT( p ); kernValue = FT_NEXT_USHORT( p ); #endif } } static void gxv_kern_subtable_fmt1_validate( FT_Bytes table, FT_Bytes limit, GXV_Validator gxvalid ) { FT_Bytes p = table; GXV_kern_fmt1_StateOptRec vt_rec; GXV_NAME_ENTER( "kern subtable format 1" ); gxvalid->statetable.optdata = &vt_rec; gxvalid->statetable.optdata_load_func = gxv_kern_subtable_fmt1_valueTable_load; gxvalid->statetable.subtable_setup_func = gxv_kern_subtable_fmt1_subtable_setup; gxvalid->statetable.entry_glyphoffset_fmt = GXV_GLYPHOFFSET_NONE; gxvalid->statetable.entry_validate_func = gxv_kern_subtable_fmt1_entry_validate; gxv_StateTable_validate( p, limit, gxvalid ); GXV_EXIT; } /* ================ Data for Class-Based Subtables 2, 3 ================ */ 
typedef enum GXV_kern_ClassSpec_ { GXV_KERN_CLS_L = 0, GXV_KERN_CLS_R } GXV_kern_ClassSpec; /* ============================= format 2 ============================== */ /* ---------------------- format 2 specific data ----------------------- */ typedef struct GXV_kern_subtable_fmt2_DataRec_ { FT_UShort rowWidth; FT_UShort array; FT_UShort offset_min[2]; FT_UShort offset_max[2]; const FT_String* class_tag[2]; GXV_odtect_Range odtect; } GXV_kern_subtable_fmt2_DataRec, *GXV_kern_subtable_fmt2_Data; #define GXV_KERN_FMT2_DATA( field ) \ ( ( (GXV_kern_subtable_fmt2_DataRec *) \ ( GXV_KERN_DATA( subtable_data ) ) )->field ) /* -------------------------- utility functions ----------------------- */ static void gxv_kern_subtable_fmt2_clstbl_validate( FT_Bytes table, FT_Bytes limit, GXV_kern_ClassSpec spec, GXV_Validator gxvalid ) { const FT_String* tag = GXV_KERN_FMT2_DATA( class_tag[spec] ); GXV_odtect_Range odtect = GXV_KERN_FMT2_DATA( odtect ); FT_Bytes p = table; FT_UShort firstGlyph; FT_UShort nGlyphs; GXV_NAME_ENTER( "kern format 2 classTable" ); GXV_LIMIT_CHECK( 2 + 2 ); firstGlyph = FT_NEXT_USHORT( p ); nGlyphs = FT_NEXT_USHORT( p ); GXV_TRACE(( " %s firstGlyph=%d, nGlyphs=%d\n", tag, firstGlyph, nGlyphs )); gxv_glyphid_validate( firstGlyph, gxvalid ); gxv_glyphid_validate( (FT_UShort)( firstGlyph + nGlyphs - 1 ), gxvalid ); gxv_array_getlimits_ushort( p, p + ( 2 * nGlyphs ), &( GXV_KERN_FMT2_DATA( offset_min[spec] ) ), &( GXV_KERN_FMT2_DATA( offset_max[spec] ) ), gxvalid ); gxv_odtect_add_range( table, 2 * nGlyphs, tag, odtect ); GXV_EXIT; } static void gxv_kern_subtable_fmt2_validate( FT_Bytes table, FT_Bytes limit, GXV_Validator gxvalid ) { GXV_ODTECT( 3, odtect ); GXV_kern_subtable_fmt2_DataRec fmt2_rec = { 0, 0, { 0, 0 }, { 0, 0 }, { "leftClass", "rightClass" }, NULL }; FT_Bytes p = table + GXV_KERN_SUBTABLE_HEADER_SIZE; FT_UShort leftOffsetTable; FT_UShort rightOffsetTable; GXV_NAME_ENTER( "kern subtable format 2" ); GXV_ODTECT_INIT( odtect ); fmt2_rec.odtect 
= odtect; GXV_KERN_DATA( subtable_data ) = &fmt2_rec; GXV_LIMIT_CHECK( 2 + 2 + 2 + 2 ); GXV_KERN_FMT2_DATA( rowWidth ) = FT_NEXT_USHORT( p ); leftOffsetTable = FT_NEXT_USHORT( p ); rightOffsetTable = FT_NEXT_USHORT( p ); GXV_KERN_FMT2_DATA( array ) = FT_NEXT_USHORT( p ); GXV_TRACE(( "rowWidth = %d\n", GXV_KERN_FMT2_DATA( rowWidth ) )); GXV_LIMIT_CHECK( leftOffsetTable ); GXV_LIMIT_CHECK( rightOffsetTable ); GXV_LIMIT_CHECK( GXV_KERN_FMT2_DATA( array ) ); gxv_kern_subtable_fmt2_clstbl_validate( table + leftOffsetTable, limit, GXV_KERN_CLS_L, gxvalid ); gxv_kern_subtable_fmt2_clstbl_validate( table + rightOffsetTable, limit, GXV_KERN_CLS_R, gxvalid ); if ( GXV_KERN_FMT2_DATA( offset_min[GXV_KERN_CLS_L] ) + GXV_KERN_FMT2_DATA( offset_min[GXV_KERN_CLS_R] ) < GXV_KERN_FMT2_DATA( array ) ) FT_INVALID_OFFSET; gxv_odtect_add_range( table + GXV_KERN_FMT2_DATA( array ), GXV_KERN_FMT2_DATA( offset_max[GXV_KERN_CLS_L] ) + GXV_KERN_FMT2_DATA( offset_max[GXV_KERN_CLS_R] ) - GXV_KERN_FMT2_DATA( array ), "array", odtect ); gxv_odtect_validate( odtect, gxvalid ); GXV_EXIT; } /* ============================= format 3 ============================== */ static void gxv_kern_subtable_fmt3_validate( FT_Bytes table, FT_Bytes limit, GXV_Validator gxvalid ) { FT_Bytes p = table + GXV_KERN_SUBTABLE_HEADER_SIZE; FT_UShort glyphCount; FT_Byte kernValueCount; FT_Byte leftClassCount; FT_Byte rightClassCount; FT_Byte flags; GXV_NAME_ENTER( "kern subtable format 3" ); GXV_LIMIT_CHECK( 2 + 1 + 1 + 1 + 1 ); glyphCount = FT_NEXT_USHORT( p ); kernValueCount = FT_NEXT_BYTE( p ); leftClassCount = FT_NEXT_BYTE( p ); rightClassCount = FT_NEXT_BYTE( p ); flags = FT_NEXT_BYTE( p ); if ( gxvalid->face->num_glyphs != glyphCount ) { GXV_TRACE(( "maxGID=%d, but glyphCount=%d\n", gxvalid->face->num_glyphs, glyphCount )); GXV_SET_ERR_IF_PARANOID( FT_INVALID_GLYPH_ID ); } if ( flags != 0 ) GXV_TRACE(( "kern subtable fmt3 has nonzero value" " (%d) in unused flag\n", flags )); /* * just skip 
kernValue[kernValueCount] */ GXV_LIMIT_CHECK( 2 * kernValueCount ); p += 2 * kernValueCount; /* * check leftClass[gid] < leftClassCount */ { FT_Byte min, max; GXV_LIMIT_CHECK( glyphCount ); gxv_array_getlimits_byte( p, p + glyphCount, &min, &max, gxvalid ); p += gxvalid->subtable_length; if ( leftClassCount < max ) FT_INVALID_DATA; } /* * check rightClass[gid] < rightClassCount */ { FT_Byte min, max; GXV_LIMIT_CHECK( glyphCount ); gxv_array_getlimits_byte( p, p + glyphCount, &min, &max, gxvalid ); p += gxvalid->subtable_length; if ( rightClassCount < max ) FT_INVALID_DATA; } /* * check kernIndex[i, j] < kernValueCount */ { FT_UShort i, j; for ( i = 0; i < leftClassCount; i++ ) { for ( j = 0; j < rightClassCount; j++ ) { GXV_LIMIT_CHECK( 1 ); if ( kernValueCount < FT_NEXT_BYTE( p ) ) FT_INVALID_OFFSET; } } } gxvalid->subtable_length = p - table; GXV_EXIT; } static FT_Bool gxv_kern_coverage_new_apple_validate( FT_UShort coverage, FT_UShort* format, GXV_Validator gxvalid ) { /* new Apple-dialect */ #ifdef GXV_LOAD_TRACE_VARS FT_Bool kernVertical; FT_Bool kernCrossStream; FT_Bool kernVariation; #endif FT_UNUSED( gxvalid ); /* reserved bits = 0 */ if ( coverage & 0x1FFC ) return FALSE; #ifdef GXV_LOAD_TRACE_VARS kernVertical = FT_BOOL( ( coverage >> 15 ) & 1 ); kernCrossStream = FT_BOOL( ( coverage >> 14 ) & 1 ); kernVariation = FT_BOOL( ( coverage >> 13 ) & 1 ); #endif *format = (FT_UShort)( coverage & 0x0003 ); GXV_TRACE(( "new Apple-dialect: " "horizontal=%d, cross-stream=%d, variation=%d, format=%d\n", !kernVertical, kernCrossStream, kernVariation, *format )); GXV_TRACE(( "kerning values in Apple format subtable are ignored\n" )); return TRUE; } static FT_Bool gxv_kern_coverage_classic_apple_validate( FT_UShort coverage, FT_UShort* format, GXV_Validator gxvalid ) { /* classic Apple-dialect */ #ifdef GXV_LOAD_TRACE_VARS FT_Bool horizontal; FT_Bool cross_stream; #endif /* check expected flags, but don't check if MS-dialect is impossible */ if ( !( coverage & 0xFD00 ) 
&& KERN_ALLOWS_MS( gxvalid ) ) return FALSE; /* reserved bits = 0 */ if ( coverage & 0x02FC ) return FALSE; #ifdef GXV_LOAD_TRACE_VARS horizontal = FT_BOOL( ( coverage >> 15 ) & 1 ); cross_stream = FT_BOOL( ( coverage >> 13 ) & 1 ); #endif *format = (FT_UShort)( coverage & 0x0003 ); GXV_TRACE(( "classic Apple-dialect: " "horizontal=%d, cross-stream=%d, format=%d\n", horizontal, cross_stream, *format )); /* format 1 requires GX State Machine, too new for classic */ if ( *format == 1 ) return FALSE; GXV_TRACE(( "kerning values in Apple format subtable are ignored\n" )); return TRUE; } static FT_Bool gxv_kern_coverage_classic_microsoft_validate( FT_UShort coverage, FT_UShort* format, GXV_Validator gxvalid ) { /* classic Microsoft-dialect */ #ifdef GXV_LOAD_TRACE_VARS FT_Bool horizontal; FT_Bool minimum; FT_Bool cross_stream; FT_Bool override; #endif FT_UNUSED( gxvalid ); /* reserved bits = 0 */ if ( coverage & 0xFDF0 ) return FALSE; #ifdef GXV_LOAD_TRACE_VARS horizontal = FT_BOOL( coverage & 1 ); minimum = FT_BOOL( ( coverage >> 1 ) & 1 ); cross_stream = FT_BOOL( ( coverage >> 2 ) & 1 ); override = FT_BOOL( ( coverage >> 3 ) & 1 ); #endif *format = (FT_UShort)( ( coverage >> 8 ) & 0x0003 ); GXV_TRACE(( "classic Microsoft-dialect: " "horizontal=%d, minimum=%d, cross-stream=%d, " "override=%d, format=%d\n", horizontal, minimum, cross_stream, override, *format )); if ( *format == 2 ) GXV_TRACE(( "kerning values in Microsoft format 2 subtable are ignored\n" )); return TRUE; } /*************************************************************************/ /*************************************************************************/ /***** *****/ /***** MAIN *****/ /***** *****/ /*************************************************************************/ /*************************************************************************/ static GXV_kern_Dialect gxv_kern_coverage_validate( FT_UShort coverage, FT_UShort* format, GXV_Validator gxvalid ) { GXV_kern_Dialect result = 
KERN_DIALECT_UNKNOWN; GXV_NAME_ENTER( "validating coverage" ); GXV_TRACE(( "interprete coverage 0x%04x by Apple style\n", coverage )); if ( KERN_IS_NEW( gxvalid ) ) { if ( gxv_kern_coverage_new_apple_validate( coverage, format, gxvalid ) ) { result = KERN_DIALECT_APPLE; goto Exit; } } if ( KERN_IS_CLASSIC( gxvalid ) && KERN_ALLOWS_APPLE( gxvalid ) ) { if ( gxv_kern_coverage_classic_apple_validate( coverage, format, gxvalid ) ) { result = KERN_DIALECT_APPLE; goto Exit; } } if ( KERN_IS_CLASSIC( gxvalid ) && KERN_ALLOWS_MS( gxvalid ) ) { if ( gxv_kern_coverage_classic_microsoft_validate( coverage, format, gxvalid ) ) { result = KERN_DIALECT_MS; goto Exit; } } GXV_TRACE(( "cannot interprete coverage, broken kern subtable\n" )); Exit: GXV_EXIT; return result; } static void gxv_kern_subtable_validate( FT_Bytes table, FT_Bytes limit, GXV_Validator gxvalid ) { FT_Bytes p = table; #ifdef GXV_LOAD_TRACE_VARS FT_UShort version = 0; /* MS only: subtable version, unused */ #endif FT_ULong length; /* MS: 16bit, Apple: 32bit*/ FT_UShort coverage; #ifdef GXV_LOAD_TRACE_VARS FT_UShort tupleIndex = 0; /* Apple only */ #endif FT_UShort u16[2]; FT_UShort format = 255; /* subtable format */ GXV_NAME_ENTER( "kern subtable" ); GXV_LIMIT_CHECK( 2 + 2 + 2 ); u16[0] = FT_NEXT_USHORT( p ); /* Apple: length_hi MS: version */ u16[1] = FT_NEXT_USHORT( p ); /* Apple: length_lo MS: length */ coverage = FT_NEXT_USHORT( p ); switch ( gxv_kern_coverage_validate( coverage, &format, gxvalid ) ) { case KERN_DIALECT_MS: #ifdef GXV_LOAD_TRACE_VARS version = u16[0]; #endif length = u16[1]; #ifdef GXV_LOAD_TRACE_VARS tupleIndex = 0; #endif GXV_TRACE(( "Subtable version = %d\n", version )); GXV_TRACE(( "Subtable length = %d\n", length )); break; case KERN_DIALECT_APPLE: #ifdef GXV_LOAD_TRACE_VARS version = 0; #endif length = ( u16[0] << 16 ) + u16[1]; #ifdef GXV_LOAD_TRACE_VARS tupleIndex = 0; #endif GXV_TRACE(( "Subtable length = %d\n", length )); if ( KERN_IS_NEW( gxvalid ) ) { GXV_LIMIT_CHECK( 2 ); 
#ifdef GXV_LOAD_TRACE_VARS tupleIndex = FT_NEXT_USHORT( p ); #else p += 2; #endif GXV_TRACE(( "Subtable tupleIndex = %d\n", tupleIndex )); } break; default: length = u16[1]; GXV_TRACE(( "cannot detect subtable dialect, " "just skip %d byte\n", length )); goto Exit; } /* formats 1, 2, 3 require the position of the start of this subtable */ if ( format == 0 ) gxv_kern_subtable_fmt0_validate( table, table + length, gxvalid ); else if ( format == 1 ) gxv_kern_subtable_fmt1_validate( table, table + length, gxvalid ); else if ( format == 2 ) gxv_kern_subtable_fmt2_validate( table, table + length, gxvalid ); else if ( format == 3 ) gxv_kern_subtable_fmt3_validate( table, table + length, gxvalid ); else FT_INVALID_DATA; Exit: gxvalid->subtable_length = length; GXV_EXIT; } /*************************************************************************/ /*************************************************************************/ /***** *****/ /***** kern TABLE *****/ /***** *****/ /*************************************************************************/ /*************************************************************************/ static void gxv_kern_validate_generic( FT_Bytes table, FT_Face face, FT_Bool classic_only, GXV_kern_Dialect dialect_request, FT_Validator ftvalid ) { GXV_ValidatorRec gxvalidrec; GXV_Validator gxvalid = &gxvalidrec; GXV_kern_DataRec kernrec; GXV_kern_Data kern = &kernrec; FT_Bytes p = table; FT_Bytes limit = 0; FT_ULong nTables = 0; FT_UInt i; gxvalid->root = ftvalid; gxvalid->table_data = kern; gxvalid->face = face; FT_TRACE3(( "validating `kern' table\n" )); GXV_INIT; KERN_DIALECT( gxvalid ) = dialect_request; GXV_LIMIT_CHECK( 2 ); GXV_KERN_DATA( version ) = (GXV_kern_Version)FT_NEXT_USHORT( p ); GXV_TRACE(( "version 0x%04x (higher 16bit)\n", GXV_KERN_DATA( version ) )); if ( 0x0001 < GXV_KERN_DATA( version ) ) FT_INVALID_FORMAT; else if ( KERN_IS_CLASSIC( gxvalid ) ) { GXV_LIMIT_CHECK( 2 ); nTables = FT_NEXT_USHORT( p ); } else if ( KERN_IS_NEW( 
gxvalid ) ) { if ( classic_only ) FT_INVALID_FORMAT; if ( 0x0000 != FT_NEXT_USHORT( p ) ) FT_INVALID_FORMAT; GXV_LIMIT_CHECK( 4 ); nTables = FT_NEXT_ULONG( p ); } for ( i = 0; i < nTables; i++ ) { GXV_TRACE(( "validating subtable %d/%d\n", i, nTables )); /* p should be 32bit-aligned? */ gxv_kern_subtable_validate( p, 0, gxvalid ); p += gxvalid->subtable_length; } FT_TRACE4(( "\n" )); } FT_LOCAL_DEF( void ) gxv_kern_validate( FT_Bytes table, FT_Face face, FT_Validator ftvalid ) { gxv_kern_validate_generic( table, face, 0, KERN_DIALECT_ANY, ftvalid ); } FT_LOCAL_DEF( void ) gxv_kern_validate_classic( FT_Bytes table, FT_Face face, FT_Int dialect_flags, FT_Validator ftvalid ) { GXV_kern_Dialect dialect_request; dialect_request = (GXV_kern_Dialect)dialect_flags; gxv_kern_validate_generic( table, face, 1, dialect_request, ftvalid ); } /* END */
gpl-2.0
MassStash/htc_m9_kernel_sense_5.0.2
drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_proximity.c
915
15886
/* * Synaptics DSX touchscreen driver * * Copyright (C) 2012 Synaptics Incorporated * * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com> * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/input.h> #include <linux/platform_device.h> #include <linux/input/synaptics_dsx_v2.h> #include "synaptics_dsx_core.h" #define PROX_PHYS_NAME "synaptics_dsx/input1" #define HOVER_Z_MAX (255) #define HOVERING_FINGER_EN (1 << 4) static ssize_t synaptics_rmi4_hover_finger_en_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_hover_finger_en_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); static struct device_attribute attrs[] = { __ATTR(hover_finger_en, (S_IRUGO | S_IWUGO), synaptics_rmi4_hover_finger_en_show, synaptics_rmi4_hover_finger_en_store), }; struct synaptics_rmi4_f12_query_5 { union { struct { unsigned char size_of_query6; struct { unsigned char ctrl0_is_present:1; unsigned char ctrl1_is_present:1; unsigned char ctrl2_is_present:1; unsigned char ctrl3_is_present:1; unsigned char ctrl4_is_present:1; unsigned char ctrl5_is_present:1; unsigned char ctrl6_is_present:1; unsigned char ctrl7_is_present:1; } __packed; struct { unsigned char ctrl8_is_present:1; unsigned char ctrl9_is_present:1; unsigned char ctrl10_is_present:1; unsigned 
char ctrl11_is_present:1; unsigned char ctrl12_is_present:1; unsigned char ctrl13_is_present:1; unsigned char ctrl14_is_present:1; unsigned char ctrl15_is_present:1; } __packed; struct { unsigned char ctrl16_is_present:1; unsigned char ctrl17_is_present:1; unsigned char ctrl18_is_present:1; unsigned char ctrl19_is_present:1; unsigned char ctrl20_is_present:1; unsigned char ctrl21_is_present:1; unsigned char ctrl22_is_present:1; unsigned char ctrl23_is_present:1; } __packed; }; unsigned char data[4]; }; }; struct synaptics_rmi4_f12_query_8 { union { struct { unsigned char size_of_query9; struct { unsigned char data0_is_present:1; unsigned char data1_is_present:1; unsigned char data2_is_present:1; unsigned char data3_is_present:1; unsigned char data4_is_present:1; unsigned char data5_is_present:1; unsigned char data6_is_present:1; unsigned char data7_is_present:1; } __packed; }; unsigned char data[2]; }; }; struct prox_finger_data { union { struct { unsigned char object_type_and_status; unsigned char x_lsb; unsigned char x_msb; unsigned char y_lsb; unsigned char y_msb; unsigned char z; } __packed; unsigned char proximity_data[6]; }; }; struct synaptics_rmi4_prox_handle { bool hover_finger_present; bool hover_finger_en; unsigned char intr_mask; unsigned short query_base_addr; unsigned short control_base_addr; unsigned short data_base_addr; unsigned short command_base_addr; unsigned short hover_finger_en_addr; unsigned short hover_finger_data_addr; struct input_dev *prox_dev; struct prox_finger_data *finger_data; struct synaptics_rmi4_data *rmi4_data; }; static struct synaptics_rmi4_prox_handle *prox; DECLARE_COMPLETION(prox_remove_complete); static void prox_hover_finger_lift(void) { input_report_key(prox->prox_dev, BTN_TOUCH, 0); input_report_key(prox->prox_dev, BTN_TOOL_FINGER, 0); input_sync(prox->prox_dev); prox->hover_finger_present = false; return; } static void prox_hover_finger_report(void) { int retval; int x; int y; int z; struct prox_finger_data *data; 
struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data; data = prox->finger_data; retval = synaptics_rmi4_reg_read(rmi4_data, prox->hover_finger_data_addr, data->proximity_data, sizeof(data->proximity_data)); if (retval < 0) { dev_err(rmi4_data->pdev->dev.parent, "%s: Failed to read hovering finger data\n", __func__); return; } if (data->object_type_and_status != F12_HOVERING_FINGER_STATUS) { if (prox->hover_finger_present) prox_hover_finger_lift(); return; } x = (data->x_msb << 8) | (data->x_lsb); y = (data->y_msb << 8) | (data->y_lsb); z = HOVER_Z_MAX - data->z; input_report_key(prox->prox_dev, BTN_TOUCH, 0); input_report_key(prox->prox_dev, BTN_TOOL_FINGER, 1); input_report_abs(prox->prox_dev, ABS_X, x); input_report_abs(prox->prox_dev, ABS_Y, y); input_report_abs(prox->prox_dev, ABS_DISTANCE, z); input_sync(prox->prox_dev); dev_dbg(rmi4_data->pdev->dev.parent, "%s: x = %d y = %d z = %d\n", __func__, x, y, z); prox->hover_finger_present = true; return; } static int prox_set_hover_finger_en(void) { int retval; unsigned char object_report_enable; struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data; retval = synaptics_rmi4_reg_read(rmi4_data, prox->hover_finger_en_addr, &object_report_enable, sizeof(object_report_enable)); if (retval < 0) { dev_err(rmi4_data->pdev->dev.parent, "%s: Failed to read from object report enable register\n", __func__); return retval; } if (prox->hover_finger_en) object_report_enable |= HOVERING_FINGER_EN; else object_report_enable &= ~HOVERING_FINGER_EN; retval = synaptics_rmi4_reg_write(rmi4_data, prox->hover_finger_en_addr, &object_report_enable, sizeof(object_report_enable)); if (retval < 0) { dev_err(rmi4_data->pdev->dev.parent, "%s: Failed to write to object report enable register\n", __func__); return retval; } return 0; } static void prox_set_params(void) { input_set_abs_params(prox->prox_dev, ABS_X, 0, prox->rmi4_data->sensor_max_x, 0, 0); input_set_abs_params(prox->prox_dev, ABS_Y, 0, prox->rmi4_data->sensor_max_y, 0, 0); 
input_set_abs_params(prox->prox_dev, ABS_DISTANCE, 0, HOVER_Z_MAX, 0, 0); return; } static int prox_reg_init(void) { int retval; unsigned char ctrl_23_offset; unsigned char data_1_offset; struct synaptics_rmi4_f12_query_5 query_5; struct synaptics_rmi4_f12_query_8 query_8; struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data; retval = synaptics_rmi4_reg_read(rmi4_data, prox->query_base_addr + 5, query_5.data, sizeof(query_5.data)); if (retval < 0) return retval; ctrl_23_offset = query_5.ctrl0_is_present + query_5.ctrl1_is_present + query_5.ctrl2_is_present + query_5.ctrl3_is_present + query_5.ctrl4_is_present + query_5.ctrl5_is_present + query_5.ctrl6_is_present + query_5.ctrl7_is_present + query_5.ctrl8_is_present + query_5.ctrl9_is_present + query_5.ctrl10_is_present + query_5.ctrl11_is_present + query_5.ctrl12_is_present + query_5.ctrl13_is_present + query_5.ctrl14_is_present + query_5.ctrl15_is_present + query_5.ctrl16_is_present + query_5.ctrl17_is_present + query_5.ctrl18_is_present + query_5.ctrl19_is_present + query_5.ctrl20_is_present + query_5.ctrl21_is_present + query_5.ctrl22_is_present; prox->hover_finger_en_addr = prox->control_base_addr + ctrl_23_offset; retval = synaptics_rmi4_reg_read(rmi4_data, prox->query_base_addr + 8, query_8.data, sizeof(query_8.data)); if (retval < 0) return retval; data_1_offset = query_8.data0_is_present; prox->hover_finger_data_addr = prox->data_base_addr + data_1_offset; return retval; } static int prox_scan_pdt(void) { int retval; unsigned char ii; unsigned char page; unsigned char intr_count = 0; unsigned char intr_off; unsigned char intr_src; unsigned short addr; struct synaptics_rmi4_fn_desc fd; struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data; for (page = 0; page < PAGES_TO_SERVICE; page++) { for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) { addr |= (page << 8); retval = synaptics_rmi4_reg_read(rmi4_data, addr, (unsigned char *)&fd, sizeof(fd)); if (retval < 0) return retval; addr &= 
~(MASK_8BIT << 8); if (fd.fn_number) { dev_dbg(rmi4_data->pdev->dev.parent, "%s: Found F%02x\n", __func__, fd.fn_number); switch (fd.fn_number) { case SYNAPTICS_RMI4_F12: goto f12_found; break; } } else { break; } intr_count += (fd.intr_src_count & MASK_3BIT); } } dev_err(rmi4_data->pdev->dev.parent, "%s: Failed to find F12\n", __func__); return -EINVAL; f12_found: prox->query_base_addr = fd.query_base_addr | (page << 8); prox->control_base_addr = fd.ctrl_base_addr | (page << 8); prox->data_base_addr = fd.data_base_addr | (page << 8); prox->command_base_addr = fd.cmd_base_addr | (page << 8); retval = prox_reg_init(); if (retval < 0) { dev_err(rmi4_data->pdev->dev.parent, "%s: Failed to initialize proximity registers\n", __func__); return retval; } prox->intr_mask = 0; intr_src = fd.intr_src_count; intr_off = intr_count % 8; for (ii = intr_off; ii < ((intr_src & MASK_3BIT) + intr_off); ii++) { prox->intr_mask |= 1 << ii; } rmi4_data->intr_mask[0] |= prox->intr_mask; addr = rmi4_data->f01_ctrl_base_addr + 1; retval = synaptics_rmi4_reg_write(rmi4_data, addr, &(rmi4_data->intr_mask[0]), sizeof(rmi4_data->intr_mask[0])); if (retval < 0) { dev_err(rmi4_data->pdev->dev.parent, "%s: Failed to set interrupt enable bit\n", __func__); return retval; } return 0; } static ssize_t synaptics_rmi4_hover_finger_en_show(struct device *dev, struct device_attribute *attr, char *buf) { if (!prox) return -ENODEV; return snprintf(buf, PAGE_SIZE, "%u\n", prox->hover_finger_en); } static ssize_t synaptics_rmi4_hover_finger_en_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int retval; unsigned int input; struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data; if (!prox) return -ENODEV; if (sscanf(buf, "%x", &input) != 1) return -EINVAL; if (input == 1) prox->hover_finger_en = true; else if (input == 0) prox->hover_finger_en = false; else return -EINVAL; retval = prox_set_hover_finger_en(); if (retval < 0) { dev_err(rmi4_data->pdev->dev.parent, 
"%s: Failed to change hovering finger enable setting\n", __func__); return retval; } return count; } int synaptics_rmi4_prox_hover_finger_en(bool enable) { int retval; if (!prox) return -ENODEV; prox->hover_finger_en = enable; retval = prox_set_hover_finger_en(); if (retval < 0) return retval; return 0; } EXPORT_SYMBOL(synaptics_rmi4_prox_hover_finger_en); static void synaptics_rmi4_prox_attn(struct synaptics_rmi4_data *rmi4_data, unsigned char intr_mask) { if (!prox) return; if (prox->intr_mask & intr_mask) prox_hover_finger_report(); return; } static int synaptics_rmi4_prox_init(struct synaptics_rmi4_data *rmi4_data) { int retval; unsigned char attr_count; prox = kzalloc(sizeof(*prox), GFP_KERNEL); if (!prox) { dev_err(rmi4_data->pdev->dev.parent, "%s: Failed to alloc mem for prox\n", __func__); retval = -ENOMEM; goto exit; } prox->finger_data = kzalloc(sizeof(*(prox->finger_data)), GFP_KERNEL); if (!prox->finger_data) { dev_err(rmi4_data->pdev->dev.parent, "%s: Failed to alloc mem for finger_data\n", __func__); retval = -ENOMEM; goto exit_free_prox; } prox->rmi4_data = rmi4_data; retval = prox_scan_pdt(); if (retval < 0) goto exit_free_finger_data; prox->hover_finger_en = true; retval = prox_set_hover_finger_en(); if (retval < 0) return retval; prox->prox_dev = input_allocate_device(); if (prox->prox_dev == NULL) { dev_err(rmi4_data->pdev->dev.parent, "%s: Failed to allocate proximity device\n", __func__); retval = -ENOMEM; goto exit_free_finger_data; } prox->prox_dev->name = PLATFORM_DRIVER_NAME; prox->prox_dev->phys = PROX_PHYS_NAME; prox->prox_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT; prox->prox_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION; prox->prox_dev->dev.parent = rmi4_data->pdev->dev.parent; input_set_drvdata(prox->prox_dev, rmi4_data); set_bit(EV_KEY, prox->prox_dev->evbit); set_bit(EV_ABS, prox->prox_dev->evbit); set_bit(BTN_TOUCH, prox->prox_dev->keybit); set_bit(BTN_TOOL_FINGER, prox->prox_dev->keybit); #ifdef INPUT_PROP_DIRECT 
set_bit(INPUT_PROP_DIRECT, prox->prox_dev->propbit); #endif prox_set_params(); retval = input_register_device(prox->prox_dev); if (retval) { dev_err(rmi4_data->pdev->dev.parent, "%s: Failed to register proximity device\n", __func__); goto exit_free_input_device; } for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) { retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj, &attrs[attr_count].attr); if (retval < 0) { dev_err(rmi4_data->pdev->dev.parent, "%s: Failed to create sysfs attributes\n", __func__); goto exit_free_sysfs; } } return 0; exit_free_sysfs: for (attr_count--; attr_count >= 0; attr_count--) { sysfs_remove_file(&rmi4_data->input_dev->dev.kobj, &attrs[attr_count].attr); } input_unregister_device(prox->prox_dev); prox->prox_dev = NULL; exit_free_input_device: if (prox->prox_dev) input_free_device(prox->prox_dev); exit_free_finger_data: kfree(prox->finger_data); exit_free_prox: kfree(prox); prox = NULL; exit: return retval; } static void synaptics_rmi4_prox_remove(struct synaptics_rmi4_data *rmi4_data) { unsigned char attr_count; if (!prox) goto exit; for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) { sysfs_remove_file(&rmi4_data->input_dev->dev.kobj, &attrs[attr_count].attr); } input_unregister_device(prox->prox_dev); kfree(prox->finger_data); kfree(prox); prox = NULL; exit: complete(&prox_remove_complete); return; } static void synaptics_rmi4_prox_reset(struct synaptics_rmi4_data *rmi4_data) { if (!prox) return; prox_hover_finger_lift(); prox_scan_pdt(); prox_set_hover_finger_en(); prox_set_params(); return; } static void synaptics_rmi4_prox_reinit(struct synaptics_rmi4_data *rmi4_data) { if (!prox) return; prox_hover_finger_lift(); prox_set_hover_finger_en(); return; } static void synaptics_rmi4_prox_e_suspend(struct synaptics_rmi4_data *rmi4_data) { if (!prox) return; prox_hover_finger_lift(); return; } static void synaptics_rmi4_prox_suspend(struct synaptics_rmi4_data *rmi4_data) { if (!prox) return; 
prox_hover_finger_lift(); return; } static struct synaptics_rmi4_exp_fn proximity_module = { .fn_type = RMI_PROXIMITY, .init = synaptics_rmi4_prox_init, .remove = synaptics_rmi4_prox_remove, .reset = synaptics_rmi4_prox_reset, .reinit = synaptics_rmi4_prox_reinit, .early_suspend = synaptics_rmi4_prox_e_suspend, .suspend = synaptics_rmi4_prox_suspend, .resume = NULL, .late_resume = NULL, .attn = synaptics_rmi4_prox_attn, }; static int __init rmi4_proximity_module_init(void) { synaptics_rmi4_dsx_new_function(&proximity_module, true); return 0; } static void __exit rmi4_proximity_module_exit(void) { synaptics_rmi4_dsx_new_function(&proximity_module, false); wait_for_completion(&prox_remove_complete); return; } module_init(rmi4_proximity_module_init); module_exit(rmi4_proximity_module_exit); MODULE_AUTHOR("Synaptics, Inc."); MODULE_DESCRIPTION("Synaptics DSX Proximity Module"); MODULE_LICENSE("GPL v2");
gpl-2.0
Huexxx/diana
drivers/staging/dream/qdsp5/adsp_lpm_verify_cmd.c
1683
2043
/* arch/arm/mach-msm/qdsp5/adsp_lpm_verify_cmd.c
 *
 * Verification code for aDSP LPM packets from userspace.
 *
 * Copyright (c) 2008 QUALCOMM Incorporated
 * Copyright (C) 2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <mach/qdsp5/qdsp5lpmcmdi.h>
#include "adsp.h"

/*
 * Validate an LPM command from userspace before it is queued to the
 * aDSP: check the queue id, the command id and size, then let
 * adsp_pmem_fixup() validate and translate the input/output buffer
 * addresses embedded in the command.
 *
 * Returns 0 if the command is acceptable, -1 otherwise.
 */
int adsp_lpm_verify_cmd(struct msm_adsp_module *module,
			unsigned int queue_id, void *cmd_data,
			size_t cmd_size)
{
	uint32_t cmd_id, col_height, input_row_incr, output_row_incr,
		input_size, output_size;
	uint32_t size_mask = 0x0fff; /* low 12 bits carry the dimension */
	lpm_cmd_start *cmd;

	if (queue_id != QDSP_lpmCommandQueue) {
		/* fix: queue_id is unsigned, print with %u (was %d) */
		printk(KERN_ERR "adsp: module %s: wrong queue id %u\n",
			module->name, queue_id);
		return -1;
	}

	cmd = (lpm_cmd_start *)cmd_data;
	cmd_id = cmd->cmd_id;

	if (cmd_id == LPM_CMD_START) {
		if (cmd_size != sizeof(lpm_cmd_start)) {
			/* fix: cmd_size and sizeof() are size_t; %d was a
			 * format mismatch, use %zu.
			 */
			printk(KERN_ERR
				"adsp: module %s: wrong size %zu, expect %zu\n",
				module->name, cmd_size,
				sizeof(lpm_cmd_start));
			return -1;
		}
		col_height = cmd->ip_data_cfg_part1 & size_mask;
		input_row_incr = cmd->ip_data_cfg_part2 & size_mask;
		output_row_incr = cmd->op_data_cfg_part1 & size_mask;
		input_size = col_height * input_row_incr;
		output_size = col_height * output_row_incr;
		/* Only fix up non-zero buffer addresses; a failed fixup
		 * rejects the whole command.
		 */
		if ((cmd->ip_data_cfg_part4 &&
		     adsp_pmem_fixup(module,
				(void **)(&cmd->ip_data_cfg_part4),
				input_size)) ||
		    (cmd->op_data_cfg_part3 &&
		     adsp_pmem_fixup(module,
				(void **)(&cmd->op_data_cfg_part3),
				output_size)))
			return -1;
	} else if (cmd_id > 1) {
		/* fix: cmd_id is uint32_t, print with %u (was %d) */
		printk(KERN_ERR "adsp: module %s: invalid cmd_id %u\n",
			module->name, cmd_id);
		return -1;
	}
	return 0;
}
gpl-2.0
m0zes/linux
arch/powerpc/kernel/rtas_flash.c
1683
21962
/* * c 2001 PPC 64 Team, IBM Corp * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * /proc/powerpc/rtas/firmware_flash interface * * This file implements a firmware_flash interface to pump a firmware * image into the kernel. At reboot time rtas_restart() will see the * firmware image and flash it as it reboots (see rtas.c). */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/reboot.h> #include <asm/delay.h> #include <asm/uaccess.h> #include <asm/rtas.h> #define MODULE_VERS "1.0" #define MODULE_NAME "rtas_flash" #define FIRMWARE_FLASH_NAME "firmware_flash" #define FIRMWARE_UPDATE_NAME "firmware_update" #define MANAGE_FLASH_NAME "manage_flash" #define VALIDATE_FLASH_NAME "validate_flash" /* General RTAS Status Codes */ #define RTAS_RC_SUCCESS 0 #define RTAS_RC_HW_ERR -1 #define RTAS_RC_BUSY -2 /* Flash image status values */ #define FLASH_AUTH -9002 /* RTAS Not Service Authority Partition */ #define FLASH_NO_OP -1099 /* No operation initiated by user */ #define FLASH_IMG_SHORT -1005 /* Flash image shorter than expected */ #define FLASH_IMG_BAD_LEN -1004 /* Bad length value in flash list block */ #define FLASH_IMG_NULL_DATA -1003 /* Bad data value in flash list block */ #define FLASH_IMG_READY 0 /* Firmware img ready for flash on reboot */ /* Manage image status values */ #define MANAGE_AUTH -9002 /* RTAS Not Service Authority Partition */ #define MANAGE_ACTIVE_ERR -9001 /* RTAS Cannot Overwrite Active Img */ #define MANAGE_NO_OP -1099 /* No operation initiated by user */ #define MANAGE_PARAM_ERR -3 /* RTAS Parameter Error */ #define MANAGE_HW_ERR -1 /* RTAS Hardware Error */ /* Validate image status values */ #define VALIDATE_AUTH -9002 /* RTAS Not Service Authority Partition */ #define VALIDATE_NO_OP 
-1099 /* No operation initiated by the user */ #define VALIDATE_INCOMPLETE -1002 /* User copied < VALIDATE_BUF_SIZE */ #define VALIDATE_READY -1001 /* Firmware image ready for validation */ #define VALIDATE_PARAM_ERR -3 /* RTAS Parameter Error */ #define VALIDATE_HW_ERR -1 /* RTAS Hardware Error */ /* ibm,validate-flash-image update result tokens */ #define VALIDATE_TMP_UPDATE 0 /* T side will be updated */ #define VALIDATE_FLASH_AUTH 1 /* Partition does not have authority */ #define VALIDATE_INVALID_IMG 2 /* Candidate image is not valid */ #define VALIDATE_CUR_UNKNOWN 3 /* Current fixpack level is unknown */ /* * Current T side will be committed to P side before being replace with new * image, and the new image is downlevel from current image */ #define VALIDATE_TMP_COMMIT_DL 4 /* * Current T side will be committed to P side before being replaced with new * image */ #define VALIDATE_TMP_COMMIT 5 /* * T side will be updated with a downlevel image */ #define VALIDATE_TMP_UPDATE_DL 6 /* * The candidate image's release date is later than the system's firmware * service entitlement date - service warranty period has expired */ #define VALIDATE_OUT_OF_WRNTY 7 /* ibm,manage-flash-image operation tokens */ #define RTAS_REJECT_TMP_IMG 0 #define RTAS_COMMIT_TMP_IMG 1 /* Array sizes */ #define VALIDATE_BUF_SIZE 4096 #define VALIDATE_MSG_LEN 256 #define RTAS_MSG_MAXLEN 64 /* Quirk - RTAS requires 4k list length and block size */ #define RTAS_BLKLIST_LENGTH 4096 #define RTAS_BLK_SIZE 4096 struct flash_block { char *data; unsigned long length; }; /* This struct is very similar but not identical to * that needed by the rtas flash update. * All we need to do for rtas is rewrite num_blocks * into a version/length and translate the pointers * to absolute. 
*/ #define FLASH_BLOCKS_PER_NODE ((RTAS_BLKLIST_LENGTH - 16) / sizeof(struct flash_block)) struct flash_block_list { unsigned long num_blocks; struct flash_block_list *next; struct flash_block blocks[FLASH_BLOCKS_PER_NODE]; }; static struct flash_block_list *rtas_firmware_flash_list; /* Use slab cache to guarantee 4k alignment */ static struct kmem_cache *flash_block_cache = NULL; #define FLASH_BLOCK_LIST_VERSION (1UL) /* * Local copy of the flash block list. * * The rtas_firmware_flash_list varable will be * set once the data is fully read. * * For convenience as we build the list we use virtual addrs, * we do not fill in the version number, and the length field * is treated as the number of entries currently in the block * (i.e. not a byte count). This is all fixed when calling * the flash routine. */ /* Status int must be first member of struct */ struct rtas_update_flash_t { int status; /* Flash update status */ struct flash_block_list *flist; /* Local copy of flash block list */ }; /* Status int must be first member of struct */ struct rtas_manage_flash_t { int status; /* Returned status */ }; /* Status int must be first member of struct */ struct rtas_validate_flash_t { int status; /* Returned status */ char *buf; /* Candidate image buffer */ unsigned int buf_size; /* Size of image buf */ unsigned int update_results; /* Update results token */ }; static struct rtas_update_flash_t rtas_update_flash_data; static struct rtas_manage_flash_t rtas_manage_flash_data; static struct rtas_validate_flash_t rtas_validate_flash_data; static DEFINE_MUTEX(rtas_update_flash_mutex); static DEFINE_MUTEX(rtas_manage_flash_mutex); static DEFINE_MUTEX(rtas_validate_flash_mutex); /* Do simple sanity checks on the flash image. */ static int flash_list_valid(struct flash_block_list *flist) { struct flash_block_list *f; int i; unsigned long block_size, image_size; /* Paranoid self test here. We also collect the image size. 
*/ image_size = 0; for (f = flist; f; f = f->next) { for (i = 0; i < f->num_blocks; i++) { if (f->blocks[i].data == NULL) { return FLASH_IMG_NULL_DATA; } block_size = f->blocks[i].length; if (block_size <= 0 || block_size > RTAS_BLK_SIZE) { return FLASH_IMG_BAD_LEN; } image_size += block_size; } } if (image_size < (256 << 10)) { if (image_size < 2) return FLASH_NO_OP; } printk(KERN_INFO "FLASH: flash image with %ld bytes stored for hardware flash on reboot\n", image_size); return FLASH_IMG_READY; } static void free_flash_list(struct flash_block_list *f) { struct flash_block_list *next; int i; while (f) { for (i = 0; i < f->num_blocks; i++) kmem_cache_free(flash_block_cache, f->blocks[i].data); next = f->next; kmem_cache_free(flash_block_cache, f); f = next; } } static int rtas_flash_release(struct inode *inode, struct file *file) { struct rtas_update_flash_t *const uf = &rtas_update_flash_data; mutex_lock(&rtas_update_flash_mutex); if (uf->flist) { /* File was opened in write mode for a new flash attempt */ /* Clear saved list */ if (rtas_firmware_flash_list) { free_flash_list(rtas_firmware_flash_list); rtas_firmware_flash_list = NULL; } if (uf->status != FLASH_AUTH) uf->status = flash_list_valid(uf->flist); if (uf->status == FLASH_IMG_READY) rtas_firmware_flash_list = uf->flist; else free_flash_list(uf->flist); uf->flist = NULL; } mutex_unlock(&rtas_update_flash_mutex); return 0; } static size_t get_flash_status_msg(int status, char *buf) { const char *msg; size_t len; switch (status) { case FLASH_AUTH: msg = "error: this partition does not have service authority\n"; break; case FLASH_NO_OP: msg = "info: no firmware image for flash\n"; break; case FLASH_IMG_SHORT: msg = "error: flash image short\n"; break; case FLASH_IMG_BAD_LEN: msg = "error: internal error bad length\n"; break; case FLASH_IMG_NULL_DATA: msg = "error: internal error null data\n"; break; case FLASH_IMG_READY: msg = "ready: firmware image ready for flash on reboot\n"; break; default: return 
sprintf(buf, "error: unexpected status value %d\n", status); } len = strlen(msg); memcpy(buf, msg, len + 1); return len; } /* Reading the proc file will show status (not the firmware contents) */ static ssize_t rtas_flash_read_msg(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct rtas_update_flash_t *const uf = &rtas_update_flash_data; char msg[RTAS_MSG_MAXLEN]; size_t len; int status; mutex_lock(&rtas_update_flash_mutex); status = uf->status; mutex_unlock(&rtas_update_flash_mutex); /* Read as text message */ len = get_flash_status_msg(status, msg); return simple_read_from_buffer(buf, count, ppos, msg, len); } static ssize_t rtas_flash_read_num(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct rtas_update_flash_t *const uf = &rtas_update_flash_data; char msg[RTAS_MSG_MAXLEN]; int status; mutex_lock(&rtas_update_flash_mutex); status = uf->status; mutex_unlock(&rtas_update_flash_mutex); /* Read as number */ sprintf(msg, "%d\n", status); return simple_read_from_buffer(buf, count, ppos, msg, strlen(msg)); } /* We could be much more efficient here. But to keep this function * simple we allocate a page to the block list no matter how small the * count is. If the system is low on memory it will be just as well * that we fail.... 
*/ static ssize_t rtas_flash_write(struct file *file, const char __user *buffer, size_t count, loff_t *off) { struct rtas_update_flash_t *const uf = &rtas_update_flash_data; char *p; int next_free, rc; struct flash_block_list *fl; mutex_lock(&rtas_update_flash_mutex); if (uf->status == FLASH_AUTH || count == 0) goto out; /* discard data */ /* In the case that the image is not ready for flashing, the memory * allocated for the block list will be freed upon the release of the * proc file */ if (uf->flist == NULL) { uf->flist = kmem_cache_zalloc(flash_block_cache, GFP_KERNEL); if (!uf->flist) goto nomem; } fl = uf->flist; while (fl->next) fl = fl->next; /* seek to last block_list for append */ next_free = fl->num_blocks; if (next_free == FLASH_BLOCKS_PER_NODE) { /* Need to allocate another block_list */ fl->next = kmem_cache_zalloc(flash_block_cache, GFP_KERNEL); if (!fl->next) goto nomem; fl = fl->next; next_free = 0; } if (count > RTAS_BLK_SIZE) count = RTAS_BLK_SIZE; p = kmem_cache_zalloc(flash_block_cache, GFP_KERNEL); if (!p) goto nomem; if(copy_from_user(p, buffer, count)) { kmem_cache_free(flash_block_cache, p); rc = -EFAULT; goto error; } fl->blocks[next_free].data = p; fl->blocks[next_free].length = count; fl->num_blocks++; out: mutex_unlock(&rtas_update_flash_mutex); return count; nomem: rc = -ENOMEM; error: mutex_unlock(&rtas_update_flash_mutex); return rc; } /* * Flash management routines. 
*/ static void manage_flash(struct rtas_manage_flash_t *args_buf, unsigned int op) { s32 rc; do { rc = rtas_call(rtas_token("ibm,manage-flash-image"), 1, 1, NULL, op); } while (rtas_busy_delay(rc)); args_buf->status = rc; } static ssize_t manage_flash_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct rtas_manage_flash_t *const args_buf = &rtas_manage_flash_data; char msg[RTAS_MSG_MAXLEN]; int msglen, status; mutex_lock(&rtas_manage_flash_mutex); status = args_buf->status; mutex_unlock(&rtas_manage_flash_mutex); msglen = sprintf(msg, "%d\n", status); return simple_read_from_buffer(buf, count, ppos, msg, msglen); } static ssize_t manage_flash_write(struct file *file, const char __user *buf, size_t count, loff_t *off) { struct rtas_manage_flash_t *const args_buf = &rtas_manage_flash_data; static const char reject_str[] = "0"; static const char commit_str[] = "1"; char stkbuf[10]; int op, rc; mutex_lock(&rtas_manage_flash_mutex); if ((args_buf->status == MANAGE_AUTH) || (count == 0)) goto out; op = -1; if (buf) { if (count > 9) count = 9; rc = -EFAULT; if (copy_from_user (stkbuf, buf, count)) goto error; if (strncmp(stkbuf, reject_str, strlen(reject_str)) == 0) op = RTAS_REJECT_TMP_IMG; else if (strncmp(stkbuf, commit_str, strlen(commit_str)) == 0) op = RTAS_COMMIT_TMP_IMG; } if (op == -1) { /* buf is empty, or contains invalid string */ rc = -EINVAL; goto error; } manage_flash(args_buf, op); out: mutex_unlock(&rtas_manage_flash_mutex); return count; error: mutex_unlock(&rtas_manage_flash_mutex); return rc; } /* * Validation routines. 
*/ static void validate_flash(struct rtas_validate_flash_t *args_buf) { int token = rtas_token("ibm,validate-flash-image"); int update_results; s32 rc; rc = 0; do { spin_lock(&rtas_data_buf_lock); memcpy(rtas_data_buf, args_buf->buf, VALIDATE_BUF_SIZE); rc = rtas_call(token, 2, 2, &update_results, (u32) __pa(rtas_data_buf), args_buf->buf_size); memcpy(args_buf->buf, rtas_data_buf, VALIDATE_BUF_SIZE); spin_unlock(&rtas_data_buf_lock); } while (rtas_busy_delay(rc)); args_buf->status = rc; args_buf->update_results = update_results; } static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf, char *msg, int msglen) { int n; if (args_buf->status >= VALIDATE_TMP_UPDATE) { n = sprintf(msg, "%d\n", args_buf->update_results); if ((args_buf->update_results >= VALIDATE_CUR_UNKNOWN) || (args_buf->update_results == VALIDATE_TMP_UPDATE)) n += snprintf(msg + n, msglen - n, "%s\n", args_buf->buf); } else { n = sprintf(msg, "%d\n", args_buf->status); } return n; } static ssize_t validate_flash_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct rtas_validate_flash_t *const args_buf = &rtas_validate_flash_data; char msg[VALIDATE_MSG_LEN]; int msglen; mutex_lock(&rtas_validate_flash_mutex); msglen = get_validate_flash_msg(args_buf, msg, VALIDATE_MSG_LEN); mutex_unlock(&rtas_validate_flash_mutex); return simple_read_from_buffer(buf, count, ppos, msg, msglen); } static ssize_t validate_flash_write(struct file *file, const char __user *buf, size_t count, loff_t *off) { struct rtas_validate_flash_t *const args_buf = &rtas_validate_flash_data; int rc; mutex_lock(&rtas_validate_flash_mutex); /* We are only interested in the first 4K of the * candidate image */ if ((*off >= VALIDATE_BUF_SIZE) || (args_buf->status == VALIDATE_AUTH)) { *off += count; mutex_unlock(&rtas_validate_flash_mutex); return count; } if (*off + count >= VALIDATE_BUF_SIZE) { count = VALIDATE_BUF_SIZE - *off; args_buf->status = VALIDATE_READY; } else { args_buf->status = 
VALIDATE_INCOMPLETE; } if (!access_ok(VERIFY_READ, buf, count)) { rc = -EFAULT; goto done; } if (copy_from_user(args_buf->buf + *off, buf, count)) { rc = -EFAULT; goto done; } *off += count; rc = count; done: mutex_unlock(&rtas_validate_flash_mutex); return rc; } static int validate_flash_release(struct inode *inode, struct file *file) { struct rtas_validate_flash_t *const args_buf = &rtas_validate_flash_data; mutex_lock(&rtas_validate_flash_mutex); if (args_buf->status == VALIDATE_READY) { args_buf->buf_size = VALIDATE_BUF_SIZE; validate_flash(args_buf); } mutex_unlock(&rtas_validate_flash_mutex); return 0; } /* * On-reboot flash update applicator. */ static void rtas_flash_firmware(int reboot_type) { unsigned long image_size; struct flash_block_list *f, *next, *flist; unsigned long rtas_block_list; int i, status, update_token; if (rtas_firmware_flash_list == NULL) return; /* nothing to do */ if (reboot_type != SYS_RESTART) { printk(KERN_ALERT "FLASH: firmware flash requires a reboot\n"); printk(KERN_ALERT "FLASH: the firmware image will NOT be flashed\n"); return; } update_token = rtas_token("ibm,update-flash-64-and-reboot"); if (update_token == RTAS_UNKNOWN_SERVICE) { printk(KERN_ALERT "FLASH: ibm,update-flash-64-and-reboot " "is not available -- not a service partition?\n"); printk(KERN_ALERT "FLASH: firmware will not be flashed\n"); return; } /* * Just before starting the firmware flash, cancel the event scan work * to avoid any soft lockup issues. */ rtas_cancel_event_scan(); /* * NOTE: the "first" block must be under 4GB, so we create * an entry with no data blocks in the reserved buffer in * the kernel data segment. 
*/ spin_lock(&rtas_data_buf_lock); flist = (struct flash_block_list *)&rtas_data_buf[0]; flist->num_blocks = 0; flist->next = rtas_firmware_flash_list; rtas_block_list = __pa(flist); if (rtas_block_list >= 4UL*1024*1024*1024) { printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n"); spin_unlock(&rtas_data_buf_lock); return; } printk(KERN_ALERT "FLASH: preparing saved firmware image for flash\n"); /* Update the block_list in place. */ rtas_firmware_flash_list = NULL; /* too hard to backout on error */ image_size = 0; for (f = flist; f; f = next) { /* Translate data addrs to absolute */ for (i = 0; i < f->num_blocks; i++) { f->blocks[i].data = (char *)cpu_to_be64(__pa(f->blocks[i].data)); image_size += f->blocks[i].length; f->blocks[i].length = cpu_to_be64(f->blocks[i].length); } next = f->next; /* Don't translate NULL pointer for last entry */ if (f->next) f->next = (struct flash_block_list *)cpu_to_be64(__pa(f->next)); else f->next = NULL; /* make num_blocks into the version/length field */ f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16); f->num_blocks = cpu_to_be64(f->num_blocks); } printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size); printk(KERN_ALERT "FLASH: performing flash and reboot\n"); rtas_progress("Flashing \n", 0x0); rtas_progress("Please Wait... ", 0x0); printk(KERN_ALERT "FLASH: this will take several minutes. Do not power off!\n"); status = rtas_call(update_token, 1, 1, NULL, rtas_block_list); switch (status) { /* should only get "bad" status */ case 0: printk(KERN_ALERT "FLASH: success\n"); break; case -1: printk(KERN_ALERT "FLASH: hardware error. Firmware may not be not flashed\n"); break; case -3: printk(KERN_ALERT "FLASH: image is corrupt or not correct for this platform. Firmware not flashed\n"); break; case -4: printk(KERN_ALERT "FLASH: flash failed when partially complete. 
System may not reboot\n"); break; default: printk(KERN_ALERT "FLASH: unknown flash return code %d\n", status); break; } spin_unlock(&rtas_data_buf_lock); } /* * Manifest of proc files to create */ struct rtas_flash_file { const char *filename; const char *rtas_call_name; int *status; const struct file_operations fops; }; static const struct rtas_flash_file rtas_flash_files[] = { { .filename = "powerpc/rtas/" FIRMWARE_FLASH_NAME, .rtas_call_name = "ibm,update-flash-64-and-reboot", .status = &rtas_update_flash_data.status, .fops.read = rtas_flash_read_msg, .fops.write = rtas_flash_write, .fops.release = rtas_flash_release, .fops.llseek = default_llseek, }, { .filename = "powerpc/rtas/" FIRMWARE_UPDATE_NAME, .rtas_call_name = "ibm,update-flash-64-and-reboot", .status = &rtas_update_flash_data.status, .fops.read = rtas_flash_read_num, .fops.write = rtas_flash_write, .fops.release = rtas_flash_release, .fops.llseek = default_llseek, }, { .filename = "powerpc/rtas/" VALIDATE_FLASH_NAME, .rtas_call_name = "ibm,validate-flash-image", .status = &rtas_validate_flash_data.status, .fops.read = validate_flash_read, .fops.write = validate_flash_write, .fops.release = validate_flash_release, .fops.llseek = default_llseek, }, { .filename = "powerpc/rtas/" MANAGE_FLASH_NAME, .rtas_call_name = "ibm,manage-flash-image", .status = &rtas_manage_flash_data.status, .fops.read = manage_flash_read, .fops.write = manage_flash_write, .fops.llseek = default_llseek, } }; static int __init rtas_flash_init(void) { int i; if (rtas_token("ibm,update-flash-64-and-reboot") == RTAS_UNKNOWN_SERVICE) { pr_info("rtas_flash: no firmware flash support\n"); return -EINVAL; } rtas_validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL); if (!rtas_validate_flash_data.buf) return -ENOMEM; flash_block_cache = kmem_cache_create("rtas_flash_cache", RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0, NULL); if (!flash_block_cache) { printk(KERN_ERR "%s: failed to create block cache\n", __func__); goto enomem_buf; } for (i 
= 0; i < ARRAY_SIZE(rtas_flash_files); i++) { const struct rtas_flash_file *f = &rtas_flash_files[i]; int token; if (!proc_create(f->filename, S_IRUSR | S_IWUSR, NULL, &f->fops)) goto enomem; /* * This code assumes that the status int is the first member of the * struct */ token = rtas_token(f->rtas_call_name); if (token == RTAS_UNKNOWN_SERVICE) *f->status = FLASH_AUTH; else *f->status = FLASH_NO_OP; } rtas_flash_term_hook = rtas_flash_firmware; return 0; enomem: while (--i >= 0) { const struct rtas_flash_file *f = &rtas_flash_files[i]; remove_proc_entry(f->filename, NULL); } kmem_cache_destroy(flash_block_cache); enomem_buf: kfree(rtas_validate_flash_data.buf); return -ENOMEM; } static void __exit rtas_flash_cleanup(void) { int i; rtas_flash_term_hook = NULL; if (rtas_firmware_flash_list) { free_flash_list(rtas_firmware_flash_list); rtas_firmware_flash_list = NULL; } for (i = 0; i < ARRAY_SIZE(rtas_flash_files); i++) { const struct rtas_flash_file *f = &rtas_flash_files[i]; remove_proc_entry(f->filename, NULL); } kmem_cache_destroy(flash_block_cache); kfree(rtas_validate_flash_data.buf); } module_init(rtas_flash_init); module_exit(rtas_flash_cleanup); MODULE_LICENSE("GPL");
gpl-2.0
PennPanda/litmus-rt
drivers/usb/serial/symbolserial.c
1939
5161
/* * Symbol USB barcode to serial driver * * Copyright (C) 2013 Johan Hovold <jhovold@gmail.com> * Copyright (C) 2009 Greg Kroah-Hartman <gregkh@suse.de> * Copyright (C) 2009 Novell Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/tty.h> #include <linux/slab.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/uaccess.h> static const struct usb_device_id id_table[] = { { USB_DEVICE(0x05e0, 0x0600) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); struct symbol_private { spinlock_t lock; /* protects the following flags */ bool throttled; bool actually_throttled; }; static void symbol_int_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; struct symbol_private *priv = usb_get_serial_port_data(port); unsigned char *data = urb->transfer_buffer; int status = urb->status; int result; int data_length; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n", __func__, status); return; default: dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n", __func__, status); goto exit; } usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data); if (urb->actual_length > 1) { data_length = urb->actual_length - 1; /* * Data from the device comes with a 1 byte header: * * <size of data>data... * This is real data to be sent to the tty layer * we pretty much just ignore the size and send everything * else to the tty layer. 
*/ tty_insert_flip_string(&port->port, &data[1], data_length); tty_flip_buffer_push(&port->port); } else { dev_dbg(&port->dev, "Improper amount of data received from the device, " "%d bytes", urb->actual_length); } exit: spin_lock(&priv->lock); /* Continue trying to always read if we should */ if (!priv->throttled) { result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC); if (result) dev_err(&port->dev, "%s - failed resubmitting read urb, error %d\n", __func__, result); } else priv->actually_throttled = true; spin_unlock(&priv->lock); } static int symbol_open(struct tty_struct *tty, struct usb_serial_port *port) { struct symbol_private *priv = usb_get_serial_data(port->serial); unsigned long flags; int result = 0; spin_lock_irqsave(&priv->lock, flags); priv->throttled = false; priv->actually_throttled = false; spin_unlock_irqrestore(&priv->lock, flags); /* Start reading from the device */ result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (result) dev_err(&port->dev, "%s - failed resubmitting read urb, error %d\n", __func__, result); return result; } static void symbol_close(struct usb_serial_port *port) { usb_kill_urb(port->interrupt_in_urb); } static void symbol_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct symbol_private *priv = usb_get_serial_data(port->serial); spin_lock_irq(&priv->lock); priv->throttled = true; spin_unlock_irq(&priv->lock); } static void symbol_unthrottle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct symbol_private *priv = usb_get_serial_data(port->serial); int result; bool was_throttled; spin_lock_irq(&priv->lock); priv->throttled = false; was_throttled = priv->actually_throttled; priv->actually_throttled = false; spin_unlock_irq(&priv->lock); if (was_throttled) { result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (result) dev_err(&port->dev, "%s - failed submitting read urb, error %d\n", __func__, result); } } static int 
symbol_startup(struct usb_serial *serial) { if (!serial->num_interrupt_in) { dev_err(&serial->dev->dev, "no interrupt-in endpoint\n"); return -ENODEV; } return 0; } static int symbol_port_probe(struct usb_serial_port *port) { struct symbol_private *priv; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; spin_lock_init(&priv->lock); usb_set_serial_port_data(port, priv); return 0; } static int symbol_port_remove(struct usb_serial_port *port) { struct symbol_private *priv = usb_get_serial_port_data(port); kfree(priv); return 0; } static struct usb_serial_driver symbol_device = { .driver = { .owner = THIS_MODULE, .name = "symbol", }, .id_table = id_table, .num_ports = 1, .attach = symbol_startup, .port_probe = symbol_port_probe, .port_remove = symbol_port_remove, .open = symbol_open, .close = symbol_close, .throttle = symbol_throttle, .unthrottle = symbol_unthrottle, .read_int_callback = symbol_int_callback, }; static struct usb_serial_driver * const serial_drivers[] = { &symbol_device, NULL }; module_usb_serial_driver(serial_drivers, id_table); MODULE_LICENSE("GPL");
gpl-2.0
lenovo-yt2-dev/android_kernel_lenovo_baytrail
arch/arm/mach-at91/at91x40.c
2195
2218
/* * arch/arm/mach-at91/at91x40.c * * (C) Copyright 2007, Greg Ungerer <gerg@snapgear.com> * Copyright (C) 2005 SAN People * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/io.h> #include <asm/proc-fns.h> #include <asm/system_misc.h> #include <asm/mach/arch.h> #include <mach/at91x40.h> #include <mach/at91_st.h> #include <mach/timex.h> #include "at91_aic.h" #include "generic.h" /* * Export the clock functions for the AT91X40. Some external code common * to all AT91 family parts relys on this, like the gpio and serial support. */ int clk_enable(struct clk *clk) { return 0; } void clk_disable(struct clk *clk) { } unsigned long clk_get_rate(struct clk *clk) { return AT91X40_MASTER_CLOCK; } static void at91x40_idle(void) { /* * Disable the processor clock. The processor will be automatically * re-enabled by an interrupt or by a reset. */ __raw_writel(AT91_PS_CR_CPU, AT91_IO_P2V(AT91_PS_CR)); cpu_do_idle(); } void __init at91x40_initialize(unsigned long main_clock) { arm_pm_idle = at91x40_idle; at91_extern_irq = (1 << AT91X40_ID_IRQ0) | (1 << AT91X40_ID_IRQ1) | (1 << AT91X40_ID_IRQ2); } /* * The default interrupt priority levels (0 = lowest, 7 = highest). 
*/ static unsigned int at91x40_default_irq_priority[NR_AIC_IRQS] __initdata = { 7, /* Advanced Interrupt Controller (FIQ) */ 0, /* System Peripherals */ 0, /* USART 0 */ 0, /* USART 1 */ 2, /* Timer Counter 0 */ 2, /* Timer Counter 1 */ 2, /* Timer Counter 2 */ 0, /* Watchdog timer */ 0, /* Parallel IO Controller A */ 0, /* Reserved */ 0, /* Reserved */ 0, /* Reserved */ 0, /* Reserved */ 0, /* Reserved */ 0, /* Reserved */ 0, /* Reserved */ 0, /* External IRQ0 */ 0, /* External IRQ1 */ 0, /* External IRQ2 */ }; void __init at91x40_init_interrupts(unsigned int priority[NR_AIC_IRQS]) { if (!priority) priority = at91x40_default_irq_priority; at91_aic_init(priority, at91_extern_irq); }
gpl-2.0
djvoleur/V_920P_BOF7
drivers/mtd/nand/xway_nand.c
3731
5376
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * Copyright © 2012 John Crispin <blogic@openwrt.org> */ #include <linux/mtd/nand.h> #include <linux/of_gpio.h> #include <linux/of_platform.h> #include <lantiq_soc.h> /* nand registers */ #define EBU_ADDSEL1 0x24 #define EBU_NAND_CON 0xB0 #define EBU_NAND_WAIT 0xB4 #define EBU_NAND_ECC0 0xB8 #define EBU_NAND_ECC_AC 0xBC /* nand commands */ #define NAND_CMD_ALE (1 << 2) #define NAND_CMD_CLE (1 << 3) #define NAND_CMD_CS (1 << 4) #define NAND_WRITE_CMD_RESET 0xff #define NAND_WRITE_CMD (NAND_CMD_CS | NAND_CMD_CLE) #define NAND_WRITE_ADDR (NAND_CMD_CS | NAND_CMD_ALE) #define NAND_WRITE_DATA (NAND_CMD_CS) #define NAND_READ_DATA (NAND_CMD_CS) #define NAND_WAIT_WR_C (1 << 3) #define NAND_WAIT_RD (0x1) /* we need to tel the ebu which addr we mapped the nand to */ #define ADDSEL1_MASK(x) (x << 4) #define ADDSEL1_REGEN 1 /* we need to tell the EBU that we have nand attached and set it up properly */ #define BUSCON1_SETUP (1 << 22) #define BUSCON1_BCGEN_RES (0x3 << 12) #define BUSCON1_WAITWRC2 (2 << 8) #define BUSCON1_WAITRDC2 (2 << 6) #define BUSCON1_HOLDC1 (1 << 4) #define BUSCON1_RECOVC1 (1 << 2) #define BUSCON1_CMULT4 1 #define NAND_CON_CE (1 << 20) #define NAND_CON_OUT_CS1 (1 << 10) #define NAND_CON_IN_CS1 (1 << 8) #define NAND_CON_PRE_P (1 << 7) #define NAND_CON_WP_P (1 << 6) #define NAND_CON_SE_P (1 << 5) #define NAND_CON_CS_P (1 << 4) #define NAND_CON_CSMUX (1 << 1) #define NAND_CON_NANDM 1 static void xway_reset_chip(struct nand_chip *chip) { unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W; unsigned long flags; nandaddr &= ~NAND_WRITE_ADDR; nandaddr |= NAND_WRITE_CMD; /* finish with a reset */ spin_lock_irqsave(&ebu_lock, flags); writeb(NAND_WRITE_CMD_RESET, (void __iomem *) nandaddr); while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0) ; 
spin_unlock_irqrestore(&ebu_lock, flags); } static void xway_select_chip(struct mtd_info *mtd, int chip) { switch (chip) { case -1: ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON); ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON); break; case 0: ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON); ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON); break; default: BUG(); } } static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct nand_chip *this = mtd->priv; unsigned long nandaddr = (unsigned long) this->IO_ADDR_W; unsigned long flags; if (ctrl & NAND_CTRL_CHANGE) { nandaddr &= ~(NAND_WRITE_CMD | NAND_WRITE_ADDR); if (ctrl & NAND_CLE) nandaddr |= NAND_WRITE_CMD; else nandaddr |= NAND_WRITE_ADDR; this->IO_ADDR_W = (void __iomem *) nandaddr; } if (cmd != NAND_CMD_NONE) { spin_lock_irqsave(&ebu_lock, flags); writeb(cmd, this->IO_ADDR_W); while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0) ; spin_unlock_irqrestore(&ebu_lock, flags); } } static int xway_dev_ready(struct mtd_info *mtd) { return ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD; } static unsigned char xway_read_byte(struct mtd_info *mtd) { struct nand_chip *this = mtd->priv; unsigned long nandaddr = (unsigned long) this->IO_ADDR_R; unsigned long flags; int ret; spin_lock_irqsave(&ebu_lock, flags); ret = ltq_r8((void __iomem *)(nandaddr + NAND_READ_DATA)); spin_unlock_irqrestore(&ebu_lock, flags); return ret; } static int xway_nand_probe(struct platform_device *pdev) { struct nand_chip *this = platform_get_drvdata(pdev); unsigned long nandaddr = (unsigned long) this->IO_ADDR_W; const __be32 *cs = of_get_property(pdev->dev.of_node, "lantiq,cs", NULL); u32 cs_flag = 0; /* load our CS from the DT. 
Either we find a valid 1 or default to 0 */ if (cs && (*cs == 1)) cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1; /* setup the EBU to run in NAND mode on our base addr */ ltq_ebu_w32(CPHYSADDR(nandaddr) | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1); ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2 | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1 | BUSCON1_CMULT4, LTQ_EBU_BUSCON1); ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P | cs_flag, EBU_NAND_CON); /* finish with a reset */ xway_reset_chip(this); return 0; } /* allow users to override the partition in DT using the cmdline */ static const char *part_probes[] = { "cmdlinepart", "ofpart", NULL }; static struct platform_nand_data xway_nand_data = { .chip = { .nr_chips = 1, .chip_delay = 30, .part_probe_types = part_probes, }, .ctrl = { .probe = xway_nand_probe, .cmd_ctrl = xway_cmd_ctrl, .dev_ready = xway_dev_ready, .select_chip = xway_select_chip, .read_byte = xway_read_byte, } }; /* * Try to find the node inside the DT. If it is available attach out * platform_nand_data */ static int __init xway_register_nand(void) { struct device_node *node; struct platform_device *pdev; node = of_find_compatible_node(NULL, NULL, "lantiq,nand-xway"); if (!node) return -ENOENT; pdev = of_find_device_by_node(node); if (!pdev) return -EINVAL; pdev->dev.platform_data = &xway_nand_data; of_node_put(node); return 0; } subsys_initcall(xway_register_nand);
gpl-2.0
mopplayer/OK6410-Kernel4.1.4-With-Ubuntu9.04
drivers/hid/usbhid/hiddev.c
3731
22710
/* * Copyright (c) 2001 Paul Stewart * Copyright (c) 2001 Vojtech Pavlik * * HID char devices, giving access to raw HID device events. * */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to Paul Stewart <stewart@wetlogic.net> */ #include <linux/poll.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> #include <linux/input.h> #include <linux/usb.h> #include <linux/hid.h> #include <linux/hiddev.h> #include <linux/compat.h> #include <linux/vmalloc.h> #include "usbhid.h" #ifdef CONFIG_USB_DYNAMIC_MINORS #define HIDDEV_MINOR_BASE 0 #define HIDDEV_MINORS 256 #else #define HIDDEV_MINOR_BASE 96 #define HIDDEV_MINORS 16 #endif #define HIDDEV_BUFFER_SIZE 2048 struct hiddev { int exist; int open; struct mutex existancelock; wait_queue_head_t wait; struct hid_device *hid; struct list_head list; spinlock_t list_lock; }; struct hiddev_list { struct hiddev_usage_ref buffer[HIDDEV_BUFFER_SIZE]; int head; int tail; unsigned flags; struct fasync_struct *fasync; struct hiddev *hiddev; struct list_head node; struct mutex thread_lock; }; /* * Find a report, given the report's type and ID. 
The ID can be specified * indirectly by REPORT_ID_FIRST (which returns the first report of the given * type) or by (REPORT_ID_NEXT | old_id), which returns the next report of the * given type which follows old_id. */ static struct hid_report * hiddev_lookup_report(struct hid_device *hid, struct hiddev_report_info *rinfo) { unsigned int flags = rinfo->report_id & ~HID_REPORT_ID_MASK; unsigned int rid = rinfo->report_id & HID_REPORT_ID_MASK; struct hid_report_enum *report_enum; struct hid_report *report; struct list_head *list; if (rinfo->report_type < HID_REPORT_TYPE_MIN || rinfo->report_type > HID_REPORT_TYPE_MAX) return NULL; report_enum = hid->report_enum + (rinfo->report_type - HID_REPORT_TYPE_MIN); switch (flags) { case 0: /* Nothing to do -- report_id is already set correctly */ break; case HID_REPORT_ID_FIRST: if (list_empty(&report_enum->report_list)) return NULL; list = report_enum->report_list.next; report = list_entry(list, struct hid_report, list); rinfo->report_id = report->id; break; case HID_REPORT_ID_NEXT: report = report_enum->report_id_hash[rid]; if (!report) return NULL; list = report->list.next; if (list == &report_enum->report_list) return NULL; report = list_entry(list, struct hid_report, list); rinfo->report_id = report->id; break; default: return NULL; } return report_enum->report_id_hash[rinfo->report_id]; } /* * Perform an exhaustive search of the report table for a usage, given its * type and usage id. 
*/ static struct hid_field * hiddev_lookup_usage(struct hid_device *hid, struct hiddev_usage_ref *uref) { int i, j; struct hid_report *report; struct hid_report_enum *report_enum; struct hid_field *field; if (uref->report_type < HID_REPORT_TYPE_MIN || uref->report_type > HID_REPORT_TYPE_MAX) return NULL; report_enum = hid->report_enum + (uref->report_type - HID_REPORT_TYPE_MIN); list_for_each_entry(report, &report_enum->report_list, list) { for (i = 0; i < report->maxfield; i++) { field = report->field[i]; for (j = 0; j < field->maxusage; j++) { if (field->usage[j].hid == uref->usage_code) { uref->report_id = report->id; uref->field_index = i; uref->usage_index = j; return field; } } } } return NULL; } static void hiddev_send_event(struct hid_device *hid, struct hiddev_usage_ref *uref) { struct hiddev *hiddev = hid->hiddev; struct hiddev_list *list; unsigned long flags; spin_lock_irqsave(&hiddev->list_lock, flags); list_for_each_entry(list, &hiddev->list, node) { if (uref->field_index != HID_FIELD_INDEX_NONE || (list->flags & HIDDEV_FLAG_REPORT) != 0) { list->buffer[list->head] = *uref; list->head = (list->head + 1) & (HIDDEV_BUFFER_SIZE - 1); kill_fasync(&list->fasync, SIGIO, POLL_IN); } } spin_unlock_irqrestore(&hiddev->list_lock, flags); wake_up_interruptible(&hiddev->wait); } /* * This is where hid.c calls into hiddev to pass an event that occurred over * the interrupt pipe */ void hiddev_hid_event(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value) { unsigned type = field->report_type; struct hiddev_usage_ref uref; uref.report_type = (type == HID_INPUT_REPORT) ? HID_REPORT_TYPE_INPUT : ((type == HID_OUTPUT_REPORT) ? HID_REPORT_TYPE_OUTPUT : ((type == HID_FEATURE_REPORT) ? 
HID_REPORT_TYPE_FEATURE : 0)); uref.report_id = field->report->id; uref.field_index = field->index; uref.usage_index = (usage - field->usage); uref.usage_code = usage->hid; uref.value = value; hiddev_send_event(hid, &uref); } EXPORT_SYMBOL_GPL(hiddev_hid_event); void hiddev_report_event(struct hid_device *hid, struct hid_report *report) { unsigned type = report->type; struct hiddev_usage_ref uref; memset(&uref, 0, sizeof(uref)); uref.report_type = (type == HID_INPUT_REPORT) ? HID_REPORT_TYPE_INPUT : ((type == HID_OUTPUT_REPORT) ? HID_REPORT_TYPE_OUTPUT : ((type == HID_FEATURE_REPORT) ? HID_REPORT_TYPE_FEATURE : 0)); uref.report_id = report->id; uref.field_index = HID_FIELD_INDEX_NONE; hiddev_send_event(hid, &uref); } /* * fasync file op */ static int hiddev_fasync(int fd, struct file *file, int on) { struct hiddev_list *list = file->private_data; return fasync_helper(fd, file, on, &list->fasync); } /* * release file op */ static int hiddev_release(struct inode * inode, struct file * file) { struct hiddev_list *list = file->private_data; unsigned long flags; spin_lock_irqsave(&list->hiddev->list_lock, flags); list_del(&list->node); spin_unlock_irqrestore(&list->hiddev->list_lock, flags); mutex_lock(&list->hiddev->existancelock); if (!--list->hiddev->open) { if (list->hiddev->exist) { usbhid_close(list->hiddev->hid); usbhid_put_power(list->hiddev->hid); } else { mutex_unlock(&list->hiddev->existancelock); kfree(list->hiddev); vfree(list); return 0; } } mutex_unlock(&list->hiddev->existancelock); vfree(list); return 0; } /* * open file op */ static int hiddev_open(struct inode *inode, struct file *file) { struct hiddev_list *list; struct usb_interface *intf; struct hid_device *hid; struct hiddev *hiddev; int res; intf = usbhid_find_interface(iminor(inode)); if (!intf) return -ENODEV; hid = usb_get_intfdata(intf); hiddev = hid->hiddev; if (!(list = vzalloc(sizeof(struct hiddev_list)))) return -ENOMEM; mutex_init(&list->thread_lock); list->hiddev = hiddev; 
file->private_data = list; /* * no need for locking because the USB major number * is shared which usbcore guards against disconnect */ if (list->hiddev->exist) { if (!list->hiddev->open++) { res = usbhid_open(hiddev->hid); if (res < 0) { res = -EIO; goto bail; } } } else { res = -ENODEV; goto bail; } spin_lock_irq(&list->hiddev->list_lock); list_add_tail(&list->node, &hiddev->list); spin_unlock_irq(&list->hiddev->list_lock); mutex_lock(&hiddev->existancelock); if (!list->hiddev->open++) if (list->hiddev->exist) { struct hid_device *hid = hiddev->hid; res = usbhid_get_power(hid); if (res < 0) { res = -EIO; goto bail_unlock; } usbhid_open(hid); } mutex_unlock(&hiddev->existancelock); return 0; bail_unlock: mutex_unlock(&hiddev->existancelock); bail: file->private_data = NULL; vfree(list); return res; } /* * "write" file op */ static ssize_t hiddev_write(struct file * file, const char __user * buffer, size_t count, loff_t *ppos) { return -EINVAL; } /* * "read" file op */ static ssize_t hiddev_read(struct file * file, char __user * buffer, size_t count, loff_t *ppos) { DEFINE_WAIT(wait); struct hiddev_list *list = file->private_data; int event_size; int retval; event_size = ((list->flags & HIDDEV_FLAG_UREF) != 0) ? 
sizeof(struct hiddev_usage_ref) : sizeof(struct hiddev_event); if (count < event_size) return 0; /* lock against other threads */ retval = mutex_lock_interruptible(&list->thread_lock); if (retval) return -ERESTARTSYS; while (retval == 0) { if (list->head == list->tail) { prepare_to_wait(&list->hiddev->wait, &wait, TASK_INTERRUPTIBLE); while (list->head == list->tail) { if (signal_pending(current)) { retval = -ERESTARTSYS; break; } if (!list->hiddev->exist) { retval = -EIO; break; } if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; break; } /* let O_NONBLOCK tasks run */ mutex_unlock(&list->thread_lock); schedule(); if (mutex_lock_interruptible(&list->thread_lock)) { finish_wait(&list->hiddev->wait, &wait); return -EINTR; } set_current_state(TASK_INTERRUPTIBLE); } finish_wait(&list->hiddev->wait, &wait); } if (retval) { mutex_unlock(&list->thread_lock); return retval; } while (list->head != list->tail && retval + event_size <= count) { if ((list->flags & HIDDEV_FLAG_UREF) == 0) { if (list->buffer[list->tail].field_index != HID_FIELD_INDEX_NONE) { struct hiddev_event event; event.hid = list->buffer[list->tail].usage_code; event.value = list->buffer[list->tail].value; if (copy_to_user(buffer + retval, &event, sizeof(struct hiddev_event))) { mutex_unlock(&list->thread_lock); return -EFAULT; } retval += sizeof(struct hiddev_event); } } else { if (list->buffer[list->tail].field_index != HID_FIELD_INDEX_NONE || (list->flags & HIDDEV_FLAG_REPORT) != 0) { if (copy_to_user(buffer + retval, list->buffer + list->tail, sizeof(struct hiddev_usage_ref))) { mutex_unlock(&list->thread_lock); return -EFAULT; } retval += sizeof(struct hiddev_usage_ref); } } list->tail = (list->tail + 1) & (HIDDEV_BUFFER_SIZE - 1); } } mutex_unlock(&list->thread_lock); return retval; } /* * "poll" file op * No kernel lock - fine */ static unsigned int hiddev_poll(struct file *file, poll_table *wait) { struct hiddev_list *list = file->private_data; poll_wait(file, &list->hiddev->wait, wait); if 
(list->head != list->tail) return POLLIN | POLLRDNORM; if (!list->hiddev->exist) return POLLERR | POLLHUP; return 0; } /* * "ioctl" file op */ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, void __user *user_arg) { struct hid_device *hid = hiddev->hid; struct hiddev_report_info rinfo; struct hiddev_usage_ref_multi *uref_multi = NULL; struct hiddev_usage_ref *uref; struct hid_report *report; struct hid_field *field; int i; uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL); if (!uref_multi) return -ENOMEM; uref = &uref_multi->uref; if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) { if (copy_from_user(uref_multi, user_arg, sizeof(*uref_multi))) goto fault; } else { if (copy_from_user(uref, user_arg, sizeof(*uref))) goto fault; } switch (cmd) { case HIDIOCGUCODE: rinfo.report_type = uref->report_type; rinfo.report_id = uref->report_id; if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL) goto inval; if (uref->field_index >= report->maxfield) goto inval; field = report->field[uref->field_index]; if (uref->usage_index >= field->maxusage) goto inval; uref->usage_code = field->usage[uref->usage_index].hid; if (copy_to_user(user_arg, uref, sizeof(*uref))) goto fault; goto goodreturn; default: if (cmd != HIDIOCGUSAGE && cmd != HIDIOCGUSAGES && uref->report_type == HID_REPORT_TYPE_INPUT) goto inval; if (uref->report_id == HID_REPORT_ID_UNKNOWN) { field = hiddev_lookup_usage(hid, uref); if (field == NULL) goto inval; } else { rinfo.report_type = uref->report_type; rinfo.report_id = uref->report_id; if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL) goto inval; if (uref->field_index >= report->maxfield) goto inval; field = report->field[uref->field_index]; if (cmd == HIDIOCGCOLLECTIONINDEX) { if (uref->usage_index >= field->maxusage) goto inval; } else if (uref->usage_index >= field->report_count) goto inval; else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) && (uref_multi->num_values > 
HID_MAX_MULTI_USAGES || uref->usage_index + uref_multi->num_values > field->report_count)) goto inval; } switch (cmd) { case HIDIOCGUSAGE: uref->value = field->value[uref->usage_index]; if (copy_to_user(user_arg, uref, sizeof(*uref))) goto fault; goto goodreturn; case HIDIOCSUSAGE: field->value[uref->usage_index] = uref->value; goto goodreturn; case HIDIOCGCOLLECTIONINDEX: i = field->usage[uref->usage_index].collection_index; kfree(uref_multi); return i; case HIDIOCGUSAGES: for (i = 0; i < uref_multi->num_values; i++) uref_multi->values[i] = field->value[uref->usage_index + i]; if (copy_to_user(user_arg, uref_multi, sizeof(*uref_multi))) goto fault; goto goodreturn; case HIDIOCSUSAGES: for (i = 0; i < uref_multi->num_values; i++) field->value[uref->usage_index + i] = uref_multi->values[i]; goto goodreturn; } goodreturn: kfree(uref_multi); return 0; fault: kfree(uref_multi); return -EFAULT; inval: kfree(uref_multi); return -EINVAL; } } static noinline int hiddev_ioctl_string(struct hiddev *hiddev, unsigned int cmd, void __user *user_arg) { struct hid_device *hid = hiddev->hid; struct usb_device *dev = hid_to_usb_dev(hid); int idx, len; char *buf; if (get_user(idx, (int __user *)user_arg)) return -EFAULT; if ((buf = kmalloc(HID_STRING_SIZE, GFP_KERNEL)) == NULL) return -ENOMEM; if ((len = usb_string(dev, idx, buf, HID_STRING_SIZE-1)) < 0) { kfree(buf); return -EINVAL; } if (copy_to_user(user_arg+sizeof(int), buf, len+1)) { kfree(buf); return -EFAULT; } kfree(buf); return len; } static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct hiddev_list *list = file->private_data; struct hiddev *hiddev = list->hiddev; struct hid_device *hid; struct hiddev_collection_info cinfo; struct hiddev_report_info rinfo; struct hiddev_field_info finfo; struct hiddev_devinfo dinfo; struct hid_report *report; struct hid_field *field; void __user *user_arg = (void __user *)arg; int i, r = -EINVAL; /* Called without BKL by compat methods so no BKL taken */ 
mutex_lock(&hiddev->existancelock); if (!hiddev->exist) { r = -ENODEV; goto ret_unlock; } hid = hiddev->hid; switch (cmd) { case HIDIOCGVERSION: r = put_user(HID_VERSION, (int __user *)arg) ? -EFAULT : 0; break; case HIDIOCAPPLICATION: if (arg >= hid->maxapplication) break; for (i = 0; i < hid->maxcollection; i++) if (hid->collection[i].type == HID_COLLECTION_APPLICATION && arg-- == 0) break; if (i < hid->maxcollection) r = hid->collection[i].usage; break; case HIDIOCGDEVINFO: { struct usb_device *dev = hid_to_usb_dev(hid); struct usbhid_device *usbhid = hid->driver_data; memset(&dinfo, 0, sizeof(dinfo)); dinfo.bustype = BUS_USB; dinfo.busnum = dev->bus->busnum; dinfo.devnum = dev->devnum; dinfo.ifnum = usbhid->ifnum; dinfo.vendor = le16_to_cpu(dev->descriptor.idVendor); dinfo.product = le16_to_cpu(dev->descriptor.idProduct); dinfo.version = le16_to_cpu(dev->descriptor.bcdDevice); dinfo.num_applications = hid->maxapplication; r = copy_to_user(user_arg, &dinfo, sizeof(dinfo)) ? -EFAULT : 0; break; } case HIDIOCGFLAG: r = put_user(list->flags, (int __user *)arg) ? 
-EFAULT : 0; break; case HIDIOCSFLAG: { int newflags; if (get_user(newflags, (int __user *)arg)) { r = -EFAULT; break; } if ((newflags & ~HIDDEV_FLAGS) != 0 || ((newflags & HIDDEV_FLAG_REPORT) != 0 && (newflags & HIDDEV_FLAG_UREF) == 0)) break; list->flags = newflags; r = 0; break; } case HIDIOCGSTRING: r = hiddev_ioctl_string(hiddev, cmd, user_arg); break; case HIDIOCINITREPORT: usbhid_init_reports(hid); r = 0; break; case HIDIOCGREPORT: if (copy_from_user(&rinfo, user_arg, sizeof(rinfo))) { r = -EFAULT; break; } if (rinfo.report_type == HID_REPORT_TYPE_OUTPUT) break; report = hiddev_lookup_report(hid, &rinfo); if (report == NULL) break; hid_hw_request(hid, report, HID_REQ_GET_REPORT); hid_hw_wait(hid); r = 0; break; case HIDIOCSREPORT: if (copy_from_user(&rinfo, user_arg, sizeof(rinfo))) { r = -EFAULT; break; } if (rinfo.report_type == HID_REPORT_TYPE_INPUT) break; report = hiddev_lookup_report(hid, &rinfo); if (report == NULL) break; hid_hw_request(hid, report, HID_REQ_SET_REPORT); hid_hw_wait(hid); r = 0; break; case HIDIOCGREPORTINFO: if (copy_from_user(&rinfo, user_arg, sizeof(rinfo))) { r = -EFAULT; break; } report = hiddev_lookup_report(hid, &rinfo); if (report == NULL) break; rinfo.num_fields = report->maxfield; r = copy_to_user(user_arg, &rinfo, sizeof(rinfo)) ? 
-EFAULT : 0; break; case HIDIOCGFIELDINFO: if (copy_from_user(&finfo, user_arg, sizeof(finfo))) { r = -EFAULT; break; } rinfo.report_type = finfo.report_type; rinfo.report_id = finfo.report_id; report = hiddev_lookup_report(hid, &rinfo); if (report == NULL) break; if (finfo.field_index >= report->maxfield) break; field = report->field[finfo.field_index]; memset(&finfo, 0, sizeof(finfo)); finfo.report_type = rinfo.report_type; finfo.report_id = rinfo.report_id; finfo.field_index = field->report_count - 1; finfo.maxusage = field->maxusage; finfo.flags = field->flags; finfo.physical = field->physical; finfo.logical = field->logical; finfo.application = field->application; finfo.logical_minimum = field->logical_minimum; finfo.logical_maximum = field->logical_maximum; finfo.physical_minimum = field->physical_minimum; finfo.physical_maximum = field->physical_maximum; finfo.unit_exponent = field->unit_exponent; finfo.unit = field->unit; r = copy_to_user(user_arg, &finfo, sizeof(finfo)) ? -EFAULT : 0; break; case HIDIOCGUCODE: /* fall through */ case HIDIOCGUSAGE: case HIDIOCSUSAGE: case HIDIOCGUSAGES: case HIDIOCSUSAGES: case HIDIOCGCOLLECTIONINDEX: r = hiddev_ioctl_usage(hiddev, cmd, user_arg); break; case HIDIOCGCOLLECTIONINFO: if (copy_from_user(&cinfo, user_arg, sizeof(cinfo))) { r = -EFAULT; break; } if (cinfo.index >= hid->maxcollection) break; cinfo.type = hid->collection[cinfo.index].type; cinfo.usage = hid->collection[cinfo.index].usage; cinfo.level = hid->collection[cinfo.index].level; r = copy_to_user(user_arg, &cinfo, sizeof(cinfo)) ? -EFAULT : 0; break; default: if (_IOC_TYPE(cmd) != 'H' || _IOC_DIR(cmd) != _IOC_READ) break; if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGNAME(0))) { int len = strlen(hid->name) + 1; if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd); r = copy_to_user(user_arg, hid->name, len) ? 
-EFAULT : len; break; } if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGPHYS(0))) { int len = strlen(hid->phys) + 1; if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd); r = copy_to_user(user_arg, hid->phys, len) ? -EFAULT : len; break; } } ret_unlock: mutex_unlock(&hiddev->existancelock); return r; } #ifdef CONFIG_COMPAT static long hiddev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return hiddev_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); } #endif static const struct file_operations hiddev_fops = { .owner = THIS_MODULE, .read = hiddev_read, .write = hiddev_write, .poll = hiddev_poll, .open = hiddev_open, .release = hiddev_release, .unlocked_ioctl = hiddev_ioctl, .fasync = hiddev_fasync, #ifdef CONFIG_COMPAT .compat_ioctl = hiddev_compat_ioctl, #endif .llseek = noop_llseek, }; static char *hiddev_devnode(struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev)); } static struct usb_class_driver hiddev_class = { .name = "hiddev%d", .devnode = hiddev_devnode, .fops = &hiddev_fops, .minor_base = HIDDEV_MINOR_BASE, }; /* * This is where hid.c calls us to connect a hid device to the hiddev driver */ int hiddev_connect(struct hid_device *hid, unsigned int force) { struct hiddev *hiddev; struct usbhid_device *usbhid = hid->driver_data; int retval; if (!force) { unsigned int i; for (i = 0; i < hid->maxcollection; i++) if (hid->collection[i].type == HID_COLLECTION_APPLICATION && !IS_INPUT_APPLICATION(hid->collection[i].usage)) break; if (i == hid->maxcollection) return -1; } if (!(hiddev = kzalloc(sizeof(struct hiddev), GFP_KERNEL))) return -1; init_waitqueue_head(&hiddev->wait); INIT_LIST_HEAD(&hiddev->list); spin_lock_init(&hiddev->list_lock); mutex_init(&hiddev->existancelock); hid->hiddev = hiddev; hiddev->hid = hid; hiddev->exist = 1; retval = usb_register_dev(usbhid->intf, &hiddev_class); if (retval) { hid_err(hid, "Not able to get a minor for this device\n"); hid->hiddev = NULL; kfree(hiddev); return -1; } return 0; } 
/* * This is where hid.c calls us to disconnect a hiddev device from the * corresponding hid device (usually because the usb device has disconnected) */ static struct usb_class_driver hiddev_class; void hiddev_disconnect(struct hid_device *hid) { struct hiddev *hiddev = hid->hiddev; struct usbhid_device *usbhid = hid->driver_data; usb_deregister_dev(usbhid->intf, &hiddev_class); mutex_lock(&hiddev->existancelock); hiddev->exist = 0; if (hiddev->open) { mutex_unlock(&hiddev->existancelock); usbhid_close(hiddev->hid); wake_up_interruptible(&hiddev->wait); } else { mutex_unlock(&hiddev->existancelock); kfree(hiddev); } }
gpl-2.0
Vitronic-GmbH/HwdZynqLinux
drivers/media/usb/gspca/m5602/m5602_core.c
4243
10662
/* * USB Driver for ALi m5602 based webcams * * Copyright (C) 2008 Erik Andrén * Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project. * Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br> * * Portions of code to USB interface and ALi driver software, * Copyright (c) 2006 Willem Duinker * v4l2 interface modeled after the V4L2 driver * for SN9C10x PC Camera Controllers * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "m5602_ov9650.h" #include "m5602_ov7660.h" #include "m5602_mt9m111.h" #include "m5602_po1030.h" #include "m5602_s5k83a.h" #include "m5602_s5k4aa.h" /* Kernel module parameters */ int force_sensor; static bool dump_bridge; bool dump_sensor; static const struct usb_device_id m5602_table[] = { {USB_DEVICE(0x0402, 0x5602)}, {} }; MODULE_DEVICE_TABLE(usb, m5602_table); /* Reads a byte from the m5602 */ int m5602_read_bridge(struct sd *sd, const u8 address, u8 *i2c_data) { int err; struct gspca_dev *gspca_dev = (struct gspca_dev *) sd; struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; err = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x04, 0xc0, 0x14, 0x8100 + address, buf, 1, M5602_URB_MSG_TIMEOUT); *i2c_data = buf[0]; PDEBUG(D_CONF, "Reading bridge register 0x%x containing 0x%x", address, *i2c_data); /* usb_control_msg(...) returns the number of bytes sent upon success, mask that and return zero instead*/ return (err < 0) ? 
err : 0; } /* Writes a byte to the m5602 */ int m5602_write_bridge(struct sd *sd, const u8 address, const u8 i2c_data) { int err; struct gspca_dev *gspca_dev = (struct gspca_dev *) sd; struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; PDEBUG(D_CONF, "Writing bridge register 0x%x with 0x%x", address, i2c_data); memcpy(buf, bridge_urb_skeleton, sizeof(bridge_urb_skeleton)); buf[1] = address; buf[3] = i2c_data; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x04, 0x40, 0x19, 0x0000, buf, 4, M5602_URB_MSG_TIMEOUT); /* usb_control_msg(...) returns the number of bytes sent upon success, mask that and return zero instead */ return (err < 0) ? err : 0; } static int m5602_wait_for_i2c(struct sd *sd) { int err; u8 data; do { err = m5602_read_bridge(sd, M5602_XB_I2C_STATUS, &data); } while ((data & I2C_BUSY) && !err); return err; } int m5602_read_sensor(struct sd *sd, const u8 address, u8 *i2c_data, const u8 len) { int err, i; struct gspca_dev *gspca_dev = (struct gspca_dev *) sd; if (!len || len > sd->sensor->i2c_regW) return -EINVAL; err = m5602_wait_for_i2c(sd); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_I2C_DEV_ADDR, sd->sensor->i2c_slave_id); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_I2C_REG_ADDR, address); if (err < 0) return err; /* Sensors with registers that are of only one byte width are differently read */ /* FIXME: This works with the ov9650, but has issues with the po1030 */ if (sd->sensor->i2c_regW == 1) { err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 1); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 0x08); } else { err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 0x18 + len); } for (i = 0; (i < len) && !err; i++) { err = m5602_wait_for_i2c(sd); if (err < 0) return err; err = m5602_read_bridge(sd, M5602_XB_I2C_DATA, &(i2c_data[i])); PDEBUG(D_CONF, "Reading sensor register " "0x%x containing 0x%x ", address, *i2c_data); } return err; } int 
m5602_write_sensor(struct sd *sd, const u8 address, u8 *i2c_data, const u8 len) { int err, i; u8 *p; struct gspca_dev *gspca_dev = (struct gspca_dev *) sd; struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; /* No sensor with a data width larger than 16 bits has yet been seen */ if (len > sd->sensor->i2c_regW || !len) return -EINVAL; memcpy(buf, sensor_urb_skeleton, sizeof(sensor_urb_skeleton)); buf[11] = sd->sensor->i2c_slave_id; buf[15] = address; /* Special case larger sensor writes */ p = buf + 16; /* Copy a four byte write sequence for each byte to be written to */ for (i = 0; i < len; i++) { memcpy(p, sensor_urb_skeleton + 16, 4); p[3] = i2c_data[i]; p += 4; PDEBUG(D_CONF, "Writing sensor register 0x%x with 0x%x", address, i2c_data[i]); } /* Copy the tailer */ memcpy(p, sensor_urb_skeleton + 20, 4); /* Set the total length */ p[3] = 0x10 + len; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x04, 0x40, 0x19, 0x0000, buf, 20 + len * 4, M5602_URB_MSG_TIMEOUT); return (err < 0) ? 
err : 0; } /* Dump all the registers of the m5602 bridge, unfortunately this breaks the camera until it's power cycled */ static void m5602_dump_bridge(struct sd *sd) { int i; for (i = 0; i < 0x80; i++) { unsigned char val = 0; m5602_read_bridge(sd, i, &val); pr_info("ALi m5602 address 0x%x contains 0x%x\n", i, val); } pr_info("Warning: The ALi m5602 webcam probably won't work until it's power cycled\n"); } static int m5602_probe_sensor(struct sd *sd) { /* Try the po1030 */ sd->sensor = &po1030; if (!sd->sensor->probe(sd)) return 0; /* Try the mt9m111 sensor */ sd->sensor = &mt9m111; if (!sd->sensor->probe(sd)) return 0; /* Try the s5k4aa */ sd->sensor = &s5k4aa; if (!sd->sensor->probe(sd)) return 0; /* Try the ov9650 */ sd->sensor = &ov9650; if (!sd->sensor->probe(sd)) return 0; /* Try the ov7660 */ sd->sensor = &ov7660; if (!sd->sensor->probe(sd)) return 0; /* Try the s5k83a */ sd->sensor = &s5k83a; if (!sd->sensor->probe(sd)) return 0; /* More sensor probe function goes here */ pr_info("Failed to find a sensor\n"); sd->sensor = NULL; return -ENODEV; } static int m5602_configure(struct gspca_dev *gspca_dev, const struct usb_device_id *id); static int m5602_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int err; PDEBUG(D_CONF, "Initializing ALi m5602 webcam"); /* Run the init sequence */ err = sd->sensor->init(sd); return err; } static int m5602_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (!sd->sensor->init_controls) return 0; return sd->sensor->init_controls(sd); } static int m5602_start_transfer(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; __u8 *buf = sd->gspca_dev.usb_buf; int err; /* Send start command to the camera */ const u8 buffer[4] = {0x13, 0xf9, 0x0f, 0x01}; if (sd->sensor->start) sd->sensor->start(sd); memcpy(buf, buffer, sizeof(buffer)); err = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0x04, 0x40, 0x19, 0x0000, buf, 
sizeof(buffer), M5602_URB_MSG_TIMEOUT); PDEBUG(D_STREAM, "Transfer started"); return (err < 0) ? err : 0; } static void m5602_urb_complete(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *) gspca_dev; if (len < 6) { PDEBUG(D_PACK, "Packet is less than 6 bytes"); return; } /* Frame delimiter: ff xx xx xx ff ff */ if (data[0] == 0xff && data[4] == 0xff && data[5] == 0xff && data[2] != sd->frame_id) { PDEBUG(D_FRAM, "Frame delimiter detected"); sd->frame_id = data[2]; /* Remove the extra fluff appended on each header */ data += 6; len -= 6; /* Complete the last frame (if any) */ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); sd->frame_count++; /* Create a new frame */ gspca_frame_add(gspca_dev, FIRST_PACKET, data, len); PDEBUG(D_FRAM, "Starting new frame %d", sd->frame_count); } else { int cur_frame_len; cur_frame_len = gspca_dev->image_len; /* Remove urb header */ data += 4; len -= 4; if (cur_frame_len + len <= gspca_dev->frsz) { PDEBUG(D_FRAM, "Continuing frame %d copying %d bytes", sd->frame_count, len); gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } else { /* Add the remaining data up to frame size */ gspca_frame_add(gspca_dev, INTER_PACKET, data, gspca_dev->frsz - cur_frame_len); } } } static void m5602_stop_transfer(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; /* Run the sensor specific end transfer sequence */ if (sd->sensor->stop) sd->sensor->stop(sd); } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = m5602_configure, .init = m5602_init, .init_controls = m5602_init_controls, .start = m5602_start_transfer, .stopN = m5602_stop_transfer, .pkt_scan = m5602_urb_complete }; /* this function is called at probe time */ static int m5602_configure(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; int err; cam = &gspca_dev->cam; if (dump_bridge) m5602_dump_bridge(sd); /* Probe sensor 
*/ err = m5602_probe_sensor(sd); if (err) goto fail; return 0; fail: PERR("ALi m5602 webcam failed"); cam->cam_mode = NULL; cam->nmodes = 0; return err; } static int m5602_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static void m5602_disconnect(struct usb_interface *intf) { struct gspca_dev *gspca_dev = usb_get_intfdata(intf); struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor->disconnect) sd->sensor->disconnect(sd); gspca_disconnect(intf); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = m5602_table, .probe = m5602_probe, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif .disconnect = m5602_disconnect }; module_usb_driver(sd_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(force_sensor, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(force_sensor, "forces detection of a sensor, " "1 = OV9650, 2 = S5K83A, 3 = S5K4AA, " "4 = MT9M111, 5 = PO1030, 6 = OV7660"); module_param(dump_bridge, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dump_bridge, "Dumps all usb bridge registers at startup"); module_param(dump_sensor, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dump_sensor, "Dumps all usb sensor registers " "at startup providing a sensor is found");
gpl-2.0
Nyks45/Veno-M
arch/mn10300/mm/dma-alloc.c
4243
2052
/* MN10300 Dynamic DMA mapping support * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * Derived from: arch/i386/kernel/pci-dma.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/types.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/pci.h> #include <linux/gfp.h> #include <linux/export.h> #include <asm/io.h> static unsigned long pci_sram_allocated = 0xbc000000; void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, int gfp) { unsigned long addr; void *ret; pr_debug("dma_alloc_coherent(%s,%zu,%x)\n", dev ? dev_name(dev) : "?", size, gfp); if (0xbe000000 - pci_sram_allocated >= size) { size = (size + 255) & ~255; addr = pci_sram_allocated; pci_sram_allocated += size; ret = (void *) addr; goto done; } /* ignore region specifiers */ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); if (dev == NULL || dev->coherent_dma_mask < 0xffffffff) gfp |= GFP_DMA; addr = __get_free_pages(gfp, get_order(size)); if (!addr) return NULL; /* map the coherent memory through the uncached memory window */ ret = (void *) (addr | 0x20000000); /* fill the memory with obvious rubbish */ memset((void *) addr, 0xfb, size); /* write back and evict all cache lines covering this region */ mn10300_dcache_flush_inv_range2(virt_to_phys((void *) addr), PAGE_SIZE); done: *dma_handle = virt_to_bus((void *) addr); printk("dma_alloc_coherent() = %p [%x]\n", ret, *dma_handle); return ret; } EXPORT_SYMBOL(dma_alloc_coherent); void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) { unsigned long addr = (unsigned long) vaddr & ~0x20000000; if (addr >= 0x9c000000) return; free_pages(addr, get_order(size)); } EXPORT_SYMBOL(dma_free_coherent);
gpl-2.0
GustavoRD78/78Kernel-ZL-233
fs/sysfs/mount.c
4499
4561
/* * fs/sysfs/symlink.c - operations for initializing and mounting sysfs * * Copyright (c) 2001-3 Patrick Mochel * Copyright (c) 2007 SUSE Linux Products GmbH * Copyright (c) 2007 Tejun Heo <teheo@suse.de> * * This file is released under the GPLv2. * * Please see Documentation/filesystems/sysfs.txt for more information. */ #define DEBUG #include <linux/fs.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/module.h> #include <linux/magic.h> #include <linux/slab.h> #include "sysfs.h" static struct vfsmount *sysfs_mnt; struct kmem_cache *sysfs_dir_cachep; static const struct super_operations sysfs_ops = { .statfs = simple_statfs, .drop_inode = generic_delete_inode, .evict_inode = sysfs_evict_inode, }; struct sysfs_dirent sysfs_root = { .s_name = "", .s_count = ATOMIC_INIT(1), .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT), .s_mode = S_IFDIR | S_IRUGO | S_IXUGO, .s_ino = 1, }; static int sysfs_fill_super(struct super_block *sb, void *data, int silent) { struct inode *inode; struct dentry *root; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = SYSFS_MAGIC; sb->s_op = &sysfs_ops; sb->s_time_gran = 1; /* get root inode, initialize and unlock it */ mutex_lock(&sysfs_mutex); inode = sysfs_get_inode(sb, &sysfs_root); mutex_unlock(&sysfs_mutex); if (!inode) { pr_debug("sysfs: could not get root inode\n"); return -ENOMEM; } /* instantiate and link root dentry */ root = d_make_root(inode); if (!root) { pr_debug("%s: could not get root dentry!\n",__func__); return -ENOMEM; } root->d_fsdata = &sysfs_root; sb->s_root = root; return 0; } static int sysfs_test_super(struct super_block *sb, void *data) { struct sysfs_super_info *sb_info = sysfs_info(sb); struct sysfs_super_info *info = data; enum kobj_ns_type type; int found = 1; for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++) { if (sb_info->ns[type] != info->ns[type]) found = 0; } return found; } static int 
sysfs_set_super(struct super_block *sb, void *data) { int error; error = set_anon_super(sb, data); if (!error) sb->s_fs_info = data; return error; } static void free_sysfs_super_info(struct sysfs_super_info *info) { int type; for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++) kobj_ns_drop(type, info->ns[type]); kfree(info); } static struct dentry *sysfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { struct sysfs_super_info *info; enum kobj_ns_type type; struct super_block *sb; int error; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return ERR_PTR(-ENOMEM); for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++) info->ns[type] = kobj_ns_grab_current(type); sb = sget(fs_type, sysfs_test_super, sysfs_set_super, info); if (IS_ERR(sb) || sb->s_fs_info != info) free_sysfs_super_info(info); if (IS_ERR(sb)) return ERR_CAST(sb); if (!sb->s_root) { sb->s_flags = flags; error = sysfs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0); if (error) { deactivate_locked_super(sb); return ERR_PTR(error); } sb->s_flags |= MS_ACTIVE; } return dget(sb->s_root); } static void sysfs_kill_sb(struct super_block *sb) { struct sysfs_super_info *info = sysfs_info(sb); /* Remove the superblock from fs_supers/s_instances * so we can't find it, before freeing sysfs_super_info. 
*/ kill_anon_super(sb); free_sysfs_super_info(info); } static struct file_system_type sysfs_fs_type = { .name = "sysfs", .mount = sysfs_mount, .kill_sb = sysfs_kill_sb, }; int __init sysfs_init(void) { int err = -ENOMEM; sysfs_dir_cachep = kmem_cache_create("sysfs_dir_cache", sizeof(struct sysfs_dirent), 0, 0, NULL); if (!sysfs_dir_cachep) goto out; err = sysfs_inode_init(); if (err) goto out_err; err = register_filesystem(&sysfs_fs_type); if (!err) { sysfs_mnt = kern_mount(&sysfs_fs_type); if (IS_ERR(sysfs_mnt)) { printk(KERN_ERR "sysfs: could not mount!\n"); err = PTR_ERR(sysfs_mnt); sysfs_mnt = NULL; unregister_filesystem(&sysfs_fs_type); goto out_err; } } else goto out_err; out: return err; out_err: kmem_cache_destroy(sysfs_dir_cachep); sysfs_dir_cachep = NULL; goto out; } #undef sysfs_get struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd) { return __sysfs_get(sd); } EXPORT_SYMBOL_GPL(sysfs_get); #undef sysfs_put void sysfs_put(struct sysfs_dirent *sd) { __sysfs_put(sd); } EXPORT_SYMBOL_GPL(sysfs_put);
gpl-2.0
dummie999/android_kernel_htc_z4u
drivers/net/wireless/orinoco/wext.c
5267
34859
/* Wireless extensions support. * * See copyright notice in main.c */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/if_arp.h> #include <linux/wireless.h> #include <linux/ieee80211.h> #include <net/iw_handler.h> #include <net/cfg80211.h> #include <net/cfg80211-wext.h> #include "hermes.h" #include "hermes_rid.h" #include "orinoco.h" #include "hw.h" #include "mic.h" #include "scan.h" #include "main.h" #include "wext.h" #define MAX_RID_LEN 1024 /* Helper routine to record keys * It is called under orinoco_lock so it may not sleep */ static int orinoco_set_key(struct orinoco_private *priv, int index, enum orinoco_alg alg, const u8 *key, int key_len, const u8 *seq, int seq_len) { kzfree(priv->keys[index].key); kzfree(priv->keys[index].seq); if (key_len) { priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC); if (!priv->keys[index].key) goto nomem; } else priv->keys[index].key = NULL; if (seq_len) { priv->keys[index].seq = kzalloc(seq_len, GFP_ATOMIC); if (!priv->keys[index].seq) goto free_key; } else priv->keys[index].seq = NULL; priv->keys[index].key_len = key_len; priv->keys[index].seq_len = seq_len; if (key_len) memcpy(priv->keys[index].key, key, key_len); if (seq_len) memcpy(priv->keys[index].seq, seq, seq_len); switch (alg) { case ORINOCO_ALG_TKIP: priv->keys[index].cipher = WLAN_CIPHER_SUITE_TKIP; break; case ORINOCO_ALG_WEP: priv->keys[index].cipher = (key_len > SMALL_KEY_SIZE) ? 
WLAN_CIPHER_SUITE_WEP104 : WLAN_CIPHER_SUITE_WEP40; break; case ORINOCO_ALG_NONE: default: priv->keys[index].cipher = 0; break; } return 0; free_key: kfree(priv->keys[index].key); priv->keys[index].key = NULL; nomem: priv->keys[index].key_len = 0; priv->keys[index].seq_len = 0; priv->keys[index].cipher = 0; return -ENOMEM; } static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev) { struct orinoco_private *priv = ndev_priv(dev); struct hermes *hw = &priv->hw; struct iw_statistics *wstats = &priv->wstats; int err; unsigned long flags; if (!netif_device_present(dev)) { printk(KERN_WARNING "%s: get_wireless_stats() called while device not present\n", dev->name); return NULL; /* FIXME: Can we do better than this? */ } /* If busy, return the old stats. Returning NULL may cause * the interface to disappear from /proc/net/wireless */ if (orinoco_lock(priv, &flags) != 0) return wstats; /* We can't really wait for the tallies inquiry command to * complete, so we just use the previous results and trigger * a new tallies inquiry command for next time - Jean II */ /* FIXME: Really we should wait for the inquiry to come back - * as it is the stats we give don't make a whole lot of sense. * Unfortunately, it's not clear how to do that within the * wireless extensions framework: I think we're in user * context, but a lock seems to be held by the time we get in * here so we're not safe to sleep here. 
*/ hermes_inquire(hw, HERMES_INQ_TALLIES); if (priv->iw_mode == NL80211_IFTYPE_ADHOC) { memset(&wstats->qual, 0, sizeof(wstats->qual)); /* If a spy address is defined, we report stats of the * first spy address - Jean II */ if (SPY_NUMBER(priv)) { wstats->qual.qual = priv->spy_data.spy_stat[0].qual; wstats->qual.level = priv->spy_data.spy_stat[0].level; wstats->qual.noise = priv->spy_data.spy_stat[0].noise; wstats->qual.updated = priv->spy_data.spy_stat[0].updated; } } else { struct { __le16 qual, signal, noise, unused; } __packed cq; err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_COMMSQUALITY, &cq); if (!err) { wstats->qual.qual = (int)le16_to_cpu(cq.qual); wstats->qual.level = (int)le16_to_cpu(cq.signal) - 0x95; wstats->qual.noise = (int)le16_to_cpu(cq.noise) - 0x95; wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; } } orinoco_unlock(priv, &flags); return wstats; } /********************************************************************/ /* Wireless extensions */ /********************************************************************/ static int orinoco_ioctl_setwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) { struct orinoco_private *priv = ndev_priv(dev); int err = -EINPROGRESS; /* Call commit handler */ unsigned long flags; static const u8 off_addr[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 any_addr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; /* Enable automatic roaming - no sanity checks are needed */ if (memcmp(&ap_addr->sa_data, off_addr, ETH_ALEN) == 0 || memcmp(&ap_addr->sa_data, any_addr, ETH_ALEN) == 0) { priv->bssid_fixed = 0; memset(priv->desired_bssid, 0, ETH_ALEN); /* "off" means keep existing connection */ if (ap_addr->sa_data[0] == 0) { __orinoco_hw_set_wap(priv); err = 0; } goto out; } if (priv->firmware_type == FIRMWARE_TYPE_AGERE) { printk(KERN_WARNING "%s: Lucent/Agere firmware doesn't " "support manual roaming\n", 
dev->name); err = -EOPNOTSUPP; goto out; } if (priv->iw_mode != NL80211_IFTYPE_STATION) { printk(KERN_WARNING "%s: Manual roaming supported only in " "managed mode\n", dev->name); err = -EOPNOTSUPP; goto out; } /* Intersil firmware hangs without Desired ESSID */ if (priv->firmware_type == FIRMWARE_TYPE_INTERSIL && strlen(priv->desired_essid) == 0) { printk(KERN_WARNING "%s: Desired ESSID must be set for " "manual roaming\n", dev->name); err = -EOPNOTSUPP; goto out; } /* Finally, enable manual roaming */ priv->bssid_fixed = 1; memcpy(priv->desired_bssid, &ap_addr->sa_data, ETH_ALEN); out: orinoco_unlock(priv, &flags); return err; } static int orinoco_ioctl_getwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) { struct orinoco_private *priv = ndev_priv(dev); int err = 0; unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; ap_addr->sa_family = ARPHRD_ETHER; err = orinoco_hw_get_current_bssid(priv, ap_addr->sa_data); orinoco_unlock(priv, &flags); return err; } static int orinoco_ioctl_setiwencode(struct net_device *dev, struct iw_request_info *info, struct iw_point *erq, char *keybuf) { struct orinoco_private *priv = ndev_priv(dev); int index = (erq->flags & IW_ENCODE_INDEX) - 1; int setindex = priv->tx_key; enum orinoco_alg encode_alg = priv->encode_alg; int restricted = priv->wep_restrict; int err = -EINPROGRESS; /* Call commit handler */ unsigned long flags; if (!priv->has_wep) return -EOPNOTSUPP; if (erq->pointer) { /* We actually have a key to set - check its length */ if (erq->length > LARGE_KEY_SIZE) return -E2BIG; if ((erq->length > SMALL_KEY_SIZE) && !priv->has_big_wep) return -E2BIG; } if (orinoco_lock(priv, &flags) != 0) return -EBUSY; /* Clear any TKIP key we have */ if ((priv->has_wpa) && (priv->encode_alg == ORINOCO_ALG_TKIP)) (void) orinoco_clear_tkip_key(priv, setindex); if (erq->length > 0) { if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) index = priv->tx_key; /* Switch on WEP if off 
*/ if (encode_alg != ORINOCO_ALG_WEP) { setindex = index; encode_alg = ORINOCO_ALG_WEP; } } else { /* Important note : if the user do "iwconfig eth0 enc off", * we will arrive there with an index of -1. This is valid * but need to be taken care off... Jean II */ if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) { if ((index != -1) || (erq->flags == 0)) { err = -EINVAL; goto out; } } else { /* Set the index : Check that the key is valid */ if (priv->keys[index].key_len == 0) { err = -EINVAL; goto out; } setindex = index; } } if (erq->flags & IW_ENCODE_DISABLED) encode_alg = ORINOCO_ALG_NONE; if (erq->flags & IW_ENCODE_OPEN) restricted = 0; if (erq->flags & IW_ENCODE_RESTRICTED) restricted = 1; if (erq->pointer && erq->length > 0) { err = orinoco_set_key(priv, index, ORINOCO_ALG_WEP, keybuf, erq->length, NULL, 0); } priv->tx_key = setindex; /* Try fast key change if connected and only keys are changed */ if ((priv->encode_alg == encode_alg) && (priv->wep_restrict == restricted) && netif_carrier_ok(dev)) { err = __orinoco_hw_setup_wepkeys(priv); /* No need to commit if successful */ goto out; } priv->encode_alg = encode_alg; priv->wep_restrict = restricted; out: orinoco_unlock(priv, &flags); return err; } static int orinoco_ioctl_getiwencode(struct net_device *dev, struct iw_request_info *info, struct iw_point *erq, char *keybuf) { struct orinoco_private *priv = ndev_priv(dev); int index = (erq->flags & IW_ENCODE_INDEX) - 1; unsigned long flags; if (!priv->has_wep) return -EOPNOTSUPP; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) index = priv->tx_key; erq->flags = 0; if (!priv->encode_alg) erq->flags |= IW_ENCODE_DISABLED; erq->flags |= index + 1; if (priv->wep_restrict) erq->flags |= IW_ENCODE_RESTRICTED; else erq->flags |= IW_ENCODE_OPEN; erq->length = priv->keys[index].key_len; memcpy(keybuf, priv->keys[index].key, erq->length); orinoco_unlock(priv, &flags); return 0; } static int orinoco_ioctl_setessid(struct 
net_device *dev, struct iw_request_info *info, struct iw_point *erq, char *essidbuf) { struct orinoco_private *priv = ndev_priv(dev); unsigned long flags; /* Note : ESSID is ignored in Ad-Hoc demo mode, but we can set it * anyway... - Jean II */ /* Hum... Should not use Wireless Extension constant (may change), * should use our own... - Jean II */ if (erq->length > IW_ESSID_MAX_SIZE) return -E2BIG; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; /* NULL the string (for NULL termination & ESSID = ANY) - Jean II */ memset(priv->desired_essid, 0, sizeof(priv->desired_essid)); /* If not ANY, get the new ESSID */ if (erq->flags) memcpy(priv->desired_essid, essidbuf, erq->length); orinoco_unlock(priv, &flags); return -EINPROGRESS; /* Call commit handler */ } static int orinoco_ioctl_getessid(struct net_device *dev, struct iw_request_info *info, struct iw_point *erq, char *essidbuf) { struct orinoco_private *priv = ndev_priv(dev); int active; int err = 0; unsigned long flags; if (netif_running(dev)) { err = orinoco_hw_get_essid(priv, &active, essidbuf); if (err < 0) return err; erq->length = err; } else { if (orinoco_lock(priv, &flags) != 0) return -EBUSY; memcpy(essidbuf, priv->desired_essid, IW_ESSID_MAX_SIZE); erq->length = strlen(priv->desired_essid); orinoco_unlock(priv, &flags); } erq->flags = 1; return 0; } static int orinoco_ioctl_setfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *frq, char *extra) { struct orinoco_private *priv = ndev_priv(dev); int chan = -1; unsigned long flags; int err = -EINPROGRESS; /* Call commit handler */ /* In infrastructure mode the AP sets the channel */ if (priv->iw_mode == NL80211_IFTYPE_STATION) return -EBUSY; if ((frq->e == 0) && (frq->m <= 1000)) { /* Setting by channel number */ chan = frq->m; } else { /* Setting by frequency */ int denom = 1; int i; /* Calculate denominator to rescale to MHz */ for (i = 0; i < (6 - frq->e); i++) denom *= 10; chan = ieee80211_freq_to_dsss_chan(frq->m / denom); } if 
((chan < 1) || (chan > NUM_CHANNELS) || !(priv->channel_mask & (1 << (chan - 1)))) return -EINVAL; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; priv->channel = chan; if (priv->iw_mode == NL80211_IFTYPE_MONITOR) { /* Fast channel change - no commit if successful */ struct hermes *hw = &priv->hw; err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST | HERMES_TEST_SET_CHANNEL, chan, NULL); } orinoco_unlock(priv, &flags); return err; } static int orinoco_ioctl_getfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *frq, char *extra) { struct orinoco_private *priv = ndev_priv(dev); int tmp; /* Locking done in there */ tmp = orinoco_hw_get_freq(priv); if (tmp < 0) return tmp; frq->m = tmp * 100000; frq->e = 1; return 0; } static int orinoco_ioctl_getsens(struct net_device *dev, struct iw_request_info *info, struct iw_param *srq, char *extra) { struct orinoco_private *priv = ndev_priv(dev); struct hermes *hw = &priv->hw; u16 val; int err; unsigned long flags; if (!priv->has_sensitivity) return -EOPNOTSUPP; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE, &val); orinoco_unlock(priv, &flags); if (err) return err; srq->value = val; srq->fixed = 0; /* auto */ return 0; } static int orinoco_ioctl_setsens(struct net_device *dev, struct iw_request_info *info, struct iw_param *srq, char *extra) { struct orinoco_private *priv = ndev_priv(dev); int val = srq->value; unsigned long flags; if (!priv->has_sensitivity) return -EOPNOTSUPP; if ((val < 1) || (val > 3)) return -EINVAL; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; priv->ap_density = val; orinoco_unlock(priv, &flags); return -EINPROGRESS; /* Call commit handler */ } static int orinoco_ioctl_setrate(struct net_device *dev, struct iw_request_info *info, struct iw_param *rrq, char *extra) { struct orinoco_private *priv = ndev_priv(dev); int ratemode; int bitrate; /* 100s of kilobits */ unsigned long flags; /* As the user space doesn't 
know our highest rate, it uses -1 * to ask us to set the highest rate. Test it using "iwconfig * ethX rate auto" - Jean II */ if (rrq->value == -1) bitrate = 110; else { if (rrq->value % 100000) return -EINVAL; bitrate = rrq->value / 100000; } ratemode = orinoco_get_bitratemode(bitrate, !rrq->fixed); if (ratemode == -1) return -EINVAL; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; priv->bitratemode = ratemode; orinoco_unlock(priv, &flags); return -EINPROGRESS; } static int orinoco_ioctl_getrate(struct net_device *dev, struct iw_request_info *info, struct iw_param *rrq, char *extra) { struct orinoco_private *priv = ndev_priv(dev); int err = 0; int bitrate, automatic; unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; orinoco_get_ratemode_cfg(priv->bitratemode, &bitrate, &automatic); /* If the interface is running we try to find more about the current mode */ if (netif_running(dev)) { int act_bitrate; int lerr; /* Ignore errors if we can't get the actual bitrate */ lerr = orinoco_hw_get_act_bitrate(priv, &act_bitrate); if (!lerr) bitrate = act_bitrate; } orinoco_unlock(priv, &flags); rrq->value = bitrate; rrq->fixed = !automatic; rrq->disabled = 0; return err; } static int orinoco_ioctl_setpower(struct net_device *dev, struct iw_request_info *info, struct iw_param *prq, char *extra) { struct orinoco_private *priv = ndev_priv(dev); int err = -EINPROGRESS; /* Call commit handler */ unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; if (prq->disabled) { priv->pm_on = 0; } else { switch (prq->flags & IW_POWER_MODE) { case IW_POWER_UNICAST_R: priv->pm_mcast = 0; priv->pm_on = 1; break; case IW_POWER_ALL_R: priv->pm_mcast = 1; priv->pm_on = 1; break; case IW_POWER_ON: /* No flags : but we may have a value - Jean II */ break; default: err = -EINVAL; goto out; } if (prq->flags & IW_POWER_TIMEOUT) { priv->pm_on = 1; priv->pm_timeout = prq->value / 1000; } if (prq->flags & IW_POWER_PERIOD) { priv->pm_on = 1; priv->pm_period = 
prq->value / 1000; } /* It's valid to not have a value if we are just toggling * the flags... Jean II */ if (!priv->pm_on) { err = -EINVAL; goto out; } } out: orinoco_unlock(priv, &flags); return err; } static int orinoco_ioctl_getpower(struct net_device *dev, struct iw_request_info *info, struct iw_param *prq, char *extra) { struct orinoco_private *priv = ndev_priv(dev); struct hermes *hw = &priv->hw; int err = 0; u16 enable, period, timeout, mcast; unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFPMENABLED, &enable); if (err) goto out; err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFMAXSLEEPDURATION, &period); if (err) goto out; err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFPMHOLDOVERDURATION, &timeout); if (err) goto out; err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFMULTICASTRECEIVE, &mcast); if (err) goto out; prq->disabled = !enable; /* Note : by default, display the period */ if ((prq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { prq->flags = IW_POWER_TIMEOUT; prq->value = timeout * 1000; } else { prq->flags = IW_POWER_PERIOD; prq->value = period * 1000; } if (mcast) prq->flags |= IW_POWER_ALL_R; else prq->flags |= IW_POWER_UNICAST_R; out: orinoco_unlock(priv, &flags); return err; } static int orinoco_ioctl_set_encodeext(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); struct iw_point *encoding = &wrqu->encoding; struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; int idx, alg = ext->alg, set_key = 1; unsigned long flags; int err = -EINVAL; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; /* Determine and validate the key index */ idx = encoding->flags & IW_ENCODE_INDEX; if (idx) { if ((idx < 1) || (idx > 4)) goto out; idx--; } else idx = priv->tx_key; if (encoding->flags & IW_ENCODE_DISABLED) alg = IW_ENCODE_ALG_NONE; if (priv->has_wpa && (alg != 
IW_ENCODE_ALG_TKIP)) { /* Clear any TKIP TX key we had */ (void) orinoco_clear_tkip_key(priv, priv->tx_key); } if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { priv->tx_key = idx; set_key = ((alg == IW_ENCODE_ALG_TKIP) || (ext->key_len > 0)) ? 1 : 0; } if (set_key) { /* Set the requested key first */ switch (alg) { case IW_ENCODE_ALG_NONE: priv->encode_alg = ORINOCO_ALG_NONE; err = orinoco_set_key(priv, idx, ORINOCO_ALG_NONE, NULL, 0, NULL, 0); break; case IW_ENCODE_ALG_WEP: if (ext->key_len <= 0) goto out; priv->encode_alg = ORINOCO_ALG_WEP; err = orinoco_set_key(priv, idx, ORINOCO_ALG_WEP, ext->key, ext->key_len, NULL, 0); break; case IW_ENCODE_ALG_TKIP: { u8 *tkip_iv = NULL; if (!priv->has_wpa || (ext->key_len > sizeof(struct orinoco_tkip_key))) goto out; priv->encode_alg = ORINOCO_ALG_TKIP; if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) tkip_iv = &ext->rx_seq[0]; err = orinoco_set_key(priv, idx, ORINOCO_ALG_TKIP, ext->key, ext->key_len, tkip_iv, ORINOCO_SEQ_LEN); err = __orinoco_hw_set_tkip_key(priv, idx, ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY, priv->keys[idx].key, tkip_iv, ORINOCO_SEQ_LEN, NULL, 0); if (err) printk(KERN_ERR "%s: Error %d setting TKIP key" "\n", dev->name, err); goto out; } default: goto out; } } err = -EINPROGRESS; out: orinoco_unlock(priv, &flags); return err; } static int orinoco_ioctl_get_encodeext(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); struct iw_point *encoding = &wrqu->encoding; struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; int idx, max_key_len; unsigned long flags; int err; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; err = -EINVAL; max_key_len = encoding->length - sizeof(*ext); if (max_key_len < 0) goto out; idx = encoding->flags & IW_ENCODE_INDEX; if (idx) { if ((idx < 1) || (idx > 4)) goto out; idx--; } else idx = priv->tx_key; encoding->flags = idx + 1; memset(ext, 0, sizeof(*ext)); switch (priv->encode_alg) { 
case ORINOCO_ALG_NONE: ext->alg = IW_ENCODE_ALG_NONE; ext->key_len = 0; encoding->flags |= IW_ENCODE_DISABLED; break; case ORINOCO_ALG_WEP: ext->alg = IW_ENCODE_ALG_WEP; ext->key_len = min(priv->keys[idx].key_len, max_key_len); memcpy(ext->key, priv->keys[idx].key, ext->key_len); encoding->flags |= IW_ENCODE_ENABLED; break; case ORINOCO_ALG_TKIP: ext->alg = IW_ENCODE_ALG_TKIP; ext->key_len = min(priv->keys[idx].key_len, max_key_len); memcpy(ext->key, priv->keys[idx].key, ext->key_len); encoding->flags |= IW_ENCODE_ENABLED; break; } err = 0; out: orinoco_unlock(priv, &flags); return err; } static int orinoco_ioctl_set_auth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); struct hermes *hw = &priv->hw; struct iw_param *param = &wrqu->param; unsigned long flags; int ret = -EINPROGRESS; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; switch (param->flags & IW_AUTH_INDEX) { case IW_AUTH_WPA_VERSION: case IW_AUTH_CIPHER_PAIRWISE: case IW_AUTH_CIPHER_GROUP: case IW_AUTH_RX_UNENCRYPTED_EAPOL: case IW_AUTH_PRIVACY_INVOKED: case IW_AUTH_DROP_UNENCRYPTED: /* * orinoco does not use these parameters */ break; case IW_AUTH_MFP: /* Management Frame Protection not supported. * Only fail if set to required. */ if (param->value == IW_AUTH_MFP_REQUIRED) ret = -EINVAL; break; case IW_AUTH_KEY_MGMT: /* wl_lkm implies value 2 == PSK for Hermes I * which ties in with WEXT * no other hints tho :( */ priv->key_mgmt = param->value; break; case IW_AUTH_TKIP_COUNTERMEASURES: /* When countermeasures are enabled, shut down the * card; when disabled, re-enable the card. This must * take effect immediately. 
* * TODO: Make sure that the EAPOL message is getting * out before card disabled */ if (param->value) { priv->tkip_cm_active = 1; ret = hermes_disable_port(hw, 0); } else { priv->tkip_cm_active = 0; ret = hermes_enable_port(hw, 0); } break; case IW_AUTH_80211_AUTH_ALG: if (param->value & IW_AUTH_ALG_SHARED_KEY) priv->wep_restrict = 1; else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) priv->wep_restrict = 0; else ret = -EINVAL; break; case IW_AUTH_WPA_ENABLED: if (priv->has_wpa) { priv->wpa_enabled = param->value ? 1 : 0; } else { if (param->value) ret = -EOPNOTSUPP; /* else silently accept disable of WPA */ priv->wpa_enabled = 0; } break; default: ret = -EOPNOTSUPP; } orinoco_unlock(priv, &flags); return ret; } static int orinoco_ioctl_get_auth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); struct iw_param *param = &wrqu->param; unsigned long flags; int ret = 0; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; switch (param->flags & IW_AUTH_INDEX) { case IW_AUTH_KEY_MGMT: param->value = priv->key_mgmt; break; case IW_AUTH_TKIP_COUNTERMEASURES: param->value = priv->tkip_cm_active; break; case IW_AUTH_80211_AUTH_ALG: if (priv->wep_restrict) param->value = IW_AUTH_ALG_SHARED_KEY; else param->value = IW_AUTH_ALG_OPEN_SYSTEM; break; case IW_AUTH_WPA_ENABLED: param->value = priv->wpa_enabled; break; default: ret = -EOPNOTSUPP; } orinoco_unlock(priv, &flags); return ret; } static int orinoco_ioctl_set_genie(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); u8 *buf; unsigned long flags; /* cut off at IEEE80211_MAX_DATA_LEN */ if ((wrqu->data.length > IEEE80211_MAX_DATA_LEN) || (wrqu->data.length && (extra == NULL))) return -EINVAL; if (wrqu->data.length) { buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL); if (buf == NULL) return -ENOMEM; } else buf = NULL; if (orinoco_lock(priv, &flags) 
!= 0) { kfree(buf); return -EBUSY; } kfree(priv->wpa_ie); priv->wpa_ie = buf; priv->wpa_ie_len = wrqu->data.length; if (priv->wpa_ie) { /* Looks like wl_lkm wants to check the auth alg, and * somehow pass it to the firmware. * Instead it just calls the key mgmt rid * - we do this in set auth. */ } orinoco_unlock(priv, &flags); return 0; } static int orinoco_ioctl_get_genie(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); unsigned long flags; int err = 0; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; if ((priv->wpa_ie_len == 0) || (priv->wpa_ie == NULL)) { wrqu->data.length = 0; goto out; } if (wrqu->data.length < priv->wpa_ie_len) { err = -E2BIG; goto out; } wrqu->data.length = priv->wpa_ie_len; memcpy(extra, priv->wpa_ie, priv->wpa_ie_len); out: orinoco_unlock(priv, &flags); return err; } static int orinoco_ioctl_set_mlme(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); struct iw_mlme *mlme = (struct iw_mlme *)extra; unsigned long flags; int ret = 0; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; switch (mlme->cmd) { case IW_MLME_DEAUTH: /* silently ignore */ break; case IW_MLME_DISASSOC: ret = orinoco_hw_disassociate(priv, mlme->addr.sa_data, mlme->reason_code); break; default: ret = -EOPNOTSUPP; } orinoco_unlock(priv, &flags); return ret; } static int orinoco_ioctl_reset(struct net_device *dev, struct iw_request_info *info, void *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); if (!capable(CAP_NET_ADMIN)) return -EPERM; if (info->cmd == (SIOCIWFIRSTPRIV + 0x1)) { printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name); /* Firmware reset */ orinoco_reset(&priv->reset_work); } else { printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name); schedule_work(&priv->reset_work); } return 0; } static int orinoco_ioctl_setibssport(struct net_device *dev, 
struct iw_request_info *info, void *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); int val = *((int *) extra); unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; priv->ibss_port = val; /* Actually update the mode we are using */ set_port_type(priv); orinoco_unlock(priv, &flags); return -EINPROGRESS; /* Call commit handler */ } static int orinoco_ioctl_getibssport(struct net_device *dev, struct iw_request_info *info, void *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); int *val = (int *) extra; *val = priv->ibss_port; return 0; } static int orinoco_ioctl_setport3(struct net_device *dev, struct iw_request_info *info, void *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); int val = *((int *) extra); int err = 0; unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; switch (val) { case 0: /* Try to do IEEE ad-hoc mode */ if (!priv->has_ibss) { err = -EINVAL; break; } priv->prefer_port3 = 0; break; case 1: /* Try to do Lucent proprietary ad-hoc mode */ if (!priv->has_port3) { err = -EINVAL; break; } priv->prefer_port3 = 1; break; default: err = -EINVAL; } if (!err) { /* Actually update the mode we are using */ set_port_type(priv); err = -EINPROGRESS; } orinoco_unlock(priv, &flags); return err; } static int orinoco_ioctl_getport3(struct net_device *dev, struct iw_request_info *info, void *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); int *val = (int *) extra; *val = priv->prefer_port3; return 0; } static int orinoco_ioctl_setpreamble(struct net_device *dev, struct iw_request_info *info, void *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); unsigned long flags; int val; if (!priv->has_preamble) return -EOPNOTSUPP; /* 802.11b has recently defined some short preamble. * Basically, the Phy header has been reduced in size. 
* This increase performance, especially at high rates * (the preamble is transmitted at 1Mb/s), unfortunately * this give compatibility troubles... - Jean II */ val = *((int *) extra); if (orinoco_lock(priv, &flags) != 0) return -EBUSY; if (val) priv->preamble = 1; else priv->preamble = 0; orinoco_unlock(priv, &flags); return -EINPROGRESS; /* Call commit handler */ } static int orinoco_ioctl_getpreamble(struct net_device *dev, struct iw_request_info *info, void *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); int *val = (int *) extra; if (!priv->has_preamble) return -EOPNOTSUPP; *val = priv->preamble; return 0; } /* ioctl interface to hermes_read_ltv() * To use with iwpriv, pass the RID as the token argument, e.g. * iwpriv get_rid [0xfc00] * At least Wireless Tools 25 is required to use iwpriv. * For Wireless Tools 25 and 26 append "dummy" are the end. */ static int orinoco_ioctl_getrid(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *extra) { struct orinoco_private *priv = ndev_priv(dev); struct hermes *hw = &priv->hw; int rid = data->flags; u16 length; int err; unsigned long flags; /* It's a "get" function, but we don't want users to access the * WEP key and other raw firmware data */ if (!capable(CAP_NET_ADMIN)) return -EPERM; if (rid < 0xfc00 || rid > 0xffff) return -EINVAL; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; err = hw->ops->read_ltv(hw, USER_BAP, rid, MAX_RID_LEN, &length, extra); if (err) goto out; data->length = min_t(u16, HERMES_RECLEN_TO_BYTES(length), MAX_RID_LEN); out: orinoco_unlock(priv, &flags); return err; } /* Commit handler, called after set operations */ static int orinoco_ioctl_commit(struct net_device *dev, struct iw_request_info *info, void *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); unsigned long flags; int err = 0; if (!priv->open) return 0; if (orinoco_lock(priv, &flags) != 0) return err; err = orinoco_commit(priv); orinoco_unlock(priv, &flags); 
return err; } static const struct iw_priv_args orinoco_privtab[] = { { SIOCIWFIRSTPRIV + 0x0, 0, 0, "force_reset" }, { SIOCIWFIRSTPRIV + 0x1, 0, 0, "card_reset" }, { SIOCIWFIRSTPRIV + 0x2, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_port3" }, { SIOCIWFIRSTPRIV + 0x3, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_port3" }, { SIOCIWFIRSTPRIV + 0x4, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_preamble" }, { SIOCIWFIRSTPRIV + 0x5, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_preamble" }, { SIOCIWFIRSTPRIV + 0x6, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_ibssport" }, { SIOCIWFIRSTPRIV + 0x7, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ibssport" }, { SIOCIWFIRSTPRIV + 0x9, 0, IW_PRIV_TYPE_BYTE | MAX_RID_LEN, "get_rid" }, }; /* * Structures to export the Wireless Handlers */ static const iw_handler orinoco_handler[] = { IW_HANDLER(SIOCSIWCOMMIT, (iw_handler)orinoco_ioctl_commit), IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname), IW_HANDLER(SIOCSIWFREQ, (iw_handler)orinoco_ioctl_setfreq), IW_HANDLER(SIOCGIWFREQ, (iw_handler)orinoco_ioctl_getfreq), IW_HANDLER(SIOCSIWMODE, (iw_handler)cfg80211_wext_siwmode), IW_HANDLER(SIOCGIWMODE, (iw_handler)cfg80211_wext_giwmode), IW_HANDLER(SIOCSIWSENS, (iw_handler)orinoco_ioctl_setsens), IW_HANDLER(SIOCGIWSENS, (iw_handler)orinoco_ioctl_getsens), IW_HANDLER(SIOCGIWRANGE, (iw_handler)cfg80211_wext_giwrange), IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy), IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy), IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy), IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy), IW_HANDLER(SIOCSIWAP, (iw_handler)orinoco_ioctl_setwap), IW_HANDLER(SIOCGIWAP, (iw_handler)orinoco_ioctl_getwap), IW_HANDLER(SIOCSIWSCAN, (iw_handler)cfg80211_wext_siwscan), IW_HANDLER(SIOCGIWSCAN, (iw_handler)cfg80211_wext_giwscan), IW_HANDLER(SIOCSIWESSID, (iw_handler)orinoco_ioctl_setessid), IW_HANDLER(SIOCGIWESSID, (iw_handler)orinoco_ioctl_getessid), IW_HANDLER(SIOCSIWRATE, 
(iw_handler)orinoco_ioctl_setrate), IW_HANDLER(SIOCGIWRATE, (iw_handler)orinoco_ioctl_getrate), IW_HANDLER(SIOCSIWRTS, (iw_handler)cfg80211_wext_siwrts), IW_HANDLER(SIOCGIWRTS, (iw_handler)cfg80211_wext_giwrts), IW_HANDLER(SIOCSIWFRAG, (iw_handler)cfg80211_wext_siwfrag), IW_HANDLER(SIOCGIWFRAG, (iw_handler)cfg80211_wext_giwfrag), IW_HANDLER(SIOCGIWRETRY, (iw_handler)cfg80211_wext_giwretry), IW_HANDLER(SIOCSIWENCODE, (iw_handler)orinoco_ioctl_setiwencode), IW_HANDLER(SIOCGIWENCODE, (iw_handler)orinoco_ioctl_getiwencode), IW_HANDLER(SIOCSIWPOWER, (iw_handler)orinoco_ioctl_setpower), IW_HANDLER(SIOCGIWPOWER, (iw_handler)orinoco_ioctl_getpower), IW_HANDLER(SIOCSIWGENIE, orinoco_ioctl_set_genie), IW_HANDLER(SIOCGIWGENIE, orinoco_ioctl_get_genie), IW_HANDLER(SIOCSIWMLME, orinoco_ioctl_set_mlme), IW_HANDLER(SIOCSIWAUTH, orinoco_ioctl_set_auth), IW_HANDLER(SIOCGIWAUTH, orinoco_ioctl_get_auth), IW_HANDLER(SIOCSIWENCODEEXT, orinoco_ioctl_set_encodeext), IW_HANDLER(SIOCGIWENCODEEXT, orinoco_ioctl_get_encodeext), }; /* Added typecasting since we no longer use iwreq_data -- Moustafa */ static const iw_handler orinoco_private_handler[] = { [0] = (iw_handler)orinoco_ioctl_reset, [1] = (iw_handler)orinoco_ioctl_reset, [2] = (iw_handler)orinoco_ioctl_setport3, [3] = (iw_handler)orinoco_ioctl_getport3, [4] = (iw_handler)orinoco_ioctl_setpreamble, [5] = (iw_handler)orinoco_ioctl_getpreamble, [6] = (iw_handler)orinoco_ioctl_setibssport, [7] = (iw_handler)orinoco_ioctl_getibssport, [9] = (iw_handler)orinoco_ioctl_getrid, }; const struct iw_handler_def orinoco_handler_def = { .num_standard = ARRAY_SIZE(orinoco_handler), .num_private = ARRAY_SIZE(orinoco_private_handler), .num_private_args = ARRAY_SIZE(orinoco_privtab), .standard = orinoco_handler, .private = orinoco_private_handler, .private_args = orinoco_privtab, .get_wireless_stats = orinoco_get_wireless_stats, };
gpl-2.0
richardtrip/endeavoru
arch/x86/kernel/acpi/boot.c
148
40704
/* * boot.c - Architecture-Specific Low-Level ACPI Boot Support * * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/init.h> #include <linux/acpi.h> #include <linux/acpi_pmtmr.h> #include <linux/efi.h> #include <linux/cpumask.h> #include <linux/module.h> #include <linux/dmi.h> #include <linux/irq.h> #include <linux/slab.h> #include <linux/bootmem.h> #include <linux/ioport.h> #include <linux/pci.h> #include <asm/pci_x86.h> #include <asm/pgtable.h> #include <asm/io_apic.h> #include <asm/apic.h> #include <asm/io.h> #include <asm/mpspec.h> #include <asm/smp.h> static int __initdata acpi_force = 0; u32 acpi_rsdt_forced; int acpi_disabled; EXPORT_SYMBOL(acpi_disabled); #ifdef CONFIG_X86_64 # include <asm/proto.h> # include <asm/numa_64.h> #endif /* X86 */ #define BAD_MADT_ENTRY(entry, end) ( \ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ ((struct acpi_subtable_header *)entry)->length < sizeof(*entry)) #define PREFIX "ACPI: " int acpi_noirq; /* skip ACPI IRQ initialization */ int acpi_pci_disabled; /* skip ACPI PCI 
scan and IRQ initialization */ EXPORT_SYMBOL(acpi_pci_disabled); int acpi_lapic; int acpi_ioapic; int acpi_strict; u8 acpi_sci_flags __initdata; int acpi_sci_override_gsi __initdata; int acpi_skip_timer_override __initdata; int acpi_use_timer_override __initdata; int acpi_fix_pin2_polarity __initdata; #ifdef CONFIG_X86_LOCAL_APIC static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; #endif #ifndef __HAVE_ARCH_CMPXCHG #warning ACPI uses CMPXCHG, i486 and later hardware #endif /* -------------------------------------------------------------------------- Boot-time Configuration -------------------------------------------------------------------------- */ /* * The default interrupt routing model is PIC (8259). This gets * overridden if IOAPICs are enumerated (below). */ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC; /* * ISA irqs by default are the first 16 gsis but can be * any gsi as specified by an interrupt source override. */ static u32 isa_irq_to_gsi[NR_IRQS_LEGACY] __read_mostly = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; static unsigned int gsi_to_irq(unsigned int gsi) { unsigned int irq = gsi + NR_IRQS_LEGACY; unsigned int i; for (i = 0; i < NR_IRQS_LEGACY; i++) { if (isa_irq_to_gsi[i] == gsi) { return i; } } /* Provide an identity mapping of gsi == irq * except on truly weird platforms that have * non isa irqs in the first 16 gsis. */ if (gsi >= NR_IRQS_LEGACY) irq = gsi; else irq = gsi_top + gsi; return irq; } static u32 irq_to_gsi(int irq) { unsigned int gsi; if (irq < NR_IRQS_LEGACY) gsi = isa_irq_to_gsi[irq]; else if (irq < gsi_top) gsi = irq; else if (irq < (gsi_top + NR_IRQS_LEGACY)) gsi = irq - gsi_top; else gsi = 0xffffffff; return gsi; } /* * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END, * to map the target physical address. The problem is that set_fixmap() * provides a single page, and it is possible that the page is not * sufficient. 
* By using this area, we can map up to MAX_IO_APICS pages temporarily, * i.e. until the next __va_range() call. * * Important Safety Note: The fixed I/O APIC page numbers are *subtracted* * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and * count idx down while incrementing the phys address. */ char *__init __acpi_map_table(unsigned long phys, unsigned long size) { if (!phys || !size) return NULL; return early_ioremap(phys, size); } void __init __acpi_unmap_table(char *map, unsigned long size) { if (!map || !size) return; early_iounmap(map, size); } #ifdef CONFIG_X86_LOCAL_APIC static int __init acpi_parse_madt(struct acpi_table_header *table) { struct acpi_table_madt *madt = NULL; if (!cpu_has_apic) return -EINVAL; madt = (struct acpi_table_madt *)table; if (!madt) { printk(KERN_WARNING PREFIX "Unable to map MADT\n"); return -ENODEV; } if (madt->address) { acpi_lapic_addr = (u64) madt->address; printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n", madt->address); } default_acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id); return 0; } static void __cpuinit acpi_register_lapic(int id, u8 enabled) { unsigned int ver = 0; if (id >= (MAX_LOCAL_APIC-1)) { printk(KERN_INFO PREFIX "skipped apicid that is too big\n"); return; } if (!enabled) { ++disabled_cpus; return; } if (boot_cpu_physical_apicid != -1U) ver = apic_version[boot_cpu_physical_apicid]; generic_processor_info(id, ver); } static int __init acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) { struct acpi_madt_local_x2apic *processor = NULL; processor = (struct acpi_madt_local_x2apic *)header; if (BAD_MADT_ENTRY(processor, end)) return -EINVAL; acpi_table_print_madt_entry(header); #ifdef CONFIG_X86_X2APIC /* * We need to register disabled CPU as well to permit * counting disabled CPUs. This allows us to size * cpus_possible_map more accurately, to permit * to not preallocating memory for all NR_CPUS * when we use CPU hotplug. 
*/ acpi_register_lapic(processor->local_apic_id, /* APIC ID */ processor->lapic_flags & ACPI_MADT_ENABLED); #else printk(KERN_WARNING PREFIX "x2apic entry ignored\n"); #endif return 0; } static int __init acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_local_apic *processor = NULL; processor = (struct acpi_madt_local_apic *)header; if (BAD_MADT_ENTRY(processor, end)) return -EINVAL; acpi_table_print_madt_entry(header); /* * We need to register disabled CPU as well to permit * counting disabled CPUs. This allows us to size * cpus_possible_map more accurately, to permit * to not preallocating memory for all NR_CPUS * when we use CPU hotplug. */ acpi_register_lapic(processor->id, /* APIC ID */ processor->lapic_flags & ACPI_MADT_ENABLED); return 0; } static int __init acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end) { struct acpi_madt_local_sapic *processor = NULL; processor = (struct acpi_madt_local_sapic *)header; if (BAD_MADT_ENTRY(processor, end)) return -EINVAL; acpi_table_print_madt_entry(header); acpi_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */ processor->lapic_flags & ACPI_MADT_ENABLED); return 0; } static int __init acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL; lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header; if (BAD_MADT_ENTRY(lapic_addr_ovr, end)) return -EINVAL; acpi_lapic_addr = lapic_addr_ovr->address; return 0; } static int __init acpi_parse_x2apic_nmi(struct acpi_subtable_header *header, const unsigned long end) { struct acpi_madt_local_x2apic_nmi *x2apic_nmi = NULL; x2apic_nmi = (struct acpi_madt_local_x2apic_nmi *)header; if (BAD_MADT_ENTRY(x2apic_nmi, end)) return -EINVAL; acpi_table_print_madt_entry(header); if (x2apic_nmi->lint != 1) printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); return 0; } static int __init 
acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_local_apic_nmi *lapic_nmi = NULL; lapic_nmi = (struct acpi_madt_local_apic_nmi *)header; if (BAD_MADT_ENTRY(lapic_nmi, end)) return -EINVAL; acpi_table_print_madt_entry(header); if (lapic_nmi->lint != 1) printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); return 0; } #endif /*CONFIG_X86_LOCAL_APIC */ #ifdef CONFIG_X86_IO_APIC static int __init acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_io_apic *ioapic = NULL; ioapic = (struct acpi_madt_io_apic *)header; if (BAD_MADT_ENTRY(ioapic, end)) return -EINVAL; acpi_table_print_madt_entry(header); mp_register_ioapic(ioapic->id, ioapic->address, ioapic->global_irq_base); return 0; } /* * Parse Interrupt Source Override for the ACPI SCI */ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, u32 gsi) { if (trigger == 0) /* compatible SCI trigger is level */ trigger = 3; if (polarity == 0) /* compatible SCI polarity is low */ polarity = 3; /* Command-line over-ride via acpi_sci= */ if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2; if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK) polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; /* * mp_config_acpi_legacy_irqs() already setup IRQs < 16 * If GSI is < 16, this will update its flags, * else it will create a new mp_irqs[] entry. 
*/ mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); /* * stash over-ride to indicate we've been here * and for later update of acpi_gbl_FADT */ acpi_sci_override_gsi = gsi; return; } static int __init acpi_parse_int_src_ovr(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_interrupt_override *intsrc = NULL; intsrc = (struct acpi_madt_interrupt_override *)header; if (BAD_MADT_ENTRY(intsrc, end)) return -EINVAL; acpi_table_print_madt_entry(header); if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) { acpi_sci_ioapic_setup(intsrc->source_irq, intsrc->inti_flags & ACPI_MADT_POLARITY_MASK, (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2, intsrc->global_irq); return 0; } if (intsrc->source_irq == 0 && intsrc->global_irq == 2) { if (acpi_skip_timer_override) { printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); return 0; } if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) { intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK; printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n"); } } mp_override_legacy_irq(intsrc->source_irq, intsrc->inti_flags & ACPI_MADT_POLARITY_MASK, (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2, intsrc->global_irq); return 0; } static int __init acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_nmi_source *nmi_src = NULL; nmi_src = (struct acpi_madt_nmi_source *)header; if (BAD_MADT_ENTRY(nmi_src, end)) return -EINVAL; acpi_table_print_madt_entry(header); /* TBD: Support nimsrc entries? */ return 0; } #endif /* CONFIG_X86_IO_APIC */ /* * acpi_pic_sci_set_trigger() * * use ELCR to set PIC-mode trigger type for SCI * * If a PIC-mode SCI is not recognized or gives spurious IRQ7's * it may require Edge Trigger -- use "acpi_sci=edge" * * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge. 
* ECLR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0) * ECLR2 is IRQs 8-15 (IRQ 8, 13 must be 0) */ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger) { unsigned int mask = 1 << irq; unsigned int old, new; /* Real old ELCR mask */ old = inb(0x4d0) | (inb(0x4d1) << 8); /* * If we use ACPI to set PCI IRQs, then we should clear ELCR * since we will set it correctly as we enable the PCI irq * routing. */ new = acpi_noirq ? old : 0; /* * Update SCI information in the ELCR, it isn't in the PCI * routing tables.. */ switch (trigger) { case 1: /* Edge - clear */ new &= ~mask; break; case 3: /* Level - set */ new |= mask; break; } if (old == new) return; printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old); outb(new, 0x4d0); outb(new >> 8, 0x4d1); } int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) { *irq = gsi_to_irq(gsi); #ifdef CONFIG_X86_IO_APIC if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) setup_IO_APIC_irq_extra(gsi); #endif return 0; } EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi) { if (isa_irq >= 16) return -1; *gsi = irq_to_gsi(isa_irq); return 0; } static int acpi_register_gsi_pic(struct device *dev, u32 gsi, int trigger, int polarity) { #ifdef CONFIG_PCI /* * Make sure all (legacy) PCI IRQs are set as level-triggered. 
*/ if (trigger == ACPI_LEVEL_SENSITIVE) eisa_set_level_irq(gsi); #endif return gsi; } static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi, int trigger, int polarity) { #ifdef CONFIG_X86_IO_APIC gsi = mp_register_gsi(dev, gsi, trigger, polarity); #endif return gsi; } int (*__acpi_register_gsi)(struct device *dev, u32 gsi, int trigger, int polarity) = acpi_register_gsi_pic; /* * success: return IRQ number (>=0) * failure: return < 0 */ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) { unsigned int irq; unsigned int plat_gsi = gsi; plat_gsi = (*__acpi_register_gsi)(dev, gsi, trigger, polarity); irq = gsi_to_irq(plat_gsi); return irq; } void __init acpi_set_irq_model_pic(void) { acpi_irq_model = ACPI_IRQ_MODEL_PIC; __acpi_register_gsi = acpi_register_gsi_pic; acpi_ioapic = 0; } void __init acpi_set_irq_model_ioapic(void) { acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; __acpi_register_gsi = acpi_register_gsi_ioapic; acpi_ioapic = 1; } /* * ACPI based hotplug support for CPU */ #ifdef CONFIG_ACPI_HOTPLUG_CPU #include <acpi/processor.h> static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) { #ifdef CONFIG_ACPI_NUMA int nid; nid = acpi_get_node(handle); if (nid == -1 || !node_online(nid)) return; set_apicid_to_node(physid, nid); numa_set_node(cpu, nid); #endif } static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; struct acpi_madt_local_apic *lapic; cpumask_var_t tmp_map, new_map; u8 physid; int cpu; int retval = -ENOMEM; if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) return -EINVAL; if (!buffer.length || !buffer.pointer) return -EINVAL; obj = buffer.pointer; if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < sizeof(*lapic)) { kfree(buffer.pointer); return -EINVAL; } lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer; if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC || 
!(lapic->lapic_flags & ACPI_MADT_ENABLED)) { kfree(buffer.pointer); return -EINVAL; } physid = lapic->id; kfree(buffer.pointer); buffer.length = ACPI_ALLOCATE_BUFFER; buffer.pointer = NULL; if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL)) goto out; if (!alloc_cpumask_var(&new_map, GFP_KERNEL)) goto free_tmp_map; cpumask_copy(tmp_map, cpu_present_mask); acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED); /* * If mp_register_lapic successfully generates a new logical cpu * number, then the following will get us exactly what was mapped */ cpumask_andnot(new_map, cpu_present_mask, tmp_map); if (cpumask_empty(new_map)) { printk ("Unable to map lapic to logical cpu number\n"); retval = -EINVAL; goto free_new_map; } acpi_processor_set_pdc(handle); cpu = cpumask_first(new_map); acpi_map_cpu2node(handle, cpu, physid); *pcpu = cpu; retval = 0; free_new_map: free_cpumask_var(new_map); free_tmp_map: free_cpumask_var(tmp_map); out: return retval; } /* wrapper to silence section mismatch warning */ int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu) { return _acpi_map_lsapic(handle, pcpu); } EXPORT_SYMBOL(acpi_map_lsapic); int acpi_unmap_lsapic(int cpu) { per_cpu(x86_cpu_to_apicid, cpu) = -1; set_cpu_present(cpu, false); num_processors--; return (0); } EXPORT_SYMBOL(acpi_unmap_lsapic); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) { /* TBD */ return -EINVAL; } EXPORT_SYMBOL(acpi_register_ioapic); int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base) { /* TBD */ return -EINVAL; } EXPORT_SYMBOL(acpi_unregister_ioapic); static int __init acpi_parse_sbf(struct acpi_table_header *table) { struct acpi_table_boot *sb; sb = (struct acpi_table_boot *)table; if (!sb) { printk(KERN_WARNING PREFIX "Unable to map SBF\n"); return -ENODEV; } sbf_port = sb->cmos_index; /* Save CMOS port */ return 0; } #ifdef CONFIG_HPET_TIMER #include <asm/hpet.h> static struct __initdata resource *hpet_res; static int 
__init acpi_parse_hpet(struct acpi_table_header *table) { struct acpi_table_hpet *hpet_tbl; hpet_tbl = (struct acpi_table_hpet *)table; if (!hpet_tbl) { printk(KERN_WARNING PREFIX "Unable to map HPET\n"); return -ENODEV; } if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) { printk(KERN_WARNING PREFIX "HPET timers must be located in " "memory.\n"); return -1; } hpet_address = hpet_tbl->address.address; hpet_blockid = hpet_tbl->sequence; /* * Some broken BIOSes advertise HPET at 0x0. We really do not * want to allocate a resource there. */ if (!hpet_address) { printk(KERN_WARNING PREFIX "HPET id: %#x base: %#lx is invalid\n", hpet_tbl->id, hpet_address); return 0; } #ifdef CONFIG_X86_64 /* * Some even more broken BIOSes advertise HPET at * 0xfed0000000000000 instead of 0xfed00000. Fix it up and add * some noise: */ if (hpet_address == 0xfed0000000000000UL) { if (!hpet_force_user) { printk(KERN_WARNING PREFIX "HPET id: %#x " "base: 0xfed0000000000000 is bogus\n " "try hpet=force on the kernel command line to " "fix it up to 0xfed00000.\n", hpet_tbl->id); hpet_address = 0; return 0; } printk(KERN_WARNING PREFIX "HPET id: %#x base: 0xfed0000000000000 fixed up " "to 0xfed00000.\n", hpet_tbl->id); hpet_address >>= 32; } #endif printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", hpet_tbl->id, hpet_address); /* * Allocate and initialize the HPET firmware resource for adding into * the resource tree during the lateinit timeframe. */ #define HPET_RESOURCE_NAME_SIZE 9 hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE); hpet_res->name = (void *)&hpet_res[1]; hpet_res->flags = IORESOURCE_MEM; snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u", hpet_tbl->sequence); hpet_res->start = hpet_address; hpet_res->end = hpet_address + (1 * 1024) - 1; return 0; } /* * hpet_insert_resource inserts the HPET resources used into the resource * tree. 
*/ static __init int hpet_insert_resource(void) { if (!hpet_res) return 1; return insert_resource(&iomem_resource, hpet_res); } late_initcall(hpet_insert_resource); #else #define acpi_parse_hpet NULL #endif static int __init acpi_parse_fadt(struct acpi_table_header *table) { #ifdef CONFIG_X86_PM_TIMER /* detect the location of the ACPI PM Timer */ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) { /* FADT rev. 2 */ if (acpi_gbl_FADT.xpm_timer_block.space_id != ACPI_ADR_SPACE_SYSTEM_IO) return 0; pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address; /* * "X" fields are optional extensions to the original V1.0 * fields, so we must selectively expand V1.0 fields if the * corresponding X field is zero. */ if (!pmtmr_ioport) pmtmr_ioport = acpi_gbl_FADT.pm_timer_block; } else { /* FADT rev. 1 */ pmtmr_ioport = acpi_gbl_FADT.pm_timer_block; } if (pmtmr_ioport) printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n", pmtmr_ioport); #endif return 0; } #ifdef CONFIG_X86_LOCAL_APIC /* * Parse LAPIC entries in MADT * returns 0 on success, < 0 on error */ static int __init early_acpi_parse_madt_lapic_addr_ovr(void) { int count; if (!cpu_has_apic) return -ENODEV; /* * Note that the LAPIC address is obtained from the MADT (32-bit value) * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value). */ count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0); if (count < 0) { printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n"); return count; } register_lapic_address(acpi_lapic_addr); return count; } static int __init acpi_parse_madt_lapic_entries(void) { int count; int x2count = 0; if (!cpu_has_apic) return -ENODEV; /* * Note that the LAPIC address is obtained from the MADT (32-bit value) * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value). 
*/ count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0); if (count < 0) { printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n"); return count; } register_lapic_address(acpi_lapic_addr); count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_sapic, MAX_LOCAL_APIC); if (!count) { x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC, acpi_parse_x2apic, MAX_LOCAL_APIC); count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic, MAX_LOCAL_APIC); } if (!count && !x2count) { printk(KERN_ERR PREFIX "No LAPIC entries present\n"); /* TBD: Cleanup to allow fallback to MPS */ return -ENODEV; } else if (count < 0 || x2count < 0) { printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n"); /* TBD: Cleanup to allow fallback to MPS */ return count; } x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI, acpi_parse_x2apic_nmi, 0); count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0); if (count < 0 || x2count < 0) { printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); /* TBD: Cleanup to allow fallback to MPS */ return count; } return 0; } #endif /* CONFIG_X86_LOCAL_APIC */ #ifdef CONFIG_X86_IO_APIC #define MP_ISA_BUS 0 #ifdef CONFIG_X86_ES7000 extern int es7000_plat; #endif void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) { int ioapic; int pin; struct mpc_intsrc mp_irq; /* * Convert 'gsi' to 'ioapic.pin'. */ ioapic = mp_find_ioapic(gsi); if (ioapic < 0) return; pin = mp_find_ioapic_pin(ioapic, gsi); /* * TBD: This check is for faulty timer entries, where the override * erroneously sets the trigger to level, resulting in a HUGE * increase of timer interrupts! 
*/ if ((bus_irq == 0) && (trigger == 3)) trigger = 1; mp_irq.type = MP_INTSRC; mp_irq.irqtype = mp_INT; mp_irq.irqflag = (trigger << 2) | polarity; mp_irq.srcbus = MP_ISA_BUS; mp_irq.srcbusirq = bus_irq; /* IRQ */ mp_irq.dstapic = mp_ioapics[ioapic].apicid; /* APIC ID */ mp_irq.dstirq = pin; /* INTIN# */ mp_save_irq(&mp_irq); isa_irq_to_gsi[bus_irq] = gsi; } void __init mp_config_acpi_legacy_irqs(void) { int i; struct mpc_intsrc mp_irq; #if defined (CONFIG_MCA) || defined (CONFIG_EISA) /* * Fabricate the legacy ISA bus (bus #31). */ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; #endif set_bit(MP_ISA_BUS, mp_bus_not_pci); pr_debug("Bus #%d is ISA\n", MP_ISA_BUS); #ifdef CONFIG_X86_ES7000 /* * Older generations of ES7000 have no legacy identity mappings */ if (es7000_plat == 1) return; #endif /* * Use the default configuration for the IRQs 0-15. Unless * overridden by (MADT) interrupt source override entries. */ for (i = 0; i < 16; i++) { int ioapic, pin; unsigned int dstapic; int idx; u32 gsi; /* Locate the gsi that irq i maps to. */ if (acpi_isa_irq_to_gsi(i, &gsi)) continue; /* * Locate the IOAPIC that manages the ISA IRQ. */ ioapic = mp_find_ioapic(gsi); if (ioapic < 0) continue; pin = mp_find_ioapic_pin(ioapic, gsi); dstapic = mp_ioapics[ioapic].apicid; for (idx = 0; idx < mp_irq_entries; idx++) { struct mpc_intsrc *irq = mp_irqs + idx; /* Do we already have a mapping for this ISA IRQ? 
*/ if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i) break; /* Do we already have a mapping for this IOAPIC pin */ if (irq->dstapic == dstapic && irq->dstirq == pin) break; } if (idx != mp_irq_entries) { printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i); continue; /* IRQ already used */ } mp_irq.type = MP_INTSRC; mp_irq.irqflag = 0; /* Conforming */ mp_irq.srcbus = MP_ISA_BUS; mp_irq.dstapic = dstapic; mp_irq.irqtype = mp_INT; mp_irq.srcbusirq = i; /* Identity mapped */ mp_irq.dstirq = pin; mp_save_irq(&mp_irq); } } static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger, int polarity) { #ifdef CONFIG_X86_MPPARSE struct mpc_intsrc mp_irq; struct pci_dev *pdev; unsigned char number; unsigned int devfn; int ioapic; u8 pin; if (!acpi_ioapic) return 0; if (!dev) return 0; if (dev->bus != &pci_bus_type) return 0; pdev = to_pci_dev(dev); number = pdev->bus->number; devfn = pdev->devfn; pin = pdev->pin; /* print the entry should happen on mptable identically */ mp_irq.type = MP_INTSRC; mp_irq.irqtype = mp_INT; mp_irq.irqflag = (trigger == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) | (polarity == ACPI_ACTIVE_HIGH ? 
1 : 3); mp_irq.srcbus = number; mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3); ioapic = mp_find_ioapic(gsi); mp_irq.dstapic = mp_ioapics[ioapic].apicid; mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi); mp_save_irq(&mp_irq); #endif return 0; } int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) { int ioapic; int ioapic_pin; struct io_apic_irq_attr irq_attr; if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC) return gsi; /* Don't set up the ACPI SCI because it's already set up */ if (acpi_gbl_FADT.sci_interrupt == gsi) return gsi; ioapic = mp_find_ioapic(gsi); if (ioapic < 0) { printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi); return gsi; } ioapic_pin = mp_find_ioapic_pin(ioapic, gsi); if (ioapic_pin > MP_MAX_IOAPIC_PIN) { printk(KERN_ERR "Invalid reference to IOAPIC pin " "%d-%d\n", mp_ioapics[ioapic].apicid, ioapic_pin); return gsi; } if (enable_update_mptable) mp_config_acpi_gsi(dev, gsi, trigger, polarity); set_io_apic_irq_attr(&irq_attr, ioapic, ioapic_pin, trigger == ACPI_EDGE_SENSITIVE ? 0 : 1, polarity == ACPI_ACTIVE_HIGH ? 0 : 1); io_apic_set_pci_routing(dev, gsi_to_irq(gsi), &irq_attr); return gsi; } /* * Parse IOAPIC related entries in MADT * returns 0 on success, < 0 on error */ static int __init acpi_parse_madt_ioapic_entries(void) { int count; /* * ACPI interpreter is required to complete interrupt setup, * so if it is off, don't enumerate the io-apics with ACPI. 
* If MPS is present, it will handle them, * otherwise the system will stay in PIC mode */ if (acpi_disabled || acpi_noirq) return -ENODEV; if (!cpu_has_apic) return -ENODEV; /* * if "noapic" boot option, don't look for IO-APICs */ if (skip_ioapic_setup) { printk(KERN_INFO PREFIX "Skipping IOAPIC probe " "due to 'noapic' option.\n"); return -ENODEV; } count = acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic, MAX_IO_APICS); if (!count) { printk(KERN_ERR PREFIX "No IOAPIC entries present\n"); return -ENODEV; } else if (count < 0) { printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n"); return count; } count = acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, nr_irqs); if (count < 0) { printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n"); /* TBD: Cleanup to allow fallback to MPS */ return count; } /* * If BIOS did not supply an INT_SRC_OVR for the SCI * pretend we got one so we can set the SCI flags. */ if (!acpi_sci_override_gsi) acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0, acpi_gbl_FADT.sci_interrupt); /* Fill in identity legacy mappings where no override */ mp_config_acpi_legacy_irqs(); count = acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, nr_irqs); if (count < 0) { printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); /* TBD: Cleanup to allow fallback to MPS */ return count; } return 0; } #else static inline int acpi_parse_madt_ioapic_entries(void) { return -1; } #endif /* !CONFIG_X86_IO_APIC */ static void __init early_acpi_process_madt(void) { #ifdef CONFIG_X86_LOCAL_APIC int error; if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { /* * Parse MADT LAPIC entries */ error = early_acpi_parse_madt_lapic_addr_ovr(); if (!error) { acpi_lapic = 1; smp_found_config = 1; } if (error == -EINVAL) { /* * Dell Precision Workstation 410, 610 come here. 
*/ printk(KERN_ERR PREFIX "Invalid BIOS MADT, disabling ACPI\n"); disable_acpi(); } } #endif } static void __init acpi_process_madt(void) { #ifdef CONFIG_X86_LOCAL_APIC int error; if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { /* * Parse MADT LAPIC entries */ error = acpi_parse_madt_lapic_entries(); if (!error) { acpi_lapic = 1; /* * Parse MADT IO-APIC entries */ error = acpi_parse_madt_ioapic_entries(); if (!error) { acpi_set_irq_model_ioapic(); smp_found_config = 1; } } if (error == -EINVAL) { /* * Dell Precision Workstation 410, 610 come here. */ printk(KERN_ERR PREFIX "Invalid BIOS MADT, disabling ACPI\n"); disable_acpi(); } } else { /* * ACPI found no MADT, and so ACPI wants UP PIC mode. * In the event an MPS table was found, forget it. * Boot with "acpi=off" to use MPS on such a system. */ if (smp_found_config) { printk(KERN_WARNING PREFIX "No APIC-table, disabling MPS\n"); smp_found_config = 0; } } /* * ACPI supports both logical (e.g. Hyper-Threading) and physical * processors, where MPS only supports physical. 
*/ if (acpi_lapic && acpi_ioapic) printk(KERN_INFO "Using ACPI (MADT) for SMP configuration " "information\n"); else if (acpi_lapic) printk(KERN_INFO "Using ACPI for processor (LAPIC) " "configuration information\n"); #endif return; } static int __init disable_acpi_irq(const struct dmi_system_id *d) { if (!acpi_force) { printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n", d->ident); acpi_noirq_set(); } return 0; } static int __init disable_acpi_pci(const struct dmi_system_id *d) { if (!acpi_force) { printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n", d->ident); acpi_disable_pci(); } return 0; } static int __init dmi_disable_acpi(const struct dmi_system_id *d) { if (!acpi_force) { printk(KERN_NOTICE "%s detected: acpi off\n", d->ident); disable_acpi(); } else { printk(KERN_NOTICE "Warning: DMI blacklist says broken, but acpi forced\n"); } return 0; } /* * Force ignoring BIOS IRQ0 pin2 override */ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d) { /* * The ati_ixp4x0_rev() early PCI quirk should have set * the acpi_skip_timer_override flag already: */ if (!acpi_skip_timer_override) { WARN(1, KERN_ERR "ati_ixp4x0 quirk not complete.\n"); pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n", d->ident); acpi_skip_timer_override = 1; } return 0; } /* * If your system is blacklisted here, but you find that acpi=force * works for you, please contact linux-acpi@vger.kernel.org */ static struct dmi_system_id __initdata acpi_dmi_table[] = { /* * Boxes that need ACPI disabled */ { .callback = dmi_disable_acpi, .ident = "IBM Thinkpad", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), DMI_MATCH(DMI_BOARD_NAME, "2629H1G"), }, }, /* * Boxes that need ACPI PCI IRQ routing disabled */ { .callback = disable_acpi_irq, .ident = "ASUS A7V", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"), DMI_MATCH(DMI_BOARD_NAME, "<A7V>"), /* newer BIOS, Revision 1011, does work */ DMI_MATCH(DMI_BIOS_VERSION, "ASUS A7V ACPI BIOS 
Revision 1007"), }, }, { /* * Latest BIOS for IBM 600E (1.16) has bad pcinum * for LPC bridge, which is needed for the PCI * interrupt links to work. DSDT fix is in bug 5966. * 2645, 2646 model numbers are shared with 600/600E/600X */ .callback = disable_acpi_irq, .ident = "IBM Thinkpad 600 Series 2645", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), DMI_MATCH(DMI_BOARD_NAME, "2645"), }, }, { .callback = disable_acpi_irq, .ident = "IBM Thinkpad 600 Series 2646", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), DMI_MATCH(DMI_BOARD_NAME, "2646"), }, }, /* * Boxes that need ACPI PCI IRQ routing and PCI scan disabled */ { /* _BBN 0 bug */ .callback = disable_acpi_pci, .ident = "ASUS PR-DLS", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"), DMI_MATCH(DMI_BIOS_VERSION, "ASUS PR-DLS ACPI BIOS Revision 1010"), DMI_MATCH(DMI_BIOS_DATE, "03/21/2003") }, }, { .callback = disable_acpi_pci, .ident = "Acer TravelMate 36x Laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), }, }, {} }; /* second table for DMI checks that should run after early-quirks */ static struct dmi_system_id __initdata acpi_dmi_table_late[] = { /* * HP laptops which use a DSDT reporting as HP/SB400/10000, * which includes some code which overrides all temperature * trip points to 16C if the INTIN2 input of the I/O APIC * is enabled. This input is incorrectly designated the * ISA IRQ 0 via an interrupt source override even though * it is wired to the output of the master 8259A and INTIN0 * is not connected at all. Force ignoring BIOS IRQ0 pin2 * override in that cases. 
*/ { .callback = dmi_ignore_irq0_timer_override, .ident = "HP nx6115 laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6115"), }, }, { .callback = dmi_ignore_irq0_timer_override, .ident = "HP NX6125 laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6125"), }, }, { .callback = dmi_ignore_irq0_timer_override, .ident = "HP NX6325 laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"), }, }, { .callback = dmi_ignore_irq0_timer_override, .ident = "HP 6715b laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"), }, }, {} }; /* * acpi_boot_table_init() and acpi_boot_init() * called from setup_arch(), always. * 1. checksums all tables * 2. enumerates lapics * 3. enumerates io-apics * * acpi_table_init() is separate to allow reading SRAT without * other side effects. * * side effects of acpi_boot_init: * acpi_lapic = 1 if LAPIC found * acpi_ioapic = 1 if IOAPIC found * if (acpi_lapic && acpi_ioapic) smp_found_config = 1; * if acpi_blacklisted() acpi_disabled = 1; * acpi_irq_model=... * ... */ void __init acpi_boot_table_init(void) { dmi_check_system(acpi_dmi_table); /* * If acpi_disabled, bail out */ if (acpi_disabled) return; /* * Initialize the ACPI boot-time table parser. 
*/ if (acpi_table_init()) { disable_acpi(); return; } acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf); /* * blacklist may disable ACPI entirely */ if (acpi_blacklisted()) { if (acpi_force) { printk(KERN_WARNING PREFIX "acpi=force override\n"); } else { printk(KERN_WARNING PREFIX "Disabling ACPI support\n"); disable_acpi(); return; } } } int __init early_acpi_boot_init(void) { /* * If acpi_disabled, bail out */ if (acpi_disabled) return 1; /* * Process the Multiple APIC Description Table (MADT), if present */ early_acpi_process_madt(); return 0; } int __init acpi_boot_init(void) { /* those are executed after early-quirks are executed */ dmi_check_system(acpi_dmi_table_late); /* * If acpi_disabled, bail out */ if (acpi_disabled) return 1; acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf); /* * set sci_int and PM timer address */ acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt); /* * Process the Multiple APIC Description Table (MADT), if present */ acpi_process_madt(); acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet); if (!acpi_noirq) x86_init.pci.init = pci_acpi_init; return 0; } static int __init parse_acpi(char *arg) { if (!arg) return -EINVAL; /* "acpi=off" disables both ACPI table parsing and interpreter */ if (strcmp(arg, "off") == 0) { disable_acpi(); } /* acpi=force to over-ride black-list */ else if (strcmp(arg, "force") == 0) { acpi_force = 1; acpi_disabled = 0; } /* acpi=strict disables out-of-spec workarounds */ else if (strcmp(arg, "strict") == 0) { acpi_strict = 1; } /* acpi=rsdt use RSDT instead of XSDT */ else if (strcmp(arg, "rsdt") == 0) { acpi_rsdt_forced = 1; } /* "acpi=noirq" disables ACPI interrupt routing */ else if (strcmp(arg, "noirq") == 0) { acpi_noirq_set(); } /* "acpi=copy_dsdt" copys DSDT */ else if (strcmp(arg, "copy_dsdt") == 0) { acpi_gbl_copy_dsdt_locally = 1; } else { /* Core will printk when we return error. */ return -EINVAL; } return 0; } early_param("acpi", parse_acpi); /* FIXME: Using pci= for an ACPI parameter is a travesty. 
*/
/* "pci=noacpi": disable ACPI-based PCI IRQ routing and PCI bus scanning. */
static int __init parse_pci(char *arg)
{
	if (arg && strcmp(arg, "noacpi") == 0)
		acpi_disable_pci();
	return 0;
}
early_param("pci", parse_pci);

/*
 * Warn if ACPI has been disabled (acpi=off/acpi=noirq/pci=noacpi) on a
 * kernel that has no MP-table fallback compiled in, since interrupt
 * routing may then be unavailable.
 * Returns 1 when such a problematic configuration is detected, 0 otherwise.
 */
int __init acpi_mps_check(void)
{
#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE)
	/* mptable parsing code is not built in */
	if (acpi_disabled || acpi_noirq) {
		printk(KERN_WARNING "MPS support code is not built-in.\n"
		       "Using acpi=off or acpi=noirq or pci=noacpi "
		       "may have problem\n");
		return 1;
	}
#endif
	return 0;
}

#ifdef CONFIG_X86_IO_APIC
/* "acpi_skip_timer_override": ignore a BIOS IRQ0 pin2 timer override entry. */
static int __init parse_acpi_skip_timer_override(char *arg)
{
	acpi_skip_timer_override = 1;
	return 0;
}
early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);

/* "acpi_use_timer_override": force honoring the BIOS timer override. */
static int __init parse_acpi_use_timer_override(char *arg)
{
	acpi_use_timer_override = 1;
	return 0;
}
early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
#endif /* CONFIG_X86_IO_APIC */

/*
 * "acpi_sci=edge|level|high|low": force the SCI trigger mode or polarity.
 * Only the corresponding mask bits of acpi_sci_flags are replaced, so
 * trigger and polarity overrides can be combined.
 * Returns -EINVAL for a missing or unrecognized value.
 */
static int __init setup_acpi_sci(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "edge"))
		acpi_sci_flags =  ACPI_MADT_TRIGGER_EDGE |
			(acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
	else if (!strcmp(s, "level"))
		acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL |
			(acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
	else if (!strcmp(s, "high"))
		acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH |
			(acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
	else if (!strcmp(s, "low"))
		acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW |
			(acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
	else
		return -EINVAL;
	return 0;
}
early_param("acpi_sci", setup_acpi_sci);

/*
 * ACPI Global Lock acquire, implementing the spec's two-bit protocol
 * (bit 0 = pending, bit 1 = owned): take ownership if the lock is free,
 * otherwise set the pending bit; retry the cmpxchg until it lands.
 * Returns -1 when the lock was acquired outright (new low bits == 2,
 * so new < 3), 0 when it was already owned and the pending bit was set
 * (caller must wait for the owner to release).
 * NOTE(review): assumes only the low two bits of *lock are ever set,
 * per the ACPI spec's Global Lock encoding.
 */
int __acpi_acquire_global_lock(unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
		val = cmpxchg(lock, old, new);
	} while (unlikely (val != old));
	return (new < 3) ? -1 : 0;
}

/*
 * ACPI Global Lock release: atomically clear the owned and pending bits.
 * Returns the previous pending bit; non-zero tells the caller that a
 * waiter exists and must be signalled (presumably via GBL_RLS — the
 * caller side is not visible here).
 */
int __acpi_release_global_lock(unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		new = old & ~0x3;
		val = cmpxchg(lock, old, new);
	} while (unlikely (val != old));
	return old & 0x1;
}
gpl-2.0
LiYihai/linux-2.6.39.4-notes
fs/btrfs/volumes.c
148
97320
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

/* bytes needed for a map_lookup holding n stripes (flexible trailing array) */
#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

/* protects fs_uuids, the global list of known btrfs filesystems */
static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

/* Take the global uuid_mutex guarding the fs_uuids list. */
void btrfs_lock_volumes(void)
{
	mutex_lock(&uuid_mutex);
}

/* Release the global uuid_mutex taken by btrfs_lock_volumes(). */
void btrfs_unlock_volumes(void)
{
	mutex_unlock(&uuid_mutex);
}

/* Take the per-filesystem chunk_mutex of @root's fs_info. */
static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

/* Release the per-filesystem chunk_mutex taken by lock_chunks(). */
static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}

/*
 * Free a btrfs_fs_devices and every btrfs_device hanging off its
 * ->devices list (including each device's name string).
 * Must not be called while the fs_devices is still opened (WARN_ON).
 */
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		kfree(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

/*
 * Tear down the global fs_uuids list, freeing every registered
 * btrfs_fs_devices (module-unload style cleanup).  Always returns 0.
 * NOTE(review): no locking here — presumably callers guarantee no
 * concurrent access at this point.
 */
int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
	return 0;
}

/*
 * Find the device with @devid on @head (a list of btrfs_device).
 * If @uuid is non-NULL it must also match the device uuid.
 * Returns the device or NULL if not found.
 */
static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

/*
 * Look up the btrfs_fs_devices whose fsid matches @fsid on the global
 * fs_uuids list.  Returns NULL if this filesystem is not yet known.
 */
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

/*
 * Put the bio chain @head..@tail back at the FRONT of @pending_bios.
 * If the queue already had entries, the requeued tail is linked to the
 * old head; if it was empty, @tail becomes the new queue tail.
 * Caller must hold the owning device's io_lock.
 */
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the schedulers ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
*/ static noinline int run_scheduled_bios(struct btrfs_device *device) { struct bio *pending; struct backing_dev_info *bdi; struct btrfs_fs_info *fs_info; struct btrfs_pending_bios *pending_bios; struct bio *tail; struct bio *cur; int again = 0; unsigned long num_run; unsigned long batch_run = 0; unsigned long limit; unsigned long last_waited = 0; int force_reg = 0; struct blk_plug plug; /* * this function runs all the bios we've collected for * a particular device. We don't want to wander off to * another device without first sending all of these down. * So, setup a plug here and finish it off before we return */ blk_start_plug(&plug); bdi = blk_get_backing_dev_info(device->bdev); fs_info = device->dev_root->fs_info; limit = btrfs_async_submit_limit(fs_info); limit = limit * 2 / 3; loop: spin_lock(&device->io_lock); loop_lock: num_run = 0; /* take all the bios off the list at once and process them * later on (without the lock held). But, remember the * tail and other pointers so the bios can be properly reinserted * into the list if we hit congestion */ if (!force_reg && device->pending_sync_bios.head) { pending_bios = &device->pending_sync_bios; force_reg = 1; } else { pending_bios = &device->pending_bios; force_reg = 0; } pending = pending_bios->head; tail = pending_bios->tail; WARN_ON(pending && !tail); /* * if pending was null this time around, no bios need processing * at all and we can stop. Otherwise it'll loop back up again * and do an additional check so no bios are missed. * * device->running_pending is used to synchronize with the * schedule_bio code. 
*/ if (device->pending_sync_bios.head == NULL && device->pending_bios.head == NULL) { again = 0; device->running_pending = 0; } else { again = 1; device->running_pending = 1; } pending_bios->head = NULL; pending_bios->tail = NULL; spin_unlock(&device->io_lock); while (pending) { rmb(); /* we want to work on both lists, but do more bios on the * sync list than the regular list */ if ((num_run > 32 && pending_bios != &device->pending_sync_bios && device->pending_sync_bios.head) || (num_run > 64 && pending_bios == &device->pending_sync_bios && device->pending_bios.head)) { spin_lock(&device->io_lock); requeue_list(pending_bios, pending, tail); goto loop_lock; } cur = pending; pending = pending->bi_next; cur->bi_next = NULL; atomic_dec(&fs_info->nr_async_bios); if (atomic_read(&fs_info->nr_async_bios) < limit && waitqueue_active(&fs_info->async_submit_wait)) wake_up(&fs_info->async_submit_wait); BUG_ON(atomic_read(&cur->bi_cnt) == 0); submit_bio(cur->bi_rw, cur); num_run++; batch_run++; if (need_resched()) cond_resched(); /* * we made progress, there is more work to do and the bdi * is now congested. Back off and let other work structs * run instead */ if (pending && bdi_write_congested(bdi) && batch_run > 8 && fs_info->fs_devices->open_devices > 1) { struct io_context *ioc; ioc = current->io_context; /* * the main goal here is that we don't want to * block if we're going to be able to submit * more requests without blocking. * * This code does two great things, it pokes into * the elevator code from a filesystem _and_ * it makes assumptions about how batching works. */ if (ioc && ioc->nr_batch_requests > 0 && time_before(jiffies, ioc->last_waited + HZ/50UL) && (last_waited == 0 || ioc->last_waited == last_waited)) { /* * we want to go through our batch of * requests and stop. 
So, we copy out * the ioc->last_waited time and test * against it before looping */ last_waited = ioc->last_waited; if (need_resched()) cond_resched(); continue; } spin_lock(&device->io_lock); requeue_list(pending_bios, pending, tail); device->running_pending = 1; spin_unlock(&device->io_lock); btrfs_requeue_work(&device->work); goto done; } } cond_resched(); if (again) goto loop; spin_lock(&device->io_lock); if (device->pending_bios.head || device->pending_sync_bios.head) goto loop_lock; spin_unlock(&device->io_lock); done: blk_finish_plug(&plug); return 0; } static void pending_bios_fn(struct btrfs_work *work) { struct btrfs_device *device; device = container_of(work, struct btrfs_device, work); run_scheduled_bios(device); } static noinline int device_list_add(const char *path, struct btrfs_super_block *disk_super, u64 devid, struct btrfs_fs_devices **fs_devices_ret) { struct btrfs_device *device; struct btrfs_fs_devices *fs_devices; u64 found_transid = btrfs_super_generation(disk_super); char *name; fs_devices = find_fsid(disk_super->fsid); if (!fs_devices) { fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS); if (!fs_devices) return -ENOMEM; INIT_LIST_HEAD(&fs_devices->devices); INIT_LIST_HEAD(&fs_devices->alloc_list); list_add(&fs_devices->list, &fs_uuids); memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE); fs_devices->latest_devid = devid; fs_devices->latest_trans = found_transid; mutex_init(&fs_devices->device_list_mutex); device = NULL; } else { device = __find_device(&fs_devices->devices, devid, disk_super->dev_item.uuid); } if (!device) { if (fs_devices->opened) return -EBUSY; device = kzalloc(sizeof(*device), GFP_NOFS); if (!device) { /* we can safely leave the fs_devices entry around */ return -ENOMEM; } device->devid = devid; device->work.func = pending_bios_fn; memcpy(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE); spin_lock_init(&device->io_lock); device->name = kstrdup(path, GFP_NOFS); if (!device->name) { kfree(device); return 
-ENOMEM; } INIT_LIST_HEAD(&device->dev_alloc_list); mutex_lock(&fs_devices->device_list_mutex); list_add(&device->dev_list, &fs_devices->devices); mutex_unlock(&fs_devices->device_list_mutex); device->fs_devices = fs_devices; fs_devices->num_devices++; } else if (!device->name || strcmp(device->name, path)) { name = kstrdup(path, GFP_NOFS); if (!name) return -ENOMEM; kfree(device->name); device->name = name; if (device->missing) { fs_devices->missing_devices--; device->missing = 0; } } if (found_transid > fs_devices->latest_trans) { fs_devices->latest_devid = devid; fs_devices->latest_trans = found_transid; } *fs_devices_ret = fs_devices; return 0; } static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) { struct btrfs_fs_devices *fs_devices; struct btrfs_device *device; struct btrfs_device *orig_dev; fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS); if (!fs_devices) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&fs_devices->devices); INIT_LIST_HEAD(&fs_devices->alloc_list); INIT_LIST_HEAD(&fs_devices->list); mutex_init(&fs_devices->device_list_mutex); fs_devices->latest_devid = orig->latest_devid; fs_devices->latest_trans = orig->latest_trans; memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid)); mutex_lock(&orig->device_list_mutex); list_for_each_entry(orig_dev, &orig->devices, dev_list) { device = kzalloc(sizeof(*device), GFP_NOFS); if (!device) goto error; device->name = kstrdup(orig_dev->name, GFP_NOFS); if (!device->name) { kfree(device); goto error; } device->devid = orig_dev->devid; device->work.func = pending_bios_fn; memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid)); spin_lock_init(&device->io_lock); INIT_LIST_HEAD(&device->dev_list); INIT_LIST_HEAD(&device->dev_alloc_list); list_add(&device->dev_list, &fs_devices->devices); device->fs_devices = fs_devices; fs_devices->num_devices++; } mutex_unlock(&orig->device_list_mutex); return fs_devices; error: mutex_unlock(&orig->device_list_mutex); 
free_fs_devices(fs_devices); return ERR_PTR(-ENOMEM); } int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices) { struct btrfs_device *device, *next; mutex_lock(&uuid_mutex); again: mutex_lock(&fs_devices->device_list_mutex); list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { if (device->in_fs_metadata) continue; if (device->bdev) { blkdev_put(device->bdev, device->mode); device->bdev = NULL; fs_devices->open_devices--; } if (device->writeable) { list_del_init(&device->dev_alloc_list); device->writeable = 0; fs_devices->rw_devices--; } list_del_init(&device->dev_list); fs_devices->num_devices--; kfree(device->name); kfree(device); } mutex_unlock(&fs_devices->device_list_mutex); if (fs_devices->seed) { fs_devices = fs_devices->seed; goto again; } mutex_unlock(&uuid_mutex); return 0; } static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) { struct btrfs_device *device; if (--fs_devices->opened > 0) return 0; list_for_each_entry(device, &fs_devices->devices, dev_list) { if (device->bdev) { blkdev_put(device->bdev, device->mode); fs_devices->open_devices--; } if (device->writeable) { list_del_init(&device->dev_alloc_list); fs_devices->rw_devices--; } device->bdev = NULL; device->writeable = 0; device->in_fs_metadata = 0; } WARN_ON(fs_devices->open_devices); WARN_ON(fs_devices->rw_devices); fs_devices->opened = 0; fs_devices->seeding = 0; return 0; } int btrfs_close_devices(struct btrfs_fs_devices *fs_devices) { struct btrfs_fs_devices *seed_devices = NULL; int ret; mutex_lock(&uuid_mutex); ret = __btrfs_close_devices(fs_devices); if (!fs_devices->opened) { seed_devices = fs_devices->seed; fs_devices->seed = NULL; } mutex_unlock(&uuid_mutex); while (seed_devices) { fs_devices = seed_devices; seed_devices = fs_devices->seed; __btrfs_close_devices(fs_devices); free_fs_devices(fs_devices); } return ret; } static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, fmode_t flags, void *holder) { struct block_device 
*bdev; struct list_head *head = &fs_devices->devices; struct btrfs_device *device; struct block_device *latest_bdev = NULL; struct buffer_head *bh; struct btrfs_super_block *disk_super; u64 latest_devid = 0; u64 latest_transid = 0; u64 devid; int seeding = 1; int ret = 0; flags |= FMODE_EXCL; list_for_each_entry(device, head, dev_list) { if (device->bdev) continue; if (!device->name) continue; bdev = blkdev_get_by_path(device->name, flags, holder); if (IS_ERR(bdev)) { printk(KERN_INFO "open %s failed\n", device->name); goto error; } set_blocksize(bdev, 4096); bh = btrfs_read_dev_super(bdev); if (!bh) { ret = -EINVAL; goto error_close; } disk_super = (struct btrfs_super_block *)bh->b_data; devid = btrfs_stack_device_id(&disk_super->dev_item); if (devid != device->devid) goto error_brelse; if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE)) goto error_brelse; device->generation = btrfs_super_generation(disk_super); if (!latest_transid || device->generation > latest_transid) { latest_devid = devid; latest_transid = device->generation; latest_bdev = bdev; } if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) { device->writeable = 0; } else { device->writeable = !bdev_read_only(bdev); seeding = 0; } device->bdev = bdev; device->in_fs_metadata = 0; device->mode = flags; if (!blk_queue_nonrot(bdev_get_queue(bdev))) fs_devices->rotating = 1; fs_devices->open_devices++; if (device->writeable) { fs_devices->rw_devices++; list_add(&device->dev_alloc_list, &fs_devices->alloc_list); } continue; error_brelse: brelse(bh); error_close: blkdev_put(bdev, flags); error: continue; } if (fs_devices->open_devices == 0) { ret = -EIO; goto out; } fs_devices->seeding = seeding; fs_devices->opened = 1; fs_devices->latest_bdev = latest_bdev; fs_devices->latest_devid = latest_devid; fs_devices->latest_trans = latest_transid; fs_devices->total_rw_bytes = 0; out: return ret; } int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, fmode_t flags, void *holder) { 
int ret; mutex_lock(&uuid_mutex); if (fs_devices->opened) { fs_devices->opened++; ret = 0; } else { ret = __btrfs_open_devices(fs_devices, flags, holder); } mutex_unlock(&uuid_mutex); return ret; } int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, struct btrfs_fs_devices **fs_devices_ret) { struct btrfs_super_block *disk_super; struct block_device *bdev; struct buffer_head *bh; int ret; u64 devid; u64 transid; mutex_lock(&uuid_mutex); flags |= FMODE_EXCL; bdev = blkdev_get_by_path(path, flags, holder); if (IS_ERR(bdev)) { ret = PTR_ERR(bdev); goto error; } ret = set_blocksize(bdev, 4096); if (ret) goto error_close; bh = btrfs_read_dev_super(bdev); if (!bh) { ret = -EINVAL; goto error_close; } disk_super = (struct btrfs_super_block *)bh->b_data; devid = btrfs_stack_device_id(&disk_super->dev_item); transid = btrfs_super_generation(disk_super); if (disk_super->label[0]) printk(KERN_INFO "device label %s ", disk_super->label); else { /* FIXME, make a readl uuid parser */ printk(KERN_INFO "device fsid %llx-%llx ", *(unsigned long long *)disk_super->fsid, *(unsigned long long *)(disk_super->fsid + 8)); } printk(KERN_CONT "devid %llu transid %llu %s\n", (unsigned long long)devid, (unsigned long long)transid, path); ret = device_list_add(path, disk_super, devid, fs_devices_ret); brelse(bh); error_close: blkdev_put(bdev, flags); error: mutex_unlock(&uuid_mutex); return ret; } /* helper to account the used device space in the range */ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start, u64 end, u64 *length) { struct btrfs_key key; struct btrfs_root *root = device->dev_root; struct btrfs_dev_extent *dev_extent; struct btrfs_path *path; u64 extent_end; int ret; int slot; struct extent_buffer *l; *length = 0; if (start >= device->total_bytes) return 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->reada = 2; key.objectid = device->devid; key.offset = start; key.type = BTRFS_DEV_EXTENT_KEY; ret = 
btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; if (ret > 0) { ret = btrfs_previous_item(root, path, key.objectid, key.type); if (ret < 0) goto out; } while (1) { l = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(l)) { ret = btrfs_next_leaf(root, path); if (ret == 0) continue; if (ret < 0) goto out; break; } btrfs_item_key_to_cpu(l, &key, slot); if (key.objectid < device->devid) goto next; if (key.objectid > device->devid) break; if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) goto next; dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); extent_end = key.offset + btrfs_dev_extent_length(l, dev_extent); if (key.offset <= start && extent_end > end) { *length = end - start + 1; break; } else if (key.offset <= start && extent_end > start) *length += extent_end - start; else if (key.offset > start && extent_end <= end) *length += extent_end - key.offset; else if (key.offset > start && key.offset <= end) { *length += end - key.offset + 1; break; } else if (key.offset > end) break; next: path->slots[0]++; } ret = 0; out: btrfs_free_path(path); return ret; } /* * find_free_dev_extent - find free space in the specified device * @trans: transaction handler * @device: the device which we search the free space in * @num_bytes: the size of the free space that we need * @start: store the start of the free space. * @len: the size of the free space. that we find, or the size of the max * free space if we don't find suitable free space * * this uses a pretty simple search, the expectation is that it is * called very infrequently and that a given device has a small number * of extents * * @start is used to store the start of the free space if we find. But if we * don't find suitable free space, it will be used to store the start position * of the max free space. * * @len is used to store the size of the free space that we find. 
* But if we don't find suitable free space, it is used to store the size of * the max free space. */ int find_free_dev_extent(struct btrfs_trans_handle *trans, struct btrfs_device *device, u64 num_bytes, u64 *start, u64 *len) { struct btrfs_key key; struct btrfs_root *root = device->dev_root; struct btrfs_dev_extent *dev_extent; struct btrfs_path *path; u64 hole_size; u64 max_hole_start; u64 max_hole_size; u64 extent_end; u64 search_start; u64 search_end = device->total_bytes; int ret; int slot; struct extent_buffer *l; /* FIXME use last free of some kind */ /* we don't want to overwrite the superblock on the drive, * so we make sure to start at an offset of at least 1MB */ search_start = 1024 * 1024; if (root->fs_info->alloc_start + num_bytes <= search_end) search_start = max(root->fs_info->alloc_start, search_start); max_hole_start = search_start; max_hole_size = 0; if (search_start >= search_end) { ret = -ENOSPC; goto error; } path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto error; } path->reada = 2; key.objectid = device->devid; key.offset = search_start; key.type = BTRFS_DEV_EXTENT_KEY; ret = btrfs_search_slot(trans, root, &key, path, 0, 0); if (ret < 0) goto out; if (ret > 0) { ret = btrfs_previous_item(root, path, key.objectid, key.type); if (ret < 0) goto out; } while (1) { l = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(l)) { ret = btrfs_next_leaf(root, path); if (ret == 0) continue; if (ret < 0) goto out; break; } btrfs_item_key_to_cpu(l, &key, slot); if (key.objectid < device->devid) goto next; if (key.objectid > device->devid) break; if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) goto next; if (key.offset > search_start) { hole_size = key.offset - search_start; if (hole_size > max_hole_size) { max_hole_start = search_start; max_hole_size = hole_size; } /* * If this free space is greater than which we need, * it must be the max free space that we have found * until now, so max_hole_start must point to the start * 
of this free space and the length of this free space * is stored in max_hole_size. Thus, we return * max_hole_start and max_hole_size and go back to the * caller. */ if (hole_size >= num_bytes) { ret = 0; goto out; } } dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); extent_end = key.offset + btrfs_dev_extent_length(l, dev_extent); if (extent_end > search_start) search_start = extent_end; next: path->slots[0]++; cond_resched(); } hole_size = search_end- search_start; if (hole_size > max_hole_size) { max_hole_start = search_start; max_hole_size = hole_size; } /* See above. */ if (hole_size < num_bytes) ret = -ENOSPC; else ret = 0; out: btrfs_free_path(path); error: *start = max_hole_start; if (len) *len = max_hole_size; return ret; } static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, struct btrfs_device *device, u64 start) { int ret; struct btrfs_path *path; struct btrfs_root *root = device->dev_root; struct btrfs_key key; struct btrfs_key found_key; struct extent_buffer *leaf = NULL; struct btrfs_dev_extent *extent = NULL; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = device->devid; key.offset = start; key.type = BTRFS_DEV_EXTENT_KEY; ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret > 0) { ret = btrfs_previous_item(root, path, key.objectid, BTRFS_DEV_EXTENT_KEY); BUG_ON(ret); leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); BUG_ON(found_key.offset > start || found_key.offset + btrfs_dev_extent_length(leaf, extent) < start); ret = 0; } else if (ret == 0) { leaf = path->nodes[0]; extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); } BUG_ON(ret); if (device->bytes_used > 0) device->bytes_used -= btrfs_dev_extent_length(leaf, extent); ret = btrfs_del_item(trans, root, path); BUG_ON(ret); btrfs_free_path(path); return ret; } int btrfs_alloc_dev_extent(struct btrfs_trans_handle 
*trans, struct btrfs_device *device, u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 start, u64 num_bytes) { int ret; struct btrfs_path *path; struct btrfs_root *root = device->dev_root; struct btrfs_dev_extent *extent; struct extent_buffer *leaf; struct btrfs_key key; WARN_ON(!device->in_fs_metadata); path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = device->devid; key.offset = start; key.type = BTRFS_DEV_EXTENT_KEY; ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent)); BUG_ON(ret); leaf = path->nodes[0]; extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree); btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid); btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset); write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid, (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE); btrfs_set_dev_extent_length(leaf, extent, num_bytes); btrfs_mark_buffer_dirty(leaf); btrfs_free_path(path); return ret; } static noinline int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset) { struct btrfs_path *path; int ret; struct btrfs_key key; struct btrfs_chunk *chunk; struct btrfs_key found_key; path = btrfs_alloc_path(); BUG_ON(!path); key.objectid = objectid; key.offset = (u64)-1; key.type = BTRFS_CHUNK_ITEM_KEY; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto error; BUG_ON(ret == 0); ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY); if (ret) { *offset = 0; } else { btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); if (found_key.objectid != objectid) *offset = 0; else { chunk = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_chunk); *offset = found_key.offset + btrfs_chunk_length(path->nodes[0], chunk); } } ret = 0; error: btrfs_free_path(path); return ret; } static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid) { 
int ret; struct btrfs_key key; struct btrfs_key found_key; struct btrfs_path *path; root = root->fs_info->chunk_root; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.type = BTRFS_DEV_ITEM_KEY; key.offset = (u64)-1; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto error; BUG_ON(ret == 0); ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID, BTRFS_DEV_ITEM_KEY); if (ret) { *objectid = 1; } else { btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); *objectid = found_key.offset + 1; } ret = 0; error: btrfs_free_path(path); return ret; } /* * the device information is stored in the chunk root * the btrfs_device struct should be fully filled in */ int btrfs_add_device(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_device *device) { int ret; struct btrfs_path *path; struct btrfs_dev_item *dev_item; struct extent_buffer *leaf; struct btrfs_key key; unsigned long ptr; root = root->fs_info->chunk_root; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.type = BTRFS_DEV_ITEM_KEY; key.offset = device->devid; ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*dev_item)); if (ret) goto out; leaf = path->nodes[0]; dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); btrfs_set_device_id(leaf, dev_item, device->devid); btrfs_set_device_generation(leaf, dev_item, 0); btrfs_set_device_type(leaf, dev_item, device->type); btrfs_set_device_io_align(leaf, dev_item, device->io_align); btrfs_set_device_io_width(leaf, dev_item, device->io_width); btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes); btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used); btrfs_set_device_group(leaf, dev_item, 0); btrfs_set_device_seek_speed(leaf, dev_item, 0); btrfs_set_device_bandwidth(leaf, dev_item, 0); 
btrfs_set_device_start_offset(leaf, dev_item, 0); ptr = (unsigned long)btrfs_device_uuid(dev_item); write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); ptr = (unsigned long)btrfs_device_fsid(dev_item); write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE); btrfs_mark_buffer_dirty(leaf); ret = 0; out: btrfs_free_path(path); return ret; } static int btrfs_rm_dev_item(struct btrfs_root *root, struct btrfs_device *device) { int ret; struct btrfs_path *path; struct btrfs_key key; struct btrfs_trans_handle *trans; root = root->fs_info->chunk_root; path = btrfs_alloc_path(); if (!path) return -ENOMEM; trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { btrfs_free_path(path); return PTR_ERR(trans); } key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.type = BTRFS_DEV_ITEM_KEY; key.offset = device->devid; lock_chunks(root); ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret < 0) goto out; if (ret > 0) { ret = -ENOENT; goto out; } ret = btrfs_del_item(trans, root, path); if (ret) goto out; out: btrfs_free_path(path); unlock_chunks(root); btrfs_commit_transaction(trans, root); return ret; } int btrfs_rm_device(struct btrfs_root *root, char *device_path) { struct btrfs_device *device; struct btrfs_device *next_device; struct block_device *bdev; struct buffer_head *bh = NULL; struct btrfs_super_block *disk_super; u64 all_avail; u64 devid; u64 num_devices; u8 *dev_uuid; int ret = 0; mutex_lock(&uuid_mutex); mutex_lock(&root->fs_info->volume_mutex); all_avail = root->fs_info->avail_data_alloc_bits | root->fs_info->avail_system_alloc_bits | root->fs_info->avail_metadata_alloc_bits; if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && root->fs_info->fs_devices->num_devices <= 4) { printk(KERN_ERR "btrfs: unable to go below four devices " "on raid10\n"); ret = -EINVAL; goto out; } if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && root->fs_info->fs_devices->num_devices <= 2) { printk(KERN_ERR "btrfs: unable to go below two " "devices on raid1\n"); ret 
= -EINVAL; goto out; } if (strcmp(device_path, "missing") == 0) { struct list_head *devices; struct btrfs_device *tmp; device = NULL; devices = &root->fs_info->fs_devices->devices; mutex_lock(&root->fs_info->fs_devices->device_list_mutex); list_for_each_entry(tmp, devices, dev_list) { if (tmp->in_fs_metadata && !tmp->bdev) { device = tmp; break; } } mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); bdev = NULL; bh = NULL; disk_super = NULL; if (!device) { printk(KERN_ERR "btrfs: no missing devices found to " "remove\n"); goto out; } } else { bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL, root->fs_info->bdev_holder); if (IS_ERR(bdev)) { ret = PTR_ERR(bdev); goto out; } set_blocksize(bdev, 4096); bh = btrfs_read_dev_super(bdev); if (!bh) { ret = -EINVAL; goto error_close; } disk_super = (struct btrfs_super_block *)bh->b_data; devid = btrfs_stack_device_id(&disk_super->dev_item); dev_uuid = disk_super->dev_item.uuid; device = btrfs_find_device(root, devid, dev_uuid, disk_super->fsid); if (!device) { ret = -ENOENT; goto error_brelse; } } if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) { printk(KERN_ERR "btrfs: unable to remove the only writeable " "device\n"); ret = -EINVAL; goto error_brelse; } if (device->writeable) { list_del_init(&device->dev_alloc_list); root->fs_info->fs_devices->rw_devices--; } ret = btrfs_shrink_device(device, 0); if (ret) goto error_undo; ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device); if (ret) goto error_undo; device->in_fs_metadata = 0; /* * the device list mutex makes sure that we don't change * the device list while someone else is writing out all * the device supers. 
*/ mutex_lock(&root->fs_info->fs_devices->device_list_mutex); list_del_init(&device->dev_list); mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); device->fs_devices->num_devices--; if (device->missing) root->fs_info->fs_devices->missing_devices--; next_device = list_entry(root->fs_info->fs_devices->devices.next, struct btrfs_device, dev_list); if (device->bdev == root->fs_info->sb->s_bdev) root->fs_info->sb->s_bdev = next_device->bdev; if (device->bdev == root->fs_info->fs_devices->latest_bdev) root->fs_info->fs_devices->latest_bdev = next_device->bdev; if (device->bdev) { blkdev_put(device->bdev, device->mode); device->bdev = NULL; device->fs_devices->open_devices--; } num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1; btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices); if (device->fs_devices->open_devices == 0) { struct btrfs_fs_devices *fs_devices; fs_devices = root->fs_info->fs_devices; while (fs_devices) { if (fs_devices->seed == device->fs_devices) break; fs_devices = fs_devices->seed; } fs_devices->seed = device->fs_devices->seed; device->fs_devices->seed = NULL; __btrfs_close_devices(device->fs_devices); free_fs_devices(device->fs_devices); } /* * at this point, the device is zero sized. 
We want to * remove it from the devices list and zero out the old super */ if (device->writeable) { /* make sure this device isn't detected as part of * the FS anymore */ memset(&disk_super->magic, 0, sizeof(disk_super->magic)); set_buffer_dirty(bh); sync_dirty_buffer(bh); } kfree(device->name); kfree(device); ret = 0; error_brelse: brelse(bh); error_close: if (bdev) blkdev_put(bdev, FMODE_READ | FMODE_EXCL); out: mutex_unlock(&root->fs_info->volume_mutex); mutex_unlock(&uuid_mutex); return ret; error_undo: if (device->writeable) { list_add(&device->dev_alloc_list, &root->fs_info->fs_devices->alloc_list); root->fs_info->fs_devices->rw_devices++; } goto error_brelse; } /* * does all the dirty work required for changing file system's UUID. */ static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans, struct btrfs_root *root) { struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; struct btrfs_fs_devices *old_devices; struct btrfs_fs_devices *seed_devices; struct btrfs_super_block *disk_super = &root->fs_info->super_copy; struct btrfs_device *device; u64 super_flags; BUG_ON(!mutex_is_locked(&uuid_mutex)); if (!fs_devices->seeding) return -EINVAL; seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS); if (!seed_devices) return -ENOMEM; old_devices = clone_fs_devices(fs_devices); if (IS_ERR(old_devices)) { kfree(seed_devices); return PTR_ERR(old_devices); } list_add(&old_devices->list, &fs_uuids); memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); seed_devices->opened = 1; INIT_LIST_HEAD(&seed_devices->devices); INIT_LIST_HEAD(&seed_devices->alloc_list); mutex_init(&seed_devices->device_list_mutex); list_splice_init(&fs_devices->devices, &seed_devices->devices); list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); list_for_each_entry(device, &seed_devices->devices, dev_list) { device->fs_devices = seed_devices; } fs_devices->seeding = 0; fs_devices->num_devices = 0; fs_devices->open_devices = 0; fs_devices->seed = seed_devices; 
generate_random_uuid(fs_devices->fsid); memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); super_flags = btrfs_super_flags(disk_super) & ~BTRFS_SUPER_FLAG_SEEDING; btrfs_set_super_flags(disk_super, super_flags); return 0; } /* * strore the expected generation for seed devices in device items. */ static int btrfs_finish_sprout(struct btrfs_trans_handle *trans, struct btrfs_root *root) { struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_dev_item *dev_item; struct btrfs_device *device; struct btrfs_key key; u8 fs_uuid[BTRFS_UUID_SIZE]; u8 dev_uuid[BTRFS_UUID_SIZE]; u64 devid; int ret; path = btrfs_alloc_path(); if (!path) return -ENOMEM; root = root->fs_info->chunk_root; key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.offset = 0; key.type = BTRFS_DEV_ITEM_KEY; while (1) { ret = btrfs_search_slot(trans, root, &key, path, 0, 1); if (ret < 0) goto error; leaf = path->nodes[0]; next_slot: if (path->slots[0] >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(root, path); if (ret > 0) break; if (ret < 0) goto error; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_release_path(root, path); continue; } btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || key.type != BTRFS_DEV_ITEM_KEY) break; dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); devid = btrfs_device_id(leaf, dev_item); read_extent_buffer(leaf, dev_uuid, (unsigned long)btrfs_device_uuid(dev_item), BTRFS_UUID_SIZE); read_extent_buffer(leaf, fs_uuid, (unsigned long)btrfs_device_fsid(dev_item), BTRFS_UUID_SIZE); device = btrfs_find_device(root, devid, dev_uuid, fs_uuid); BUG_ON(!device); if (device->fs_devices->seeding) { btrfs_set_device_generation(leaf, dev_item, device->generation); btrfs_mark_buffer_dirty(leaf); } path->slots[0]++; goto next_slot; } ret = 0; error: btrfs_free_path(path); return ret; } int 
btrfs_init_new_device(struct btrfs_root *root, char *device_path) { struct btrfs_trans_handle *trans; struct btrfs_device *device; struct block_device *bdev; struct list_head *devices; struct super_block *sb = root->fs_info->sb; u64 total_bytes; int seeding_dev = 0; int ret = 0; if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding) return -EINVAL; bdev = blkdev_get_by_path(device_path, FMODE_EXCL, root->fs_info->bdev_holder); if (IS_ERR(bdev)) return PTR_ERR(bdev); if (root->fs_info->fs_devices->seeding) { seeding_dev = 1; down_write(&sb->s_umount); mutex_lock(&uuid_mutex); } filemap_write_and_wait(bdev->bd_inode->i_mapping); mutex_lock(&root->fs_info->volume_mutex); devices = &root->fs_info->fs_devices->devices; /* * we have the volume lock, so we don't need the extra * device list mutex while reading the list here. */ list_for_each_entry(device, devices, dev_list) { if (device->bdev == bdev) { ret = -EEXIST; goto error; } } device = kzalloc(sizeof(*device), GFP_NOFS); if (!device) { /* we can safely leave the fs_devices entry around */ ret = -ENOMEM; goto error; } device->name = kstrdup(device_path, GFP_NOFS); if (!device->name) { kfree(device); ret = -ENOMEM; goto error; } ret = find_next_devid(root, &device->devid); if (ret) { kfree(device->name); kfree(device); goto error; } trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { kfree(device->name); kfree(device); ret = PTR_ERR(trans); goto error; } lock_chunks(root); device->writeable = 1; device->work.func = pending_bios_fn; generate_random_uuid(device->uuid); spin_lock_init(&device->io_lock); device->generation = trans->transid; device->io_width = root->sectorsize; device->io_align = root->sectorsize; device->sector_size = root->sectorsize; device->total_bytes = i_size_read(bdev->bd_inode); device->disk_total_bytes = device->total_bytes; device->dev_root = root->fs_info->dev_root; device->bdev = bdev; device->in_fs_metadata = 1; device->mode = FMODE_EXCL; set_blocksize(device->bdev, 
4096); if (seeding_dev) { sb->s_flags &= ~MS_RDONLY; ret = btrfs_prepare_sprout(trans, root); BUG_ON(ret); } device->fs_devices = root->fs_info->fs_devices; /* * we don't want write_supers to jump in here with our device * half setup */ mutex_lock(&root->fs_info->fs_devices->device_list_mutex); list_add(&device->dev_list, &root->fs_info->fs_devices->devices); list_add(&device->dev_alloc_list, &root->fs_info->fs_devices->alloc_list); root->fs_info->fs_devices->num_devices++; root->fs_info->fs_devices->open_devices++; root->fs_info->fs_devices->rw_devices++; root->fs_info->fs_devices->total_rw_bytes += device->total_bytes; if (!blk_queue_nonrot(bdev_get_queue(bdev))) root->fs_info->fs_devices->rotating = 1; total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy); btrfs_set_super_total_bytes(&root->fs_info->super_copy, total_bytes + device->total_bytes); total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy); btrfs_set_super_num_devices(&root->fs_info->super_copy, total_bytes + 1); mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); if (seeding_dev) { ret = init_first_rw_device(trans, root, device); BUG_ON(ret); ret = btrfs_finish_sprout(trans, root); BUG_ON(ret); } else { ret = btrfs_add_device(trans, root, device); } /* * we've got more storage, clear any full flags on the space * infos */ btrfs_clear_space_info_full(root->fs_info); unlock_chunks(root); btrfs_commit_transaction(trans, root); if (seeding_dev) { mutex_unlock(&uuid_mutex); up_write(&sb->s_umount); ret = btrfs_relocate_sys_chunks(root); BUG_ON(ret); } out: mutex_unlock(&root->fs_info->volume_mutex); return ret; error: blkdev_put(bdev, FMODE_EXCL); if (seeding_dev) { mutex_unlock(&uuid_mutex); up_write(&sb->s_umount); } goto out; } static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, struct btrfs_device *device) { int ret; struct btrfs_path *path; struct btrfs_root *root; struct btrfs_dev_item *dev_item; struct extent_buffer *leaf; struct btrfs_key key; 
root = device->dev_root->fs_info->chunk_root;	/* dev items live in the chunk tree */

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* look up this device's DEV_ITEM by devid, cow-ing for write */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		/* exact key not found: the on-disk device item is missing */
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	/* copy the in-memory device state into the on-disk item */
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	/* note: the on-disk size written is disk_total_bytes, not total_bytes */
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Grow @device to @new_size: bump the superblock total and the
 * fs_devices rw byte counter by the delta, update the in-memory sizes
 * and write the new size into the on-disk device item.
 *
 * Caller must hold the chunk mutex (see btrfs_grow_device() below).
 *
 * Returns 0 on success, -EACCES if the device is not writeable, or
 * -EINVAL if @new_size does not actually grow the device.
 */
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
		struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	/* more room now: let allocators retry previously-full space infos */
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

/*
 * Public wrapper for __btrfs_grow_device(): takes the chunk mutex for
 * the duration of the resize.
 */
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}

/*
 * Delete the CHUNK_ITEM at (@chunk_objectid, @chunk_offset) from the
 * chunk tree.  The item is expected to exist; lookup or deletion
 * failure is treated as fatal (BUG_ON).
 */
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct
btrfs_key key; root = root->fs_info->chunk_root; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = chunk_objectid; key.offset = chunk_offset; key.type = BTRFS_CHUNK_ITEM_KEY; ret = btrfs_search_slot(trans, root, &key, path, -1, 1); BUG_ON(ret); ret = btrfs_del_item(trans, root, path); BUG_ON(ret); btrfs_free_path(path); return 0; } static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64 chunk_offset) { struct btrfs_super_block *super_copy = &root->fs_info->super_copy; struct btrfs_disk_key *disk_key; struct btrfs_chunk *chunk; u8 *ptr; int ret = 0; u32 num_stripes; u32 array_size; u32 len = 0; u32 cur; struct btrfs_key key; array_size = btrfs_super_sys_array_size(super_copy); ptr = super_copy->sys_chunk_array; cur = 0; while (cur < array_size) { disk_key = (struct btrfs_disk_key *)ptr; btrfs_disk_key_to_cpu(&key, disk_key); len = sizeof(*disk_key); if (key.type == BTRFS_CHUNK_ITEM_KEY) { chunk = (struct btrfs_chunk *)(ptr + len); num_stripes = btrfs_stack_chunk_num_stripes(chunk); len += btrfs_chunk_item_size(num_stripes); } else { ret = -EIO; break; } if (key.objectid == chunk_objectid && key.offset == chunk_offset) { memmove(ptr, ptr + len, array_size - (cur + len)); array_size -= len; btrfs_set_super_sys_array_size(super_copy, array_size); } else { ptr += len; cur += len; } } return ret; } static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset) { struct extent_map_tree *em_tree; struct btrfs_root *extent_root; struct btrfs_trans_handle *trans; struct extent_map *em; struct map_lookup *map; int ret; int i; root = root->fs_info->chunk_root; extent_root = root->fs_info->extent_root; em_tree = &root->fs_info->mapping_tree.map_tree; ret = btrfs_can_relocate(extent_root, chunk_offset); if (ret) return -ENOSPC; /* step one, relocate all the extents inside this chunk */ ret = btrfs_relocate_block_group(extent_root, chunk_offset); if (ret) return ret; trans = 
btrfs_start_transaction(root, 0); BUG_ON(IS_ERR(trans)); lock_chunks(root); /* * step two, delete the device extents and the * chunk tree entries */ read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, chunk_offset, 1); read_unlock(&em_tree->lock); BUG_ON(em->start > chunk_offset || em->start + em->len < chunk_offset); map = (struct map_lookup *)em->bdev; for (i = 0; i < map->num_stripes; i++) { ret = btrfs_free_dev_extent(trans, map->stripes[i].dev, map->stripes[i].physical); BUG_ON(ret); if (map->stripes[i].dev) { ret = btrfs_update_device(trans, map->stripes[i].dev); BUG_ON(ret); } } ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid, chunk_offset); BUG_ON(ret); trace_btrfs_chunk_free(root, map, chunk_offset, em->len); if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset); BUG_ON(ret); } ret = btrfs_remove_block_group(trans, extent_root, chunk_offset); BUG_ON(ret); write_lock(&em_tree->lock); remove_extent_mapping(em_tree, em); write_unlock(&em_tree->lock); kfree(map); em->bdev = NULL; /* once for the tree */ free_extent_map(em); /* once for us */ free_extent_map(em); unlock_chunks(root); btrfs_end_transaction(trans, root); return 0; } static int btrfs_relocate_sys_chunks(struct btrfs_root *root) { struct btrfs_root *chunk_root = root->fs_info->chunk_root; struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_chunk *chunk; struct btrfs_key key; struct btrfs_key found_key; u64 chunk_tree = chunk_root->root_key.objectid; u64 chunk_type; bool retried = false; int failed = 0; int ret; path = btrfs_alloc_path(); if (!path) return -ENOMEM; again: key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; key.offset = (u64)-1; key.type = BTRFS_CHUNK_ITEM_KEY; while (1) { ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); if (ret < 0) goto error; BUG_ON(ret == 0); ret = btrfs_previous_item(chunk_root, path, key.objectid, key.type); if (ret < 0) goto error; if (ret > 0) break; leaf = 
path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		/* drop the path before relocating: relocation runs its own
		 * transactions and tree searches */
		btrfs_release_path(chunk_root, path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;	/* retried on a second pass */
			else if (ret)
				BUG();
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		/* one full retry pass for chunks that hit -ENOSPC */
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		/* still out of space after the retry pass */
		WARN_ON(1);
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * Return @num scaled by @factor tenths, i.e. num * factor / 10.
 * factor == 10 is the identity and skips the 64-bit division.
 */
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

/*
 * Rebalance the filesystem: shrink-and-regrow each device to free its
 * tail, then relocate every chunk from highest offset down so data is
 * repacked across the devices.  Serialised by the volume mutex.
 *
 * Returns 0 on success, -EROFS on a read-only fs, -EPERM without
 * CAP_SYS_ADMIN, or a negative errno from the relocation machinery.
 */
int btrfs_balance(struct btrfs_root *dev_root)
{
	int ret;
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&dev_root->fs_info->volume_mutex);
	dev_root = dev_root->fs_info->dev_root;

	/* step one: make some room on all the devices.  Each writeable,
	 * nearly-full device is shrunk by 10% of its size (capped at 1MB)
	 * and immediately grown back, which relocates the tail extents. */
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 0);
		BUG_ON(IS_ERR(trans));

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
BUG_ON(!path); key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; key.offset = (u64)-1; key.type = BTRFS_CHUNK_ITEM_KEY; while (1) { ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); if (ret < 0) goto error; /* * this shouldn't happen, it means the last relocate * failed */ if (ret == 0) break; ret = btrfs_previous_item(chunk_root, path, 0, BTRFS_CHUNK_ITEM_KEY); if (ret) break; btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); if (found_key.objectid != key.objectid) break; /* chunk zero is special */ if (found_key.offset == 0) break; btrfs_release_path(chunk_root, path); ret = btrfs_relocate_chunk(chunk_root, chunk_root->root_key.objectid, found_key.objectid, found_key.offset); BUG_ON(ret && ret != -ENOSPC); key.offset = found_key.offset - 1; } ret = 0; error: btrfs_free_path(path); mutex_unlock(&dev_root->fs_info->volume_mutex); return ret; } /* * shrinking a device means finding all of the device extents past * the new size, and then following the back refs to the chunks. 
* The chunk relocation code actually frees the device extent */ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) { struct btrfs_trans_handle *trans; struct btrfs_root *root = device->dev_root; struct btrfs_dev_extent *dev_extent = NULL; struct btrfs_path *path; u64 length; u64 chunk_tree; u64 chunk_objectid; u64 chunk_offset; int ret; int slot; int failed = 0; bool retried = false; struct extent_buffer *l; struct btrfs_key key; struct btrfs_super_block *super_copy = &root->fs_info->super_copy; u64 old_total = btrfs_super_total_bytes(super_copy); u64 old_size = device->total_bytes; u64 diff = device->total_bytes - new_size; if (new_size >= device->total_bytes) return -EINVAL; path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->reada = 2; lock_chunks(root); device->total_bytes = new_size; if (device->writeable) device->fs_devices->total_rw_bytes -= diff; unlock_chunks(root); again: key.objectid = device->devid; key.offset = (u64)-1; key.type = BTRFS_DEV_EXTENT_KEY; while (1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto done; ret = btrfs_previous_item(root, path, 0, key.type); if (ret < 0) goto done; if (ret) { ret = 0; btrfs_release_path(root, path); break; } l = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(l, &key, path->slots[0]); if (key.objectid != device->devid) { btrfs_release_path(root, path); break; } dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); length = btrfs_dev_extent_length(l, dev_extent); if (key.offset + length <= new_size) { btrfs_release_path(root, path); break; } chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent); chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent); chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); btrfs_release_path(root, path); ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid, chunk_offset); if (ret && ret != -ENOSPC) goto done; if (ret == -ENOSPC) failed++; key.offset -= 1; } if (failed && !retried) { 
failed = 0; retried = true; goto again; } else if (failed && retried) { ret = -ENOSPC; lock_chunks(root); device->total_bytes = old_size; if (device->writeable) device->fs_devices->total_rw_bytes += diff; unlock_chunks(root); goto done; } /* Shrinking succeeded, else we would be at "done". */ trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto done; } lock_chunks(root); device->disk_total_bytes = new_size; /* Now btrfs_update_device() will change the on-disk size. */ ret = btrfs_update_device(trans, device); if (ret) { unlock_chunks(root); btrfs_end_transaction(trans, root); goto done; } WARN_ON(diff > old_total); btrfs_set_super_total_bytes(super_copy, old_total - diff); unlock_chunks(root); btrfs_end_transaction(trans, root); done: btrfs_free_path(path); return ret; } static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *key, struct btrfs_chunk *chunk, int item_size) { struct btrfs_super_block *super_copy = &root->fs_info->super_copy; struct btrfs_disk_key disk_key; u32 array_size; u8 *ptr; array_size = btrfs_super_sys_array_size(super_copy); if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) return -EFBIG; ptr = super_copy->sys_chunk_array + array_size; btrfs_cpu_key_to_disk(&disk_key, key); memcpy(ptr, &disk_key, sizeof(disk_key)); ptr += sizeof(disk_key); memcpy(ptr, chunk, item_size); item_size += sizeof(disk_key); btrfs_set_super_sys_array_size(super_copy, array_size + item_size); return 0; } static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes, int sub_stripes) { if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP)) return calc_size; else if (type & BTRFS_BLOCK_GROUP_RAID10) return calc_size * (num_stripes / sub_stripes); else return calc_size * num_stripes; } /* Used to sort the devices by max_avail(descending sort) */ int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2) { if (((struct 
btrfs_device_info *)dev_info1)->max_avail > ((struct btrfs_device_info *)dev_info2)->max_avail) return -1; else if (((struct btrfs_device_info *)dev_info1)->max_avail < ((struct btrfs_device_info *)dev_info2)->max_avail) return 1; else return 0; } static int __btrfs_calc_nstripes(struct btrfs_fs_devices *fs_devices, u64 type, int *num_stripes, int *min_stripes, int *sub_stripes) { *num_stripes = 1; *min_stripes = 1; *sub_stripes = 0; if (type & (BTRFS_BLOCK_GROUP_RAID0)) { *num_stripes = fs_devices->rw_devices; *min_stripes = 2; } if (type & (BTRFS_BLOCK_GROUP_DUP)) { *num_stripes = 2; *min_stripes = 2; } if (type & (BTRFS_BLOCK_GROUP_RAID1)) { if (fs_devices->rw_devices < 2) return -ENOSPC; *num_stripes = 2; *min_stripes = 2; } if (type & (BTRFS_BLOCK_GROUP_RAID10)) { *num_stripes = fs_devices->rw_devices; if (*num_stripes < 4) return -ENOSPC; *num_stripes &= ~(u32)1; *sub_stripes = 2; *min_stripes = 4; } return 0; } static u64 __btrfs_calc_stripe_size(struct btrfs_fs_devices *fs_devices, u64 proposed_size, u64 type, int num_stripes, int small_stripe) { int min_stripe_size = 1 * 1024 * 1024; u64 calc_size = proposed_size; u64 max_chunk_size = calc_size; int ncopies = 1; if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID10)) ncopies = 2; if (type & BTRFS_BLOCK_GROUP_DATA) { max_chunk_size = 10 * calc_size; min_stripe_size = 64 * 1024 * 1024; } else if (type & BTRFS_BLOCK_GROUP_METADATA) { max_chunk_size = 256 * 1024 * 1024; min_stripe_size = 32 * 1024 * 1024; } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { calc_size = 8 * 1024 * 1024; max_chunk_size = calc_size * 2; min_stripe_size = 1 * 1024 * 1024; } /* we don't want a chunk larger than 10% of writeable space */ max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), max_chunk_size); if (calc_size * num_stripes > max_chunk_size * ncopies) { calc_size = max_chunk_size * ncopies; do_div(calc_size, num_stripes); do_div(calc_size, BTRFS_STRIPE_LEN); calc_size *= 
BTRFS_STRIPE_LEN; } /* we don't want tiny stripes */ if (!small_stripe) calc_size = max_t(u64, min_stripe_size, calc_size); /* * we're about to do_div by the BTRFS_STRIPE_LEN so lets make sure * we end up with something bigger than a stripe */ calc_size = max_t(u64, calc_size, BTRFS_STRIPE_LEN); do_div(calc_size, BTRFS_STRIPE_LEN); calc_size *= BTRFS_STRIPE_LEN; return calc_size; } static struct map_lookup *__shrink_map_lookup_stripes(struct map_lookup *map, int num_stripes) { struct map_lookup *new; size_t len = map_lookup_size(num_stripes); BUG_ON(map->num_stripes < num_stripes); if (map->num_stripes == num_stripes) return map; new = kmalloc(len, GFP_NOFS); if (!new) { /* just change map->num_stripes */ map->num_stripes = num_stripes; return map; } memcpy(new, map, len); new->num_stripes = num_stripes; kfree(map); return new; } /* * helper to allocate device space from btrfs_device_info, in which we stored * max free space information of every device. It is used when we can not * allocate chunks by default size. * * By this helper, we can allocate a new chunk as larger as possible. */ static int __btrfs_alloc_tiny_space(struct btrfs_trans_handle *trans, struct btrfs_fs_devices *fs_devices, struct btrfs_device_info *devices, int nr_device, u64 type, struct map_lookup **map_lookup, int min_stripes, u64 *stripe_size) { int i, index, sort_again = 0; int min_devices = min_stripes; u64 max_avail, min_free; struct map_lookup *map = *map_lookup; int ret; if (nr_device < min_stripes) return -ENOSPC; btrfs_descending_sort_devices(devices, nr_device); max_avail = devices[0].max_avail; if (!max_avail) return -ENOSPC; for (i = 0; i < nr_device; i++) { /* * if dev_offset = 0, it means the free space of this device * is less than what we need, and we didn't search max avail * extent on this device, so do it now. 
*/ if (!devices[i].dev_offset) { ret = find_free_dev_extent(trans, devices[i].dev, max_avail, &devices[i].dev_offset, &devices[i].max_avail); if (ret != 0 && ret != -ENOSPC) return ret; sort_again = 1; } } /* we update the max avail free extent of each devices, sort again */ if (sort_again) btrfs_descending_sort_devices(devices, nr_device); if (type & BTRFS_BLOCK_GROUP_DUP) min_devices = 1; if (!devices[min_devices - 1].max_avail) return -ENOSPC; max_avail = devices[min_devices - 1].max_avail; if (type & BTRFS_BLOCK_GROUP_DUP) do_div(max_avail, 2); max_avail = __btrfs_calc_stripe_size(fs_devices, max_avail, type, min_stripes, 1); if (type & BTRFS_BLOCK_GROUP_DUP) min_free = max_avail * 2; else min_free = max_avail; if (min_free > devices[min_devices - 1].max_avail) return -ENOSPC; map = __shrink_map_lookup_stripes(map, min_stripes); *stripe_size = max_avail; index = 0; for (i = 0; i < min_stripes; i++) { map->stripes[i].dev = devices[index].dev; map->stripes[i].physical = devices[index].dev_offset; if (type & BTRFS_BLOCK_GROUP_DUP) { i++; map->stripes[i].dev = devices[index].dev; map->stripes[i].physical = devices[index].dev_offset + max_avail; } index++; } *map_lookup = map; return 0; } static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root, struct map_lookup **map_ret, u64 *num_bytes, u64 *stripe_size, u64 start, u64 type) { struct btrfs_fs_info *info = extent_root->fs_info; struct btrfs_device *device = NULL; struct btrfs_fs_devices *fs_devices = info->fs_devices; struct list_head *cur; struct map_lookup *map; struct extent_map_tree *em_tree; struct extent_map *em; struct btrfs_device_info *devices_info; struct list_head private_devs; u64 calc_size = 1024 * 1024 * 1024; u64 min_free; u64 avail; u64 dev_offset; int num_stripes; int min_stripes; int sub_stripes; int min_devices; /* the min number of devices we need */ int i; int ret; int index; if ((type & BTRFS_BLOCK_GROUP_RAID1) && (type & BTRFS_BLOCK_GROUP_DUP)) { 
WARN_ON(1); type &= ~BTRFS_BLOCK_GROUP_DUP; } if (list_empty(&fs_devices->alloc_list)) return -ENOSPC; ret = __btrfs_calc_nstripes(fs_devices, type, &num_stripes, &min_stripes, &sub_stripes); if (ret) return ret; devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices, GFP_NOFS); if (!devices_info) return -ENOMEM; map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); if (!map) { ret = -ENOMEM; goto error; } map->num_stripes = num_stripes; cur = fs_devices->alloc_list.next; index = 0; i = 0; calc_size = __btrfs_calc_stripe_size(fs_devices, calc_size, type, num_stripes, 0); if (type & BTRFS_BLOCK_GROUP_DUP) { min_free = calc_size * 2; min_devices = 1; } else { min_free = calc_size; min_devices = min_stripes; } INIT_LIST_HEAD(&private_devs); while (index < num_stripes) { device = list_entry(cur, struct btrfs_device, dev_alloc_list); BUG_ON(!device->writeable); if (device->total_bytes > device->bytes_used) avail = device->total_bytes - device->bytes_used; else avail = 0; cur = cur->next; if (device->in_fs_metadata && avail >= min_free) { ret = find_free_dev_extent(trans, device, min_free, &devices_info[i].dev_offset, &devices_info[i].max_avail); if (ret == 0) { list_move_tail(&device->dev_alloc_list, &private_devs); map->stripes[index].dev = device; map->stripes[index].physical = devices_info[i].dev_offset; index++; if (type & BTRFS_BLOCK_GROUP_DUP) { map->stripes[index].dev = device; map->stripes[index].physical = devices_info[i].dev_offset + calc_size; index++; } } else if (ret != -ENOSPC) goto error; devices_info[i].dev = device; i++; } else if (device->in_fs_metadata && avail >= BTRFS_STRIPE_LEN) { devices_info[i].dev = device; devices_info[i].max_avail = avail; i++; } if (cur == &fs_devices->alloc_list) break; } list_splice(&private_devs, &fs_devices->alloc_list); if (index < num_stripes) { if (index >= min_stripes) { num_stripes = index; if (type & (BTRFS_BLOCK_GROUP_RAID10)) { num_stripes /= sub_stripes; num_stripes *= sub_stripes; } map = 
__shrink_map_lookup_stripes(map, num_stripes); } else if (i >= min_devices) { ret = __btrfs_alloc_tiny_space(trans, fs_devices, devices_info, i, type, &map, min_stripes, &calc_size); if (ret) goto error; } else { ret = -ENOSPC; goto error; } } map->sector_size = extent_root->sectorsize; map->stripe_len = BTRFS_STRIPE_LEN; map->io_align = BTRFS_STRIPE_LEN; map->io_width = BTRFS_STRIPE_LEN; map->type = type; map->sub_stripes = sub_stripes; *map_ret = map; *stripe_size = calc_size; *num_bytes = chunk_bytes_by_type(type, calc_size, map->num_stripes, sub_stripes); trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes); em = alloc_extent_map(GFP_NOFS); if (!em) { ret = -ENOMEM; goto error; } em->bdev = (struct block_device *)map; em->start = start; em->len = *num_bytes; em->block_start = 0; em->block_len = em->len; em_tree = &extent_root->fs_info->mapping_tree.map_tree; write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); write_unlock(&em_tree->lock); BUG_ON(ret); free_extent_map(em); ret = btrfs_make_block_group(trans, extent_root, 0, type, BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, *num_bytes); BUG_ON(ret); index = 0; while (index < map->num_stripes) { device = map->stripes[index].dev; dev_offset = map->stripes[index].physical; ret = btrfs_alloc_dev_extent(trans, device, info->chunk_root->root_key.objectid, BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, dev_offset, calc_size); BUG_ON(ret); index++; } kfree(devices_info); return 0; error: kfree(map); kfree(devices_info); return ret; } static int __finish_chunk_alloc(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root, struct map_lookup *map, u64 chunk_offset, u64 chunk_size, u64 stripe_size) { u64 dev_offset; struct btrfs_key key; struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root; struct btrfs_device *device; struct btrfs_chunk *chunk; struct btrfs_stripe *stripe; size_t item_size = btrfs_chunk_item_size(map->num_stripes); int index = 0; int ret; chunk = kzalloc(item_size, 
GFP_NOFS); if (!chunk) return -ENOMEM; index = 0; while (index < map->num_stripes) { device = map->stripes[index].dev; device->bytes_used += stripe_size; ret = btrfs_update_device(trans, device); BUG_ON(ret); index++; } index = 0; stripe = &chunk->stripe; while (index < map->num_stripes) { device = map->stripes[index].dev; dev_offset = map->stripes[index].physical; btrfs_set_stack_stripe_devid(stripe, device->devid); btrfs_set_stack_stripe_offset(stripe, dev_offset); memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); stripe++; index++; } btrfs_set_stack_chunk_length(chunk, chunk_size); btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid); btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); btrfs_set_stack_chunk_type(chunk, map->type); btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize); btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; key.type = BTRFS_CHUNK_ITEM_KEY; key.offset = chunk_offset; ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); BUG_ON(ret); if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk, item_size); BUG_ON(ret); } kfree(chunk); return 0; } /* * Chunk allocation falls into two parts. The first part does works * that make the new allocated chunk useable, but not do any operation * that modifies the chunk tree. The second part does the works that * require modifying the chunk tree. This division is important for the * bootstrap process of adding storage to a seed btrfs. 
*/
/*
 * Allocate a new chunk with block-group flags @type and commit it to
 * the chunk tree.  Performs both halves described above: chunk/block
 * group setup (__btrfs_alloc_chunk) followed by the chunk tree insert
 * (__finish_chunk_alloc).
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 type)
{
	u64 chunk_offset;
	u64 chunk_size;
	u64 stripe_size;
	struct map_lookup *map;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	int ret;

	/* pick the logical start offset for the new chunk */
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &chunk_offset);
	if (ret)
		return ret;

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, type);
	if (ret)
		return ret;

	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);
	return 0;
}

/*
 * Bootstrap the first metadata and system chunks on a freshly added
 * rw device (used when sprouting a seed filesystem).  Both chunks are
 * fully allocated before either is committed to the chunk tree; the
 * comment at the end of this function explains why.  All failures are
 * fatal here (BUG_ON).
 */
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_device *device)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 chunk_size;
	u64 sys_chunk_size;
	u64 stripe_size;
	u64 sys_stripe_size;
	u64 alloc_profile;
	struct map_lookup *map;
	struct map_lookup *sys_map;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;

	ret = find_next_chunk(fs_info->chunk_root,
			      BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
	BUG_ON(ret);

	/* first a metadata chunk, using the metadata raid profile */
	alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
			(fs_info->metadata_alloc_profile &
			 fs_info->avail_metadata_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, alloc_profile);
	BUG_ON(ret);

	/* then a system chunk immediately after it */
	sys_chunk_offset = chunk_offset + chunk_size;

	alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
			(fs_info->system_alloc_profile &
			 fs_info->avail_system_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
				  &sys_chunk_size, &sys_stripe_size,
				  sys_chunk_offset, alloc_profile);
	BUG_ON(ret);

	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
	BUG_ON(ret);

	/*
	 * Modifying chunk tree needs allocating new blocks from both
	 * system block group and metadata block group.
So we only can
 * do operations require modifying the chunk tree after both
 * block groups were created.
 */
	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);

	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
				   sys_chunk_offset, sys_chunk_size,
				   sys_stripe_size);
	BUG_ON(ret);
	return 0;
}

/*
 * Return 1 if the chunk at @chunk_offset must be treated as read-only,
 * i.e. any of its stripes sits on a non-writeable device, or no
 * mapping exists at all.  With the DEGRADED mount option the chunk is
 * always reported writeable.
 */
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int i;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);
	if (!em)
		return 1;	/* no mapping: be conservative */

	if (btrfs_test_opt(root, DEGRADED)) {
		free_extent_map(em);
		return 0;
	}

	map = (struct map_lookup *)em->bdev;
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}
	free_extent_map(em);
	return readonly;
}

/* Initialise the logical->physical chunk mapping tree. */
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

/*
 * Tear down the chunk mapping tree: drop every cached extent_map,
 * freeing the map_lookup hung off em->bdev and both references the
 * tree and this function hold on the map.
 */
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		write_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

/*
 * Number of redundant copies of the block at @logical: num_stripes
 * for DUP/RAID1, sub_stripes for RAID10, otherwise 1.  A missing or
 * non-covering mapping is fatal (BUG_ON).
 */
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type
& BTRFS_BLOCK_GROUP_RAID10) ret = map->sub_stripes; else ret = 1; free_extent_map(em); return ret; } static int find_live_mirror(struct map_lookup *map, int first, int num, int optimal) { int i; if (map->stripes[optimal].dev->bdev) return optimal; for (i = first; i < first + num; i++) { if (map->stripes[i].dev->bdev) return i; } /* we couldn't find one that doesn't fail. Just return something * and the io error handling code will clean up eventually */ return optimal; } static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, u64 logical, u64 *length, struct btrfs_multi_bio **multi_ret, int mirror_num) { struct extent_map *em; struct map_lookup *map; struct extent_map_tree *em_tree = &map_tree->map_tree; u64 offset; u64 stripe_offset; u64 stripe_end_offset; u64 stripe_nr; u64 stripe_nr_orig; u64 stripe_nr_end; int stripes_allocated = 8; int stripes_required = 1; int stripe_index; int i; int num_stripes; int max_errors = 0; struct btrfs_multi_bio *multi = NULL; if (multi_ret && !(rw & (REQ_WRITE | REQ_DISCARD))) stripes_allocated = 1; again: if (multi_ret) { multi = kzalloc(btrfs_multi_bio_size(stripes_allocated), GFP_NOFS); if (!multi) return -ENOMEM; atomic_set(&multi->error, 0); } read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, logical, *length); read_unlock(&em_tree->lock); if (!em) { printk(KERN_CRIT "unable to find logical %llu len %llu\n", (unsigned long long)logical, (unsigned long long)*length); BUG(); } BUG_ON(em->start > logical || em->start + em->len < logical); map = (struct map_lookup *)em->bdev; offset = logical - em->start; if (mirror_num > map->num_stripes) mirror_num = 0; /* if our multi bio struct is too small, back off and try again */ if (rw & REQ_WRITE) { if (map->type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP)) { stripes_required = map->num_stripes; max_errors = 1; } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { stripes_required = map->sub_stripes; max_errors = 1; } } if (rw & REQ_DISCARD) { if 
(map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID10)) { stripes_required = map->num_stripes; } } if (multi_ret && (rw & (REQ_WRITE | REQ_DISCARD)) && stripes_allocated < stripes_required) { stripes_allocated = map->num_stripes; free_extent_map(em); kfree(multi); goto again; } stripe_nr = offset; /* * stripe_nr counts the total number of stripes we have to stride * to get to this block */ do_div(stripe_nr, map->stripe_len); stripe_offset = stripe_nr * map->stripe_len; BUG_ON(offset < stripe_offset); /* stripe_offset is the offset of this block in its stripe*/ stripe_offset = offset - stripe_offset; if (rw & REQ_DISCARD) *length = min_t(u64, em->len - offset, *length); else if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10 | BTRFS_BLOCK_GROUP_DUP)) { /* we limit the length of each bio to what fits in a stripe */ *length = min_t(u64, em->len - offset, map->stripe_len - stripe_offset); } else { *length = em->len - offset; } if (!multi_ret) goto out; num_stripes = 1; stripe_index = 0; stripe_nr_orig = stripe_nr; stripe_nr_end = (offset + *length + map->stripe_len - 1) & (~(map->stripe_len - 1)); do_div(stripe_nr_end, map->stripe_len); stripe_end_offset = stripe_nr_end * map->stripe_len - (offset + *length); if (map->type & BTRFS_BLOCK_GROUP_RAID0) { if (rw & REQ_DISCARD) num_stripes = min_t(u64, map->num_stripes, stripe_nr_end - stripe_nr_orig); stripe_index = do_div(stripe_nr, map->num_stripes); } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { if (rw & (REQ_WRITE | REQ_DISCARD)) num_stripes = map->num_stripes; else if (mirror_num) stripe_index = mirror_num - 1; else { stripe_index = find_live_mirror(map, 0, map->num_stripes, current->pid % map->num_stripes); } } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { if (rw & (REQ_WRITE | REQ_DISCARD)) num_stripes = map->num_stripes; else if (mirror_num) stripe_index = mirror_num - 1; } else if (map->type & 
BTRFS_BLOCK_GROUP_RAID10) { int factor = map->num_stripes / map->sub_stripes; stripe_index = do_div(stripe_nr, factor); stripe_index *= map->sub_stripes; if (rw & REQ_WRITE) num_stripes = map->sub_stripes; else if (rw & REQ_DISCARD) num_stripes = min_t(u64, map->sub_stripes * (stripe_nr_end - stripe_nr_orig), map->num_stripes); else if (mirror_num) stripe_index += mirror_num - 1; else { stripe_index = find_live_mirror(map, stripe_index, map->sub_stripes, stripe_index + current->pid % map->sub_stripes); } } else { /* * after this do_div call, stripe_nr is the number of stripes * on this device we have to walk to find the data, and * stripe_index is the number of our device in the stripe array */ stripe_index = do_div(stripe_nr, map->num_stripes); } BUG_ON(stripe_index >= map->num_stripes); if (rw & REQ_DISCARD) { for (i = 0; i < num_stripes; i++) { multi->stripes[i].physical = map->stripes[stripe_index].physical + stripe_offset + stripe_nr * map->stripe_len; multi->stripes[i].dev = map->stripes[stripe_index].dev; if (map->type & BTRFS_BLOCK_GROUP_RAID0) { u64 stripes; u32 last_stripe = 0; int j; div_u64_rem(stripe_nr_end - 1, map->num_stripes, &last_stripe); for (j = 0; j < map->num_stripes; j++) { u32 test; div_u64_rem(stripe_nr_end - 1 - j, map->num_stripes, &test); if (test == stripe_index) break; } stripes = stripe_nr_end - 1 - j; do_div(stripes, map->num_stripes); multi->stripes[i].length = map->stripe_len * (stripes - stripe_nr + 1); if (i == 0) { multi->stripes[i].length -= stripe_offset; stripe_offset = 0; } if (stripe_index == last_stripe) multi->stripes[i].length -= stripe_end_offset; } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { u64 stripes; int j; int factor = map->num_stripes / map->sub_stripes; u32 last_stripe = 0; div_u64_rem(stripe_nr_end - 1, factor, &last_stripe); last_stripe *= map->sub_stripes; for (j = 0; j < factor; j++) { u32 test; div_u64_rem(stripe_nr_end - 1 - j, factor, &test); if (test == stripe_index / map->sub_stripes) break; } 
stripes = stripe_nr_end - 1 - j; do_div(stripes, factor); multi->stripes[i].length = map->stripe_len * (stripes - stripe_nr + 1); if (i < map->sub_stripes) { multi->stripes[i].length -= stripe_offset; if (i == map->sub_stripes - 1) stripe_offset = 0; } if (stripe_index >= last_stripe && stripe_index <= (last_stripe + map->sub_stripes - 1)) { multi->stripes[i].length -= stripe_end_offset; } } else multi->stripes[i].length = *length; stripe_index++; if (stripe_index == map->num_stripes) { /* This could only happen for RAID0/10 */ stripe_index = 0; stripe_nr++; } } } else { for (i = 0; i < num_stripes; i++) { multi->stripes[i].physical = map->stripes[stripe_index].physical + stripe_offset + stripe_nr * map->stripe_len; multi->stripes[i].dev = map->stripes[stripe_index].dev; stripe_index++; } } if (multi_ret) { *multi_ret = multi; multi->num_stripes = num_stripes; multi->max_errors = max_errors; } out: free_extent_map(em); return 0; } int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, u64 logical, u64 *length, struct btrfs_multi_bio **multi_ret, int mirror_num) { return __btrfs_map_block(map_tree, rw, logical, length, multi_ret, mirror_num); } int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, u64 chunk_start, u64 physical, u64 devid, u64 **logical, int *naddrs, int *stripe_len) { struct extent_map_tree *em_tree = &map_tree->map_tree; struct extent_map *em; struct map_lookup *map; u64 *buf; u64 bytenr; u64 length; u64 stripe_nr; int i, j, nr = 0; read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, chunk_start, 1); read_unlock(&em_tree->lock); BUG_ON(!em || em->start != chunk_start); map = (struct map_lookup *)em->bdev; length = em->len; if (map->type & BTRFS_BLOCK_GROUP_RAID10) do_div(length, map->num_stripes / map->sub_stripes); else if (map->type & BTRFS_BLOCK_GROUP_RAID0) do_div(length, map->num_stripes); buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS); BUG_ON(!buf); for (i = 0; i < map->num_stripes; i++) { if (devid && 
map->stripes[i].dev->devid != devid) continue; if (map->stripes[i].physical > physical || map->stripes[i].physical + length <= physical) continue; stripe_nr = physical - map->stripes[i].physical; do_div(stripe_nr, map->stripe_len); if (map->type & BTRFS_BLOCK_GROUP_RAID10) { stripe_nr = stripe_nr * map->num_stripes + i; do_div(stripe_nr, map->sub_stripes); } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) { stripe_nr = stripe_nr * map->num_stripes + i; } bytenr = chunk_start + stripe_nr * map->stripe_len; WARN_ON(nr >= map->num_stripes); for (j = 0; j < nr; j++) { if (buf[j] == bytenr) break; } if (j == nr) { WARN_ON(nr >= map->num_stripes); buf[nr++] = bytenr; } } *logical = buf; *naddrs = nr; *stripe_len = map->stripe_len; free_extent_map(em); return 0; } static void end_bio_multi_stripe(struct bio *bio, int err) { struct btrfs_multi_bio *multi = bio->bi_private; int is_orig_bio = 0; if (err) atomic_inc(&multi->error); if (bio == multi->orig_bio) is_orig_bio = 1; if (atomic_dec_and_test(&multi->stripes_pending)) { if (!is_orig_bio) { bio_put(bio); bio = multi->orig_bio; } bio->bi_private = multi->private; bio->bi_end_io = multi->end_io; /* only send an error to the higher layers if it is * beyond the tolerance of the multi-bio */ if (atomic_read(&multi->error) > multi->max_errors) { err = -EIO; } else if (err) { /* * this bio is actually up to date, we didn't * go over the max number of errors */ set_bit(BIO_UPTODATE, &bio->bi_flags); err = 0; } kfree(multi); bio_endio(bio, err); } else if (!is_orig_bio) { bio_put(bio); } } struct async_sched { struct bio *bio; int rw; struct btrfs_fs_info *info; struct btrfs_work work; }; /* * see run_scheduled_bios for a description of why bios are collected for * async submit. * * This will add one bio to the pending list for a device and make sure * the work struct is scheduled. 
*/ static noinline int schedule_bio(struct btrfs_root *root, struct btrfs_device *device, int rw, struct bio *bio) { int should_queue = 1; struct btrfs_pending_bios *pending_bios; /* don't bother with additional async steps for reads, right now */ if (!(rw & REQ_WRITE)) { bio_get(bio); submit_bio(rw, bio); bio_put(bio); return 0; } /* * nr_async_bios allows us to reliably return congestion to the * higher layers. Otherwise, the async bio makes it appear we have * made progress against dirty pages when we've really just put it * on a queue for later */ atomic_inc(&root->fs_info->nr_async_bios); WARN_ON(bio->bi_next); bio->bi_next = NULL; bio->bi_rw |= rw; spin_lock(&device->io_lock); if (bio->bi_rw & REQ_SYNC) pending_bios = &device->pending_sync_bios; else pending_bios = &device->pending_bios; if (pending_bios->tail) pending_bios->tail->bi_next = bio; pending_bios->tail = bio; if (!pending_bios->head) pending_bios->head = bio; if (device->running_pending) should_queue = 0; spin_unlock(&device->io_lock); if (should_queue) btrfs_queue_worker(&root->fs_info->submit_workers, &device->work); return 0; } int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, int mirror_num, int async_submit) { struct btrfs_mapping_tree *map_tree; struct btrfs_device *dev; struct bio *first_bio = bio; u64 logical = (u64)bio->bi_sector << 9; u64 length = 0; u64 map_length; struct btrfs_multi_bio *multi = NULL; int ret; int dev_nr = 0; int total_devs = 1; length = bio->bi_size; map_tree = &root->fs_info->mapping_tree; map_length = length; ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi, mirror_num); BUG_ON(ret); total_devs = multi->num_stripes; if (map_length < length) { printk(KERN_CRIT "mapping failed logical %llu bio len %llu " "len %llu\n", (unsigned long long)logical, (unsigned long long)length, (unsigned long long)map_length); BUG(); } multi->end_io = first_bio->bi_end_io; multi->private = first_bio->bi_private; multi->orig_bio = first_bio; 
atomic_set(&multi->stripes_pending, multi->num_stripes); while (dev_nr < total_devs) { if (total_devs > 1) { if (dev_nr < total_devs - 1) { bio = bio_clone(first_bio, GFP_NOFS); BUG_ON(!bio); } else { bio = first_bio; } bio->bi_private = multi; bio->bi_end_io = end_bio_multi_stripe; } bio->bi_sector = multi->stripes[dev_nr].physical >> 9; dev = multi->stripes[dev_nr].dev; if (dev && dev->bdev && (rw != WRITE || dev->writeable)) { bio->bi_bdev = dev->bdev; if (async_submit) schedule_bio(root, dev, rw, bio); else submit_bio(rw, bio); } else { bio->bi_bdev = root->fs_info->fs_devices->latest_bdev; bio->bi_sector = logical >> 9; bio_endio(bio, -EIO); } dev_nr++; } if (total_devs == 1) kfree(multi); return 0; } struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid, u8 *uuid, u8 *fsid) { struct btrfs_device *device; struct btrfs_fs_devices *cur_devices; cur_devices = root->fs_info->fs_devices; while (cur_devices) { if (!fsid || !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) { device = __find_device(&cur_devices->devices, devid, uuid); if (device) return device; } cur_devices = cur_devices->seed; } return NULL; } static struct btrfs_device *add_missing_dev(struct btrfs_root *root, u64 devid, u8 *dev_uuid) { struct btrfs_device *device; struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; device = kzalloc(sizeof(*device), GFP_NOFS); if (!device) return NULL; list_add(&device->dev_list, &fs_devices->devices); device->dev_root = root->fs_info->dev_root; device->devid = devid; device->work.func = pending_bios_fn; device->fs_devices = fs_devices; device->missing = 1; fs_devices->num_devices++; fs_devices->missing_devices++; spin_lock_init(&device->io_lock); INIT_LIST_HEAD(&device->dev_alloc_list); memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE); return device; } static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, struct extent_buffer *leaf, struct btrfs_chunk *chunk) { struct btrfs_mapping_tree *map_tree = 
&root->fs_info->mapping_tree; struct map_lookup *map; struct extent_map *em; u64 logical; u64 length; u64 devid; u8 uuid[BTRFS_UUID_SIZE]; int num_stripes; int ret; int i; logical = key->offset; length = btrfs_chunk_length(leaf, chunk); read_lock(&map_tree->map_tree.lock); em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); read_unlock(&map_tree->map_tree.lock); /* already mapped? */ if (em && em->start <= logical && em->start + em->len > logical) { free_extent_map(em); return 0; } else if (em) { free_extent_map(em); } em = alloc_extent_map(GFP_NOFS); if (!em) return -ENOMEM; num_stripes = btrfs_chunk_num_stripes(leaf, chunk); map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); if (!map) { free_extent_map(em); return -ENOMEM; } em->bdev = (struct block_device *)map; em->start = logical; em->len = length; em->block_start = 0; em->block_len = em->len; map->num_stripes = num_stripes; map->io_width = btrfs_chunk_io_width(leaf, chunk); map->io_align = btrfs_chunk_io_align(leaf, chunk); map->sector_size = btrfs_chunk_sector_size(leaf, chunk); map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); map->type = btrfs_chunk_type(leaf, chunk); map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); for (i = 0; i < num_stripes; i++) { map->stripes[i].physical = btrfs_stripe_offset_nr(leaf, chunk, i); devid = btrfs_stripe_devid_nr(leaf, chunk, i); read_extent_buffer(leaf, uuid, (unsigned long) btrfs_stripe_dev_uuid_nr(chunk, i), BTRFS_UUID_SIZE); map->stripes[i].dev = btrfs_find_device(root, devid, uuid, NULL); if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) { kfree(map); free_extent_map(em); return -EIO; } if (!map->stripes[i].dev) { map->stripes[i].dev = add_missing_dev(root, devid, uuid); if (!map->stripes[i].dev) { kfree(map); free_extent_map(em); return -EIO; } } map->stripes[i].dev->in_fs_metadata = 1; } write_lock(&map_tree->map_tree.lock); ret = add_extent_mapping(&map_tree->map_tree, em); write_unlock(&map_tree->map_tree.lock); BUG_ON(ret); 
free_extent_map(em); return 0; } static int fill_device_from_item(struct extent_buffer *leaf, struct btrfs_dev_item *dev_item, struct btrfs_device *device) { unsigned long ptr; device->devid = btrfs_device_id(leaf, dev_item); device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); device->total_bytes = device->disk_total_bytes; device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); device->type = btrfs_device_type(leaf, dev_item); device->io_align = btrfs_device_io_align(leaf, dev_item); device->io_width = btrfs_device_io_width(leaf, dev_item); device->sector_size = btrfs_device_sector_size(leaf, dev_item); ptr = (unsigned long)btrfs_device_uuid(dev_item); read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); return 0; } static int open_seed_devices(struct btrfs_root *root, u8 *fsid) { struct btrfs_fs_devices *fs_devices; int ret; mutex_lock(&uuid_mutex); fs_devices = root->fs_info->fs_devices->seed; while (fs_devices) { if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) { ret = 0; goto out; } fs_devices = fs_devices->seed; } fs_devices = find_fsid(fsid); if (!fs_devices) { ret = -ENOENT; goto out; } fs_devices = clone_fs_devices(fs_devices); if (IS_ERR(fs_devices)) { ret = PTR_ERR(fs_devices); goto out; } ret = __btrfs_open_devices(fs_devices, FMODE_READ, root->fs_info->bdev_holder); if (ret) goto out; if (!fs_devices->seeding) { __btrfs_close_devices(fs_devices); free_fs_devices(fs_devices); ret = -EINVAL; goto out; } fs_devices->seed = root->fs_info->fs_devices->seed; root->fs_info->fs_devices->seed = fs_devices; out: mutex_unlock(&uuid_mutex); return ret; } static int read_one_dev(struct btrfs_root *root, struct extent_buffer *leaf, struct btrfs_dev_item *dev_item) { struct btrfs_device *device; u64 devid; int ret; u8 fs_uuid[BTRFS_UUID_SIZE]; u8 dev_uuid[BTRFS_UUID_SIZE]; devid = btrfs_device_id(leaf, dev_item); read_extent_buffer(leaf, dev_uuid, (unsigned long)btrfs_device_uuid(dev_item), BTRFS_UUID_SIZE); 
read_extent_buffer(leaf, fs_uuid, (unsigned long)btrfs_device_fsid(dev_item), BTRFS_UUID_SIZE); if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) { ret = open_seed_devices(root, fs_uuid); if (ret && !btrfs_test_opt(root, DEGRADED)) return ret; } device = btrfs_find_device(root, devid, dev_uuid, fs_uuid); if (!device || !device->bdev) { if (!btrfs_test_opt(root, DEGRADED)) return -EIO; if (!device) { printk(KERN_WARNING "warning devid %llu missing\n", (unsigned long long)devid); device = add_missing_dev(root, devid, dev_uuid); if (!device) return -ENOMEM; } else if (!device->missing) { /* * this happens when a device that was properly setup * in the device info lists suddenly goes bad. * device->bdev is NULL, and so we have to set * device->missing to one here */ root->fs_info->fs_devices->missing_devices++; device->missing = 1; } } if (device->fs_devices != root->fs_info->fs_devices) { BUG_ON(device->writeable); if (device->generation != btrfs_device_generation(leaf, dev_item)) return -EINVAL; } fill_device_from_item(leaf, dev_item, device); device->dev_root = root->fs_info->dev_root; device->in_fs_metadata = 1; if (device->writeable) device->fs_devices->total_rw_bytes += device->total_bytes; ret = 0; return ret; } int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf) { struct btrfs_dev_item *dev_item; dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block, dev_item); return read_one_dev(root, buf, dev_item); } int btrfs_read_sys_array(struct btrfs_root *root) { struct btrfs_super_block *super_copy = &root->fs_info->super_copy; struct extent_buffer *sb; struct btrfs_disk_key *disk_key; struct btrfs_chunk *chunk; u8 *ptr; unsigned long sb_ptr; int ret = 0; u32 num_stripes; u32 array_size; u32 len = 0; u32 cur; struct btrfs_key key; sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET, BTRFS_SUPER_INFO_SIZE); if (!sb) return -ENOMEM; btrfs_set_buffer_uptodate(sb); btrfs_set_buffer_lockdep_class(sb, 0); 
write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); array_size = btrfs_super_sys_array_size(super_copy); ptr = super_copy->sys_chunk_array; sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array); cur = 0; while (cur < array_size) { disk_key = (struct btrfs_disk_key *)ptr; btrfs_disk_key_to_cpu(&key, disk_key); len = sizeof(*disk_key); ptr += len; sb_ptr += len; cur += len; if (key.type == BTRFS_CHUNK_ITEM_KEY) { chunk = (struct btrfs_chunk *)sb_ptr; ret = read_one_chunk(root, &key, sb, chunk); if (ret) break; num_stripes = btrfs_chunk_num_stripes(sb, chunk); len = btrfs_chunk_item_size(num_stripes); } else { ret = -EIO; break; } ptr += len; sb_ptr += len; cur += len; } free_extent_buffer(sb); return ret; } int btrfs_read_chunk_tree(struct btrfs_root *root) { struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_key key; struct btrfs_key found_key; int ret; int slot; root = root->fs_info->chunk_root; path = btrfs_alloc_path(); if (!path) return -ENOMEM; /* first we search for all of the device items, and then we * read in all of the chunk items. 
This way we can create chunk * mappings that reference all of the devices that are afound */ key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.offset = 0; key.type = 0; again: ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto error; while (1) { leaf = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(root, path); if (ret == 0) continue; if (ret < 0) goto error; break; } btrfs_item_key_to_cpu(leaf, &found_key, slot); if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) { if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID) break; if (found_key.type == BTRFS_DEV_ITEM_KEY) { struct btrfs_dev_item *dev_item; dev_item = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item); ret = read_one_dev(root, leaf, dev_item); if (ret) goto error; } } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { struct btrfs_chunk *chunk; chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); ret = read_one_chunk(root, &found_key, leaf, chunk); if (ret) goto error; } path->slots[0]++; } if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) { key.objectid = 0; btrfs_release_path(root, path); goto again; } ret = 0; error: btrfs_free_path(path); return ret; }
gpl-2.0
kurainooni/rk30-kernel
drivers/char/tpm/tpm.c
1428
30343
/* * Copyright (C) 2004 IBM Corporation * * Authors: * Leendert van Doorn <leendert@watson.ibm.com> * Dave Safford <safford@watson.ibm.com> * Reiner Sailer <sailer@watson.ibm.com> * Kylene Hall <kjhall@us.ibm.com> * * Maintained by: <tpmdd-devel@lists.sourceforge.net> * * Device driver for TCG/TCPA TPM (trusted platform module). * Specifications at www.trustedcomputinggroup.org * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. * * Note, the TPM chip is not interrupt driven (only polling) * and can have very long timeouts (minutes!). Hence the unusual * calls to msleep. * */ #include <linux/poll.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include "tpm.h" enum tpm_const { TPM_MINOR = 224, /* officially assigned */ TPM_BUFSIZE = 4096, TPM_NUM_DEVICES = 256, }; enum tpm_duration { TPM_SHORT = 0, TPM_MEDIUM = 1, TPM_LONG = 2, TPM_UNDEFINED, }; #define TPM_MAX_ORDINAL 243 #define TPM_MAX_PROTECTED_ORDINAL 12 #define TPM_PROTECTED_ORDINAL_MASK 0xFF /* * Bug workaround - some TPM's don't flush the most * recently changed pcr on suspend, so force the flush * with an extend to the selected _unused_ non-volatile pcr. */ static int tpm_suspend_pcr; module_param_named(suspend_pcr, tpm_suspend_pcr, uint, 0644); MODULE_PARM_DESC(suspend_pcr, "PCR to use for dummy writes to faciltate flush on suspend."); static LIST_HEAD(tpm_chip_list); static DEFINE_SPINLOCK(driver_lock); static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES); /* * Array with one entry per ordinal defining the maximum amount * of time the chip could take to return the result. The ordinal * designation of short, medium or long is defined in a table in * TCG Specification TPM Main Part 2 TPM Structures Section 17. 
The * values of the SHORT, MEDIUM, and LONG durations are retrieved * from the chip during initialization with a call to tpm_get_timeouts. */ static const u8 tpm_protected_ordinal_duration[TPM_MAX_PROTECTED_ORDINAL] = { TPM_UNDEFINED, /* 0 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 5 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 10 */ TPM_SHORT, }; static const u8 tpm_ordinal_duration[TPM_MAX_ORDINAL] = { TPM_UNDEFINED, /* 0 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 5 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 10 */ TPM_SHORT, TPM_MEDIUM, TPM_LONG, TPM_LONG, TPM_MEDIUM, /* 15 */ TPM_SHORT, TPM_SHORT, TPM_MEDIUM, TPM_LONG, TPM_SHORT, /* 20 */ TPM_SHORT, TPM_MEDIUM, TPM_MEDIUM, TPM_MEDIUM, TPM_SHORT, /* 25 */ TPM_SHORT, TPM_MEDIUM, TPM_SHORT, TPM_SHORT, TPM_MEDIUM, /* 30 */ TPM_LONG, TPM_MEDIUM, TPM_SHORT, TPM_SHORT, TPM_SHORT, /* 35 */ TPM_MEDIUM, TPM_MEDIUM, TPM_UNDEFINED, TPM_UNDEFINED, TPM_MEDIUM, /* 40 */ TPM_LONG, TPM_MEDIUM, TPM_SHORT, TPM_SHORT, TPM_SHORT, /* 45 */ TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_LONG, TPM_MEDIUM, /* 50 */ TPM_MEDIUM, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 55 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_MEDIUM, /* 60 */ TPM_MEDIUM, TPM_MEDIUM, TPM_SHORT, TPM_SHORT, TPM_MEDIUM, /* 65 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 70 */ TPM_SHORT, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 75 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_LONG, /* 80 */ TPM_UNDEFINED, TPM_MEDIUM, TPM_LONG, TPM_SHORT, TPM_UNDEFINED, /* 85 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 90 */ TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_UNDEFINED, /* 95 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_MEDIUM, /* 100 */ TPM_SHORT, TPM_SHORT, TPM_UNDEFINED, 
TPM_UNDEFINED, TPM_UNDEFINED, /* 105 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 110 */ TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_SHORT, /* 115 */ TPM_SHORT, TPM_SHORT, TPM_UNDEFINED, TPM_UNDEFINED, TPM_LONG, /* 120 */ TPM_LONG, TPM_MEDIUM, TPM_UNDEFINED, TPM_SHORT, TPM_SHORT, /* 125 */ TPM_SHORT, TPM_LONG, TPM_SHORT, TPM_SHORT, TPM_SHORT, /* 130 */ TPM_MEDIUM, TPM_UNDEFINED, TPM_SHORT, TPM_MEDIUM, TPM_UNDEFINED, /* 135 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 140 */ TPM_SHORT, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 145 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 150 */ TPM_MEDIUM, TPM_MEDIUM, TPM_SHORT, TPM_SHORT, TPM_UNDEFINED, /* 155 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 160 */ TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_UNDEFINED, TPM_UNDEFINED, /* 165 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_LONG, /* 170 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 175 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_MEDIUM, /* 180 */ TPM_SHORT, TPM_MEDIUM, TPM_MEDIUM, TPM_MEDIUM, TPM_MEDIUM, /* 185 */ TPM_SHORT, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 190 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, /* 195 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 200 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, TPM_SHORT, /* 205 */ TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_MEDIUM, /* 210 */ TPM_UNDEFINED, TPM_MEDIUM, TPM_MEDIUM, TPM_MEDIUM, TPM_UNDEFINED, /* 215 */ TPM_MEDIUM, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, TPM_SHORT, /* 220 */ TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_SHORT, TPM_UNDEFINED, /* 225 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 230 */ TPM_LONG, TPM_MEDIUM, TPM_UNDEFINED, TPM_UNDEFINED, 
TPM_UNDEFINED, /* 235 */ TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_UNDEFINED, TPM_SHORT, /* 240 */ TPM_UNDEFINED, TPM_MEDIUM, }; static void user_reader_timeout(unsigned long ptr) { struct tpm_chip *chip = (struct tpm_chip *) ptr; schedule_work(&chip->work); } static void timeout_work(struct work_struct *work) { struct tpm_chip *chip = container_of(work, struct tpm_chip, work); mutex_lock(&chip->buffer_mutex); atomic_set(&chip->data_pending, 0); memset(chip->data_buffer, 0, TPM_BUFSIZE); mutex_unlock(&chip->buffer_mutex); } /* * Returns max number of jiffies to wait */ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal) { int duration_idx = TPM_UNDEFINED; int duration = 0; if (ordinal < TPM_MAX_ORDINAL) duration_idx = tpm_ordinal_duration[ordinal]; else if ((ordinal & TPM_PROTECTED_ORDINAL_MASK) < TPM_MAX_PROTECTED_ORDINAL) duration_idx = tpm_protected_ordinal_duration[ordinal & TPM_PROTECTED_ORDINAL_MASK]; if (duration_idx != TPM_UNDEFINED) duration = chip->vendor.duration[duration_idx]; if (duration <= 0) return 2 * 60 * HZ; else return duration; } EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); /* * Internal kernel interface to transmit TPM commands */ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, size_t bufsiz) { ssize_t rc; u32 count, ordinal; unsigned long stop; if (bufsiz > TPM_BUFSIZE) bufsiz = TPM_BUFSIZE; count = be32_to_cpu(*((__be32 *) (buf + 2))); ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); if (count == 0) return -ENODATA; if (count > bufsiz) { dev_err(chip->dev, "invalid count value %x %zx \n", count, bufsiz); return -E2BIG; } mutex_lock(&chip->tpm_mutex); if ((rc = chip->vendor.send(chip, (u8 *) buf, count)) < 0) { dev_err(chip->dev, "tpm_transmit: tpm_send: error %zd\n", rc); goto out; } if (chip->vendor.irq) goto out_recv; stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal); do { u8 status = chip->vendor.status(chip); if ((status & chip->vendor.req_complete_mask) == 
chip->vendor.req_complete_val) goto out_recv; if ((status == chip->vendor.req_canceled)) { dev_err(chip->dev, "Operation Canceled\n"); rc = -ECANCELED; goto out; } msleep(TPM_TIMEOUT); /* CHECK */ rmb(); } while (time_before(jiffies, stop)); chip->vendor.cancel(chip); dev_err(chip->dev, "Operation Timed out\n"); rc = -ETIME; goto out; out_recv: rc = chip->vendor.recv(chip, (u8 *) buf, bufsiz); if (rc < 0) dev_err(chip->dev, "tpm_transmit: tpm_recv: error %zd\n", rc); out: mutex_unlock(&chip->tpm_mutex); return rc; } #define TPM_DIGEST_SIZE 20 #define TPM_ERROR_SIZE 10 #define TPM_RET_CODE_IDX 6 enum tpm_capabilities { TPM_CAP_FLAG = cpu_to_be32(4), TPM_CAP_PROP = cpu_to_be32(5), CAP_VERSION_1_1 = cpu_to_be32(0x06), CAP_VERSION_1_2 = cpu_to_be32(0x1A) }; enum tpm_sub_capabilities { TPM_CAP_PROP_PCR = cpu_to_be32(0x101), TPM_CAP_PROP_MANUFACTURER = cpu_to_be32(0x103), TPM_CAP_FLAG_PERM = cpu_to_be32(0x108), TPM_CAP_FLAG_VOL = cpu_to_be32(0x109), TPM_CAP_PROP_OWNER = cpu_to_be32(0x111), TPM_CAP_PROP_TIS_TIMEOUT = cpu_to_be32(0x115), TPM_CAP_PROP_TIS_DURATION = cpu_to_be32(0x120), }; static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd, int len, const char *desc) { int err; len = tpm_transmit(chip,(u8 *) cmd, len); if (len < 0) return len; if (len == TPM_ERROR_SIZE) { err = be32_to_cpu(cmd->header.out.return_code); dev_dbg(chip->dev, "A TPM error (%d) occurred %s\n", err, desc); return err; } return 0; } #define TPM_INTERNAL_RESULT_SIZE 200 #define TPM_TAG_RQU_COMMAND cpu_to_be16(193) #define TPM_ORD_GET_CAP cpu_to_be32(101) static const struct tpm_input_header tpm_getcap_header = { .tag = TPM_TAG_RQU_COMMAND, .length = cpu_to_be32(22), .ordinal = TPM_ORD_GET_CAP }; ssize_t tpm_getcap(struct device *dev, __be32 subcap_id, cap_t *cap, const char *desc) { struct tpm_cmd_t tpm_cmd; int rc; struct tpm_chip *chip = dev_get_drvdata(dev); tpm_cmd.header.in = tpm_getcap_header; if (subcap_id == CAP_VERSION_1_1 || subcap_id == CAP_VERSION_1_2) { 
tpm_cmd.params.getcap_in.cap = subcap_id; /*subcap field not necessary */ tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(0); tpm_cmd.header.in.length -= cpu_to_be32(sizeof(__be32)); } else { if (subcap_id == TPM_CAP_FLAG_PERM || subcap_id == TPM_CAP_FLAG_VOL) tpm_cmd.params.getcap_in.cap = TPM_CAP_FLAG; else tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP; tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4); tpm_cmd.params.getcap_in.subcap = subcap_id; } rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, desc); if (!rc) *cap = tpm_cmd.params.getcap_out.cap; return rc; } void tpm_gen_interrupt(struct tpm_chip *chip) { struct tpm_cmd_t tpm_cmd; ssize_t rc; tpm_cmd.header.in = tpm_getcap_header; tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP; tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4); tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT; rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, "attempting to determine the timeouts"); } EXPORT_SYMBOL_GPL(tpm_gen_interrupt); void tpm_get_timeouts(struct tpm_chip *chip) { struct tpm_cmd_t tpm_cmd; struct timeout_t *timeout_cap; struct duration_t *duration_cap; ssize_t rc; u32 timeout; tpm_cmd.header.in = tpm_getcap_header; tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP; tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4); tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT; rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, "attempting to determine the timeouts"); if (rc) goto duration; if (be32_to_cpu(tpm_cmd.header.out.length) != 4 * sizeof(u32)) goto duration; timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout; /* Don't overwrite default if value is 0 */ timeout = be32_to_cpu(timeout_cap->a); if (timeout) chip->vendor.timeout_a = usecs_to_jiffies(timeout); timeout = be32_to_cpu(timeout_cap->b); if (timeout) chip->vendor.timeout_b = usecs_to_jiffies(timeout); timeout = be32_to_cpu(timeout_cap->c); if (timeout) chip->vendor.timeout_c = usecs_to_jiffies(timeout); timeout = 
be32_to_cpu(timeout_cap->d); if (timeout) chip->vendor.timeout_d = usecs_to_jiffies(timeout); duration: tpm_cmd.header.in = tpm_getcap_header; tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP; tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4); tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION; rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, "attempting to determine the durations"); if (rc) return; if (be32_to_cpu(tpm_cmd.header.out.return_code) != 3 * sizeof(u32)) return; duration_cap = &tpm_cmd.params.getcap_out.cap.duration; chip->vendor.duration[TPM_SHORT] = usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short)); /* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above * value wrong and apparently reports msecs rather than usecs. So we * fix up the resulting too-small TPM_SHORT value to make things work. */ if (chip->vendor.duration[TPM_SHORT] < (HZ/100)) chip->vendor.duration[TPM_SHORT] = HZ; chip->vendor.duration[TPM_MEDIUM] = usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium)); chip->vendor.duration[TPM_LONG] = usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long)); } EXPORT_SYMBOL_GPL(tpm_get_timeouts); void tpm_continue_selftest(struct tpm_chip *chip) { u8 data[] = { 0, 193, /* TPM_TAG_RQU_COMMAND */ 0, 0, 0, 10, /* length */ 0, 0, 0, 83, /* TPM_ORD_GetCapability */ }; tpm_transmit(chip, data, sizeof(data)); } EXPORT_SYMBOL_GPL(tpm_continue_selftest); ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr, char *buf) { cap_t cap; ssize_t rc; rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap, "attempting to determine the permanent enabled state"); if (rc) return 0; rc = sprintf(buf, "%d\n", !cap.perm_flags.disable); return rc; } EXPORT_SYMBOL_GPL(tpm_show_enabled); ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr, char *buf) { cap_t cap; ssize_t rc; rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap, "attempting to determine the permanent active state"); if (rc) return 0; rc = 
sprintf(buf, "%d\n", !cap.perm_flags.deactivated); return rc; } EXPORT_SYMBOL_GPL(tpm_show_active); ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr, char *buf) { cap_t cap; ssize_t rc; rc = tpm_getcap(dev, TPM_CAP_PROP_OWNER, &cap, "attempting to determine the owner state"); if (rc) return 0; rc = sprintf(buf, "%d\n", cap.owned); return rc; } EXPORT_SYMBOL_GPL(tpm_show_owned); ssize_t tpm_show_temp_deactivated(struct device * dev, struct device_attribute * attr, char *buf) { cap_t cap; ssize_t rc; rc = tpm_getcap(dev, TPM_CAP_FLAG_VOL, &cap, "attempting to determine the temporary state"); if (rc) return 0; rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated); return rc; } EXPORT_SYMBOL_GPL(tpm_show_temp_deactivated); /* * tpm_chip_find_get - return tpm_chip for given chip number */ static struct tpm_chip *tpm_chip_find_get(int chip_num) { struct tpm_chip *pos, *chip = NULL; rcu_read_lock(); list_for_each_entry_rcu(pos, &tpm_chip_list, list) { if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num) continue; if (try_module_get(pos->dev->driver->owner)) { chip = pos; break; } } rcu_read_unlock(); return chip; } #define TPM_ORDINAL_PCRREAD cpu_to_be32(21) #define READ_PCR_RESULT_SIZE 30 static struct tpm_input_header pcrread_header = { .tag = TPM_TAG_RQU_COMMAND, .length = cpu_to_be32(14), .ordinal = TPM_ORDINAL_PCRREAD }; int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf) { int rc; struct tpm_cmd_t cmd; cmd.header.in = pcrread_header; cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx); rc = transmit_cmd(chip, &cmd, READ_PCR_RESULT_SIZE, "attempting to read a pcr value"); if (rc == 0) memcpy(res_buf, cmd.params.pcrread_out.pcr_result, TPM_DIGEST_SIZE); return rc; } /** * tpm_pcr_read - read a pcr value * @chip_num: tpm idx # or ANY * @pcr_idx: pcr idx to retrieve * @res_buf: TPM_PCR value * size of res_buf is 20 bytes (or NULL if you don't care) * * The TPM driver should be built-in, but for whatever reason it * 
isn't, protect against the chip disappearing, by incrementing * the module usage count. */ int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) { struct tpm_chip *chip; int rc; chip = tpm_chip_find_get(chip_num); if (chip == NULL) return -ENODEV; rc = __tpm_pcr_read(chip, pcr_idx, res_buf); tpm_chip_put(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_pcr_read); /** * tpm_pcr_extend - extend pcr value with hash * @chip_num: tpm idx # or AN& * @pcr_idx: pcr idx to extend * @hash: hash value used to extend pcr value * * The TPM driver should be built-in, but for whatever reason it * isn't, protect against the chip disappearing, by incrementing * the module usage count. */ #define TPM_ORD_PCR_EXTEND cpu_to_be32(20) #define EXTEND_PCR_RESULT_SIZE 34 static struct tpm_input_header pcrextend_header = { .tag = TPM_TAG_RQU_COMMAND, .length = cpu_to_be32(34), .ordinal = TPM_ORD_PCR_EXTEND }; int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) { struct tpm_cmd_t cmd; int rc; struct tpm_chip *chip; chip = tpm_chip_find_get(chip_num); if (chip == NULL) return -ENODEV; cmd.header.in = pcrextend_header; cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx); memcpy(cmd.params.pcrextend_in.hash, hash, TPM_DIGEST_SIZE); rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, "attempting extend a PCR value"); tpm_chip_put(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_pcr_extend); int tpm_send(u32 chip_num, void *cmd, size_t buflen) { struct tpm_chip *chip; int rc; chip = tpm_chip_find_get(chip_num); if (chip == NULL) return -ENODEV; rc = transmit_cmd(chip, cmd, buflen, "attempting tpm_cmd"); tpm_chip_put(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_send); ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr, char *buf) { cap_t cap; u8 digest[TPM_DIGEST_SIZE]; ssize_t rc; int i, j, num_pcrs; char *str = buf; struct tpm_chip *chip = dev_get_drvdata(dev); rc = tpm_getcap(dev, TPM_CAP_PROP_PCR, &cap, "attempting to determine the number of PCRS"); if (rc) return 0; 
num_pcrs = be32_to_cpu(cap.num_pcrs); for (i = 0; i < num_pcrs; i++) { rc = __tpm_pcr_read(chip, i, digest); if (rc) break; str += sprintf(str, "PCR-%02d: ", i); for (j = 0; j < TPM_DIGEST_SIZE; j++) str += sprintf(str, "%02X ", digest[j]); str += sprintf(str, "\n"); } return str - buf; } EXPORT_SYMBOL_GPL(tpm_show_pcrs); #define READ_PUBEK_RESULT_SIZE 314 #define TPM_ORD_READPUBEK cpu_to_be32(124) struct tpm_input_header tpm_readpubek_header = { .tag = TPM_TAG_RQU_COMMAND, .length = cpu_to_be32(30), .ordinal = TPM_ORD_READPUBEK }; ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr, char *buf) { u8 *data; struct tpm_cmd_t tpm_cmd; ssize_t err; int i, rc; char *str = buf; struct tpm_chip *chip = dev_get_drvdata(dev); tpm_cmd.header.in = tpm_readpubek_header; err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, "attempting to read the PUBEK"); if (err) goto out; /* ignore header 10 bytes algorithm 32 bits (1 == RSA ) encscheme 16 bits sigscheme 16 bits parameters (RSA 12->bytes: keybit, #primes, expbit) keylenbytes 32 bits 256 byte modulus ignore checksum 20 bytes */ data = tpm_cmd.params.readpubek_out_buffer; str += sprintf(str, "Algorithm: %02X %02X %02X %02X\nEncscheme: %02X %02X\n" "Sigscheme: %02X %02X\nParameters: %02X %02X %02X %02X" " %02X %02X %02X %02X %02X %02X %02X %02X\n" "Modulus length: %d\nModulus: \n", data[10], data[11], data[12], data[13], data[14], data[15], data[16], data[17], data[22], data[23], data[24], data[25], data[26], data[27], data[28], data[29], data[30], data[31], data[32], data[33], be32_to_cpu(*((__be32 *) (data + 34)))); for (i = 0; i < 256; i++) { str += sprintf(str, "%02X ", data[i + 38]); if ((i + 1) % 16 == 0) str += sprintf(str, "\n"); } out: rc = str - buf; return rc; } EXPORT_SYMBOL_GPL(tpm_show_pubek); ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr, char *buf) { cap_t cap; ssize_t rc; char *str = buf; rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap, "attempting to 
determine the manufacturer"); if (rc) return 0; str += sprintf(str, "Manufacturer: 0x%x\n", be32_to_cpu(cap.manufacturer_id)); rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap, "attempting to determine the 1.1 version"); if (rc) return 0; str += sprintf(str, "TCG version: %d.%d\nFirmware version: %d.%d\n", cap.tpm_version.Major, cap.tpm_version.Minor, cap.tpm_version.revMajor, cap.tpm_version.revMinor); return str - buf; } EXPORT_SYMBOL_GPL(tpm_show_caps); ssize_t tpm_show_caps_1_2(struct device * dev, struct device_attribute * attr, char *buf) { cap_t cap; ssize_t rc; char *str = buf; rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap, "attempting to determine the manufacturer"); if (rc) return 0; str += sprintf(str, "Manufacturer: 0x%x\n", be32_to_cpu(cap.manufacturer_id)); rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap, "attempting to determine the 1.2 version"); if (rc) return 0; str += sprintf(str, "TCG version: %d.%d\nFirmware version: %d.%d\n", cap.tpm_version_1_2.Major, cap.tpm_version_1_2.Minor, cap.tpm_version_1_2.revMajor, cap.tpm_version_1_2.revMinor); return str - buf; } EXPORT_SYMBOL_GPL(tpm_show_caps_1_2); ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct tpm_chip *chip = dev_get_drvdata(dev); if (chip == NULL) return 0; chip->vendor.cancel(chip); return count; } EXPORT_SYMBOL_GPL(tpm_store_cancel); /* * Device file system interface to the TPM * * It's assured that the chip will be opened just once, * by the check of is_open variable, which is protected * by driver_lock. 
*/
/*
 * open(2) handler for the TPM character device.
 *
 * Looks up the tpm_chip whose misc device minor matches the opened inode
 * (RCU-protected walk of tpm_chip_list), takes a reference on its device,
 * and enforces a single concurrent opener via the chip->is_open bit.
 * Allocates the per-open command/response buffer (TPM_BUFSIZE bytes).
 *
 * Returns 0 on success, -ENODEV if no chip owns this minor, -EBUSY if the
 * chip is already open, -ENOMEM if the buffer allocation fails.
 */
int tpm_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	struct tpm_chip *chip = NULL, *pos;

	rcu_read_lock();
	list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
		if (pos->vendor.miscdev.minor == minor) {
			chip = pos;
			/* pin the device while the file is open */
			get_device(chip->dev);
			break;
		}
	}
	rcu_read_unlock();

	if (!chip)
		return -ENODEV;

	/* only one process may hold the TPM open at a time */
	if (test_and_set_bit(0, &chip->is_open)) {
		dev_dbg(chip->dev, "Another process owns this TPM\n");
		put_device(chip->dev);
		return -EBUSY;
	}

	chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL);
	if (chip->data_buffer == NULL) {
		clear_bit(0, &chip->is_open);
		put_device(chip->dev);
		return -ENOMEM;
	}

	atomic_set(&chip->data_pending, 0);

	file->private_data = chip;
	return 0;
}
EXPORT_SYMBOL_GPL(tpm_open);

/*
 * Called on file close: undoes everything tpm_open() set up.
 * Cancels the pending reader timeout and flushes the timeout work
 * before freeing the data buffer and dropping the device reference.
 */
int tpm_release(struct inode *inode, struct file *file)
{
	struct tpm_chip *chip = file->private_data;

	del_singleshot_timer_sync(&chip->user_read_timer);
	flush_work_sync(&chip->work);
	file->private_data = NULL;
	atomic_set(&chip->data_pending, 0);
	kfree(chip->data_buffer);
	clear_bit(0, &chip->is_open);
	put_device(chip->dev);
	return 0;
}
EXPORT_SYMBOL_GPL(tpm_release);

/*
 * write(2) handler: sends one TPM command and latches the response.
 *
 * Copies up to TPM_BUFSIZE bytes of the user's command into the per-open
 * buffer, transmits it, and publishes the response length in
 * chip->data_pending for a subsequent tpm_read().  Returns the number of
 * bytes accepted from userspace (in_size), not the response size.
 */
ssize_t tpm_write(struct file *file, const char __user *buf,
		  size_t size, loff_t *off)
{
	struct tpm_chip *chip = file->private_data;
	size_t in_size = size, out_size;

	/* cannot perform a write until the read has cleared
	   either via tpm_read or a user_read_timer timeout */
	while (atomic_read(&chip->data_pending) != 0)
		msleep(TPM_TIMEOUT);

	mutex_lock(&chip->buffer_mutex);

	/* silently truncate oversized commands to the buffer size */
	if (in_size > TPM_BUFSIZE)
		in_size = TPM_BUFSIZE;

	if (copy_from_user
	    (chip->data_buffer, (void __user *) buf, in_size)) {
		mutex_unlock(&chip->buffer_mutex);
		return -EFAULT;
	}

	/* atomic tpm command send and result receive */
	out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);

	/*
	 * NOTE(review): out_size is a size_t, so a negative error return
	 * from tpm_transmit() would be stored as a huge positive value in
	 * data_pending and later handed to the reader — confirm against
	 * tpm_transmit()'s contract (declared elsewhere in this file).
	 */
	atomic_set(&chip->data_pending, out_size);
	mutex_unlock(&chip->buffer_mutex);

	/* Set a timeout by which the reader must come claim the result */
	mod_timer(&chip->user_read_timer, jiffies + (60 * HZ));

	return in_size;
}
EXPORT_SYMBOL_GPL(tpm_write);

/*
 * read(2) handler: hands the latched TPM response to userspace.
 *
 * Cancels the 60s reader-timeout armed by tpm_write(), consumes
 * data_pending, copies at most `size` bytes of the response out, and
 * scrubs the copied portion of the buffer.  Returns the number of bytes
 * copied, 0 if no response was pending, or -EFAULT on copy failure.
 */
ssize_t tpm_read(struct file *file, char __user *buf,
		 size_t size, loff_t *off)
{
	struct tpm_chip *chip = file->private_data;
	ssize_t ret_size;
	int rc;

	del_singleshot_timer_sync(&chip->user_read_timer);
	flush_work_sync(&chip->work);
	ret_size = atomic_read(&chip->data_pending);
	atomic_set(&chip->data_pending, 0);
	if (ret_size > 0) {	/* relay data */
		if (size < ret_size)
			ret_size = size;

		mutex_lock(&chip->buffer_mutex);
		rc = copy_to_user(buf, chip->data_buffer, ret_size);
		/* scrub the response so it can't be re-read later */
		memset(chip->data_buffer, 0, ret_size);
		if (rc)
			ret_size = -EFAULT;

		mutex_unlock(&chip->buffer_mutex);
	}

	return ret_size;
}
EXPORT_SYMBOL_GPL(tpm_read);

/*
 * Unregister a TPM device: unhook it from the global list (RCU-safe),
 * tear down its misc device, sysfs group and BIOS event log, and drop
 * the reference taken in tpm_register_hardware().
 */
void tpm_remove_hardware(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);

	if (chip == NULL) {
		dev_err(dev, "No device data found\n");
		return;
	}

	spin_lock(&driver_lock);
	list_del_rcu(&chip->list);
	spin_unlock(&driver_lock);
	/* wait for concurrent RCU readers (tpm_open, tpm_chip_find_get) */
	synchronize_rcu();

	misc_deregister(&chip->vendor.miscdev);
	sysfs_remove_group(&dev->kobj, chip->vendor.attr_group);
	tpm_bios_log_teardown(chip->bios_dir);

	/* write it this way to be explicit (chip->dev == dev) */
	put_device(chip->dev);
}
EXPORT_SYMBOL_GPL(tpm_remove_hardware);

#define TPM_ORD_SAVESTATE cpu_to_be32(152)
#define SAVESTATE_RESULT_SIZE 10

/* canonical TPM_SaveState request (10-byte header-only command) */
static struct tpm_input_header savestate_header = {
	.tag = TPM_TAG_RQU_COMMAND,
	.length = cpu_to_be32(10),
	.ordinal = TPM_ORD_SAVESTATE
};

/*
 * We are about to suspend. Save the TPM state
 * so that it can be restored.
 */
int tpm_pm_suspend(struct device *dev, pm_message_t pm_state)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct tpm_cmd_t cmd;
	int rc;

	u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 };

	if (chip == NULL)
		return -ENODEV;

	/* for buggy tpm, flush pcrs with extend to selected dummy */
	if (tpm_suspend_pcr) {
		cmd.header.in = pcrextend_header;
		cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr);
		memcpy(cmd.params.pcrextend_in.hash, dummy_hash,
		       TPM_DIGEST_SIZE);
		/* best effort: rc from the dummy extend is discarded below */
		rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
				  "extending dummy pcr before suspend");
	}

	/* now do the actual savestate */
	cmd.header.in = savestate_header;
	rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE,
			  "sending savestate before suspend");
	return rc;
}
EXPORT_SYMBOL_GPL(tpm_pm_suspend);

/*
 * Resume from a power save. The BIOS already restored
 * the TPM state, so there is nothing for the driver to do here
 * beyond validating that the chip is still present.
 */
int tpm_pm_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);

	if (chip == NULL)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(tpm_pm_resume);

/* In case vendor provided release function, call it too. Also frees the
 * device-number slot and the kmalloc'd misc device name. */
void tpm_dev_vendor_release(struct tpm_chip *chip)
{
	if (chip->vendor.release)
		chip->vendor.release(chip->dev);

	clear_bit(chip->dev_num, dev_mask);
	kfree(chip->vendor.miscdev.name);
}
EXPORT_SYMBOL_GPL(tpm_dev_vendor_release);

/*
 * Once all references to platform device are down to 0,
 * release all allocated structures.
 * Installed as dev->release by tpm_register_hardware(); chains to the
 * release callback the device had before registration.
 */
void tpm_dev_release(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);

	tpm_dev_vendor_release(chip);

	chip->release(dev);
	kfree(chip);
}
EXPORT_SYMBOL_GPL(tpm_dev_release);

/*
 * Called from tpm_<specific>.c probe function only for devices
 * the driver has determined it should claim.  Prior to calling
 * this function the specific probe function has called pci_enable_device
 * upon errant exit from this function specific probe function should call
 * pci_disable_device
 */
struct tpm_chip *tpm_register_hardware(struct device *dev,
				       const struct tpm_vendor_specific *entry)
{
/* "tpm" + up to 3 digits + NUL */
#define DEVNAME_SIZE 7

	char *devname;
	struct tpm_chip *chip;

	/* Driver specific per-device data */
	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	devname = kmalloc(DEVNAME_SIZE, GFP_KERNEL);

	if (chip == NULL || devname == NULL)
		goto out_free;

	mutex_init(&chip->buffer_mutex);
	mutex_init(&chip->tpm_mutex);
	INIT_LIST_HEAD(&chip->list);

	INIT_WORK(&chip->work, timeout_work);

	setup_timer(&chip->user_read_timer, user_reader_timeout,
			(unsigned long)chip);

	memcpy(&chip->vendor, entry, sizeof(struct tpm_vendor_specific));

	/* claim the lowest free device number; 0 gets the fixed TPM_MINOR */
	chip->dev_num = find_first_zero_bit(dev_mask, TPM_NUM_DEVICES);

	if (chip->dev_num >= TPM_NUM_DEVICES) {
		dev_err(dev, "No available tpm device numbers\n");
		goto out_free;
	} else if (chip->dev_num == 0)
		chip->vendor.miscdev.minor = TPM_MINOR;
	else
		chip->vendor.miscdev.minor = MISC_DYNAMIC_MINOR;

	set_bit(chip->dev_num, dev_mask);

	scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num);
	chip->vendor.miscdev.name = devname;

	chip->vendor.miscdev.parent = dev;
	chip->dev = get_device(dev);
	/* hook our release in front of the device's original one */
	chip->release = dev->release;
	dev->release = tpm_dev_release;
	dev_set_drvdata(dev, chip);

	/*
	 * NOTE(review): on the two failure paths below, chip/devname are
	 * only reclaimed when the final put_device() on `dev` runs
	 * tpm_dev_release; if the caller still holds references they are
	 * not freed here — confirm intended ownership with callers.
	 */
	if (misc_register(&chip->vendor.miscdev)) {
		dev_err(chip->dev,
			"unable to misc_register %s, minor %d\n",
			chip->vendor.miscdev.name,
			chip->vendor.miscdev.minor);
		put_device(chip->dev);
		return NULL;
	}

	if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) {
		misc_deregister(&chip->vendor.miscdev);
		put_device(chip->dev);
		return NULL;
	}

	chip->bios_dir = tpm_bios_log_setup(devname);

	/* Make chip available */
	spin_lock(&driver_lock);
	list_add_rcu(&chip->list, &tpm_chip_list);
	spin_unlock(&driver_lock);

	return chip;

out_free:
	kfree(chip);
	kfree(devname);
	return NULL;
}
EXPORT_SYMBOL_GPL(tpm_register_hardware);

/* Module metadata for the TPM core driver. */
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
gpl-2.0
matianfu/barcelona-4.3.3
drivers/scsi/fcoe/fcoe_sysfs.c
1940
26564
/*
 * Copyright(c) 2011 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>

#include <scsi/fcoe_sysfs.h>
#include <scsi/libfcoe.h>

/*
 * OK to include local libfcoe.h for debug_logging, but cannot include
 * <scsi/libfcoe.h> otherwise non-netdev based fcoe solutions would
 * have to include more than fcoe_sysfs.h.
 */
#include "libfcoe.h"

/* monotonically increasing ids for ctlr_N / fcf_N sysfs names */
static atomic_t ctlr_num;
static atomic_t fcf_num;

/*
 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
 * should insulate the loss of a fcf.
 */
static unsigned int fcoe_fcf_dev_loss_tmo = 1800; /* seconds */

module_param_named(fcf_dev_loss_tmo, fcoe_fcf_dev_loss_tmo,
		   uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fcf_dev_loss_tmo,
		 "Maximum number of seconds that libfcoe should"
		 " insulate the loss of a fcf. Once this value is"
		 " exceeded, the fcf is removed.");

/*
 * These are used by the fcoe_*_show_function routines, they
 * are intentionally placed in the .c file as they're not intended
 * for use throughout the code.
 */
#define fcoe_ctlr_id(x)				\
	((x)->id)
#define fcoe_ctlr_work_q_name(x)		\
	((x)->work_q_name)
#define fcoe_ctlr_work_q(x)			\
	((x)->work_q)
#define fcoe_ctlr_devloss_work_q_name(x)	\
	((x)->devloss_work_q_name)
#define fcoe_ctlr_devloss_work_q(x)		\
	((x)->devloss_work_q)
#define fcoe_ctlr_mode(x)			\
	((x)->mode)
#define fcoe_ctlr_fcf_dev_loss_tmo(x)		\
	((x)->fcf_dev_loss_tmo)
#define fcoe_ctlr_link_fail(x)			\
	((x)->lesb.lesb_link_fail)
#define fcoe_ctlr_vlink_fail(x)			\
	((x)->lesb.lesb_vlink_fail)
#define fcoe_ctlr_miss_fka(x)			\
	((x)->lesb.lesb_miss_fka)
#define fcoe_ctlr_symb_err(x)			\
	((x)->lesb.lesb_symb_err)
#define fcoe_ctlr_err_block(x)			\
	((x)->lesb.lesb_err_block)
#define fcoe_ctlr_fcs_error(x)			\
	((x)->lesb.lesb_fcs_error)
#define fcoe_ctlr_enabled(x)			\
	((x)->enabled)
#define fcoe_fcf_state(x)			\
	((x)->state)
#define fcoe_fcf_fabric_name(x)			\
	((x)->fabric_name)
#define fcoe_fcf_switch_name(x)			\
	((x)->switch_name)
#define fcoe_fcf_fc_map(x)			\
	((x)->fc_map)
#define fcoe_fcf_vfid(x)			\
	((x)->vfid)
#define fcoe_fcf_mac(x)				\
	((x)->mac)
#define fcoe_fcf_priority(x)			\
	((x)->priority)
#define fcoe_fcf_fka_period(x)			\
	((x)->fka_period)
#define fcoe_fcf_dev_loss_tmo(x)		\
	((x)->dev_loss_tmo)
#define fcoe_fcf_selected(x)			\
	((x)->selected)
#define fcoe_fcf_vlan_id(x)			\
	((x)->vlan_id)

/*
 * dev_loss_tmo attribute
 */
/* Parse a sysfs string into an unsigned dev_loss_tmo value. */
static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val)
{
	int ret;

	ret = kstrtoul(buf, 0, val);
	if (ret)
		return -EINVAL;
	/*
	 * Check for overflow; dev_loss_tmo is u32
	 */
	if (*val > UINT_MAX)
		return -EINVAL;

	return 0;
}

/*
 * Apply a new dev_loss_tmo to one fcf; rejected unless the fcf is in a
 * connected state (changing it on a disconnecting fcf would race the
 * already-armed devloss timer).
 */
static int fcoe_fcf_set_dev_loss_tmo(struct fcoe_fcf_device *fcf,
				     unsigned long val)
{
	if ((fcf->state == FCOE_FCF_STATE_UNKNOWN) ||
	    (fcf->state == FCOE_FCF_STATE_DISCONNECTED) ||
	    (fcf->state == FCOE_FCF_STATE_DELETED))
		return -EBUSY;
	/*
	 * Check for overflow; dev_loss_tmo is u32
	 */
	if (val > UINT_MAX)
		return -EINVAL;

	fcoe_fcf_dev_loss_tmo(fcf) = val;
	return 0;
}

/* Declare a device_attribute named device_attr_fcoe_<prefix>_<name>. */
#define FCOE_DEVICE_ATTR(_prefix, _name, _mode, _show, _store)	\
struct device_attribute device_attr_fcoe_##_prefix##_##_name =	\
	__ATTR(_name, _mode, _show, _store)

/* show routine that refreshes the field via the LLD callback first */
#define fcoe_ctlr_show_function(field, format_string, sz, cast)	\
static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
					    struct device_attribute *attr, \
					    char *buf)		\
{								\
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);	\
	if (ctlr->f->get_fcoe_ctlr_##field)			\
		ctlr->f->get_fcoe_ctlr_##field(ctlr);		\
	return snprintf(buf, sz, format_string,			\
			cast fcoe_ctlr_##field(ctlr));		\
}

#define fcoe_fcf_show_function(field, format_string, sz, cast)	\
static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \
					   struct device_attribute *attr, \
					   char *buf)		\
{								\
	struct fcoe_fcf_device *fcf = dev_to_fcf(dev);		\
	struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); \
	if (ctlr->f->get_fcoe_fcf_##field)			\
		ctlr->f->get_fcoe_fcf_##field(fcf);		\
	return snprintf(buf, sz, format_string,			\
			cast fcoe_fcf_##field(fcf));		\
}

/* "private" variants read the cached value without calling into the LLD */
#define fcoe_ctlr_private_show_function(field, format_string, sz, cast)	\
static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
					    struct device_attribute *attr, \
					    char *buf)		\
{								\
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);	\
	return snprintf(buf, sz, format_string, cast fcoe_ctlr_##field(ctlr)); \
}

#define fcoe_fcf_private_show_function(field, format_string, sz, cast)	\
static ssize_t show_fcoe_fcf_device_##field(struct device *dev,	\
					   struct device_attribute *attr, \
					   char *buf)		\
{								\
	struct fcoe_fcf_device *fcf = dev_to_fcf(dev);		\
	return snprintf(buf, sz, format_string, cast fcoe_fcf_##field(fcf)); \
}

#define fcoe_ctlr_private_rd_attr(field, format_string, sz)	\
	fcoe_ctlr_private_show_function(field, format_string, sz, ) \
	static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO,		\
				show_fcoe_ctlr_device_##field, NULL)

#define fcoe_ctlr_rd_attr(field, format_string, sz)		\
	fcoe_ctlr_show_function(field, format_string, sz, )	\
	static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO,		\
				show_fcoe_ctlr_device_##field, NULL)

#define fcoe_fcf_rd_attr(field, format_string, sz)		\
	fcoe_fcf_show_function(field, format_string, sz, )	\
	static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO,		\
				show_fcoe_fcf_device_##field, NULL)

#define fcoe_fcf_private_rd_attr(field, format_string, sz)	\
	fcoe_fcf_private_show_function(field, format_string, sz, ) \
	static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO,		\
				show_fcoe_fcf_device_##field, NULL)

#define fcoe_ctlr_private_rd_attr_cast(field, format_string, sz, cast) \
	fcoe_ctlr_private_show_function(field, format_string, sz, (cast)) \
	static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO,		\
				show_fcoe_ctlr_device_##field, NULL)

#define fcoe_fcf_private_rd_attr_cast(field, format_string, sz, cast) \
	fcoe_fcf_private_show_function(field, format_string, sz, (cast)) \
	static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO,		\
				show_fcoe_fcf_device_##field, NULL)

/* Generate get_fcoe_<title>_name(): bounds-checked enum -> string lookup. */
#define fcoe_enum_name_search(title, table_type, table)		\
static const char *get_fcoe_##title##_name(enum table_type table_key) \
{								\
	if (table_key < 0 || table_key >= ARRAY_SIZE(table))	\
		return NULL;					\
	return table[table_key];				\
}

static char *fip_conn_type_names[] = {
	[ FIP_CONN_TYPE_UNKNOWN ] = "Unknown",
	[ FIP_CONN_TYPE_FABRIC ] = "Fabric",
	[ FIP_CONN_TYPE_VN2VN ] = "VN2VN",
};
fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)

/* Case-insensitive reverse lookup of fip_conn_type_names. */
static enum fip_conn_type fcoe_parse_mode(const char *buf)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fip_conn_type_names); i++) {
		if (strcasecmp(buf, fip_conn_type_names[i]) == 0)
			return i;
	}

	return FIP_CONN_TYPE_UNKNOWN;
}

static char *fcf_state_names[] = {
	[ FCOE_FCF_STATE_UNKNOWN ]      = "Unknown",
	[ FCOE_FCF_STATE_DISCONNECTED ] = "Disconnected",
	[ FCOE_FCF_STATE_CONNECTED ]    = "Connected",
};
fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names)
#define FCOE_FCF_STATE_MAX_NAMELEN 50

/* sysfs show for the fcf 'state' attribute. */
static ssize_t show_fcf_state(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
	const char *name;

	name = get_fcoe_fcf_state_name(fcf->state);
	if (!name)
		return -EINVAL;
	return snprintf(buf, FCOE_FCF_STATE_MAX_NAMELEN, "%s\n", name);
}
static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL);

#define FCOE_MAX_MODENAME_LEN 20
/* sysfs show for the ctlr 'mode' attribute. */
static ssize_t show_ctlr_mode(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	const char *name;

	name = get_fcoe_ctlr_mode_name(ctlr->mode);
	if (!name)
		return -EINVAL;
	return snprintf(buf, FCOE_MAX_MODENAME_LEN,
			"%s\n", name);
}

/*
 * sysfs store for the ctlr 'mode' attribute; only allowed while the
 * controller is disabled and the LLD provides set_fcoe_ctlr_mode.
 */
static ssize_t store_ctlr_mode(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	char mode[FCOE_MAX_MODENAME_LEN + 1];

	if (count > FCOE_MAX_MODENAME_LEN)
		return -EINVAL;

	/* bounded by the count check above; terminated just below */
	strncpy(mode, buf, count);

	if (mode[count - 1] == '\n')
		mode[count - 1] = '\0';
	else
		mode[count] = '\0';

	switch (ctlr->enabled) {
	case FCOE_CTLR_ENABLED:
		LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.\n");
		return -EBUSY;
	case FCOE_CTLR_DISABLED:
		if (!ctlr->f->set_fcoe_ctlr_mode) {
			LIBFCOE_SYSFS_DBG(ctlr,
					  "Mode change not supported by LLD.\n");
			return -ENOTSUPP;
		}

		ctlr->mode = fcoe_parse_mode(mode);
		if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) {
			LIBFCOE_SYSFS_DBG(ctlr,
					  "Unknown mode %s provided.\n",
					  buf);
			return -EINVAL;
		}

		ctlr->f->set_fcoe_ctlr_mode(ctlr);
		LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.\n", buf);

		return count;
	case FCOE_CTLR_UNUSED:
	default:
		LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.\n");
		return -ENOTSUPP;
	};
}

static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO | S_IWUSR,
			show_ctlr_mode, store_ctlr_mode);

/*
 * sysfs store for the ctlr 'enabled' attribute: toggles the cached
 * enabled state and notifies the LLD.
 *
 * NOTE(review): unlike store_ctlr_mode() above, set_fcoe_ctlr_enabled is
 * called without a NULL check — confirm every LLD registering a ctlr
 * supplies this callback.
 */
static ssize_t store_ctlr_enabled(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	int rc;

	switch (ctlr->enabled) {
	case FCOE_CTLR_ENABLED:
		if (*buf == '1')
			return count;
		ctlr->enabled = FCOE_CTLR_DISABLED;
		break;
	case FCOE_CTLR_DISABLED:
		if (*buf == '0')
			return count;
		ctlr->enabled = FCOE_CTLR_ENABLED;
		break;
	case FCOE_CTLR_UNUSED:
		return -ENOTSUPP;
	};

	rc = ctlr->f->set_fcoe_ctlr_enabled(ctlr);
	if (rc)
		return rc;

	return count;
}

static char *ctlr_enabled_state_names[] = {
	[ FCOE_CTLR_ENABLED ]  = "1",
	[ FCOE_CTLR_DISABLED ] = "0",
};
fcoe_enum_name_search(ctlr_enabled_state, ctlr_enabled_state,
		      ctlr_enabled_state_names)
#define FCOE_CTLR_ENABLED_MAX_NAMELEN 50

/* sysfs show for the ctlr 'enabled' attribute. */
static ssize_t show_ctlr_enabled_state(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	const char *name;

	name = get_fcoe_ctlr_enabled_state_name(ctlr->enabled);
	if (!name)
		return -EINVAL;
	return snprintf(buf, FCOE_CTLR_ENABLED_MAX_NAMELEN,
			"%s\n", name);
}

static FCOE_DEVICE_ATTR(ctlr, enabled, S_IRUGO | S_IWUSR,
			show_ctlr_enabled_state,
			store_ctlr_enabled);

/*
 * sysfs store for the ctlr-level fcf_dev_loss_tmo: updates the ctlr
 * default and propagates it to every currently attached fcf.
 */
static ssize_t
store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	struct fcoe_fcf_device *fcf;
	unsigned long val;
	int rc;

	rc = fcoe_str_to_dev_loss(buf, &val);
	if (rc)
		return rc;

	fcoe_ctlr_fcf_dev_loss_tmo(ctlr) = val;
	mutex_lock(&ctlr->lock);
	list_for_each_entry(fcf, &ctlr->fcfs, peers)
		fcoe_fcf_set_dev_loss_tmo(fcf, val);
	mutex_unlock(&ctlr->lock);
	return count;
}
fcoe_ctlr_private_show_function(fcf_dev_loss_tmo, "%d\n", 20, );
static FCOE_DEVICE_ATTR(ctlr, fcf_dev_loss_tmo, S_IRUGO | S_IWUSR,
			show_fcoe_ctlr_device_fcf_dev_loss_tmo,
			store_private_fcoe_ctlr_fcf_dev_loss_tmo);

/* Link Error Status Block (LESB) */
fcoe_ctlr_rd_attr(link_fail, "%u\n", 20);
fcoe_ctlr_rd_attr(vlink_fail, "%u\n", 20);
fcoe_ctlr_rd_attr(miss_fka, "%u\n", 20);
fcoe_ctlr_rd_attr(symb_err, "%u\n", 20);
fcoe_ctlr_rd_attr(err_block, "%u\n", 20);
fcoe_ctlr_rd_attr(fcs_error, "%u\n", 20);

fcoe_fcf_private_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
fcoe_fcf_private_rd_attr_cast(switch_name, "0x%llx\n", 20, unsigned long long);
fcoe_fcf_private_rd_attr(priority, "%u\n", 20);
fcoe_fcf_private_rd_attr(fc_map, "0x%x\n", 20);
fcoe_fcf_private_rd_attr(vfid, "%u\n", 20); fcoe_fcf_private_rd_attr(mac, "%pM\n", 20); fcoe_fcf_private_rd_attr(fka_period, "%u\n", 20); fcoe_fcf_rd_attr(selected, "%u\n", 20); fcoe_fcf_rd_attr(vlan_id, "%u\n", 20); fcoe_fcf_private_show_function(dev_loss_tmo, "%d\n", 20, ) static ssize_t store_fcoe_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fcoe_fcf_device *fcf = dev_to_fcf(dev); unsigned long val; int rc; rc = fcoe_str_to_dev_loss(buf, &val); if (rc) return rc; rc = fcoe_fcf_set_dev_loss_tmo(fcf, val); if (rc) return rc; return count; } static FCOE_DEVICE_ATTR(fcf, dev_loss_tmo, S_IRUGO | S_IWUSR, show_fcoe_fcf_device_dev_loss_tmo, store_fcoe_fcf_dev_loss_tmo); static struct attribute *fcoe_ctlr_lesb_attrs[] = { &device_attr_fcoe_ctlr_link_fail.attr, &device_attr_fcoe_ctlr_vlink_fail.attr, &device_attr_fcoe_ctlr_miss_fka.attr, &device_attr_fcoe_ctlr_symb_err.attr, &device_attr_fcoe_ctlr_err_block.attr, &device_attr_fcoe_ctlr_fcs_error.attr, NULL, }; static struct attribute_group fcoe_ctlr_lesb_attr_group = { .name = "lesb", .attrs = fcoe_ctlr_lesb_attrs, }; static struct attribute *fcoe_ctlr_attrs[] = { &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr, &device_attr_fcoe_ctlr_enabled.attr, &device_attr_fcoe_ctlr_mode.attr, NULL, }; static struct attribute_group fcoe_ctlr_attr_group = { .attrs = fcoe_ctlr_attrs, }; static const struct attribute_group *fcoe_ctlr_attr_groups[] = { &fcoe_ctlr_attr_group, &fcoe_ctlr_lesb_attr_group, NULL, }; static struct attribute *fcoe_fcf_attrs[] = { &device_attr_fcoe_fcf_fabric_name.attr, &device_attr_fcoe_fcf_switch_name.attr, &device_attr_fcoe_fcf_dev_loss_tmo.attr, &device_attr_fcoe_fcf_fc_map.attr, &device_attr_fcoe_fcf_vfid.attr, &device_attr_fcoe_fcf_mac.attr, &device_attr_fcoe_fcf_priority.attr, &device_attr_fcoe_fcf_fka_period.attr, &device_attr_fcoe_fcf_state.attr, &device_attr_fcoe_fcf_selected.attr, &device_attr_fcoe_fcf_vlan_id.attr, NULL }; static struct 
attribute_group fcoe_fcf_attr_group = { .attrs = fcoe_fcf_attrs, }; static const struct attribute_group *fcoe_fcf_attr_groups[] = { &fcoe_fcf_attr_group, NULL, }; static struct bus_type fcoe_bus_type; static int fcoe_bus_match(struct device *dev, struct device_driver *drv) { if (dev->bus == &fcoe_bus_type) return 1; return 0; } /** * fcoe_ctlr_device_release() - Release the FIP ctlr memory * @dev: Pointer to the FIP ctlr's embedded device * * Called when the last FIP ctlr reference is released. */ static void fcoe_ctlr_device_release(struct device *dev) { struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); kfree(ctlr); } /** * fcoe_fcf_device_release() - Release the FIP fcf memory * @dev: Pointer to the fcf's embedded device * * Called when the last FIP fcf reference is released. */ static void fcoe_fcf_device_release(struct device *dev) { struct fcoe_fcf_device *fcf = dev_to_fcf(dev); kfree(fcf); } static struct device_type fcoe_ctlr_device_type = { .name = "fcoe_ctlr", .groups = fcoe_ctlr_attr_groups, .release = fcoe_ctlr_device_release, }; static struct device_type fcoe_fcf_device_type = { .name = "fcoe_fcf", .groups = fcoe_fcf_attr_groups, .release = fcoe_fcf_device_release, }; static BUS_ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store); static BUS_ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store); static struct attribute *fcoe_bus_attrs[] = { &bus_attr_ctlr_create.attr, &bus_attr_ctlr_destroy.attr, NULL, }; ATTRIBUTE_GROUPS(fcoe_bus); static struct bus_type fcoe_bus_type = { .name = "fcoe", .match = &fcoe_bus_match, .bus_groups = fcoe_bus_groups, }; /** * fcoe_ctlr_device_flush_work() - Flush a FIP ctlr's workqueue * @ctlr: Pointer to the FIP ctlr whose workqueue is to be flushed */ static void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr) { if (!fcoe_ctlr_work_q(ctlr)) { printk(KERN_ERR "ERROR: FIP Ctlr '%d' attempted to flush work, " "when no workqueue created.\n", ctlr->id); dump_stack(); return; } 
flush_workqueue(fcoe_ctlr_work_q(ctlr)); } /** * fcoe_ctlr_device_queue_work() - Schedule work for a FIP ctlr's workqueue * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue * @work: Work to queue for execution * * Return value: * 1 on success / 0 already queued / < 0 for error */ static int fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr, struct work_struct *work) { if (unlikely(!fcoe_ctlr_work_q(ctlr))) { printk(KERN_ERR "ERROR: FIP Ctlr '%d' attempted to queue work, " "when no workqueue created.\n", ctlr->id); dump_stack(); return -EINVAL; } return queue_work(fcoe_ctlr_work_q(ctlr), work); } /** * fcoe_ctlr_device_flush_devloss() - Flush a FIP ctlr's devloss workqueue * @ctlr: Pointer to FIP ctlr whose workqueue is to be flushed */ static void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr) { if (!fcoe_ctlr_devloss_work_q(ctlr)) { printk(KERN_ERR "ERROR: FIP Ctlr '%d' attempted to flush work, " "when no workqueue created.\n", ctlr->id); dump_stack(); return; } flush_workqueue(fcoe_ctlr_devloss_work_q(ctlr)); } /** * fcoe_ctlr_device_queue_devloss_work() - Schedule work for a FIP ctlr's devloss workqueue * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue * @work: Work to queue for execution * @delay: jiffies to delay the work queuing * * Return value: * 1 on success / 0 already queued / < 0 for error */ static int fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr, struct delayed_work *work, unsigned long delay) { if (unlikely(!fcoe_ctlr_devloss_work_q(ctlr))) { printk(KERN_ERR "ERROR: FIP Ctlr '%d' attempted to queue work, " "when no workqueue created.\n", ctlr->id); dump_stack(); return -EINVAL; } return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay); } static int fcoe_fcf_device_match(struct fcoe_fcf_device *new, struct fcoe_fcf_device *old) { if (new->switch_name == old->switch_name && new->fabric_name == old->fabric_name && new->fc_map == old->fc_map && 
ether_addr_equal(new->mac, old->mac)) return 1; return 0; } /** * fcoe_ctlr_device_add() - Add a FIP ctlr to sysfs * @parent: The parent device to which the fcoe_ctlr instance * should be attached * @f: The LLD's FCoE sysfs function template pointer * @priv_size: Size to be allocated with the fcoe_ctlr_device for the LLD * * This routine allocates a FIP ctlr object with some additional memory * for the LLD. The FIP ctlr is initialized, added to sysfs and then * attributes are added to it. */ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent, struct fcoe_sysfs_function_template *f, int priv_size) { struct fcoe_ctlr_device *ctlr; int error = 0; ctlr = kzalloc(sizeof(struct fcoe_ctlr_device) + priv_size, GFP_KERNEL); if (!ctlr) goto out; ctlr->id = atomic_inc_return(&ctlr_num) - 1; ctlr->f = f; ctlr->mode = FIP_CONN_TYPE_FABRIC; INIT_LIST_HEAD(&ctlr->fcfs); mutex_init(&ctlr->lock); ctlr->dev.parent = parent; ctlr->dev.bus = &fcoe_bus_type; ctlr->dev.type = &fcoe_ctlr_device_type; ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo; snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name), "ctlr_wq_%d", ctlr->id); ctlr->work_q = create_singlethread_workqueue( ctlr->work_q_name); if (!ctlr->work_q) goto out_del; snprintf(ctlr->devloss_work_q_name, sizeof(ctlr->devloss_work_q_name), "ctlr_dl_wq_%d", ctlr->id); ctlr->devloss_work_q = create_singlethread_workqueue( ctlr->devloss_work_q_name); if (!ctlr->devloss_work_q) goto out_del_q; dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id); error = device_register(&ctlr->dev); if (error) goto out_del_q2; return ctlr; out_del_q2: destroy_workqueue(ctlr->devloss_work_q); ctlr->devloss_work_q = NULL; out_del_q: destroy_workqueue(ctlr->work_q); ctlr->work_q = NULL; out_del: kfree(ctlr); out: return NULL; } EXPORT_SYMBOL_GPL(fcoe_ctlr_device_add); /** * fcoe_ctlr_device_delete() - Delete a FIP ctlr and its subtree from sysfs * @ctlr: A pointer to the ctlr to be deleted * * Deletes a FIP ctlr and any fcfs attached * to it. 
Deleting fcfs will cause their children
 * to be deleted as well.
 *
 * The ctlr is detached from sysfs and its resources
 * are freed (work q), but the memory is not freed
 * until its last reference is released.
 *
 * This routine expects no locks to be held before
 * calling.
 *
 * TODO: Currently there are no callbacks to clean up LLD data
 * for a fcoe_fcf_device. LLDs must keep this in mind as they need
 * to clean up each of their LLD data for all fcoe_fcf_device before
 * calling fcoe_ctlr_device_delete.
 */
void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *ctlr)
{
	struct fcoe_fcf_device *fcf, *next;
	/* Remove any attached fcfs */
	mutex_lock(&ctlr->lock);
	list_for_each_entry_safe(fcf, next,
				 &ctlr->fcfs, peers) {
		list_del(&fcf->peers);
		fcf->state = FCOE_FCF_STATE_DELETED;
		/* Final teardown of each fcf runs on the ctlr workqueue
		 * so it happens outside ctlr->lock. */
		fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
	}
	mutex_unlock(&ctlr->lock);

	/* Wait for all queued fcf deletions before tearing down the queues. */
	fcoe_ctlr_device_flush_work(ctlr);

	destroy_workqueue(ctlr->devloss_work_q);
	ctlr->devloss_work_q = NULL;
	destroy_workqueue(ctlr->work_q);
	ctlr->work_q = NULL;

	device_unregister(&ctlr->dev);
}
EXPORT_SYMBOL_GPL(fcoe_ctlr_device_delete);

/**
 * fcoe_fcf_device_final_delete() - Final delete routine
 * @work: The FIP fcf's embedded work struct
 *
 * It is expected that the fcf has been removed from
 * the FIP ctlr's list before calling this routine.
 */
static void fcoe_fcf_device_final_delete(struct work_struct *work)
{
	struct fcoe_fcf_device *fcf =
		container_of(work, struct fcoe_fcf_device, delete_work);
	struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);

	/*
	 * Cancel any outstanding timers.
These should really exist
	 * only when rmmod'ing the LLDD and we're asking for
	 * immediate termination of the rports
	 */
	if (!cancel_delayed_work(&fcf->dev_loss_work))
		/* The devloss work may already be running; flush it so it
		 * has finished before the device goes away. */
		fcoe_ctlr_device_flush_devloss(ctlr);

	device_unregister(&fcf->dev);
}

/**
 * fip_timeout_deleted_fcf() - Delete a fcf when the devloss timer fires
 * @work: The FIP fcf's embedded work struct
 *
 * Removes the fcf from the FIP ctlr's list of fcfs and
 * queues the final deletion.
 */
static void fip_timeout_deleted_fcf(struct work_struct *work)
{
	struct fcoe_fcf_device *fcf =
		container_of(work, struct fcoe_fcf_device, dev_loss_work.work);
	struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);

	mutex_lock(&ctlr->lock);

	/*
	 * If the fcf is deleted or reconnected before the timer
	 * fires the devloss queue will be flushed, but the state will
	 * either be CONNECTED or DELETED. If that is the case we
	 * cancel deleting the fcf.
	 */
	if (fcf->state != FCOE_FCF_STATE_DISCONNECTED)
		goto out;

	dev_printk(KERN_ERR, &fcf->dev,
		   "FIP fcf connection time out: removing fcf\n");

	list_del(&fcf->peers);
	fcf->state = FCOE_FCF_STATE_DELETED;
	fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);

out:
	mutex_unlock(&ctlr->lock);
}

/**
 * fcoe_fcf_device_delete() - Delete a FIP fcf
 * @fcf: Pointer to the fcf which is to be deleted
 *
 * Queues the FIP fcf on the devloss workqueue
 *
 * Expects the ctlr_attrs mutex to be held for fcf
 * state change.
 */
void fcoe_fcf_device_delete(struct fcoe_fcf_device *fcf)
{
	struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
	int timeout = fcf->dev_loss_tmo;

	if (fcf->state != FCOE_FCF_STATE_CONNECTED)
		return;

	fcf->state = FCOE_FCF_STATE_DISCONNECTED;

	/*
	 * FCF will only be re-connected by the LLD calling
	 * fcoe_fcf_device_add, and it should be setting up
	 * priv then.
*/ fcf->priv = NULL; fcoe_ctlr_device_queue_devloss_work(ctlr, &fcf->dev_loss_work, timeout * HZ); } EXPORT_SYMBOL_GPL(fcoe_fcf_device_delete); /** * fcoe_fcf_device_add() - Add a FCoE sysfs fcoe_fcf_device to the system * @ctlr: The fcoe_ctlr_device that will be the fcoe_fcf_device parent * @new_fcf: A temporary FCF used for lookups on the current list of fcfs * * Expects to be called with the ctlr->lock held */ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr, struct fcoe_fcf_device *new_fcf) { struct fcoe_fcf_device *fcf; int error = 0; list_for_each_entry(fcf, &ctlr->fcfs, peers) { if (fcoe_fcf_device_match(new_fcf, fcf)) { if (fcf->state == FCOE_FCF_STATE_CONNECTED) return fcf; fcf->state = FCOE_FCF_STATE_CONNECTED; if (!cancel_delayed_work(&fcf->dev_loss_work)) fcoe_ctlr_device_flush_devloss(ctlr); return fcf; } } fcf = kzalloc(sizeof(struct fcoe_fcf_device), GFP_ATOMIC); if (unlikely(!fcf)) goto out; INIT_WORK(&fcf->delete_work, fcoe_fcf_device_final_delete); INIT_DELAYED_WORK(&fcf->dev_loss_work, fip_timeout_deleted_fcf); fcf->dev.parent = &ctlr->dev; fcf->dev.bus = &fcoe_bus_type; fcf->dev.type = &fcoe_fcf_device_type; fcf->id = atomic_inc_return(&fcf_num) - 1; fcf->state = FCOE_FCF_STATE_UNKNOWN; fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo; dev_set_name(&fcf->dev, "fcf_%d", fcf->id); fcf->fabric_name = new_fcf->fabric_name; fcf->switch_name = new_fcf->switch_name; fcf->fc_map = new_fcf->fc_map; fcf->vfid = new_fcf->vfid; memcpy(fcf->mac, new_fcf->mac, ETH_ALEN); fcf->priority = new_fcf->priority; fcf->fka_period = new_fcf->fka_period; fcf->selected = new_fcf->selected; error = device_register(&fcf->dev); if (error) goto out_del; fcf->state = FCOE_FCF_STATE_CONNECTED; list_add_tail(&fcf->peers, &ctlr->fcfs); return fcf; out_del: kfree(fcf); out: return NULL; } EXPORT_SYMBOL_GPL(fcoe_fcf_device_add); int __init fcoe_sysfs_setup(void) { int error; atomic_set(&ctlr_num, 0); atomic_set(&fcf_num, 0); error = bus_register(&fcoe_bus_type); 
if (error)
		return error;

	return 0;
}

/* Unregister the fcoe bus; counterpart of fcoe_sysfs_setup(). */
void __exit fcoe_sysfs_teardown(void)
{
	bus_unregister(&fcoe_bus_type);
}
gpl-2.0
jejecule/kernel_despair_find7
arch/arm/mach-msm/board-msm7x30.c
1940
188369
/* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/err.h> #include <linux/kernel.h> #include <linux/irq.h> #include <linux/gpio.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/bootmem.h> #include <linux/io.h> #ifdef CONFIG_SPI_QSD #include <linux/spi/spi.h> #endif #include <linux/msm_ssbi.h> #include <linux/mfd/pmic8058.h> #include <linux/leds.h> #include <linux/mfd/marimba.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/smsc911x.h> #include <linux/ofn_atlab.h> #include <linux/power_supply.h> #include <linux/i2c/isa1200.h> #include <linux/i2c/tsc2007.h> #include <linux/input/kp_flip_switch.h> #include <linux/leds-pmic8058.h> #include <linux/input/cy8c_ts.h> #include <linux/msm_adc.h> #include <linux/dma-mapping.h> #include <linux/regulator/consumer.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/setup.h> #include <mach/mpp.h> #include <mach/board.h> #include <mach/camera.h> #include <mach/memory.h> #include <mach/msm_iomap.h> #include <mach/msm_hsusb.h> #include <mach/rpc_hsusb.h> #include <mach/msm_spi.h> #include <mach/qdsp5v2/msm_lpa.h> #include <mach/dma.h> #include <linux/input/msm_ts.h> #include <mach/pmic.h> #include <mach/rpc_pmapp.h> #include <mach/qdsp5v2/aux_pcm.h> #include <mach/qdsp5v2/mi2s.h> #include <mach/qdsp5v2/audio_dev_ctl.h> #include <mach/msm_battery.h> #include <mach/rpc_server_handset.h> #include <mach/msm_tsif.h> #include <mach/socinfo.h> #include <mach/msm_memtypes.h> #include 
<linux/cyttsp-qc.h> #include <asm/mach/mmc.h> #include <asm/mach/flash.h> #include <mach/vreg.h> #include <linux/platform_data/qcom_crypto_device.h> #include "devices.h" #include "timer.h" #ifdef CONFIG_USB_G_ANDROID #include <linux/usb/android.h> #include <mach/usbdiag.h> #endif #include "pm.h" #include "pm-boot.h" #include "spm.h" #include "acpuclock.h" #include "clock.h" #include <mach/dal_axi.h> #include <mach/msm_serial_hs.h> #include <mach/qdsp5v2/mi2s.h> #include <mach/qdsp5v2/audio_dev_ctl.h> #include <mach/sdio_al.h> #include "smd_private.h" #include <linux/bma150.h> #include "board-msm7x30-regulator.h" #include "pm.h" #define MSM_PMEM_SF_SIZE 0x1700000 #ifdef CONFIG_FB_MSM_TRIPLE_BUFFER #define MSM_FB_PRIM_BUF_SIZE (864 * 480 * 4 * 3) /* 4bpp * 3 Pages */ #else #define MSM_FB_PRIM_BUF_SIZE (864 * 480 * 4 * 2) /* 4bpp * 2 Pages */ #endif /* * Reserve space for double buffered full screen * res V4L2 video overlay - i.e. 1280x720x1.5x2 */ #define MSM_V4L2_VIDEO_OVERLAY_BUF_SIZE 2764800 #ifdef CONFIG_FB_MSM_HDMI_ADV7520_PANEL #define MSM_FB_EXT_BUF_SIZE (1280 * 720 * 2 * 1) /* 2 bpp x 1 page */ #else #define MSM_FB_EXT_BUF_SIZE 0 #endif #ifdef CONFIG_FB_MSM_OVERLAY0_WRITEBACK /* width x height x 3 bpp x 2 frame buffer */ #define MSM_FB_OVERLAY0_WRITEBACK_SIZE roundup((864 * 480 * 3 * 2), 4096) #else #define MSM_FB_OVERLAY0_WRITEBACK_SIZE 0 #endif #define MSM_FB_SIZE roundup(MSM_FB_PRIM_BUF_SIZE + MSM_FB_EXT_BUF_SIZE, 4096) #define MSM_PMEM_ADSP_SIZE 0x1E00000 #define MSM_FLUID_PMEM_ADSP_SIZE 0x2800000 #define PMEM_KERNEL_EBI0_SIZE 0x600000 #define MSM_PMEM_AUDIO_SIZE 0x200000 #ifdef CONFIG_ION_MSM static struct platform_device ion_dev; #define MSM_ION_AUDIO_SIZE (MSM_PMEM_AUDIO_SIZE + PMEM_KERNEL_EBI0_SIZE) #define MSM_ION_SF_SIZE MSM_PMEM_SF_SIZE #define MSM_ION_HEAP_NUM 4 #endif #define PMIC_GPIO_INT 27 #define PMIC_VREG_WLAN_LEVEL 2900 #define PMIC_GPIO_SD_DET 36 #define PMIC_GPIO_SDC4_EN_N 17 /* PMIC GPIO Number 18 */ #define PMIC_GPIO_HDMI_5V_EN_V3 32 /* 
PMIC GPIO for V3 H/W */ #define PMIC_GPIO_HDMI_5V_EN_V2 39 /* PMIC GPIO for V2 H/W */ #define ADV7520_I2C_ADDR 0x39 #define FPGA_SDCC_STATUS 0x8E0001A8 #define FPGA_OPTNAV_GPIO_ADDR 0x8E000026 #define OPTNAV_I2C_SLAVE_ADDR (0xB0 >> 1) #define OPTNAV_IRQ 20 #define OPTNAV_CHIP_SELECT 19 #define PMIC_GPIO_SDC4_PWR_EN_N 24 /* PMIC GPIO Number 25 */ /* Macros assume PMIC GPIOs start at 0 */ #define PM8058_GPIO_PM_TO_SYS(pm_gpio) (pm_gpio + NR_GPIO_IRQS) #define PM8058_GPIO_SYS_TO_PM(sys_gpio) (sys_gpio - NR_GPIO_IRQS) #define PM8058_MPP_BASE PM8058_GPIO_PM_TO_SYS(PM8058_GPIOS) #define PM8058_MPP_PM_TO_SYS(pm_gpio) (pm_gpio + PM8058_MPP_BASE) #define PMIC_GPIO_FLASH_BOOST_ENABLE 15 /* PMIC GPIO Number 16 */ #define PMIC_GPIO_HAP_ENABLE 16 /* PMIC GPIO Number 17 */ #define PMIC_GPIO_WLAN_EXT_POR 22 /* PMIC GPIO NUMBER 23 */ #define BMA150_GPIO_INT 1 #define HAP_LVL_SHFT_MSM_GPIO 24 #define PMIC_GPIO_QUICKVX_CLK 37 /* PMIC GPIO 38 */ #define PM_FLIP_MPP 5 /* PMIC MPP 06 */ #define DDR1_BANK_BASE 0X20000000 #define DDR2_BANK_BASE 0X40000000 static unsigned int phys_add = DDR2_BANK_BASE; unsigned long ebi1_phys_offset = DDR2_BANK_BASE; EXPORT_SYMBOL(ebi1_phys_offset); struct pm8xxx_gpio_init_info { unsigned gpio; struct pm_gpio config; }; static int pm8058_gpios_init(void) { int rc; struct pm8xxx_gpio_init_info sdc4_en = { PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_SDC4_EN_N), { .direction = PM_GPIO_DIR_OUT, .pull = PM_GPIO_PULL_NO, .vin_sel = PM8058_GPIO_VIN_L5, .function = PM_GPIO_FUNC_NORMAL, .inv_int_pol = 0, .out_strength = PM_GPIO_STRENGTH_LOW, .output_value = 0, }, }; struct pm8xxx_gpio_init_info sdc4_pwr_en = { PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_SDC4_PWR_EN_N), { .direction = PM_GPIO_DIR_OUT, .pull = PM_GPIO_PULL_NO, .vin_sel = PM8058_GPIO_VIN_L5, .function = PM_GPIO_FUNC_NORMAL, .inv_int_pol = 0, .out_strength = PM_GPIO_STRENGTH_LOW, .output_value = 0, }, }; struct pm8xxx_gpio_init_info haptics_enable = { PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_HAP_ENABLE), { .direction = 
PM_GPIO_DIR_OUT, .pull = PM_GPIO_PULL_NO, .out_strength = PM_GPIO_STRENGTH_HIGH, .function = PM_GPIO_FUNC_NORMAL, .inv_int_pol = 0, .vin_sel = 2, .output_buffer = PM_GPIO_OUT_BUF_CMOS, .output_value = 0, }, }; struct pm8xxx_gpio_init_info hdmi_5V_en = { PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_HDMI_5V_EN_V3), { .direction = PM_GPIO_DIR_OUT, .pull = PM_GPIO_PULL_NO, .vin_sel = PM8058_GPIO_VIN_VPH, .function = PM_GPIO_FUNC_NORMAL, .out_strength = PM_GPIO_STRENGTH_LOW, .output_value = 0, }, }; struct pm8xxx_gpio_init_info flash_boost_enable = { PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_FLASH_BOOST_ENABLE), { .direction = PM_GPIO_DIR_OUT, .output_buffer = PM_GPIO_OUT_BUF_CMOS, .output_value = 0, .pull = PM_GPIO_PULL_NO, .vin_sel = PM8058_GPIO_VIN_S3, .out_strength = PM_GPIO_STRENGTH_HIGH, .function = PM_GPIO_FUNC_2, }, }; struct pm8xxx_gpio_init_info gpio23 = { PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_WLAN_EXT_POR), { .direction = PM_GPIO_DIR_OUT, .output_buffer = PM_GPIO_OUT_BUF_CMOS, .output_value = 0, .pull = PM_GPIO_PULL_NO, .vin_sel = 2, .out_strength = PM_GPIO_STRENGTH_LOW, .function = PM_GPIO_FUNC_NORMAL, } }; struct pm8xxx_gpio_init_info sdcc_det = { PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_SD_DET - 1), { .direction = PM_GPIO_DIR_IN, .pull = PM_GPIO_PULL_UP_1P5, .vin_sel = 2, .function = PM_GPIO_FUNC_NORMAL, .inv_int_pol = 0, }, }; if (machine_is_msm7x30_fluid()) sdcc_det.config.inv_int_pol = 1; rc = pm8xxx_gpio_config(sdcc_det.gpio, &sdcc_det.config); if (rc) { pr_err("%s PMIC_GPIO_SD_DET config failed\n", __func__); return rc; } if (machine_is_msm8x55_svlte_surf() || machine_is_msm8x55_svlte_ffa() || machine_is_msm7x30_fluid()) hdmi_5V_en.gpio = PMIC_GPIO_HDMI_5V_EN_V2; else hdmi_5V_en.gpio = PMIC_GPIO_HDMI_5V_EN_V3; hdmi_5V_en.gpio = PM8058_GPIO_PM_TO_SYS(hdmi_5V_en.gpio); rc = pm8xxx_gpio_config(hdmi_5V_en.gpio, &hdmi_5V_en.config); if (rc) { pr_err("%s PMIC_GPIO_HDMI_5V_EN config failed\n", __func__); return rc; } /* Deassert GPIO#23 (source for Ext_POR on WLAN-Volans) */ rc = 
pm8xxx_gpio_config(gpio23.gpio, &gpio23.config); if (rc) { pr_err("%s PMIC_GPIO_WLAN_EXT_POR config failed\n", __func__); return rc; } if (machine_is_msm7x30_fluid()) { /* Haptics gpio */ rc = pm8xxx_gpio_config(haptics_enable.gpio, &haptics_enable.config); if (rc) { pr_err("%s: PMIC GPIO %d write failed\n", __func__, haptics_enable.gpio); return rc; } /* Flash boost gpio */ rc = pm8xxx_gpio_config(flash_boost_enable.gpio, &flash_boost_enable.config); if (rc) { pr_err("%s: PMIC GPIO %d write failed\n", __func__, flash_boost_enable.gpio); return rc; } /* SCD4 gpio */ rc = pm8xxx_gpio_config(sdc4_en.gpio, &sdc4_en.config); if (rc) { pr_err("%s PMIC_GPIO_SDC4_EN_N config failed\n", __func__); return rc; } rc = gpio_request(sdc4_en.gpio, "sdc4_en"); if (rc) { pr_err("%s PMIC_GPIO_SDC4_EN_N gpio_request failed\n", __func__); return rc; } gpio_set_value_cansleep(sdc4_en.gpio, 0); } /* FFA -> gpio_25 controls vdd of sdcc4 */ else { /* SCD4 gpio_25 */ rc = pm8xxx_gpio_config(sdc4_pwr_en.gpio, &sdc4_pwr_en.config); if (rc) { pr_err("%s PMIC_GPIO_SDC4_PWR_EN_N config failed: %d\n", __func__, rc); return rc; } rc = gpio_request(sdc4_pwr_en.gpio, "sdc4_pwr_en"); if (rc) { pr_err("PMIC_GPIO_SDC4_PWR_EN_N gpio_req failed: %d\n", rc); return rc; } } return 0; } /* Regulator API support */ #ifdef CONFIG_MSM_PROC_COMM_REGULATOR static struct platform_device msm_proccomm_regulator_dev = { .name = PROCCOMM_REGULATOR_DEV_NAME, .id = -1, .dev = { .platform_data = &msm7x30_proccomm_regulator_data } }; #endif /*virtual key support */ static ssize_t tma300_vkeys_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, __stringify(EV_KEY) ":" __stringify(KEY_BACK) ":50:842:80:100" ":" __stringify(EV_KEY) ":" __stringify(KEY_MENU) ":170:842:80:100" ":" __stringify(EV_KEY) ":" __stringify(KEY_HOME) ":290:842:80:100" ":" __stringify(EV_KEY) ":" __stringify(KEY_SEARCH) ":410:842:80:100" "\n"); } static struct kobj_attribute tma300_vkeys_attr = { .attr = { .mode 
= S_IRUGO,
	},
	.show = &tma300_vkeys_show,
};

/* sysfs attribute group exposing the virtual-key map under
 * /sys/board_properties for the TMA300 touchscreen. */
static struct attribute *tma300_properties_attrs[] = {
	&tma300_vkeys_attr.attr,
	NULL
};

static struct attribute_group tma300_properties_attr_group = {
	.attrs = tma300_properties_attrs,
};

static struct kobject *properties_kobj;

/* Regulators required by the Cypress touchscreen controller. */
static struct regulator_bulk_data cyttsp_regs[] = {
	{
		.supply		= "ldo8",
		.min_uV		= 1800000,
		.max_uV		= 1800000,
	},
	{
		.supply		= "ldo15",
		.min_uV		= 3050000,
		.max_uV		= 3100000,
	},
};

#define CYTTSP_TS_GPIO_IRQ	150

/*
 * One-time board init hook for the cyttsp touchscreen driver:
 * powers the controller, sanity-checks the I2C link, configures the
 * IRQ gpio and publishes the virtual-key map.
 * Returns CY_OK on success, a negative errno on failure.
 */
static int cyttsp_platform_init(struct i2c_client *client)
{
	int rc = -EINVAL;

	rc = regulator_bulk_get(NULL, ARRAY_SIZE(cyttsp_regs), cyttsp_regs);
	if (rc) {
		pr_err("%s: could not get regulators: %d\n",
				__func__, rc);
		goto out;
	}

	rc = regulator_bulk_set_voltage(ARRAY_SIZE(cyttsp_regs), cyttsp_regs);
	if (rc) {
		pr_err("%s: could not set regulator voltages: %d\n",
				__func__, rc);
		goto regs_free;
	}

	rc = regulator_bulk_enable(ARRAY_SIZE(cyttsp_regs), cyttsp_regs);
	if (rc) {
		pr_err("%s: could not enable regulators: %d\n",
				__func__, rc);
		goto regs_free;
	}

	/* check this device active by reading first byte/register */
	rc = i2c_smbus_read_byte_data(client, 0x01);
	if (rc < 0) {
		pr_err("%s: i2c sanity check failed\n", __func__);
		goto regs_disable;
	}

	rc = gpio_tlmm_config(GPIO_CFG(CYTTSP_TS_GPIO_IRQ, 0, GPIO_CFG_INPUT,
					GPIO_CFG_PULL_UP, GPIO_CFG_6MA), GPIO_CFG_ENABLE);
	if (rc) {
		pr_err("%s: Could not configure gpio %d\n",
					__func__, CYTTSP_TS_GPIO_IRQ);
		goto regs_disable;
	}

	/* virtual keys */
	tma300_vkeys_attr.attr.name = "virtualkeys.cyttsp-i2c";
	properties_kobj = kobject_create_and_add("board_properties",
				NULL);
	if (properties_kobj)
		rc = sysfs_create_group(properties_kobj,
			&tma300_properties_attr_group);
	/* NOTE(review): virtual-key export failure is deliberately
	 * non-fatal — the touchscreen still works without it. */
	if (!properties_kobj || rc)
		pr_err("%s: failed to create board_properties\n",
				__func__);

	return CY_OK;

regs_disable:
	regulator_bulk_disable(ARRAY_SIZE(cyttsp_regs), cyttsp_regs);
regs_free:
	regulator_bulk_free(ARRAY_SIZE(cyttsp_regs), cyttsp_regs);
out:
	return rc;
}

/* TODO: Put the regulator to LPM / HPM
in suspend/resume*/ static int cyttsp_platform_suspend(struct i2c_client *client) { msleep(20); return CY_OK; } static int cyttsp_platform_resume(struct i2c_client *client) { /* add any special code to strobe a wakeup pin or chip reset */ mdelay(10); return CY_OK; } static struct cyttsp_platform_data cyttsp_data = { .fw_fname = "cyttsp_7630_fluid.hex", .panel_maxx = 479, .panel_maxy = 799, .disp_maxx = 469, .disp_maxy = 799, .disp_minx = 10, .disp_miny = 0, .flags = 0, .gen = CY_GEN3, /* or */ .use_st = CY_USE_ST, .use_mt = CY_USE_MT, .use_hndshk = CY_SEND_HNDSHK, .use_trk_id = CY_USE_TRACKING_ID, .use_sleep = CY_USE_DEEP_SLEEP_SEL | CY_USE_LOW_POWER_SEL, .use_gestures = CY_USE_GESTURES, /* activate up to 4 groups * and set active distance */ .gest_set = CY_GEST_GRP1 | CY_GEST_GRP2 | CY_GEST_GRP3 | CY_GEST_GRP4 | CY_ACT_DIST, /* change act_intrvl to customize the Active power state * scanning/processing refresh interval for Operating mode */ .act_intrvl = CY_ACT_INTRVL_DFLT, /* change tch_tmout to customize the touch timeout for the * Active power state for Operating mode */ .tch_tmout = CY_TCH_TMOUT_DFLT, /* change lp_intrvl to customize the Low Power power state * scanning/processing refresh interval for Operating mode */ .lp_intrvl = CY_LP_INTRVL_DFLT, .resume = cyttsp_platform_resume, .suspend = cyttsp_platform_suspend, .init = cyttsp_platform_init, .sleep_gpio = -1, .resout_gpio = -1, .irq_gpio = CYTTSP_TS_GPIO_IRQ, .correct_fw_ver = 2, }; static int pm8058_pwm_config(struct pwm_device *pwm, int ch, int on) { struct pm_gpio pwm_gpio_config = { .direction = PM_GPIO_DIR_OUT, .output_buffer = PM_GPIO_OUT_BUF_CMOS, .output_value = 0, .pull = PM_GPIO_PULL_NO, .vin_sel = PM8058_GPIO_VIN_S3, .out_strength = PM_GPIO_STRENGTH_HIGH, .function = PM_GPIO_FUNC_2, }; int rc = -EINVAL; int id, mode, max_mA; id = mode = max_mA = 0; switch (ch) { case 0: case 1: case 2: if (on) { id = 24 + ch; rc = pm8xxx_gpio_config(PM8058_GPIO_PM_TO_SYS(id - 1), &pwm_gpio_config); if (rc) 
pr_err("%s: pm8xxx_gpio_config(%d): rc=%d\n", __func__, id, rc); } break; case 3: id = PM_PWM_LED_KPD; mode = PM_PWM_CONF_DTEST3; max_mA = 200; break; case 4: id = PM_PWM_LED_0; mode = PM_PWM_CONF_PWM1; max_mA = 40; break; case 5: id = PM_PWM_LED_2; mode = PM_PWM_CONF_PWM2; max_mA = 40; break; case 6: id = PM_PWM_LED_FLASH; mode = PM_PWM_CONF_DTEST3; max_mA = 200; break; default: break; } if (ch >= 3 && ch <= 6) { if (!on) { mode = PM_PWM_CONF_NONE; max_mA = 0; } rc = pm8058_pwm_config_led(pwm, id, mode, max_mA); if (rc) pr_err("%s: pm8058_pwm_config_led(ch=%d): rc=%d\n", __func__, ch, rc); } return rc; } static int pm8058_pwm_enable(struct pwm_device *pwm, int ch, int on) { int rc; switch (ch) { case 7: rc = pm8058_pwm_set_dtest(pwm, on); if (rc) pr_err("%s: pwm_set_dtest(%d): rc=%d\n", __func__, on, rc); break; default: rc = -EINVAL; break; } return rc; } static const unsigned int fluid_keymap[] = { KEY(0, 0, KEY_7), KEY(0, 1, KEY_ENTER), KEY(0, 2, KEY_UP), /* drop (0,3) as it always shows up in pair with(0,2) */ KEY(0, 4, KEY_DOWN), KEY(1, 0, KEY_CAMERA_SNAPSHOT), KEY(1, 1, KEY_SELECT), KEY(1, 2, KEY_1), KEY(1, 3, KEY_VOLUMEUP), KEY(1, 4, KEY_VOLUMEDOWN), }; static const unsigned int surf_keymap[] = { KEY(0, 0, KEY_7), KEY(0, 1, KEY_DOWN), KEY(0, 2, KEY_UP), KEY(0, 3, KEY_RIGHT), KEY(0, 4, KEY_ENTER), KEY(0, 5, KEY_L), KEY(0, 6, KEY_BACK), KEY(0, 7, KEY_M), KEY(1, 0, KEY_LEFT), KEY(1, 1, KEY_SEND), KEY(1, 2, KEY_1), KEY(1, 3, KEY_4), KEY(1, 4, KEY_CLEAR), KEY(1, 5, KEY_MSDOS), KEY(1, 6, KEY_SPACE), KEY(1, 7, KEY_COMMA), KEY(2, 0, KEY_6), KEY(2, 1, KEY_5), KEY(2, 2, KEY_8), KEY(2, 3, KEY_3), KEY(2, 4, KEY_NUMERIC_STAR), KEY(2, 5, KEY_UP), KEY(2, 6, KEY_DOWN), /* SYN */ KEY(2, 7, KEY_LEFTSHIFT), KEY(3, 0, KEY_9), KEY(3, 1, KEY_NUMERIC_POUND), KEY(3, 2, KEY_0), KEY(3, 3, KEY_2), KEY(3, 4, KEY_SLEEP), KEY(3, 5, KEY_F1), KEY(3, 6, KEY_F2), KEY(3, 7, KEY_F3), KEY(4, 0, KEY_BACK), KEY(4, 1, KEY_HOME), KEY(4, 2, KEY_MENU), KEY(4, 3, KEY_VOLUMEUP), KEY(4, 4, 
KEY_VOLUMEDOWN), KEY(4, 5, KEY_F4), KEY(4, 6, KEY_F5), KEY(4, 7, KEY_F6), KEY(5, 0, KEY_R), KEY(5, 1, KEY_T), KEY(5, 2, KEY_Y), KEY(5, 3, KEY_LEFTALT), KEY(5, 4, KEY_KPENTER), KEY(5, 5, KEY_Q), KEY(5, 6, KEY_W), KEY(5, 7, KEY_E), KEY(6, 0, KEY_F), KEY(6, 1, KEY_G), KEY(6, 2, KEY_H), KEY(6, 3, KEY_CAPSLOCK), KEY(6, 4, KEY_PAGEUP), KEY(6, 5, KEY_A), KEY(6, 6, KEY_S), KEY(6, 7, KEY_D), KEY(7, 0, KEY_V), KEY(7, 1, KEY_B), KEY(7, 2, KEY_N), KEY(7, 3, KEY_MENU), /* REVISIT - SYM */ KEY(7, 4, KEY_PAGEDOWN), KEY(7, 5, KEY_Z), KEY(7, 6, KEY_X), KEY(7, 7, KEY_C), KEY(8, 0, KEY_P), KEY(8, 1, KEY_J), KEY(8, 2, KEY_K), KEY(8, 3, KEY_INSERT), KEY(8, 4, KEY_LINEFEED), KEY(8, 5, KEY_U), KEY(8, 6, KEY_I), KEY(8, 7, KEY_O), KEY(9, 0, KEY_4), KEY(9, 1, KEY_5), KEY(9, 2, KEY_6), KEY(9, 3, KEY_7), KEY(9, 4, KEY_8), KEY(9, 5, KEY_1), KEY(9, 6, KEY_2), KEY(9, 7, KEY_3), KEY(10, 0, KEY_F7), KEY(10, 1, KEY_F8), KEY(10, 2, KEY_F9), KEY(10, 3, KEY_F10), KEY(10, 4, KEY_FN), KEY(10, 5, KEY_9), KEY(10, 6, KEY_0), KEY(10, 7, KEY_DOT), KEY(11, 0, KEY_LEFTCTRL), KEY(11, 1, KEY_F11), /* START */ KEY(11, 2, KEY_ENTER), KEY(11, 3, KEY_SEARCH), KEY(11, 4, KEY_DELETE), KEY(11, 5, KEY_RIGHT), KEY(11, 6, KEY_LEFT), KEY(11, 7, KEY_RIGHTSHIFT), }; static struct matrix_keymap_data surf_keymap_data = { .keymap_size = ARRAY_SIZE(surf_keymap), .keymap = surf_keymap, }; static struct pm8xxx_keypad_platform_data surf_keypad_data = { .input_name = "surf_keypad", .input_phys_device = "surf_keypad/input0", .num_rows = 12, .num_cols = 8, .rows_gpio_start = PM8058_GPIO_PM_TO_SYS(8), .cols_gpio_start = PM8058_GPIO_PM_TO_SYS(0), .debounce_ms = 15, .scan_delay_ms = 32, .row_hold_ns = 91500, .wakeup = 1, .keymap_data = &surf_keymap_data, }; static struct matrix_keymap_data fluid_keymap_data = { .keymap_size = ARRAY_SIZE(fluid_keymap), .keymap = fluid_keymap, }; static struct pm8xxx_keypad_platform_data fluid_keypad_data = { .input_name = "fluid-keypad", .input_phys_device = "fluid-keypad/input0", .num_rows = 5, .num_cols 
= 5, .rows_gpio_start = PM8058_GPIO_PM_TO_SYS(8), .cols_gpio_start = PM8058_GPIO_PM_TO_SYS(0), .debounce_ms = 15, .scan_delay_ms = 32, .row_hold_ns = 91500, .wakeup = 1, .keymap_data = &fluid_keymap_data, }; static struct pm8058_pwm_pdata pm8058_pwm_data = { .config = pm8058_pwm_config, .enable = pm8058_pwm_enable, }; static struct pmic8058_led pmic8058_ffa_leds[] = { [0] = { .name = "keyboard-backlight", .max_brightness = 15, .id = PMIC8058_ID_LED_KB_LIGHT, }, }; static struct pmic8058_leds_platform_data pm8058_ffa_leds_data = { .num_leds = ARRAY_SIZE(pmic8058_ffa_leds), .leds = pmic8058_ffa_leds, }; static struct pmic8058_led pmic8058_surf_leds[] = { [0] = { .name = "keyboard-backlight", .max_brightness = 15, .id = PMIC8058_ID_LED_KB_LIGHT, }, [1] = { .name = "voice:red", .max_brightness = 20, .id = PMIC8058_ID_LED_0, }, [2] = { .name = "wlan:green", .max_brightness = 20, .id = PMIC8058_ID_LED_2, }, }; static struct pmic8058_leds_platform_data pm8058_surf_leds_data = { .num_leds = ARRAY_SIZE(pmic8058_surf_leds), .leds = pmic8058_surf_leds, }; static struct pmic8058_led pmic8058_fluid_leds[] = { [0] = { .name = "keyboard-backlight", .max_brightness = 15, .id = PMIC8058_ID_LED_KB_LIGHT, }, [1] = { .name = "flash:led_0", .max_brightness = 15, .id = PMIC8058_ID_FLASH_LED_0, }, [2] = { .name = "flash:led_1", .max_brightness = 15, .id = PMIC8058_ID_FLASH_LED_1, }, }; static struct pmic8058_leds_platform_data pm8058_fluid_leds_data = { .num_leds = ARRAY_SIZE(pmic8058_fluid_leds), .leds = pmic8058_fluid_leds, }; static struct pm8xxx_irq_platform_data pm8xxx_irq_pdata = { .irq_base = PMIC8058_IRQ_BASE, .devirq = MSM_GPIO_TO_INT(PMIC_GPIO_INT), .irq_trigger_flag = IRQF_TRIGGER_LOW, }; static struct pm8xxx_gpio_platform_data pm8xxx_gpio_pdata = { .gpio_base = PM8058_GPIO_PM_TO_SYS(0), }; static struct pm8xxx_mpp_platform_data pm8xxx_mpp_pdata = { .mpp_base = PM8058_MPP_PM_TO_SYS(0), }; static struct pm8058_platform_data pm8058_7x30_data = { .irq_pdata = &pm8xxx_irq_pdata, 
.gpio_pdata = &pm8xxx_gpio_pdata, .mpp_pdata = &pm8xxx_mpp_pdata, .pwm_pdata = &pm8058_pwm_data, }; #ifdef CONFIG_MSM_SSBI static struct msm_ssbi_platform_data msm7x30_ssbi_pm8058_pdata = { .rsl_id = "D:PMIC_SSBI", .controller_type = MSM_SBI_CTRL_SSBI2, .slave = { .name = "pm8058-core", .platform_data = &pm8058_7x30_data, }, }; #endif static struct i2c_board_info cy8info[] __initdata = { { I2C_BOARD_INFO(CY_I2C_NAME, 0x24), .platform_data = &cyttsp_data, #ifndef CY_USE_TIMER .irq = MSM_GPIO_TO_INT(CYTTSP_TS_GPIO_IRQ), #endif /* CY_USE_TIMER */ }, }; #ifdef CONFIG_MSM_CAMERA_V4L2 static struct msm_camera_device_platform_data msm_camera_csi_device_data[] = { { .csiphy_core = 0, .csid_core = 0, .is_vpe = 1, .ioclk = { .vfe_clk_rate = 153600000, }, }, { .csiphy_core = 0, .csid_core = 0, .is_vpe = 1, .ioclk = { .vfe_clk_rate = 153600000, }, }, }; static struct camera_vreg_t msm_7x30_back_cam_vreg[] = { {"gp2", REG_LDO, 2600000, 2600000, -1}, {"lvsw1", REG_VS, 0, 0, 0}, }; static uint32_t camera_off_gpio_table[] = { /* parallel CAMERA interfaces */ /* RST */ GPIO_CFG(0, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT2 */ GPIO_CFG(2, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT3 */ GPIO_CFG(3, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT4 */ GPIO_CFG(4, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT5 */ GPIO_CFG(5, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT6 */ GPIO_CFG(6, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT7 */ GPIO_CFG(7, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT8 */ GPIO_CFG(8, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT9 */ GPIO_CFG(9, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT10 */ GPIO_CFG(10, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT11 */ GPIO_CFG(11, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* PCLK */ GPIO_CFG(12, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* HSYNC_IN 
*/ GPIO_CFG(13, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* VSYNC_IN */ GPIO_CFG(14, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* MCLK */ GPIO_CFG(15, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), }; static uint32_t camera_on_gpio_table[] = { /* parallel CAMERA interfaces */ /* RST */ GPIO_CFG(0, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT2 */ GPIO_CFG(2, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT3 */ GPIO_CFG(3, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT4 */ GPIO_CFG(4, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT5 */ GPIO_CFG(5, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT6 */ GPIO_CFG(6, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT7 */ GPIO_CFG(7, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT8 */ GPIO_CFG(8, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT9 */ GPIO_CFG(9, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT10 */ GPIO_CFG(10, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT11 */ GPIO_CFG(11, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* PCLK */ GPIO_CFG(12, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* HSYNC_IN */ GPIO_CFG(13, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* VSYNC_IN */ GPIO_CFG(14, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* MCLK */ GPIO_CFG(15, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), }; static struct gpio msm7x30_back_cam_gpio[] = { {0, GPIOF_DIR_OUT, "CAM_RESET"}, }; static struct msm_gpio_set_tbl msm7x30_back_cam_gpio_set_tbl[] = { {0, GPIOF_OUT_INIT_LOW, 1000}, {0, GPIOF_OUT_INIT_HIGH, 4000}, }; static struct msm_camera_gpio_conf msm_7x30_back_cam_gpio_conf = { .cam_gpio_req_tbl = msm7x30_back_cam_gpio, .cam_gpio_req_tbl_size = ARRAY_SIZE(msm7x30_back_cam_gpio), .cam_gpio_set_tbl = msm7x30_back_cam_gpio_set_tbl, .cam_gpio_set_tbl_size = ARRAY_SIZE(msm7x30_back_cam_gpio_set_tbl), 
.camera_off_table = camera_off_gpio_table, .camera_off_table_size = ARRAY_SIZE(camera_off_gpio_table), .camera_on_table = camera_on_gpio_table, .camera_on_table_size = ARRAY_SIZE(camera_on_gpio_table), .gpio_no_mux = 1, }; static struct msm_camera_sensor_flash_data flash_vx6953 = { .flash_type = MSM_CAMERA_FLASH_NONE, }; static struct msm_camera_sensor_platform_info sensor_board_info_vx6953 = { .mount_angle = 0, .cam_vreg = msm_7x30_back_cam_vreg, .num_vreg = ARRAY_SIZE(msm_7x30_back_cam_vreg), .gpio_conf = &msm_7x30_back_cam_gpio_conf, }; static struct msm_camera_sensor_info msm_camera_sensor_vx6953_data = { .sensor_name = "vx6953", .pdata = &msm_camera_csi_device_data[0], .flash_data = &flash_vx6953, .sensor_platform_info = &sensor_board_info_vx6953, .csi_if = 1, .camera_type = BACK_CAMERA_2D, }; static struct platform_device msm_camera_server = { .name = "msm_cam_server", .id = 0, }; void __init msm7x30_init_cam(void) { platform_device_register(&msm_camera_server); platform_device_register(&msm_device_csic0); platform_device_register(&msm_device_vfe); platform_device_register(&msm_device_vpe); } #ifdef CONFIG_I2C static struct i2c_board_info msm_camera_boardinfo[] = { { I2C_BOARD_INFO("vx6953", 0x20), .platform_data = &msm_camera_sensor_vx6953_data, }, }; #endif #else static struct i2c_board_info msm_camera_boardinfo[] __initdata = { #ifdef CONFIG_MT9D112 { I2C_BOARD_INFO("mt9d112", 0x78 >> 1), }, #endif #ifdef CONFIG_WEBCAM_OV9726 { I2C_BOARD_INFO("ov9726", 0x10), }, #endif #ifdef CONFIG_S5K3E2FX { I2C_BOARD_INFO("s5k3e2fx", 0x20 >> 1), }, #endif #ifdef CONFIG_MT9P012 { I2C_BOARD_INFO("mt9p012", 0x6C >> 1), }, #endif #ifdef CONFIG_VX6953 { I2C_BOARD_INFO("vx6953", 0x20), }, #endif #ifdef CONFIG_MT9E013 { I2C_BOARD_INFO("mt9e013", 0x6C >> 2), }, #endif #ifdef CONFIG_SN12M0PZ { I2C_BOARD_INFO("sn12m0pz", 0x34 >> 1), }, #endif #if defined(CONFIG_MT9T013) || defined(CONFIG_SENSORS_MT9T013) { I2C_BOARD_INFO("mt9t013", 0x6C), }, #endif }; #ifdef CONFIG_MSM_CAMERA 
#define CAM_STNDBY 143 static uint32_t camera_off_vcm_gpio_table[] = { GPIO_CFG(1, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* VCM */ }; static uint32_t camera_off_gpio_table[] = { /* parallel CAMERA interfaces */ /* RST */ GPIO_CFG(0, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT2 */ GPIO_CFG(2, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT3 */ GPIO_CFG(3, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT4 */ GPIO_CFG(4, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT5 */ GPIO_CFG(5, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT6 */ GPIO_CFG(6, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT7 */ GPIO_CFG(7, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT8 */ GPIO_CFG(8, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT9 */ GPIO_CFG(9, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT10 */ GPIO_CFG(10, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT11 */ GPIO_CFG(11, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* PCLK */ GPIO_CFG(12, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* HSYNC_IN */ GPIO_CFG(13, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* VSYNC_IN */ GPIO_CFG(14, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* MCLK */ GPIO_CFG(15, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), }; static uint32_t camera_on_vcm_gpio_table[] = { GPIO_CFG(1, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA), /* VCM */ }; static uint32_t camera_on_gpio_table[] = { /* parallel CAMERA interfaces */ /* RST */ GPIO_CFG(0, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT2 */ GPIO_CFG(2, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT3 */ GPIO_CFG(3, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT4 */ GPIO_CFG(4, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT5 */ GPIO_CFG(5, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT6 */ 
GPIO_CFG(6, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT7 */ GPIO_CFG(7, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT8 */ GPIO_CFG(8, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT9 */ GPIO_CFG(9, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT10 */ GPIO_CFG(10, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT11 */ GPIO_CFG(11, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* PCLK */ GPIO_CFG(12, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* HSYNC_IN */ GPIO_CFG(13, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* VSYNC_IN */ GPIO_CFG(14, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* MCLK */ GPIO_CFG(15, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), }; static uint32_t camera_off_gpio_fluid_table[] = { /* FLUID: CAM_VGA_RST_N */ GPIO_CFG(31, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* FLUID: CAMIF_STANDBY */ GPIO_CFG(CAM_STNDBY, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA) }; static uint32_t camera_on_gpio_fluid_table[] = { /* FLUID: CAM_VGA_RST_N */ GPIO_CFG(31, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* FLUID: CAMIF_STANDBY */ GPIO_CFG(CAM_STNDBY, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA) }; static void config_gpio_table(uint32_t *table, int len) { int n, rc; for (n = 0; n < len; n++) { rc = gpio_tlmm_config(table[n], GPIO_CFG_ENABLE); if (rc) { pr_err("%s: gpio_tlmm_config(%#x)=%d\n", __func__, table[n], rc); break; } } } static int config_camera_on_gpios(void) { config_gpio_table(camera_on_gpio_table, ARRAY_SIZE(camera_on_gpio_table)); if (adie_get_detected_codec_type() != TIMPANI_ID) /* GPIO1 is shared also used in Timpani RF card so only configure it for non-Timpani RF card */ config_gpio_table(camera_on_vcm_gpio_table, ARRAY_SIZE(camera_on_vcm_gpio_table)); if (machine_is_msm7x30_fluid()) { config_gpio_table(camera_on_gpio_fluid_table, ARRAY_SIZE(camera_on_gpio_fluid_table)); /* FLUID: turn on 5V 
booster */ gpio_set_value( PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_FLASH_BOOST_ENABLE), 1); /* FLUID: drive high to put secondary sensor to STANDBY */ gpio_set_value(CAM_STNDBY, 1); } return 0; } static void config_camera_off_gpios(void) { config_gpio_table(camera_off_gpio_table, ARRAY_SIZE(camera_off_gpio_table)); if (adie_get_detected_codec_type() != TIMPANI_ID) /* GPIO1 is shared also used in Timpani RF card so only configure it for non-Timpani RF card */ config_gpio_table(camera_off_vcm_gpio_table, ARRAY_SIZE(camera_off_vcm_gpio_table)); if (machine_is_msm7x30_fluid()) { config_gpio_table(camera_off_gpio_fluid_table, ARRAY_SIZE(camera_off_gpio_fluid_table)); /* FLUID: turn off 5V booster */ gpio_set_value( PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_FLASH_BOOST_ENABLE), 0); } } struct resource msm_camera_resources[] = { { .start = 0xA6000000, .end = 0xA6000000 + SZ_1M - 1, .flags = IORESOURCE_MEM, }, { .start = INT_VFE, .end = INT_VFE, .flags = IORESOURCE_IRQ, }, { .flags = IORESOURCE_DMA, } }; struct msm_camera_device_platform_data msm_camera_device_data = { .camera_gpio_on = config_camera_on_gpios, .camera_gpio_off = config_camera_off_gpios, .ioext.camifpadphy = 0xAB000000, .ioext.camifpadsz = 0x00000400, .ioext.csiphy = 0xA6100000, .ioext.csisz = 0x00000400, .ioext.csiirq = INT_CSI, .ioclk.mclk_clk_rate = 24000000, .ioclk.vfe_clk_rate = 147456000, }; static struct msm_camera_sensor_flash_src msm_flash_src_pwm = { .flash_sr_type = MSM_CAMERA_FLASH_SRC_PWM, ._fsrc.pwm_src.freq = 1000, ._fsrc.pwm_src.max_load = 300, ._fsrc.pwm_src.low_load = 30, ._fsrc.pwm_src.high_load = 100, ._fsrc.pwm_src.channel = 7, }; #ifdef CONFIG_MT9D112 static struct msm_camera_sensor_flash_data flash_mt9d112 = { .flash_type = MSM_CAMERA_FLASH_LED, .flash_src = &msm_flash_src_pwm }; static struct msm_camera_sensor_info msm_camera_sensor_mt9d112_data = { .sensor_name = "mt9d112", .sensor_reset = 0, .sensor_pwd = 85, .vcm_pwd = 1, .vcm_enable = 0, .pdata = &msm_camera_device_data, .resource = 
msm_camera_resources, .num_resources = ARRAY_SIZE(msm_camera_resources), .flash_data = &flash_mt9d112, .csi_if = 0 }; static struct platform_device msm_camera_sensor_mt9d112 = { .name = "msm_camera_mt9d112", .dev = { .platform_data = &msm_camera_sensor_mt9d112_data, }, }; #endif #ifdef CONFIG_WEBCAM_OV9726 static struct msm_camera_sensor_platform_info ov9726_sensor_7630_info = { .mount_angle = 90 }; static struct msm_camera_sensor_flash_data flash_ov9726 = { .flash_type = MSM_CAMERA_FLASH_LED, .flash_src = &msm_flash_src_pwm }; static struct msm_camera_sensor_info msm_camera_sensor_ov9726_data = { .sensor_name = "ov9726", .sensor_reset = 0, .sensor_pwd = 85, .vcm_pwd = 1, .vcm_enable = 0, .pdata = &msm_camera_device_data, .resource = msm_camera_resources, .num_resources = ARRAY_SIZE(msm_camera_resources), .flash_data = &flash_ov9726, .sensor_platform_info = &ov9726_sensor_7630_info, .csi_if = 1 }; struct platform_device msm_camera_sensor_ov9726 = { .name = "msm_camera_ov9726", .dev = { .platform_data = &msm_camera_sensor_ov9726_data, }, }; #endif #ifdef CONFIG_S5K3E2FX static struct msm_camera_sensor_flash_data flash_s5k3e2fx = { .flash_type = MSM_CAMERA_FLASH_LED, .flash_src = &msm_flash_src_pwm, }; static struct msm_camera_sensor_info msm_camera_sensor_s5k3e2fx_data = { .sensor_name = "s5k3e2fx", .sensor_reset = 0, .sensor_pwd = 85, .vcm_pwd = 1, .vcm_enable = 0, .pdata = &msm_camera_device_data, .resource = msm_camera_resources, .num_resources = ARRAY_SIZE(msm_camera_resources), .flash_data = &flash_s5k3e2fx, .csi_if = 0 }; static struct platform_device msm_camera_sensor_s5k3e2fx = { .name = "msm_camera_s5k3e2fx", .dev = { .platform_data = &msm_camera_sensor_s5k3e2fx_data, }, }; #endif #ifdef CONFIG_MT9P012 static struct msm_camera_sensor_flash_data flash_mt9p012 = { .flash_type = MSM_CAMERA_FLASH_LED, .flash_src = &msm_flash_src_pwm }; static struct msm_camera_sensor_info msm_camera_sensor_mt9p012_data = { .sensor_name = "mt9p012", .sensor_reset = 0, 
.sensor_pwd = 85, .vcm_pwd = 1, .vcm_enable = 1, .pdata = &msm_camera_device_data, .resource = msm_camera_resources, .num_resources = ARRAY_SIZE(msm_camera_resources), .flash_data = &flash_mt9p012, .csi_if = 0 }; static struct platform_device msm_camera_sensor_mt9p012 = { .name = "msm_camera_mt9p012", .dev = { .platform_data = &msm_camera_sensor_mt9p012_data, }, }; #endif #ifdef CONFIG_MT9E013 static struct msm_camera_sensor_platform_info mt9e013_sensor_7630_info = { .mount_angle = 0 }; static struct msm_camera_sensor_flash_data flash_mt9e013 = { .flash_type = MSM_CAMERA_FLASH_LED, .flash_src = &msm_flash_src_pwm }; static struct msm_camera_sensor_info msm_camera_sensor_mt9e013_data = { .sensor_name = "mt9e013", .sensor_reset = 0, .sensor_pwd = 85, .vcm_pwd = 1, .vcm_enable = 1, .pdata = &msm_camera_device_data, .resource = msm_camera_resources, .num_resources = ARRAY_SIZE(msm_camera_resources), .flash_data = &flash_mt9e013, .sensor_platform_info = &mt9e013_sensor_7630_info, .csi_if = 1 }; static struct platform_device msm_camera_sensor_mt9e013 = { .name = "msm_camera_mt9e013", .dev = { .platform_data = &msm_camera_sensor_mt9e013_data, }, }; #endif #ifdef CONFIG_VX6953 static struct msm_camera_sensor_platform_info vx6953_sensor_7630_info = { .mount_angle = 0 }; static struct msm_camera_sensor_flash_data flash_vx6953 = { .flash_type = MSM_CAMERA_FLASH_LED, .flash_src = &msm_flash_src_pwm }; static struct msm_camera_sensor_info msm_camera_sensor_vx6953_data = { .sensor_name = "vx6953", .sensor_reset = 0, .sensor_pwd = 85, .vcm_pwd = 1, .vcm_enable = 0, .pdata = &msm_camera_device_data, .resource = msm_camera_resources, .num_resources = ARRAY_SIZE(msm_camera_resources), .sensor_platform_info = &vx6953_sensor_7630_info, .flash_data = &flash_vx6953, .csi_if = 1 }; static struct platform_device msm_camera_sensor_vx6953 = { .name = "msm_camera_vx6953", .dev = { .platform_data = &msm_camera_sensor_vx6953_data, }, }; #endif #ifdef CONFIG_SN12M0PZ static struct 
msm_camera_sensor_flash_src msm_flash_src_current_driver = { .flash_sr_type = MSM_CAMERA_FLASH_SRC_CURRENT_DRIVER, ._fsrc.current_driver_src.low_current = 210, ._fsrc.current_driver_src.high_current = 700, ._fsrc.current_driver_src.driver_channel = &pm8058_fluid_leds_data, }; static struct msm_camera_sensor_flash_data flash_sn12m0pz = { .flash_type = MSM_CAMERA_FLASH_LED, .flash_src = &msm_flash_src_current_driver }; static struct msm_camera_sensor_info msm_camera_sensor_sn12m0pz_data = { .sensor_name = "sn12m0pz", .sensor_reset = 0, .sensor_pwd = 85, .vcm_pwd = 1, .vcm_enable = 1, .pdata = &msm_camera_device_data, .flash_data = &flash_sn12m0pz, .resource = msm_camera_resources, .num_resources = ARRAY_SIZE(msm_camera_resources), .csi_if = 0 }; static struct platform_device msm_camera_sensor_sn12m0pz = { .name = "msm_camera_sn12m0pz", .dev = { .platform_data = &msm_camera_sensor_sn12m0pz_data, }, }; #endif #ifdef CONFIG_MT9T013 static struct msm_camera_sensor_flash_data flash_mt9t013 = { .flash_type = MSM_CAMERA_FLASH_LED, .flash_src = &msm_flash_src_pwm }; static struct msm_camera_sensor_info msm_camera_sensor_mt9t013_data = { .sensor_name = "mt9t013", .sensor_reset = 0, .sensor_pwd = 85, .vcm_pwd = 1, .vcm_enable = 0, .pdata = &msm_camera_device_data, .resource = msm_camera_resources, .num_resources = ARRAY_SIZE(msm_camera_resources), .flash_data = &flash_mt9t013, .csi_if = 1 }; static struct platform_device msm_camera_sensor_mt9t013 = { .name = "msm_camera_mt9t013", .dev = { .platform_data = &msm_camera_sensor_mt9t013_data, }, }; #endif #ifdef CONFIG_MSM_VPE static struct resource msm_vpe_resources[] = { { .start = 0xAD200000, .end = 0xAD200000 + SZ_1M - 1, .flags = IORESOURCE_MEM, }, { .start = INT_VPE, .end = INT_VPE, .flags = IORESOURCE_IRQ, }, }; static struct platform_device msm_vpe_device = { .name = "msm_vpe", .id = 0, .num_resources = ARRAY_SIZE(msm_vpe_resources), .resource = msm_vpe_resources, }; #endif #endif /*CONFIG_MSM_CAMERA*/ #endif #ifdef 
CONFIG_MSM_GEMINI static struct resource msm_gemini_resources[] = { { .start = 0xA3A00000, .end = 0xA3A00000 + 0x0150 - 1, .flags = IORESOURCE_MEM, }, { .start = INT_JPEG, .end = INT_JPEG, .flags = IORESOURCE_IRQ, }, }; static struct platform_device msm_gemini_device = { .name = "msm_gemini", .resource = msm_gemini_resources, .num_resources = ARRAY_SIZE(msm_gemini_resources), }; #endif #ifdef CONFIG_MSM7KV2_AUDIO static uint32_t audio_pamp_gpio_config = GPIO_CFG(82, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA); static uint32_t audio_fluid_icodec_tx_config = GPIO_CFG(85, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA); static int __init snddev_poweramp_gpio_init(void) { int rc; pr_info("snddev_poweramp_gpio_init \n"); rc = gpio_tlmm_config(audio_pamp_gpio_config, GPIO_CFG_ENABLE); if (rc) { printk(KERN_ERR "%s: gpio_tlmm_config(%#x)=%d\n", __func__, audio_pamp_gpio_config, rc); } return rc; } void msm_snddev_tx_route_config(void) { int rc; pr_debug("%s()\n", __func__); if (machine_is_msm7x30_fluid()) { rc = gpio_tlmm_config(audio_fluid_icodec_tx_config, GPIO_CFG_ENABLE); if (rc) { printk(KERN_ERR "%s: gpio_tlmm_config(%#x)=%d\n", __func__, audio_fluid_icodec_tx_config, rc); } else gpio_set_value(85, 0); } } void msm_snddev_tx_route_deconfig(void) { int rc; pr_debug("%s()\n", __func__); if (machine_is_msm7x30_fluid()) { rc = gpio_tlmm_config(audio_fluid_icodec_tx_config, GPIO_CFG_DISABLE); if (rc) { printk(KERN_ERR "%s: gpio_tlmm_config(%#x)=%d\n", __func__, audio_fluid_icodec_tx_config, rc); } } } void msm_snddev_poweramp_on(void) { gpio_set_value(82, 1); /* enable spkr poweramp */ pr_info("%s: power on amplifier\n", __func__); } void msm_snddev_poweramp_off(void) { gpio_set_value(82, 0); /* disable spkr poweramp */ pr_info("%s: power off amplifier\n", __func__); } static struct regulator_bulk_data snddev_regs[] = { { .supply = "gp4", .min_uV = 2600000, .max_uV = 2600000 }, { .supply = "ncp", .min_uV = 1800000, .max_uV = 1800000 }, }; static int __init 
snddev_hsed_voltage_init(void) { int rc; rc = regulator_bulk_get(NULL, ARRAY_SIZE(snddev_regs), snddev_regs); if (rc) { pr_err("%s: could not get regulators: %d\n", __func__, rc); goto out; } rc = regulator_bulk_set_voltage(ARRAY_SIZE(snddev_regs), snddev_regs); if (rc) { pr_err("%s: could not set regulator voltages: %d\n", __func__, rc); goto regs_free; } return 0; regs_free: regulator_bulk_free(ARRAY_SIZE(snddev_regs), snddev_regs); out: return rc; } void msm_snddev_hsed_voltage_on(void) { int rc = regulator_bulk_enable(ARRAY_SIZE(snddev_regs), snddev_regs); if (rc) pr_err("%s: could not enable regulators: %d\n", __func__, rc); } void msm_snddev_hsed_voltage_off(void) { int rc = regulator_bulk_disable(ARRAY_SIZE(snddev_regs), snddev_regs); if (rc) { pr_err("%s: could not disable regulators: %d\n", __func__, rc); } } static unsigned aux_pcm_gpio_on[] = { GPIO_CFG(138, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), /* PCM_DOUT */ GPIO_CFG(139, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), /* PCM_DIN */ GPIO_CFG(140, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), /* PCM_SYNC */ GPIO_CFG(141, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), /* PCM_CLK */ }; static int __init aux_pcm_gpio_init(void) { int pin, rc; pr_info("aux_pcm_gpio_init \n"); for (pin = 0; pin < ARRAY_SIZE(aux_pcm_gpio_on); pin++) { rc = gpio_tlmm_config(aux_pcm_gpio_on[pin], GPIO_CFG_ENABLE); if (rc) { printk(KERN_ERR "%s: gpio_tlmm_config(%#x)=%d\n", __func__, aux_pcm_gpio_on[pin], rc); } } return rc; } static struct msm_gpio mi2s_clk_gpios[] = { { GPIO_CFG(145, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "MI2S_SCLK"}, { GPIO_CFG(144, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "MI2S_WS"}, { GPIO_CFG(120, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "MI2S_MCLK_A"}, }; static struct msm_gpio mi2s_rx_data_lines_gpios[] = { { GPIO_CFG(121, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "MI2S_DATA_SD0_A"}, { GPIO_CFG(122, 1, GPIO_CFG_OUTPUT, 
GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "MI2S_DATA_SD1_A"}, { GPIO_CFG(123, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "MI2S_DATA_SD2_A"}, { GPIO_CFG(146, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "MI2S_DATA_SD3"}, }; static struct msm_gpio mi2s_tx_data_lines_gpios[] = { { GPIO_CFG(146, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "MI2S_DATA_SD3"}, }; int mi2s_config_clk_gpio(void) { int rc = 0; rc = msm_gpios_request_enable(mi2s_clk_gpios, ARRAY_SIZE(mi2s_clk_gpios)); if (rc) { pr_err("%s: enable mi2s clk gpios failed\n", __func__); return rc; } return 0; } int mi2s_unconfig_data_gpio(u32 direction, u8 sd_line_mask) { int i, rc = 0; sd_line_mask &= MI2S_SD_LINE_MASK; switch (direction) { case DIR_TX: msm_gpios_disable_free(mi2s_tx_data_lines_gpios, 1); break; case DIR_RX: i = 0; while (sd_line_mask) { if (sd_line_mask & 0x1) msm_gpios_disable_free( mi2s_rx_data_lines_gpios + i , 1); sd_line_mask = sd_line_mask >> 1; i++; } break; default: pr_err("%s: Invaild direction direction = %u\n", __func__, direction); rc = -EINVAL; break; } return rc; } int mi2s_config_data_gpio(u32 direction, u8 sd_line_mask) { int i , rc = 0; u8 sd_config_done_mask = 0; sd_line_mask &= MI2S_SD_LINE_MASK; switch (direction) { case DIR_TX: if ((sd_line_mask & MI2S_SD_0) || (sd_line_mask & MI2S_SD_1) || (sd_line_mask & MI2S_SD_2) || !(sd_line_mask & MI2S_SD_3)) { pr_err("%s: can not use SD0 or SD1 or SD2 for TX" ".only can use SD3. sd_line_mask = 0x%x\n", __func__ , sd_line_mask); rc = -EINVAL; } else { rc = msm_gpios_request_enable(mi2s_tx_data_lines_gpios, 1); if (rc) pr_err("%s: enable mi2s gpios for TX failed\n", __func__); } break; case DIR_RX: i = 0; while (sd_line_mask && (rc == 0)) { if (sd_line_mask & 0x1) { rc = msm_gpios_request_enable( mi2s_rx_data_lines_gpios + i , 1); if (rc) { pr_err("%s: enable mi2s gpios for" "RX failed. 
SD line = %s\n", __func__, (mi2s_rx_data_lines_gpios + i)->label); mi2s_unconfig_data_gpio(DIR_RX, sd_config_done_mask); } else sd_config_done_mask |= (1 << i); } sd_line_mask = sd_line_mask >> 1; i++; } break; default: pr_err("%s: Invaild direction direction = %u\n", __func__, direction); rc = -EINVAL; break; } return rc; } int mi2s_unconfig_clk_gpio(void) { msm_gpios_disable_free(mi2s_clk_gpios, ARRAY_SIZE(mi2s_clk_gpios)); return 0; } #endif /* CONFIG_MSM7KV2_AUDIO */ static int __init buses_init(void) { if (gpio_tlmm_config(GPIO_CFG(PMIC_GPIO_INT, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG_ENABLE)) pr_err("%s: gpio_tlmm_config (gpio=%d) failed\n", __func__, PMIC_GPIO_INT); if (machine_is_msm8x60_fluid()) pm8058_7x30_data.keypad_pdata = &fluid_keypad_data; else pm8058_7x30_data.keypad_pdata = &surf_keypad_data; return 0; } #define TIMPANI_RESET_GPIO 1 struct bahama_config_register{ u8 reg; u8 value; u8 mask; }; enum version{ VER_1_0, VER_2_0, VER_UNSUPPORTED = 0xFF }; static struct regulator *vreg_marimba_1; static struct regulator *vreg_marimba_2; static struct regulator *vreg_bahama; static struct msm_gpio timpani_reset_gpio_cfg[] = { { GPIO_CFG(TIMPANI_RESET_GPIO, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "timpani_reset"} }; static u8 read_bahama_ver(void) { int rc; struct marimba config = { .mod_id = SLAVE_ID_BAHAMA }; u8 bahama_version; rc = marimba_read_bit_mask(&config, 0x00, &bahama_version, 1, 0x1F); if (rc < 0) { printk(KERN_ERR "%s: version read failed: %d\n", __func__, rc); return rc; } else { printk(KERN_INFO "%s: version read got: 0x%x\n", __func__, bahama_version); } switch (bahama_version) { case 0x08: /* varient of bahama v1 */ case 0x10: case 0x00: return VER_1_0; case 0x09: /* variant of bahama v2 */ return VER_2_0; default: return VER_UNSUPPORTED; } } static int config_timpani_reset(void) { int rc; rc = msm_gpios_request_enable(timpani_reset_gpio_cfg, ARRAY_SIZE(timpani_reset_gpio_cfg)); if (rc < 0) { 
printk(KERN_ERR "%s: msm_gpios_request_enable failed (%d)\n", __func__, rc); } return rc; } static unsigned int msm_timpani_setup_power(void) { int rc; rc = config_timpani_reset(); if (rc < 0) goto out; rc = regulator_enable(vreg_marimba_1); if (rc) { pr_err("%s: regulator_enable failed (%d)\n", __func__, rc); goto out; } rc = regulator_enable(vreg_marimba_2); if (rc) { pr_err("%s: regulator_enable failed (%d)\n", __func__, rc); goto disable_marimba_1; } rc = gpio_direction_output(TIMPANI_RESET_GPIO, 1); if (rc < 0) { pr_err("%s: gpio_direction_output failed (%d)\n", __func__, rc); msm_gpios_free(timpani_reset_gpio_cfg, ARRAY_SIZE(timpani_reset_gpio_cfg)); goto disable_marimba_2; } return 0; disable_marimba_2: regulator_disable(vreg_marimba_2); disable_marimba_1: regulator_disable(vreg_marimba_1); out: return rc; }; static void msm_timpani_shutdown_power(void) { int rc; rc = regulator_disable(vreg_marimba_2); if (rc) pr_err("%s: regulator_disable failed (%d)\n", __func__, rc); rc = regulator_disable(vreg_marimba_1); if (rc) pr_err("%s: regulator_disable failed (%d)\n", __func__, rc); rc = gpio_direction_output(TIMPANI_RESET_GPIO, 0); if (rc < 0) pr_err("%s: gpio_direction_output failed (%d)\n", __func__, rc); msm_gpios_free(timpani_reset_gpio_cfg, ARRAY_SIZE(timpani_reset_gpio_cfg)); }; static unsigned int msm_bahama_core_config(int type) { int rc = 0; if (type == BAHAMA_ID) { int i; struct marimba config = { .mod_id = SLAVE_ID_BAHAMA }; const struct bahama_config_register v20_init[] = { /* reg, value, mask */ { 0xF4, 0x84, 0xFF }, /* AREG */ { 0xF0, 0x04, 0xFF } /* DREG */ }; if (read_bahama_ver() == VER_2_0) { for (i = 0; i < ARRAY_SIZE(v20_init); i++) { u8 value = v20_init[i].value; rc = marimba_write_bit_mask(&config, v20_init[i].reg, &value, sizeof(v20_init[i].value), v20_init[i].mask); if (rc < 0) { printk(KERN_ERR "%s: reg %d write failed: %d\n", __func__, v20_init[i].reg, rc); return rc; } printk(KERN_INFO "%s: reg 0x%02x value 0x%02x" " mask 0x%02x\n", 
__func__, v20_init[i].reg, v20_init[i].value, v20_init[i].mask); } } } printk(KERN_INFO "core type: %d\n", type); return rc; } static unsigned int msm_bahama_setup_power(void) { int rc = regulator_enable(vreg_bahama); if (rc) pr_err("%s: regulator_enable failed (%d)\n", __func__, rc); return rc; }; static unsigned int msm_bahama_shutdown_power(int value) { int rc = 0; if (value != BAHAMA_ID) { rc = regulator_disable(vreg_bahama); if (rc) pr_err("%s: regulator_disable failed (%d)\n", __func__, rc); } return rc; }; static struct msm_gpio marimba_svlte_config_clock[] = { { GPIO_CFG(34, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "MARIMBA_SVLTE_CLOCK_ENABLE" }, }; static unsigned int msm_marimba_gpio_config_svlte(int gpio_cfg_marimba) { if (machine_is_msm8x55_svlte_surf() || machine_is_msm8x55_svlte_ffa()) { if (gpio_cfg_marimba) gpio_set_value(GPIO_PIN (marimba_svlte_config_clock->gpio_cfg), 1); else gpio_set_value(GPIO_PIN (marimba_svlte_config_clock->gpio_cfg), 0); } return 0; }; static unsigned int msm_marimba_setup_power(void) { int rc; rc = regulator_enable(vreg_marimba_1); if (rc) { pr_err("%s: regulator_enable failed (%d)\n", __func__, rc); goto out; } rc = regulator_enable(vreg_marimba_2); if (rc) { pr_err("%s: regulator_enable failed (%d)\n", __func__, rc); goto disable_marimba_1; } if (machine_is_msm8x55_svlte_surf() || machine_is_msm8x55_svlte_ffa()) { rc = msm_gpios_request_enable(marimba_svlte_config_clock, ARRAY_SIZE(marimba_svlte_config_clock)); if (rc < 0) { pr_err("%s: msm_gpios_request_enable failed (%d)\n", __func__, rc); goto disable_marimba_2; } rc = gpio_direction_output(GPIO_PIN (marimba_svlte_config_clock->gpio_cfg), 0); if (rc < 0) { pr_err("%s: gpio_direction_output failed (%d)\n", __func__, rc); goto disable_marimba_2; } } return 0; disable_marimba_2: regulator_disable(vreg_marimba_2); disable_marimba_1: regulator_disable(vreg_marimba_1); out: return rc; }; static void msm_marimba_shutdown_power(void) { int rc; rc = 
regulator_disable(vreg_marimba_2); if (rc) pr_err("%s: regulator_disable failed (%d)\n", __func__, rc); rc = regulator_disable(vreg_marimba_1); if (rc) pr_err("%s: regulator_disable failed (%d)\n", __func__, rc); }; static int bahama_present(void) { int id; switch (id = adie_get_detected_connectivity_type()) { case BAHAMA_ID: return 1; case MARIMBA_ID: return 0; case TIMPANI_ID: default: printk(KERN_ERR "%s: unexpected adie connectivity type: %d\n", __func__, id); return -ENODEV; } } struct regulator *fm_regulator; static int fm_radio_setup(struct marimba_fm_platform_data *pdata) { int rc, voltage; uint32_t irqcfg; const char *id = "FMPW"; int bahama_not_marimba = bahama_present(); if (bahama_not_marimba < 0) { pr_warn("%s: bahama_present: %d\n", __func__, bahama_not_marimba); rc = -ENODEV; goto out; } if (bahama_not_marimba) { fm_regulator = regulator_get(NULL, "s3"); voltage = 1800000; } else { fm_regulator = regulator_get(NULL, "s2"); voltage = 1300000; } if (IS_ERR(fm_regulator)) { rc = PTR_ERR(fm_regulator); pr_err("%s: regulator_get failed (%d)\n", __func__, rc); goto out; } rc = regulator_set_voltage(fm_regulator, voltage, voltage); if (rc) { pr_err("%s: regulator_set_voltage failed (%d)\n", __func__, rc); goto regulator_free; } rc = regulator_enable(fm_regulator); if (rc) { pr_err("%s: regulator_enable failed (%d)\n", __func__, rc); goto regulator_free; } rc = pmapp_clock_vote(id, PMAPP_CLOCK_ID_DO, PMAPP_CLOCK_VOTE_ON); if (rc < 0) { pr_err("%s: clock vote failed (%d)\n", __func__, rc); goto regulator_disable; } /*Request the Clock Using GPIO34/AP2MDM_MRMBCK_EN in case of svlte*/ if (machine_is_msm8x55_svlte_surf() || machine_is_msm8x55_svlte_ffa()) { rc = marimba_gpio_config(1); if (rc < 0) { pr_err("%s: clock enable for svlte : %d\n", __func__, rc); goto clock_devote; } } irqcfg = GPIO_CFG(147, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA); rc = gpio_tlmm_config(irqcfg, GPIO_CFG_ENABLE); if (rc) { pr_err("%s: gpio_tlmm_config(%#x)=%d\n", __func__, 
irqcfg, rc); rc = -EIO; goto gpio_deconfig; } return 0; gpio_deconfig: if (machine_is_msm8x55_svlte_surf() || machine_is_msm8x55_svlte_ffa()) marimba_gpio_config(0); clock_devote: pmapp_clock_vote(id, PMAPP_CLOCK_ID_DO, PMAPP_CLOCK_VOTE_OFF); regulator_disable: regulator_disable(fm_regulator); regulator_free: regulator_put(fm_regulator); fm_regulator = NULL; out: return rc; }; static void fm_radio_shutdown(struct marimba_fm_platform_data *pdata) { int rc; const char *id = "FMPW"; uint32_t irqcfg = GPIO_CFG(147, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA); int bahama_not_marimba = bahama_present(); if (bahama_not_marimba == -1) { pr_warn("%s: bahama_present: %d\n", __func__, bahama_not_marimba); return; } rc = gpio_tlmm_config(irqcfg, GPIO_CFG_ENABLE); if (rc) { pr_err("%s: gpio_tlmm_config(%#x)=%d\n", __func__, irqcfg, rc); } if (!IS_ERR_OR_NULL(fm_regulator)) { rc = regulator_disable(fm_regulator); if (rc) pr_err("%s: return val: %d\n", __func__, rc); regulator_put(fm_regulator); fm_regulator = NULL; } rc = pmapp_clock_vote(id, PMAPP_CLOCK_ID_DO, PMAPP_CLOCK_VOTE_OFF); if (rc < 0) pr_err("%s: clock_vote return val: %d\n", __func__, rc); /*Disable the Clock Using GPIO34/AP2MDM_MRMBCK_EN in case of svlte*/ if (machine_is_msm8x55_svlte_surf() || machine_is_msm8x55_svlte_ffa()) { rc = marimba_gpio_config(0); if (rc < 0) pr_err("%s: clock disable for svlte : %d\n", __func__, rc); } } static struct marimba_fm_platform_data marimba_fm_pdata = { .fm_setup = fm_radio_setup, .fm_shutdown = fm_radio_shutdown, .irq = MSM_GPIO_TO_INT(147), .vreg_s2 = NULL, .vreg_xo_out = NULL, .is_fm_soc_i2s_master = false, .config_i2s_gpio = NULL, }; /* Slave id address for FM/CDC/QMEMBIST * Values can be programmed using Marimba slave id 0 * should there be a conflict with other I2C devices * */ #define MARIMBA_SLAVE_ID_FM_ADDR 0x2A #define MARIMBA_SLAVE_ID_CDC_ADDR 0x77 #define MARIMBA_SLAVE_ID_QMEMBIST_ADDR 0X66 #define BAHAMA_SLAVE_ID_FM_ADDR 0x2A #define 
BAHAMA_SLAVE_ID_QMEMBIST_ADDR 0x7B static const char *tsadc_id = "MADC"; static struct regulator_bulk_data regs_tsadc_marimba[] = { { .supply = "gp12", .min_uV = 2200000, .max_uV = 2200000 }, { .supply = "s2", .min_uV = 1300000, .max_uV = 1300000 }, }; static struct regulator_bulk_data regs_tsadc_timpani[] = { { .supply = "s3", .min_uV = 1800000, .max_uV = 1800000 }, { .supply = "gp12", .min_uV = 2200000, .max_uV = 2200000 }, { .supply = "gp16", .min_uV = 1200000, .max_uV = 1200000 }, }; static struct regulator_bulk_data *regs_tsadc; static int regs_tsadc_count; static int marimba_tsadc_power(int vreg_on) { int rc = 0; int tsadc_adie_type = adie_get_detected_codec_type(); switch (tsadc_adie_type) { case TIMPANI_ID: rc = pmapp_clock_vote(tsadc_id, PMAPP_CLOCK_ID_D1, vreg_on ? PMAPP_CLOCK_VOTE_ON : PMAPP_CLOCK_VOTE_OFF); if (rc) { pr_err("%s: unable to %svote for d1 clk\n", __func__, vreg_on ? "" : "de-"); goto D1_vote_fail; } /* fall through */ case MARIMBA_ID: rc = pmapp_clock_vote(tsadc_id, PMAPP_CLOCK_ID_DO, vreg_on ? PMAPP_CLOCK_VOTE_ON : PMAPP_CLOCK_VOTE_OFF); if (rc) { pr_err("%s: unable to %svote for d1 clk\n", __func__, vreg_on ? "" : "de-"); goto D0_vote_fail; } WARN_ON(regs_tsadc_count == 0); rc = vreg_on ? regulator_bulk_enable(regs_tsadc_count, regs_tsadc) : regulator_bulk_disable(regs_tsadc_count, regs_tsadc); if (rc) { pr_err("%s: regulator %sable failed: %d\n", __func__, vreg_on ? "en" : "dis", rc); goto regulator_switch_fail; } break; default: pr_err("%s:Adie %d not supported\n", __func__, tsadc_adie_type); return -ENODEV; } msleep(5); /* ensure power is stable */ return 0; regulator_switch_fail: pmapp_clock_vote(tsadc_id, PMAPP_CLOCK_ID_DO, vreg_on ? PMAPP_CLOCK_VOTE_OFF : PMAPP_CLOCK_VOTE_ON); D0_vote_fail: if (tsadc_adie_type == TIMPANI_ID) pmapp_clock_vote(tsadc_id, PMAPP_CLOCK_ID_D1, vreg_on ? 
PMAPP_CLOCK_VOTE_OFF : PMAPP_CLOCK_VOTE_ON); D1_vote_fail: return rc; } static int marimba_tsadc_init(void) { int rc = 0; int tsadc_adie_type = adie_get_detected_codec_type(); switch (tsadc_adie_type) { case MARIMBA_ID: regs_tsadc = regs_tsadc_marimba; regs_tsadc_count = ARRAY_SIZE(regs_tsadc_marimba); break; case TIMPANI_ID: regs_tsadc = regs_tsadc_timpani; regs_tsadc_count = ARRAY_SIZE(regs_tsadc_timpani); break; default: pr_err("%s:Adie %d not supported\n", __func__, tsadc_adie_type); rc = -ENODEV; goto out; } rc = regulator_bulk_get(NULL, regs_tsadc_count, regs_tsadc); if (rc) { pr_err("%s: could not get regulators: %d\n", __func__, rc); goto out; } rc = regulator_bulk_set_voltage(regs_tsadc_count, regs_tsadc); if (rc) { pr_err("%s: could not set regulator voltages: %d\n", __func__, rc); goto vreg_free; } return 0; vreg_free: regulator_bulk_free(regs_tsadc_count, regs_tsadc); out: regs_tsadc = NULL; regs_tsadc_count = 0; return rc; } static int marimba_tsadc_exit(void) { regulator_bulk_free(regs_tsadc_count, regs_tsadc); regs_tsadc_count = 0; regs_tsadc = NULL; return 0; } static struct msm_ts_platform_data msm_ts_data = { .min_x = 284, .max_x = 3801, .min_y = 155, .max_y = 3929, .min_press = 0, .max_press = 255, .inv_x = 4096, .inv_y = 4096, .can_wakeup = false, }; static struct marimba_tsadc_platform_data marimba_tsadc_pdata = { .marimba_tsadc_power = marimba_tsadc_power, .init = marimba_tsadc_init, .exit = marimba_tsadc_exit, .tsadc_prechg_en = true, .can_wakeup = false, .setup = { .pen_irq_en = true, .tsadc_en = true, }, .params2 = { .input_clk_khz = 2400, .sample_prd = TSADC_CLK_3, }, .params3 = { .prechg_time_nsecs = 6400, .stable_time_nsecs = 6400, .tsadc_test_mode = 0, }, .tssc_data = &msm_ts_data, }; static struct regulator_bulk_data codec_regs[] = { { .supply = "s4", .min_uV = 2200000, .max_uV = 2200000 }, }; static int __init msm_marimba_codec_init(void) { int rc = regulator_bulk_get(NULL, ARRAY_SIZE(codec_regs), codec_regs); if (rc) { pr_err("%s: 
could not get regulators: %d\n", __func__, rc); goto out; } rc = regulator_bulk_set_voltage(ARRAY_SIZE(codec_regs), codec_regs); if (rc) { pr_err("%s: could not set regulator voltages: %d\n", __func__, rc); goto reg_free; } return rc; reg_free: regulator_bulk_free(ARRAY_SIZE(codec_regs), codec_regs); out: return rc; } static int msm_marimba_codec_power(int vreg_on) { int rc = vreg_on ? regulator_bulk_enable(ARRAY_SIZE(codec_regs), codec_regs) : regulator_bulk_disable(ARRAY_SIZE(codec_regs), codec_regs); if (rc) { pr_err("%s: could not %sable regulators: %d", __func__, vreg_on ? "en" : "dis", rc); return rc; } return 0; } static struct marimba_codec_platform_data mariba_codec_pdata = { .marimba_codec_power = msm_marimba_codec_power, #ifdef CONFIG_MARIMBA_CODEC .snddev_profile_init = msm_snddev_init, #endif }; static struct marimba_platform_data marimba_pdata = { .slave_id[MARIMBA_SLAVE_ID_FM] = MARIMBA_SLAVE_ID_FM_ADDR, .slave_id[MARIMBA_SLAVE_ID_CDC] = MARIMBA_SLAVE_ID_CDC_ADDR, .slave_id[MARIMBA_SLAVE_ID_QMEMBIST] = MARIMBA_SLAVE_ID_QMEMBIST_ADDR, .slave_id[SLAVE_ID_BAHAMA_FM] = BAHAMA_SLAVE_ID_FM_ADDR, .slave_id[SLAVE_ID_BAHAMA_QMEMBIST] = BAHAMA_SLAVE_ID_QMEMBIST_ADDR, .marimba_setup = msm_marimba_setup_power, .marimba_shutdown = msm_marimba_shutdown_power, .bahama_setup = msm_bahama_setup_power, .bahama_shutdown = msm_bahama_shutdown_power, .marimba_gpio_config = msm_marimba_gpio_config_svlte, .bahama_core_config = msm_bahama_core_config, .fm = &marimba_fm_pdata, .codec = &mariba_codec_pdata, .tsadc_ssbi_adap = MARIMBA_SSBI_ADAP, }; static void __init msm7x30_init_marimba(void) { int rc; struct regulator_bulk_data regs[] = { { .supply = "s3", .min_uV = 1800000, .max_uV = 1800000 }, { .supply = "gp16", .min_uV = 1200000, .max_uV = 1200000 }, { .supply = "usb2", .min_uV = 1800000, .max_uV = 1800000 }, }; rc = msm_marimba_codec_init(); if (rc) { pr_err("%s: msm_marimba_codec_init failed (%d)\n", __func__, rc); return; } rc = regulator_bulk_get(NULL, 
ARRAY_SIZE(regs), regs); if (rc) { pr_err("%s: could not get regulators: %d\n", __func__, rc); return; } rc = regulator_bulk_set_voltage(ARRAY_SIZE(regs), regs); if (rc) { pr_err("%s: could not set voltages: %d\n", __func__, rc); regulator_bulk_free(ARRAY_SIZE(regs), regs); return; } vreg_marimba_1 = regs[0].consumer; vreg_marimba_2 = regs[1].consumer; vreg_bahama = regs[2].consumer; } static struct marimba_codec_platform_data timpani_codec_pdata = { .marimba_codec_power = msm_marimba_codec_power, #ifdef CONFIG_TIMPANI_CODEC .snddev_profile_init = msm_snddev_init_timpani, #endif }; static struct marimba_platform_data timpani_pdata = { .slave_id[MARIMBA_SLAVE_ID_CDC] = MARIMBA_SLAVE_ID_CDC_ADDR, .slave_id[MARIMBA_SLAVE_ID_QMEMBIST] = MARIMBA_SLAVE_ID_QMEMBIST_ADDR, .marimba_setup = msm_timpani_setup_power, .marimba_shutdown = msm_timpani_shutdown_power, .codec = &timpani_codec_pdata, .tsadc = &marimba_tsadc_pdata, .tsadc_ssbi_adap = MARIMBA_SSBI_ADAP, }; #define TIMPANI_I2C_SLAVE_ADDR 0xD static struct i2c_board_info msm_i2c_gsbi7_timpani_info[] = { { I2C_BOARD_INFO("timpani", TIMPANI_I2C_SLAVE_ADDR), .platform_data = &timpani_pdata, }, }; #ifdef CONFIG_MSM7KV2_AUDIO static struct resource msm_aictl_resources[] = { { .name = "aictl", .start = 0xa5000100, .end = 0xa5000100, .flags = IORESOURCE_MEM, } }; static struct resource msm_mi2s_resources[] = { { .name = "hdmi", .start = 0xac900000, .end = 0xac900038, .flags = IORESOURCE_MEM, }, { .name = "codec_rx", .start = 0xac940040, .end = 0xac940078, .flags = IORESOURCE_MEM, }, { .name = "codec_tx", .start = 0xac980080, .end = 0xac9800B8, .flags = IORESOURCE_MEM, } }; static struct msm_lpa_platform_data lpa_pdata = { .obuf_hlb_size = 0x2BFF8, .dsp_proc_id = 0, .app_proc_id = 2, .nosb_config = { .llb_min_addr = 0, .llb_max_addr = 0x3ff8, .sb_min_addr = 0, .sb_max_addr = 0, }, .sb_config = { .llb_min_addr = 0, .llb_max_addr = 0x37f8, .sb_min_addr = 0x3800, .sb_max_addr = 0x3ff8, } }; static struct resource 
msm_lpa_resources[] = { { .name = "lpa", .start = 0xa5000000, .end = 0xa50000a0, .flags = IORESOURCE_MEM, } }; static struct resource msm_aux_pcm_resources[] = { { .name = "aux_codec_reg_addr", .start = 0xac9c00c0, .end = 0xac9c00c8, .flags = IORESOURCE_MEM, }, { .name = "aux_pcm_dout", .start = 138, .end = 138, .flags = IORESOURCE_IO, }, { .name = "aux_pcm_din", .start = 139, .end = 139, .flags = IORESOURCE_IO, }, { .name = "aux_pcm_syncout", .start = 140, .end = 140, .flags = IORESOURCE_IO, }, { .name = "aux_pcm_clkin_a", .start = 141, .end = 141, .flags = IORESOURCE_IO, }, }; static struct platform_device msm_aux_pcm_device = { .name = "msm_aux_pcm", .id = 0, .num_resources = ARRAY_SIZE(msm_aux_pcm_resources), .resource = msm_aux_pcm_resources, }; struct platform_device msm_aictl_device = { .name = "audio_interct", .id = 0, .num_resources = ARRAY_SIZE(msm_aictl_resources), .resource = msm_aictl_resources, }; struct platform_device msm_mi2s_device = { .name = "mi2s", .id = 0, .num_resources = ARRAY_SIZE(msm_mi2s_resources), .resource = msm_mi2s_resources, }; struct platform_device msm_lpa_device = { .name = "lpa", .id = 0, .num_resources = ARRAY_SIZE(msm_lpa_resources), .resource = msm_lpa_resources, .dev = { .platform_data = &lpa_pdata, }, }; #endif /* CONFIG_MSM7KV2_AUDIO */ #define DEC0_FORMAT ((1<<MSM_ADSP_CODEC_MP3)| \ (1<<MSM_ADSP_CODEC_AAC)|(1<<MSM_ADSP_CODEC_WMA)| \ (1<<MSM_ADSP_CODEC_WMAPRO)|(1<<MSM_ADSP_CODEC_AMRWB)| \ (1<<MSM_ADSP_CODEC_AMRNB)|(1<<MSM_ADSP_CODEC_WAV)| \ (1<<MSM_ADSP_CODEC_ADPCM)|(1<<MSM_ADSP_CODEC_YADPCM)| \ (1<<MSM_ADSP_CODEC_EVRC)|(1<<MSM_ADSP_CODEC_QCELP)) #define DEC1_FORMAT ((1<<MSM_ADSP_CODEC_MP3)| \ (1<<MSM_ADSP_CODEC_AAC)|(1<<MSM_ADSP_CODEC_WMA)| \ (1<<MSM_ADSP_CODEC_WMAPRO)|(1<<MSM_ADSP_CODEC_AMRWB)| \ (1<<MSM_ADSP_CODEC_AMRNB)|(1<<MSM_ADSP_CODEC_WAV)| \ (1<<MSM_ADSP_CODEC_ADPCM)|(1<<MSM_ADSP_CODEC_YADPCM)| \ (1<<MSM_ADSP_CODEC_EVRC)|(1<<MSM_ADSP_CODEC_QCELP)) #define DEC2_FORMAT ((1<<MSM_ADSP_CODEC_MP3)| \ 
(1<<MSM_ADSP_CODEC_AAC)|(1<<MSM_ADSP_CODEC_WMA)| \ (1<<MSM_ADSP_CODEC_WMAPRO)|(1<<MSM_ADSP_CODEC_AMRWB)| \ (1<<MSM_ADSP_CODEC_AMRNB)|(1<<MSM_ADSP_CODEC_WAV)| \ (1<<MSM_ADSP_CODEC_ADPCM)|(1<<MSM_ADSP_CODEC_YADPCM)| \ (1<<MSM_ADSP_CODEC_EVRC)|(1<<MSM_ADSP_CODEC_QCELP)) #define DEC3_FORMAT ((1<<MSM_ADSP_CODEC_MP3)| \ (1<<MSM_ADSP_CODEC_AAC)|(1<<MSM_ADSP_CODEC_WMA)| \ (1<<MSM_ADSP_CODEC_WMAPRO)|(1<<MSM_ADSP_CODEC_AMRWB)| \ (1<<MSM_ADSP_CODEC_AMRNB)|(1<<MSM_ADSP_CODEC_WAV)| \ (1<<MSM_ADSP_CODEC_ADPCM)|(1<<MSM_ADSP_CODEC_YADPCM)| \ (1<<MSM_ADSP_CODEC_EVRC)|(1<<MSM_ADSP_CODEC_QCELP)) #define DEC4_FORMAT (1<<MSM_ADSP_CODEC_MIDI) static unsigned int dec_concurrency_table[] = { /* Audio LP */ 0, (DEC3_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_MODE_LP)| (1<<MSM_ADSP_OP_DM)), /* Concurrency 1 */ (DEC4_FORMAT), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), /* Concurrency 2 */ (DEC4_FORMAT), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), /* Concurrency 3 */ (DEC4_FORMAT), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC0_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), /* Concurrency 4 */ (DEC4_FORMAT), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), 
(DEC1_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC0_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), /* Concurrency 5 */ (DEC4_FORMAT), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC0_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), /* Concurrency 6 */ (DEC4_FORMAT), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC0_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), }; #define DEC_INFO(name, queueid, decid, nr_codec) { .module_name = name, \ .module_queueid = queueid, .module_decid = decid, \ .nr_codec_support = nr_codec} #define DEC_INSTANCE(max_instance_same, max_instance_diff) { \ .max_instances_same_dec = max_instance_same, \ .max_instances_diff_dec = max_instance_diff} static struct msm_adspdec_info dec_info_list[] = { DEC_INFO("AUDPLAY4TASK", 17, 4, 1), /* AudPlay4BitStreamCtrlQueue */ DEC_INFO("AUDPLAY3TASK", 16, 3, 11), /* AudPlay3BitStreamCtrlQueue */ DEC_INFO("AUDPLAY2TASK", 15, 2, 11), /* AudPlay2BitStreamCtrlQueue */ DEC_INFO("AUDPLAY1TASK", 14, 1, 11), /* AudPlay1BitStreamCtrlQueue */ DEC_INFO("AUDPLAY0TASK", 13, 0, 11), /* AudPlay0BitStreamCtrlQueue */ }; static struct dec_instance_table dec_instance_list[][MSM_MAX_DEC_CNT] = { /* Non Turbo Mode */ { DEC_INSTANCE(4, 3), /* WAV */ DEC_INSTANCE(4, 3), /* ADPCM */ DEC_INSTANCE(4, 2), /* MP3 */ DEC_INSTANCE(0, 0), /* Real Audio */ DEC_INSTANCE(4, 2), /* WMA */ DEC_INSTANCE(3, 2), /* AAC */ DEC_INSTANCE(0, 0), /* Reserved */ DEC_INSTANCE(0, 0), /* MIDI */ DEC_INSTANCE(4, 3), /* YADPCM */ DEC_INSTANCE(4, 3), /* QCELP */ DEC_INSTANCE(4, 3), /* AMRNB */ DEC_INSTANCE(1, 1), /* AMRWB/WB+ */ DEC_INSTANCE(4, 3), /* EVRC */ DEC_INSTANCE(1, 1), /* WMAPRO */ }, /* Turbo Mode */ { 
DEC_INSTANCE(4, 3), /* WAV */ DEC_INSTANCE(4, 3), /* ADPCM */ DEC_INSTANCE(4, 3), /* MP3 */ DEC_INSTANCE(0, 0), /* Real Audio */ DEC_INSTANCE(4, 3), /* WMA */ DEC_INSTANCE(4, 3), /* AAC */ DEC_INSTANCE(0, 0), /* Reserved */ DEC_INSTANCE(0, 0), /* MIDI */ DEC_INSTANCE(4, 3), /* YADPCM */ DEC_INSTANCE(4, 3), /* QCELP */ DEC_INSTANCE(4, 3), /* AMRNB */ DEC_INSTANCE(2, 3), /* AMRWB/WB+ */ DEC_INSTANCE(4, 3), /* EVRC */ DEC_INSTANCE(1, 2), /* WMAPRO */ }, }; static struct msm_adspdec_database msm_device_adspdec_database = { .num_dec = ARRAY_SIZE(dec_info_list), .num_concurrency_support = (ARRAY_SIZE(dec_concurrency_table) / \ ARRAY_SIZE(dec_info_list)), .dec_concurrency_table = dec_concurrency_table, .dec_info_list = dec_info_list, .dec_instance_list = &dec_instance_list[0][0], }; static struct platform_device msm_device_adspdec = { .name = "msm_adspdec", .id = -1, .dev = { .platform_data = &msm_device_adspdec_database }, }; static struct resource smc91x_resources[] = { [0] = { .start = 0x8A000300, .end = 0x8A0003ff, .flags = IORESOURCE_MEM, }, [1] = { .start = MSM_GPIO_TO_INT(156), .end = MSM_GPIO_TO_INT(156), .flags = IORESOURCE_IRQ, }, }; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, }; static struct smsc911x_platform_config smsc911x_config = { .phy_interface = PHY_INTERFACE_MODE_MII, .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, .flags = SMSC911X_USE_32BIT, }; static struct resource smsc911x_resources[] = { [0] = { .start = 0x8D000000, .end = 0x8D000100, .flags = IORESOURCE_MEM, }, [1] = { .start = MSM_GPIO_TO_INT(88), .end = MSM_GPIO_TO_INT(88), .flags = IORESOURCE_IRQ, }, }; static struct platform_device smsc911x_device = { .name = "smsc911x", .id = -1, .num_resources = ARRAY_SIZE(smsc911x_resources), .resource = smsc911x_resources, .dev = { .platform_data = &smsc911x_config, }, }; static struct msm_gpio 
smsc911x_gpios[] = {
	{ GPIO_CFG(172, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
		"ebi2_addr6" },
	{ GPIO_CFG(173, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
		"ebi2_addr5" },
	{ GPIO_CFG(174, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
		"ebi2_addr4" },
	{ GPIO_CFG(175, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
		"ebi2_addr3" },
	{ GPIO_CFG(176, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
		"ebi2_addr2" },
	{ GPIO_CFG(177, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
		"ebi2_addr1" },
	{ GPIO_CFG(178, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
		"ebi2_addr0" },
	{ GPIO_CFG(88, 2, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA),
		"smsc911x_irq" },
};

/* Claim and enable the EBI2 address / IRQ GPIOs for the SMSC911x. */
static void msm7x30_cfg_smsc911x(void)
{
	int rc;

	rc = msm_gpios_request_enable(smsc911x_gpios,
			ARRAY_SIZE(smsc911x_gpios));
	if (rc)
		pr_err("%s: unable to enable gpios\n", __func__);
}

#ifdef CONFIG_USB_G_ANDROID
static struct android_usb_platform_data android_usb_pdata = {
	.update_pid_and_serial_num = usb_diag_update_pid_and_serial_num,
};

static struct platform_device android_usb_device = {
	.name = "android_usb",
	.id = -1,
	.dev = {
		.platform_data = &android_usb_pdata,
	},
};
#endif

static struct msm_gpio optnav_config_data[] = {
	{ GPIO_CFG(OPTNAV_CHIP_SELECT, 0, GPIO_CFG_OUTPUT,
			GPIO_CFG_PULL_UP, GPIO_CFG_8MA),
		"optnav_chip_select" },
};

/* Rails used by the optical-nav sensor: L8/L10/L12/L6. */
static struct regulator_bulk_data optnav_regulators[] = {
	{ .supply = "gp7", .min_uV = 1800000, .max_uV = 1800000 },
	{ .supply = "gp4", .min_uV = 2600000, .max_uV = 2600000 },
	{ .supply = "gp9", .min_uV = 1800000, .max_uV = 1800000 },
	{ .supply = "usb", .min_uV = 3300000, .max_uV = 3300000 },
};

static void __iomem *virtual_optnav;

/*
 * Claim the chip-select GPIO, route it through the SURF FPGA, and
 * acquire/program the sensor rails.
 * NOTE(review): the error paths after msm_gpios_request_enable() do not
 * free the GPIOs or unmap virtual_optnav — verify callers tolerate this.
 */
static int optnav_gpio_setup(void)
{
	int rc = -ENODEV;

	rc = msm_gpios_request_enable(optnav_config_data,
			ARRAY_SIZE(optnav_config_data));
	if (rc)
		return rc;

	/* Configure the FPGA for GPIOs */
	virtual_optnav = ioremap(FPGA_OPTNAV_GPIO_ADDR, 0x4);
	if (!virtual_optnav) {
		pr_err("%s:Could not ioremap region\n", __func__);
		return -ENOMEM;
	}
	/*
	 * Configure the FPGA to set GPIO 19 as
	 * normal, active(enabled), output(MSM to SURF)
	 */
	writew(0x311E, virtual_optnav);

	rc = regulator_bulk_get(NULL, ARRAY_SIZE(optnav_regulators),
			optnav_regulators);
	if (rc)
		return rc;

	rc = regulator_bulk_set_voltage(ARRAY_SIZE(optnav_regulators),
			optnav_regulators);
	if (rc)
		goto regulator_put;

	return rc;

regulator_put:
	regulator_bulk_free(ARRAY_SIZE(optnav_regulators), optnav_regulators);
	return rc;
}

/* Undo optnav_gpio_setup(): free GPIOs, unmap the FPGA, drop the rails. */
static void optnav_gpio_release(void)
{
	msm_gpios_disable_free(optnav_config_data,
			ARRAY_SIZE(optnav_config_data));
	iounmap(virtual_optnav);
	regulator_bulk_free(ARRAY_SIZE(optnav_regulators), optnav_regulators);
}

static int optnav_enable(void)
{
	int rc;
	/*
	 * Enable the VREGs L8(gp7), L10(gp4), L12(gp9), L6(usb)
	 * for I2C communication with keyboard.
	 */
	rc = regulator_bulk_enable(ARRAY_SIZE(optnav_regulators),
			optnav_regulators);
	if (rc)
		return rc;

	/* Enable the chip select GPIO */
	gpio_set_value(OPTNAV_CHIP_SELECT, 1);
	gpio_set_value(OPTNAV_CHIP_SELECT, 0);

	return 0;
}

static void optnav_disable(void)
{
	regulator_bulk_disable(ARRAY_SIZE(optnav_regulators),
			optnav_regulators);
	gpio_set_value(OPTNAV_CHIP_SELECT, 1);
}

static struct ofn_atlab_platform_data optnav_data = {
	.gpio_setup = optnav_gpio_setup,
	.gpio_release = optnav_gpio_release,
	.optnav_on = optnav_enable,
	.optnav_off = optnav_disable,
	.rotate_xy = 0,
	.function1 = {
		.no_motion1_en = true,
		.touch_sensor_en = true,
		.ofn_en = true,
		.clock_select_khz = 1500,
		.cpi_selection = 1200,
	},
	.function2 = {
		.invert_y = false,
		.invert_x = true,
		.swap_x_y = false,
		.hold_a_b_en = true,
		.motion_filter_en = true,
	},
};

/* ADV7520 HDMI transmitter callbacks, defined later in this file. */
static int hdmi_comm_power(int on, int show);
static int hdmi_init_irq(void);
static int hdmi_enable_5v(int on);
static int hdmi_core_power(int on, int show);
static int hdmi_cec_power(int on);
static bool hdmi_check_hdcp_hw_support(void);

static struct msm_hdmi_platform_data adv7520_hdmi_data = {
	.irq = MSM_GPIO_TO_INT(18),
.comm_power = hdmi_comm_power, .init_irq = hdmi_init_irq, .enable_5v = hdmi_enable_5v, .core_power = hdmi_core_power, .cec_power = hdmi_cec_power, .check_hdcp_hw_support = hdmi_check_hdcp_hw_support, }; #ifdef CONFIG_BOSCH_BMA150 static struct regulator_bulk_data sensors_ldo[] = { { .supply = "gp7", .min_uV = 1800000, .max_uV = 1800000 }, { .supply = "gp6", .min_uV = 3050000, .max_uV = 3100000 }, }; static int __init sensors_ldo_init(void) { int rc; rc = regulator_bulk_get(NULL, ARRAY_SIZE(sensors_ldo), sensors_ldo); if (rc) { pr_err("%s: could not get regulators: %d\n", __func__, rc); goto out; } rc = regulator_bulk_set_voltage(ARRAY_SIZE(sensors_ldo), sensors_ldo); if (rc) { pr_err("%s: could not set voltages: %d\n", __func__, rc); goto reg_free; } return 0; reg_free: regulator_bulk_free(ARRAY_SIZE(sensors_ldo), sensors_ldo); out: return rc; } static int sensors_ldo_set(int on) { int rc = on ? regulator_bulk_enable(ARRAY_SIZE(sensors_ldo), sensors_ldo) : regulator_bulk_disable(ARRAY_SIZE(sensors_ldo), sensors_ldo); if (rc) pr_err("%s: could not %sable regulators: %d\n", __func__, on ? 
"en" : "dis", rc); return rc; } static int sensors_ldo_enable(void) { return sensors_ldo_set(1); } static void sensors_ldo_disable(void) { sensors_ldo_set(0); } static struct bma150_platform_data bma150_data = { .power_on = sensors_ldo_enable, .power_off = sensors_ldo_disable, }; static struct i2c_board_info bma150_board_info[] __initdata = { { I2C_BOARD_INFO("bma150", 0x38), .flags = I2C_CLIENT_WAKE, .irq = MSM_GPIO_TO_INT(BMA150_GPIO_INT), .platform_data = &bma150_data, }, }; #endif static struct i2c_board_info msm_i2c_board_info[] = { { I2C_BOARD_INFO("m33c01", OPTNAV_I2C_SLAVE_ADDR), .irq = MSM_GPIO_TO_INT(OPTNAV_IRQ), .platform_data = &optnav_data, }, { I2C_BOARD_INFO("adv7520", ADV7520_I2C_ADDR), .platform_data = &adv7520_hdmi_data, }, }; static struct i2c_board_info msm_marimba_board_info[] = { { I2C_BOARD_INFO("marimba", 0xc), .platform_data = &marimba_pdata, } }; static struct msm_handset_platform_data hs_platform_data = { .hs_name = "7k_handset", .pwr_key_delay_ms = 500, /* 0 will disable end key */ }; static struct platform_device hs_device = { .name = "msm-handset", .id = -1, .dev = { .platform_data = &hs_platform_data, }, }; static struct msm_pm_platform_data msm_pm_data[MSM_PM_SLEEP_MODE_NR] = { [MSM_PM_MODE(0, MSM_PM_SLEEP_MODE_POWER_COLLAPSE)] = { .idle_supported = 1, .suspend_supported = 1, .idle_enabled = 1, .suspend_enabled = 1, .latency = 8594, .residency = 23740, }, [MSM_PM_MODE(0, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN)] = { .idle_supported = 1, .suspend_supported = 1, .idle_enabled = 1, .suspend_enabled = 1, .latency = 4594, .residency = 23740, }, [MSM_PM_MODE(0, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)] = { #ifdef CONFIG_MSM_STANDALONE_POWER_COLLAPSE .idle_supported = 1, .suspend_supported = 1, .idle_enabled = 1, .suspend_enabled = 0, #else /*CONFIG_MSM_STANDALONE_POWER_COLLAPSE*/ .idle_supported = 0, .suspend_supported = 0, .idle_enabled = 0, .suspend_enabled = 0, #endif /*CONFIG_MSM_STANDALONE_POWER_COLLAPSE*/ .latency = 500, 
.residency = 6000, }, [MSM_PM_MODE(0, MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT)] = { .idle_supported = 1, .suspend_supported = 1, .idle_enabled = 0, .suspend_enabled = 1, .latency = 443, .residency = 1098, }, [MSM_PM_MODE(0, MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT)] = { .idle_supported = 1, .suspend_supported = 1, .idle_enabled = 1, .suspend_enabled = 1, .latency = 2, .residency = 0, }, }; static struct msm_pm_boot_platform_data msm_pm_boot_pdata __initdata = { .mode = MSM_PM_BOOT_CONFIG_RESET_VECTOR_VIRT, .v_addr = (uint32_t *)PAGE_OFFSET, }; static struct resource qsd_spi_resources[] = { { .name = "spi_irq_in", .start = INT_SPI_INPUT, .end = INT_SPI_INPUT, .flags = IORESOURCE_IRQ, }, { .name = "spi_irq_out", .start = INT_SPI_OUTPUT, .end = INT_SPI_OUTPUT, .flags = IORESOURCE_IRQ, }, { .name = "spi_irq_err", .start = INT_SPI_ERROR, .end = INT_SPI_ERROR, .flags = IORESOURCE_IRQ, }, { .name = "spi_base", .start = 0xA8000000, .end = 0xA8000000 + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { .name = "spidm_channels", .flags = IORESOURCE_DMA, }, { .name = "spidm_crci", .flags = IORESOURCE_DMA, }, }; #define AMDH0_BASE_PHYS 0xAC200000 #define ADMH0_GP_CTL (ct_adm_base + 0x3D8) static int msm_qsd_spi_dma_config(void) { void __iomem *ct_adm_base = 0; u32 spi_mux = 0; int ret = 0; ct_adm_base = ioremap(AMDH0_BASE_PHYS, PAGE_SIZE); if (!ct_adm_base) { pr_err("%s: Could not remap %x\n", __func__, AMDH0_BASE_PHYS); return -ENOMEM; } spi_mux = (ioread32(ADMH0_GP_CTL) & (0x3 << 12)) >> 12; qsd_spi_resources[4].start = DMOV_USB_CHAN; qsd_spi_resources[4].end = DMOV_TSIF_CHAN; switch (spi_mux) { case (1): qsd_spi_resources[5].start = DMOV_HSUART1_RX_CRCI; qsd_spi_resources[5].end = DMOV_HSUART1_TX_CRCI; break; case (2): qsd_spi_resources[5].start = DMOV_HSUART2_RX_CRCI; qsd_spi_resources[5].end = DMOV_HSUART2_TX_CRCI; break; case (3): qsd_spi_resources[5].start = DMOV_CE_OUT_CRCI; qsd_spi_resources[5].end = DMOV_CE_IN_CRCI; break; default: ret = -ENOENT; } iounmap(ct_adm_base); 
return ret; } static struct platform_device qsd_device_spi = { .name = "spi_qsd", .id = 0, .num_resources = ARRAY_SIZE(qsd_spi_resources), .resource = qsd_spi_resources, }; #ifdef CONFIG_SPI_QSD static struct spi_board_info lcdc_sharp_spi_board_info[] __initdata = { { .modalias = "lcdc_sharp_ls038y7dx01", .mode = SPI_MODE_1, .bus_num = 0, .chip_select = 0, .max_speed_hz = 26331429, } }; static struct spi_board_info lcdc_toshiba_spi_board_info[] __initdata = { { .modalias = "lcdc_toshiba_ltm030dd40", .mode = SPI_MODE_3|SPI_CS_HIGH, .bus_num = 0, .chip_select = 0, .max_speed_hz = 9963243, } }; #endif static struct msm_gpio qsd_spi_gpio_config_data[] = { { GPIO_CFG(45, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "spi_clk" }, { GPIO_CFG(46, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "spi_cs0" }, { GPIO_CFG(47, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), "spi_mosi" }, { GPIO_CFG(48, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "spi_miso" }, }; static int msm_qsd_spi_gpio_config(void) { return msm_gpios_request_enable(qsd_spi_gpio_config_data, ARRAY_SIZE(qsd_spi_gpio_config_data)); } static void msm_qsd_spi_gpio_release(void) { msm_gpios_disable_free(qsd_spi_gpio_config_data, ARRAY_SIZE(qsd_spi_gpio_config_data)); } static struct msm_spi_platform_data qsd_spi_pdata = { .max_clock_speed = 26331429, .gpio_config = msm_qsd_spi_gpio_config, .gpio_release = msm_qsd_spi_gpio_release, .dma_config = msm_qsd_spi_dma_config, }; static void __init msm_qsd_spi_init(void) { qsd_device_spi.dev.platform_data = &qsd_spi_pdata; } #ifdef CONFIG_USB_EHCI_MSM_72K static void msm_hsusb_vbus_power(unsigned phy_info, int on) { int rc; static int vbus_is_on; struct pm8xxx_gpio_init_info usb_vbus = { PM8058_GPIO_PM_TO_SYS(36), { .direction = PM_GPIO_DIR_OUT, .pull = PM_GPIO_PULL_NO, .output_buffer = PM_GPIO_OUT_BUF_CMOS, .output_value = 1, .vin_sel = 2, .out_strength = PM_GPIO_STRENGTH_MED, .function = PM_GPIO_FUNC_NORMAL, .inv_int_pol = 0, }, }; /* If VBUS is 
already on (or off), do nothing. */ if (unlikely(on == vbus_is_on)) return; if (on) { rc = pm8xxx_gpio_config(usb_vbus.gpio, &usb_vbus.config); if (rc) { pr_err("%s PMIC GPIO 36 write failed\n", __func__); return; } } else { gpio_set_value_cansleep(PM8058_GPIO_PM_TO_SYS(36), 0); } vbus_is_on = on; } static struct msm_usb_host_platform_data msm_usb_host_pdata = { .phy_info = (USB_PHY_INTEGRATED | USB_PHY_MODEL_45NM), .vbus_power = msm_hsusb_vbus_power, .power_budget = 180, }; #endif #ifdef CONFIG_USB_MSM_OTG_72K static int hsusb_rpc_connect(int connect) { if (connect) return msm_hsusb_rpc_connect(); else return msm_hsusb_rpc_close(); } #endif #ifdef CONFIG_USB_MSM_OTG_72K static struct regulator *vreg_3p3; static int msm_hsusb_ldo_init(int init) { uint32_t version = 0; int def_vol = 3400000; version = socinfo_get_version(); if (SOCINFO_VERSION_MAJOR(version) >= 2 && SOCINFO_VERSION_MINOR(version) >= 1) { def_vol = 3075000; pr_debug("%s: default voltage:%d\n", __func__, def_vol); } if (init) { vreg_3p3 = regulator_get(NULL, "usb"); if (IS_ERR(vreg_3p3)) return PTR_ERR(vreg_3p3); regulator_set_voltage(vreg_3p3, def_vol, def_vol); } else regulator_put(vreg_3p3); return 0; } static int msm_hsusb_ldo_enable(int enable) { static int ldo_status; if (!vreg_3p3 || IS_ERR(vreg_3p3)) return -ENODEV; if (ldo_status == enable) return 0; ldo_status = enable; if (enable) return regulator_enable(vreg_3p3); else return regulator_disable(vreg_3p3); } static int msm_hsusb_ldo_set_voltage(int mV) { static int cur_voltage; if (!vreg_3p3 || IS_ERR(vreg_3p3)) return -ENODEV; if (cur_voltage == mV) return 0; cur_voltage = mV; pr_debug("%s: (%d)\n", __func__, mV); return regulator_set_voltage(vreg_3p3, mV*1000, mV*1000); } #endif #ifndef CONFIG_USB_EHCI_MSM_72K static int msm_hsusb_pmic_notif_init(void (*callback)(int online), int init); #endif static struct msm_otg_platform_data msm_otg_pdata = { .rpc_connect = hsusb_rpc_connect, #ifndef CONFIG_USB_EHCI_MSM_72K .pmic_vbus_notif_init = 
msm_hsusb_pmic_notif_init, #else .vbus_power = msm_hsusb_vbus_power, #endif .pemp_level = PRE_EMPHASIS_WITH_20_PERCENT, .cdr_autoreset = CDR_AUTO_RESET_DISABLE, .drv_ampl = HS_DRV_AMPLITUDE_DEFAULT, .se1_gating = SE1_GATING_DISABLE, .chg_vbus_draw = hsusb_chg_vbus_draw, .chg_connected = hsusb_chg_connected, .chg_init = hsusb_chg_init, .ldo_enable = msm_hsusb_ldo_enable, .ldo_init = msm_hsusb_ldo_init, .ldo_set_voltage = msm_hsusb_ldo_set_voltage, }; #ifdef CONFIG_USB_GADGET static struct msm_hsusb_gadget_platform_data msm_gadget_pdata = { .is_phy_status_timer_on = 1, }; #endif #ifndef CONFIG_USB_EHCI_MSM_72K typedef void (*notify_vbus_state) (int); notify_vbus_state notify_vbus_state_func_ptr; int vbus_on_irq; static irqreturn_t pmic_vbus_on_irq(int irq, void *data) { pr_info("%s: vbus notification from pmic\n", __func__); (*notify_vbus_state_func_ptr) (1); return IRQ_HANDLED; } static int msm_hsusb_pmic_notif_init(void (*callback)(int online), int init) { int ret; if (init) { if (!callback) return -ENODEV; notify_vbus_state_func_ptr = callback; vbus_on_irq = platform_get_irq_byname(&msm_device_otg, "vbus_on"); if (vbus_on_irq <= 0) { pr_err("%s: unable to get vbus on irq\n", __func__); return -ENODEV; } ret = request_any_context_irq(vbus_on_irq, pmic_vbus_on_irq, IRQF_TRIGGER_RISING, "msm_otg_vbus_on", NULL); if (ret < 0) { pr_info("%s: request_irq for vbus_on" "interrupt failed\n", __func__); return ret; } msm_otg_pdata.pmic_vbus_irq = vbus_on_irq; return 0; } else { free_irq(vbus_on_irq, 0); notify_vbus_state_func_ptr = NULL; return 0; } } #endif #ifndef CONFIG_SPI_QSD static int lcdc_gpio_array_num[] = { 45, /* spi_clk */ 46, /* spi_cs */ 47, /* spi_mosi */ 48, /* spi_miso */ }; static struct msm_gpio lcdc_gpio_config_data[] = { { GPIO_CFG(45, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "spi_clk" }, { GPIO_CFG(46, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "spi_cs0" }, { GPIO_CFG(47, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), 
"spi_mosi" }, { GPIO_CFG(48, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "spi_miso" }, }; static void lcdc_config_gpios(int enable) { if (enable) { msm_gpios_request_enable(lcdc_gpio_config_data, ARRAY_SIZE( lcdc_gpio_config_data)); } else msm_gpios_disable_free(lcdc_gpio_config_data, ARRAY_SIZE( lcdc_gpio_config_data)); } #endif static struct msm_panel_common_pdata lcdc_sharp_panel_data = { #ifndef CONFIG_SPI_QSD .panel_config_gpio = lcdc_config_gpios, .gpio_num = lcdc_gpio_array_num, #endif .gpio = 2, /* LPG PMIC_GPIO26 channel number */ }; static struct platform_device lcdc_sharp_panel_device = { .name = "lcdc_sharp_wvga", .id = 0, .dev = { .platform_data = &lcdc_sharp_panel_data, } }; static struct msm_gpio dtv_panel_irq_gpios[] = { { GPIO_CFG(18, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "hdmi_int" }, }; static struct msm_gpio dtv_panel_gpios[] = { { GPIO_CFG(120, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "wca_mclk" }, { GPIO_CFG(121, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "wca_sd0" }, { GPIO_CFG(122, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "wca_sd1" }, { GPIO_CFG(123, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "wca_sd2" }, { GPIO_CFG(124, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), "dtv_pclk" }, { GPIO_CFG(125, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_en" }, { GPIO_CFG(126, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_vsync" }, { GPIO_CFG(127, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_hsync" }, { GPIO_CFG(128, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_data0" }, { GPIO_CFG(129, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_data1" }, { GPIO_CFG(130, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_data2" }, { GPIO_CFG(131, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_data3" }, { GPIO_CFG(132, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_data4" }, { GPIO_CFG(160, 1, 
GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_data5" }, { GPIO_CFG(161, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_data6" }, { GPIO_CFG(162, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_data7" }, { GPIO_CFG(163, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_data8" }, { GPIO_CFG(164, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_data9" }, { GPIO_CFG(165, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_dat10" }, { GPIO_CFG(166, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_dat11" }, { GPIO_CFG(167, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_dat12" }, { GPIO_CFG(168, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_dat13" }, { GPIO_CFG(169, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_dat14" }, { GPIO_CFG(170, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_dat15" }, { GPIO_CFG(171, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_dat16" }, { GPIO_CFG(172, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_dat17" }, { GPIO_CFG(173, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_dat18" }, { GPIO_CFG(174, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_dat19" }, { GPIO_CFG(175, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_dat20" }, { GPIO_CFG(176, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_dat21" }, { GPIO_CFG(177, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_dat22" }, { GPIO_CFG(178, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_4MA), "dtv_dat23" }, }; #ifdef HDMI_RESET static unsigned dtv_reset_gpio = GPIO_CFG(37, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA); #endif static struct regulator_bulk_data hdmi_core_regs[] = { { .supply = "ldo8", .min_uV = 1800000, .max_uV = 1800000 }, }; static struct regulator_bulk_data hdmi_comm_regs[] = { { .supply = "ldo8", .min_uV = 1800000, .max_uV = 1800000 }, { .supply = "ldo10", .min_uV = 2600000, .max_uV = 2600000 }, 
};

/* LDO17 supplies the HDMI CEC block (2.6 V). */
static struct regulator_bulk_data hdmi_cec_regs[] = {
	{ .supply = "ldo17", .min_uV = 2600000, .max_uV = 2600000 },
};

/*
 * Get and program the HDMI "core", "comm" and "cec" regulator sets.
 * Any failure unwinds the sets obtained so far (in reverse order via
 * the goto chain).  Returns 0 on success or a negative errno.
 */
static int __init hdmi_init_regs(void)
{
	int rc;

	rc = regulator_bulk_get(NULL, ARRAY_SIZE(hdmi_core_regs),
			hdmi_core_regs);
	if (rc) {
		pr_err("%s: could not get %s regulators: %d\n",
				__func__, "core", rc);
		goto out;
	}

	rc = regulator_bulk_set_voltage(ARRAY_SIZE(hdmi_core_regs),
			hdmi_core_regs);
	if (rc) {
		pr_err("%s: could not set %s voltages: %d\n",
				__func__, "core", rc);
		goto free_core;
	}

	rc = regulator_bulk_get(NULL, ARRAY_SIZE(hdmi_comm_regs),
			hdmi_comm_regs);
	if (rc) {
		pr_err("%s: could not get %s regulators: %d\n",
				__func__, "comm", rc);
		goto free_core;
	}

	rc = regulator_bulk_set_voltage(ARRAY_SIZE(hdmi_comm_regs),
			hdmi_comm_regs);
	if (rc) {
		pr_err("%s: could not set %s voltages: %d\n",
				__func__, "comm", rc);
		goto free_comm;
	}

	rc = regulator_bulk_get(NULL, ARRAY_SIZE(hdmi_cec_regs),
			hdmi_cec_regs);
	if (rc) {
		pr_err("%s: could not get %s regulators: %d\n",
				__func__, "cec", rc);
		goto free_comm;
	}

	rc = regulator_bulk_set_voltage(ARRAY_SIZE(hdmi_cec_regs),
			hdmi_cec_regs);
	if (rc) {
		pr_err("%s: could not set %s voltages: %d\n",
				__func__, "cec", rc);
		goto free_cec;
	}

	return 0;

free_cec:
	regulator_bulk_free(ARRAY_SIZE(hdmi_cec_regs), hdmi_cec_regs);
free_comm:
	regulator_bulk_free(ARRAY_SIZE(hdmi_comm_regs), hdmi_comm_regs);
free_core:
	regulator_bulk_free(ARRAY_SIZE(hdmi_core_regs), hdmi_core_regs);
out:
	return rc;
}

/* Enable the DTV panel interrupt GPIOs.  Returns 0 or a negative errno. */
static int hdmi_init_irq(void)
{
	int rc = msm_gpios_enable(dtv_panel_irq_gpios,
			ARRAY_SIZE(dtv_panel_irq_gpios));
	if (rc < 0) {
		pr_err("%s: gpio enable failed: %d\n", __func__, rc);
		return rc;
	}
	pr_info("%s\n", __func__);
	return 0;
}

/*
 * Switch the HDMI 5 V rail via a PMIC GPIO.  The GPIO number differs
 * between board revisions, hence the machine checks.  The GPIO is
 * requested on power-up and freed again on power-down.
 */
static int hdmi_enable_5v(int on)
{
	int pmic_gpio_hdmi_5v_en ;

	if (machine_is_msm8x55_svlte_surf() || machine_is_msm8x55_svlte_ffa() ||
			machine_is_msm7x30_fluid())
		pmic_gpio_hdmi_5v_en = PMIC_GPIO_HDMI_5V_EN_V2 ;
	else
		pmic_gpio_hdmi_5v_en = PMIC_GPIO_HDMI_5V_EN_V3 ;

	pr_info("%s: %d\n", __func__, on);
	if (on) {
		int rc;
		rc = gpio_request(PM8058_GPIO_PM_TO_SYS(pmic_gpio_hdmi_5v_en),
				"hdmi_5V_en");
		if (rc) {
			pr_err("%s PMIC_GPIO_HDMI_5V_EN gpio_request failed\n",
					__func__);
			return rc;
		}
		gpio_set_value_cansleep(
				PM8058_GPIO_PM_TO_SYS(pmic_gpio_hdmi_5v_en),
				1);
	} else {
		gpio_set_value_cansleep(
				PM8058_GPIO_PM_TO_SYS(pmic_gpio_hdmi_5v_en),
				0);
		gpio_free(PM8058_GPIO_PM_TO_SYS(pmic_gpio_hdmi_5v_en));
	}
	return 0;
}

/* Enable/disable the HDMI i2c communication rails (LDO8 + LDO10). */
static int hdmi_comm_power(int on, int show)
{
	if (show)
		pr_info("%s: i2c comm: %d <LDO8+LDO10>\n", __func__, on);
	return on ?
		regulator_bulk_enable(ARRAY_SIZE(hdmi_comm_regs),
				hdmi_comm_regs) :
		regulator_bulk_disable(ARRAY_SIZE(hdmi_comm_regs),
				hdmi_comm_regs);
}

/* Enable/disable the HDMI core rail (LDO8). */
static int hdmi_core_power(int on, int show)
{
	if (show)
		pr_info("%s: %d <LDO8>\n", __func__, on);
	return on ?
		regulator_bulk_enable(ARRAY_SIZE(hdmi_core_regs),
				hdmi_core_regs) :
		regulator_bulk_disable(ARRAY_SIZE(hdmi_core_regs),
				hdmi_core_regs);
}

/* Enable/disable the HDMI CEC rail (LDO17). */
static int hdmi_cec_power(int on)
{
	pr_info("%s: %d <LDO17>\n", __func__, on);
	return on ?
		regulator_bulk_enable(ARRAY_SIZE(hdmi_cec_regs),
				hdmi_cec_regs) :
		regulator_bulk_disable(ARRAY_SIZE(hdmi_cec_regs),
				hdmi_cec_regs);
}

#if defined(CONFIG_FB_MSM_HDMI_ADV7520_PANEL) || defined(CONFIG_BOSCH_BMA150)
/* there is an i2c address conflict between adv7520 and bma150 sensor after
 * power up on fluid.
As a solution, the default address of adv7520's packet * memory is changed as soon as possible */ static int __init fluid_i2c_address_fixup(void) { unsigned char wBuff[16]; unsigned char rBuff[16]; struct i2c_msg msgs[3]; int res; int rc = -EINVAL; struct i2c_adapter *adapter; if (machine_is_msm7x30_fluid()) { adapter = i2c_get_adapter(0); if (!adapter) { pr_err("%s: invalid i2c adapter\n", __func__); return PTR_ERR(adapter); } /* turn on LDO8 */ rc = hdmi_core_power(1, 0); if (rc) { pr_err("%s: could not enable hdmi core regs: %d", __func__, rc); goto adapter_put; } /* change packet memory address to 0x74 */ wBuff[0] = 0x45; wBuff[1] = 0x74; msgs[0].addr = ADV7520_I2C_ADDR; msgs[0].flags = 0; msgs[0].buf = (unsigned char *) wBuff; msgs[0].len = 2; res = i2c_transfer(adapter, msgs, 1); if (res != 1) { pr_err("%s: error writing adv7520\n", __func__); goto ldo8_disable; } /* powerdown adv7520 using bit 6 */ /* i2c read first */ wBuff[0] = 0x41; msgs[0].addr = ADV7520_I2C_ADDR; msgs[0].flags = 0; msgs[0].buf = (unsigned char *) wBuff; msgs[0].len = 1; msgs[1].addr = ADV7520_I2C_ADDR; msgs[1].flags = I2C_M_RD; msgs[1].buf = rBuff; msgs[1].len = 1; res = i2c_transfer(adapter, msgs, 2); if (res != 2) { pr_err("%s: error reading adv7520\n", __func__); goto ldo8_disable; } /* i2c write back */ wBuff[0] = 0x41; wBuff[1] = rBuff[0] | 0x40; msgs[0].addr = ADV7520_I2C_ADDR; msgs[0].flags = 0; msgs[0].buf = (unsigned char *) wBuff; msgs[0].len = 2; res = i2c_transfer(adapter, msgs, 1); if (res != 1) { pr_err("%s: error writing adv7520\n", __func__); goto ldo8_disable; } /* for successful fixup, we release the i2c adapter */ /* but leave ldo8 on so that the adv7520 is not repowered */ i2c_put_adapter(adapter); pr_info("%s: fluid i2c address conflict resolved\n", __func__); } return 0; ldo8_disable: hdmi_core_power(0, 0); adapter_put: i2c_put_adapter(adapter); return rc; } fs_initcall_sync(fluid_i2c_address_fixup); #endif static bool hdmi_check_hdcp_hw_support(void) { if 
(machine_is_msm7x30_fluid())
		return false;
	else
		return true;
}

/*
 * Power the DTV (HDMI) panel path on or off.  The static flag makes
 * repeated calls with the same state a no-op.
 */
static int dtv_panel_power(int on)
{
	int flag_on = !!on;
	static int dtv_power_save_on;	/* last applied state */
	int rc;

	if (dtv_power_save_on == flag_on)
		return 0;

	dtv_power_save_on = flag_on;
	pr_info("%s: %d\n", __func__, on);

#ifdef HDMI_RESET
	if (on) {
		/* reset Toshiba WeGA chip -- toggle reset pin -- gpio_180 */
		rc = gpio_tlmm_config(dtv_reset_gpio, GPIO_CFG_ENABLE);
		if (rc) {
			pr_err("%s: gpio_tlmm_config(%#x)=%d\n",
					__func__, dtv_reset_gpio, rc);
			return rc;
		}

		/* bring reset line low to hold reset*/
		gpio_set_value(37, 0);
	}
#endif

	if (on) {
		rc = msm_gpios_enable(dtv_panel_gpios,
				ARRAY_SIZE(dtv_panel_gpios));
		if (rc < 0) {
			printk(KERN_ERR "%s: gpio enable failed: %d\n",
					__func__, rc);
			return rc;
		}
	} else {
		rc = msm_gpios_disable(dtv_panel_gpios,
				ARRAY_SIZE(dtv_panel_gpios));
		if (rc < 0) {
			printk(KERN_ERR "%s: gpio disable failed: %d\n",
					__func__, rc);
			return rc;
		}
	}

	mdelay(5);		/* ensure power is stable */

#ifdef HDMI_RESET
	if (on) {
		gpio_set_value(37, 1);	/* bring reset line high */
		mdelay(10);	/* 10 msec before IO can be accessed */
	}
#endif

	return rc;
}

static struct lcdc_platform_data dtv_pdata = {
	.lcdc_power_save = dtv_panel_power,
};

static struct msm_serial_hs_platform_data msm_uart_dm1_pdata = {
	.inject_rx_on_wakeup = 1,
	.rx_to_inject = 0xFD,
};

/* Framebuffer DMA region; start/end are filled in during board setup. */
static struct resource msm_fb_resources[] = {
	{
		.flags = IORESOURCE_DMA,
	}
};

#ifdef CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE
static struct resource msm_v4l2_video_overlay_resources[] = {
	{
		.flags = IORESOURCE_DMA,
	}
};
#endif

/*
 * Tell the framebuffer core which panel names this board accepts:
 * 0 = supported, -EPERM = explicitly excluded, -ENODEV = unknown.
 */
static int msm_fb_detect_panel(const char *name)
{
	if (machine_is_msm7x30_fluid()) {
		if (!strcmp(name, "lcdc_sharp_wvga_pt"))
			return 0;
	} else {
		if (!strncmp(name, "mddi_toshiba_wvga_pt", 20))
			return -EPERM;
		else if (!strncmp(name, "lcdc_toshiba_wvga_pt", 20))
			return 0;
		else if (!strcmp(name, "mddi_orise"))
			return -EPERM;
		else if (!strcmp(name, "mddi_quickvx"))
			return -EPERM;
	}
	return -ENODEV;
}

static struct msm_fb_platform_data msm_fb_pdata = {
	.detect_client = msm_fb_detect_panel,
	.mddi_prescan = 1,
};

static struct platform_device msm_fb_device = {
	.name = "msm_fb",
	.id = 0,
	.num_resources = ARRAY_SIZE(msm_fb_resources),
	.resource = msm_fb_resources,
	.dev = {
		.platform_data = &msm_fb_pdata,
	}
};

#ifdef CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE
static struct platform_device msm_v4l2_video_overlay_device = {
	.name = "msm_v4l2_overlay_pd",
	.id = 0,
	.num_resources = ARRAY_SIZE(msm_v4l2_video_overlay_resources),
	.resource = msm_v4l2_video_overlay_resources,
};
#endif

static struct platform_device msm_migrate_pages_device = {
	.name = "msm_migrate_pages",
	.id = -1,
};

#if defined(CONFIG_CRYPTO_DEV_QCRYPTO) || \
	defined(CONFIG_CRYPTO_DEV_QCRYPTO_MODULE) || \
	defined(CONFIG_CRYPTO_DEV_QCEDEV) || \
	defined(CONFIG_CRYPTO_DEV_QCEDEV_MODULE)

/* Crypto engine (QCE) register window and capability flags. */
#define QCE_SIZE		0x10000
#define QCE_0_BASE		0xA8400000
#define QCE_HW_KEY_SUPPORT	1
#define QCE_SHA_HMAC_SUPPORT	0
#define QCE_SHARE_CE_RESOURCE	0
#define QCE_CE_SHARED		0

static struct resource qcrypto_resources[] = {
	[0] = {
		.start = QCE_0_BASE,
		.end = QCE_0_BASE + QCE_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.name = "crypto_channels",
		.start = DMOV_CE_IN_CHAN,
		.end = DMOV_CE_OUT_CHAN,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.name = "crypto_crci_in",
		.start = DMOV_CE_IN_CRCI,
		.end = DMOV_CE_IN_CRCI,
		.flags = IORESOURCE_DMA,
	},
	[3] = {
		.name = "crypto_crci_out",
		.start = DMOV_CE_OUT_CRCI,
		.end = DMOV_CE_OUT_CRCI,
		.flags = IORESOURCE_DMA,
	},
	[4] = {
		.name = "crypto_crci_hash",
		.start = DMOV_CE_HASH_CRCI,
		.end = DMOV_CE_HASH_CRCI,
		.flags = IORESOURCE_DMA,
	},
};

/* Same window/DMA channels, exposed to the qcedev userspace driver. */
static struct resource qcedev_resources[] = {
	[0] = {
		.start = QCE_0_BASE,
		.end = QCE_0_BASE + QCE_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.name = "crypto_channels",
		.start = DMOV_CE_IN_CHAN,
		.end = DMOV_CE_OUT_CHAN,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.name = "crypto_crci_in",
		.start = DMOV_CE_IN_CRCI,
		.end = DMOV_CE_IN_CRCI,
		.flags = IORESOURCE_DMA,
	},
	[3] = {
		.name = "crypto_crci_out",
		.start = DMOV_CE_OUT_CRCI,
		.end = DMOV_CE_OUT_CRCI,
		.flags = IORESOURCE_DMA,
	},
	[4] = {
		.name = "crypto_crci_hash",
		.start = DMOV_CE_HASH_CRCI,
		.end = DMOV_CE_HASH_CRCI,
		.flags = IORESOURCE_DMA,
	},
};
#endif

#if defined(CONFIG_CRYPTO_DEV_QCRYPTO) || \
	defined(CONFIG_CRYPTO_DEV_QCRYPTO_MODULE)
static struct msm_ce_hw_support qcrypto_ce_hw_suppport = {
	.ce_shared = QCE_CE_SHARED,
	.shared_ce_resource = QCE_SHARE_CE_RESOURCE,
	.hw_key_support = QCE_HW_KEY_SUPPORT,
	.sha_hmac = QCE_SHA_HMAC_SUPPORT,
	/* Bus Scaling declaration*/
	.bus_scale_table = NULL,
};

static struct platform_device qcrypto_device = {
	.name = "qcrypto",
	.id = 0,
	.num_resources = ARRAY_SIZE(qcrypto_resources),
	.resource = qcrypto_resources,
	.dev = {
		.coherent_dma_mask = DMA_BIT_MASK(32),
		.platform_data = &qcrypto_ce_hw_suppport,
	},
};
#endif

#if defined(CONFIG_CRYPTO_DEV_QCEDEV) || \
	defined(CONFIG_CRYPTO_DEV_QCEDEV_MODULE)
static struct msm_ce_hw_support qcedev_ce_hw_suppport = {
	.ce_shared = QCE_CE_SHARED,
	.shared_ce_resource = QCE_SHARE_CE_RESOURCE,
	.hw_key_support = QCE_HW_KEY_SUPPORT,
	.sha_hmac = QCE_SHA_HMAC_SUPPORT,
	/* Bus Scaling declaration*/
	.bus_scale_table = NULL,
};

static struct platform_device qcedev_device = {
	.name = "qce",
	.id = 0,
	.num_resources = ARRAY_SIZE(qcedev_resources),
	.resource = qcedev_resources,
	.dev = {
		.coherent_dma_mask = DMA_BIT_MASK(32),
		.platform_data = &qcedev_ce_hw_suppport,
	},
};
#endif

/* Drive the Toshiba MDDI panel backlight through the PMIC LED driver. */
static int mddi_toshiba_pmic_bl(int level)
{
	int ret = -EPERM;

	ret = pmic_set_led_intensity(LED_LCD, level);
	if (ret)
		printk(KERN_WARNING "%s: can't set lcd backlight!\n",
				__func__);
	return ret;
}

static struct msm_panel_common_pdata mddi_toshiba_pdata = {
	.pmic_backlight = mddi_toshiba_pmic_bl,
};

static struct platform_device mddi_toshiba_device = {
	.name = "mddi_toshiba",
	.id = 0,
	.dev = {
		.platform_data = &mddi_toshiba_pdata,
	}
};

/* GPIO 180 resets the Toshiba WeGA chip. */
static unsigned wega_reset_gpio =
	GPIO_CFG(180, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA);

static struct msm_gpio fluid_vee_reset_gpio[] = {
	{ GPIO_CFG(20, 0, GPIO_CFG_OUTPUT,
		GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "vee_reset" },
};

/* MDDI client bookkeeping: assume both client types until probed. */
static unsigned char quickvx_mddi_client = 1, other_mddi_client = 1;
static unsigned char quickvx_ldo_enabled;

/* GPIO 97 is the Quicklogic QuickVX VLP pin. */
static unsigned quickvx_vlp_gpio =
	GPIO_CFG(97, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA);

/* PMIC GPIO feeding the QuickVX clock. */
static struct pm8xxx_gpio_init_info pmic_quickvx_clk_gpio = {
	PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_QUICKVX_CLK),
	{
		.direction = PM_GPIO_DIR_OUT,
		.output_buffer = PM_GPIO_OUT_BUF_CMOS,
		.output_value = 1,
		.pull = PM_GPIO_PULL_NO,
		.vin_sel = PM8058_GPIO_VIN_S3,
		.out_strength = PM_GPIO_STRENGTH_HIGH,
		.function = PM_GPIO_FUNC_2,
	},
};

/* Regulator handles obtained by display_common_init(). */
static struct regulator *mddi_ldo20;
static struct regulator *mddi_ldo12;
static struct regulator *mddi_ldo16;
static struct regulator *mddi_ldo6;
static struct regulator *mddi_lcd;

/*
 * Get and program the display regulators.  The LCD supply differs per
 * board: LDO8 @1.8V on fluid, LDO15 @3.1V elsewhere.  On success the
 * consumer handles are stashed in the mddi_* globals above.
 */
static int display_common_init(void)
{
	struct regulator_bulk_data regs[5] = {
		{ .supply = "ldo20", /* voltage set in display_common_power */},
		{ .supply = "ldo12", .min_uV = 1800000, .max_uV = 1800000 },
		{ .supply = "ldo6",  .min_uV = 3075000, .max_uV = 3400000 },
		{ .supply = "ldo16", .min_uV = 2600000, .max_uV = 2600000 },
		{ .supply = NULL,    /* mddi_lcd, initialized below */ },
	};
	int rc = 0;

	if (machine_is_msm7x30_fluid()) {
		/* lcd: LDO8 @1.8V */
		regs[4].supply = "ldo8";
		regs[4].min_uV = 1800000;
		regs[4].max_uV = 1800000;
	} else {
		/* lcd: LDO15 @3.1V */
		regs[4].supply = "ldo15";
		regs[4].min_uV = 3100000;
		regs[4].max_uV = 3100000;
	}

	rc = regulator_bulk_get(NULL, ARRAY_SIZE(regs), regs);
	if (rc) {
		pr_err("%s: regulator_bulk_get failed: %d\n", __func__, rc);
		goto bail;
	}

	rc = regulator_bulk_set_voltage(ARRAY_SIZE(regs), regs);
	if (rc) {
		pr_err("%s: regulator_bulk_set_voltage failed: %d\n",
				__func__, rc);
		goto put_regs;
	}

	mddi_ldo20 = regs[0].consumer;
	mddi_ldo12 = regs[1].consumer;
	mddi_ldo6 = regs[2].consumer;
	mddi_ldo16 = regs[3].consumer;
	mddi_lcd = regs[4].consumer;

	return rc;

put_regs:
	regulator_bulk_free(ARRAY_SIZE(regs), regs);
bail:
	return rc;
}

/*
 * Power the MDDI display path on or off: sequences the WeGA reset and
 * QuickVX lines plus the LDO20/12/16/6/LCD regulators.  Repeated calls
 * with the same state are a no-op; regulators are looked up lazily on
 * the first call.
 */
static int display_common_power(int on)
{
	int rc = 0,
	    flag_on = !!on;
	static int display_common_power_save_on;	/* last applied state */
	static bool display_regs_initialized;

	if (display_common_power_save_on == flag_on)
		return 0;

	display_common_power_save_on = flag_on;

	if (unlikely(!display_regs_initialized)) {
		rc = display_common_init();
		if (rc) {
			pr_err("%s: regulator init failed: %d\n",
					__func__, rc);
			return rc;
		}
		display_regs_initialized = true;
	}

	if (on) {
		/* reset Toshiba WeGA chip -- toggle reset pin -- gpio_180 */
		rc = gpio_tlmm_config(wega_reset_gpio, GPIO_CFG_ENABLE);
		if (rc) {
			pr_err("%s: gpio_tlmm_config(%#x)=%d\n",
					__func__, wega_reset_gpio, rc);
			return rc;
		}

		/* bring reset line low to hold reset*/
		gpio_set_value(180, 0);

		if (quickvx_mddi_client) {
			/* QuickVX chip -- VLP pin -- gpio 97 */
			rc = gpio_tlmm_config(quickvx_vlp_gpio,
					GPIO_CFG_ENABLE);
			if (rc) {
				pr_err("%s: gpio_tlmm_config(%#x)=%d\n",
						__func__,
						quickvx_vlp_gpio, rc);
				return rc;
			}

			/* bring QuickVX VLP line low */
			gpio_set_value(97, 0);

			rc = pm8xxx_gpio_config(pmic_quickvx_clk_gpio.gpio,
					&pmic_quickvx_clk_gpio.config);
			if (rc) {
				pr_err("%s: pm8xxx_gpio_config(%#x)=%d\n",
						__func__,
						pmic_quickvx_clk_gpio.gpio,
						rc);
				return rc;
			}

			gpio_set_value_cansleep(PM8058_GPIO_PM_TO_SYS(
					PMIC_GPIO_QUICKVX_CLK), 0);
		}
	}

	/* LDO20 needs a higher voltage ceiling for the QuickVX client */
	if (quickvx_mddi_client)
		rc = regulator_set_voltage(mddi_ldo20, 1500000, 1800000);
	else
		rc = regulator_set_voltage(mddi_ldo20, 1500000, 1500000);
	if (rc) {
		pr_err("%s: could not set voltage for ldo20: %d\n",
				__func__, rc);
		return rc;
	}

	if (on) {
		rc = regulator_enable(mddi_ldo20);
		if (rc) {
			pr_err("%s: LDO20 regulator enable failed (%d)\n",
					__func__, rc);
			return rc;
		}

		rc = regulator_enable(mddi_ldo12);
		if (rc) {
			pr_err("%s: LDO12 regulator enable failed (%d)\n",
					__func__, rc);
			return rc;
		}

		if (other_mddi_client) {
			rc = regulator_enable(mddi_ldo16);
			if (rc) {
				pr_err("%s: LDO16 regulator enable failed (%d)\n",
						__func__, rc);
				return rc;
			}
		}

		if (quickvx_ldo_enabled) {
			/* Disable LDO6 during display ON */
			rc = regulator_disable(mddi_ldo6);
			if (rc) {
				pr_err("%s: LDO6 regulator disable failed (%d)\n",
						__func__, rc);
				return rc;
			}
			quickvx_ldo_enabled = 0;
		}

		rc = regulator_enable(mddi_lcd);
		if (rc) {
			pr_err("%s: LCD regulator enable failed (%d)\n",
					__func__, rc);
			return rc;
		}

		mdelay(5);	/* ensure power is stable */

		if (machine_is_msm7x30_fluid()) {
			rc = msm_gpios_request_enable(fluid_vee_reset_gpio,
					ARRAY_SIZE(fluid_vee_reset_gpio));
			if (rc)
				pr_err("%s gpio_request_enable failed rc=%d\n",
						__func__, rc);
			else {
				/* assert vee reset_n */
				gpio_set_value(20, 1);
				gpio_set_value(20, 0);
				mdelay(1);
				gpio_set_value(20, 1);
			}
		}

		gpio_set_value(180, 1);	/* bring reset line high */
		mdelay(10);	/* 10 msec before IO can be accessed */

		if (quickvx_mddi_client) {
			gpio_set_value(97, 1);
			msleep(2);
			gpio_set_value_cansleep(PM8058_GPIO_PM_TO_SYS(
					PMIC_GPIO_QUICKVX_CLK), 1);
			msleep(2);
		}

		rc = pmapp_display_clock_config(1);
		if (rc) {
			pr_err("%s pmapp_display_clock_config rc=%d\n",
					__func__, rc);
			return rc;
		}
	} else {
		rc = regulator_disable(mddi_ldo20);
		if (rc) {
			pr_err("%s: LDO20 regulator disable failed (%d)\n",
					__func__, rc);
			return rc;
		}

		if (other_mddi_client) {
			rc = regulator_disable(mddi_ldo16);
			if (rc) {
				pr_err("%s: LDO16 regulator disable failed (%d)\n",
						__func__, rc);
				return rc;
			}
		}

		if (quickvx_mddi_client && !quickvx_ldo_enabled) {
			/* Enable LDO6 during display OFF for Quicklogic
			   chip to sleep with data retention */
			rc = regulator_enable(mddi_ldo6);
			if (rc) {
				pr_err("%s: LDO6 regulator enable failed (%d)\n",
						__func__, rc);
				return rc;
			}
			quickvx_ldo_enabled = 1;
		}

		gpio_set_value(180, 0);	/* bring reset line low */

		if (quickvx_mddi_client) {
			gpio_set_value(97, 0);
			gpio_set_value_cansleep(PM8058_GPIO_PM_TO_SYS(
					PMIC_GPIO_QUICKVX_CLK), 0);
		}

		rc = regulator_disable(mddi_lcd);
		if (rc) {
			pr_err("%s: LCD regulator disable failed (%d)\n",
					__func__, rc);
			return rc;
		}

		mdelay(5);	/* ensure power is stable */

		rc = regulator_disable(mddi_ldo12);
		if (rc) {
			pr_err("%s: LDO12 regulator disable failed (%d)\n",
					__func__, rc);
			return rc;
		}

		if (machine_is_msm7x30_fluid()) {
			msm_gpios_disable_free(fluid_vee_reset_gpio,
					ARRAY_SIZE(fluid_vee_reset_gpio));
		}

		rc = pmapp_display_clock_config(0);
		if (rc) {
			pr_err("%s pmapp_display_clock_config rc=%d\n",
					__func__, rc);
			return rc;
		}
	}

	return rc;
}

/* MDDI core asks the board to double the requested clock rate. */
static int msm_fb_mddi_sel_clk(u32 *clk_rate)
{
	*clk_rate *= 2;
	return 0;
}

/*
 * Called once the attached MDDI client is identified; drops power to
 * whichever client type turned out not to be present.
 */
static int msm_fb_mddi_client_power(u32 client_id)
{
	int rc;
	printk(KERN_NOTICE "\n client_id = 0x%x", client_id);
	/* Check if it is Quicklogic client */
	if (client_id == 0xc5835800) {
		printk(KERN_NOTICE "\n Quicklogic MDDI client");
		other_mddi_client = 0;
		if (IS_ERR(mddi_ldo16)) {
			rc = PTR_ERR(mddi_ldo16);
			pr_err("%s: gp10 vreg get failed (%d)\n",
					__func__, rc);
			return rc;
		}
		rc = regulator_disable(mddi_ldo16);
		if (rc) {
			pr_err("%s: LDO16 vreg enable failed (%d)\n",
					__func__, rc);
			return rc;
		}
	} else {
		printk(KERN_NOTICE "\n Non-Quicklogic MDDI client");
		quickvx_mddi_client = 0;
		gpio_set_value(97, 0);
		gpio_set_value_cansleep(PM8058_GPIO_PM_TO_SYS(
				PMIC_GPIO_QUICKVX_CLK), 0);
	}
	return 0;
}

static struct mddi_platform_data mddi_pdata = {
	.mddi_power_save = display_common_power,
	.mddi_sel_clk = msm_fb_mddi_sel_clk,
	.mddi_client_power = msm_fb_mddi_client_power,
};

static struct msm_panel_common_pdata mdp_pdata = {
	.hw_revision_addr = 0xac001270,
	.gpio = 30,
	.mdp_max_clk = 192000000,
	.mdp_rev = MDP_REV_40,
	.mem_hid = MEMTYPE_EBI0,
};

/* SPI pins used to program the LCDC panel. */
static int lcd_panel_spi_gpio_num[] = {
	45, /* spi_clk */
	46, /* spi_cs */
	47, /* spi_mosi */
	48, /* spi_miso */
};

static struct msm_gpio lcd_panel_gpios[] = {
/* Workaround, since HDMI_INT is using the same GPIO line (18), and is used as
 * input. if there is a hardware revision; we should reassign this GPIO to a
 * new open line; and removing it will just ensure that this will be missed in
 * the future.
	{ GPIO_CFG(18, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_grn0" },
 */
	{ GPIO_CFG(19, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_grn1" },
	{ GPIO_CFG(20, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_blu0" },
	{ GPIO_CFG(21, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_blu1" },
	{ GPIO_CFG(22, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_blu2" },
	{ GPIO_CFG(23, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_red0" },
	{ GPIO_CFG(24, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_red1" },
	{ GPIO_CFG(25, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_red2" },
#ifndef CONFIG_SPI_QSD
	{ GPIO_CFG(45, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"spi_clk" },
	{ GPIO_CFG(46, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"spi_cs0" },
	{ GPIO_CFG(47, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"spi_mosi" },
	{ GPIO_CFG(48, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"spi_miso" },
#endif
	{ GPIO_CFG(90, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_pclk" },
	{ GPIO_CFG(91, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_en" },
	{ GPIO_CFG(92, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_vsync" },
	{ GPIO_CFG(93, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_hsync" },
	{ GPIO_CFG(94, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_grn2" },
	{ GPIO_CFG(95, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_grn3" },
	{ GPIO_CFG(96, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_grn4" },
	{ GPIO_CFG(97, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_grn5" },
	{ GPIO_CFG(98, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_grn6" },
	{ GPIO_CFG(99, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_grn7" },
	{ GPIO_CFG(100, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_blu3" },
	{ GPIO_CFG(101, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_blu4" },
	{ GPIO_CFG(102, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_blu5" },
	{ GPIO_CFG(103, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_blu6" },
	{ GPIO_CFG(104, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_blu7" },
	{ GPIO_CFG(105, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_red3" },
	{ GPIO_CFG(106, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_red4" },
	{ GPIO_CFG(107, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_red5" },
	{ GPIO_CFG(108, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_red6" },
	{ GPIO_CFG(109, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_red7" },
};

/* Sharp panel (fluid) uses a reduced pin set. */
static struct msm_gpio lcd_sharp_panel_gpios[] = {
	{ GPIO_CFG(22, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_blu2" },
	{ GPIO_CFG(25, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_red2" },
	{ GPIO_CFG(90, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_pclk" },
	{ GPIO_CFG(91, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_en" },
	{ GPIO_CFG(92, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_vsync" },
	{ GPIO_CFG(93, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_hsync" },
	{ GPIO_CFG(94, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_grn2" },
	{ GPIO_CFG(95, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_grn3" },
	{ GPIO_CFG(96, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_grn4" },
	{ GPIO_CFG(97, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_grn5" },
	{ GPIO_CFG(98, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_grn6" },
	{ GPIO_CFG(99, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_grn7" },
	{ GPIO_CFG(100, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_blu3" },
	{ GPIO_CFG(101, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_blu4" },
	{ GPIO_CFG(102, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_blu5" },
	{ GPIO_CFG(103, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_blu6" },
	{ GPIO_CFG(104, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_blu7" },
	{ GPIO_CFG(105, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_red3" },
	{ GPIO_CFG(106, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_red4" },
	{ GPIO_CFG(107, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_red5" },
	{ GPIO_CFG(108, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_red6" },
	{ GPIO_CFG(109, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"lcdc_red7" },
};

/*
 * Toshiba LCDC panel power: shared display rails first, then the panel
 * GPIOs.  On power-off the data lines are simply driven low.
 */
static int lcdc_toshiba_panel_power(int on)
{
	int rc, i;
	struct msm_gpio *gp;

	rc = display_common_power(on);
	if (rc < 0) {
		printk(KERN_ERR "%s display_common_power failed: %d\n",
				__func__, rc);
		return rc;
	}

	if (on) {
		rc = msm_gpios_enable(lcd_panel_gpios,
				ARRAY_SIZE(lcd_panel_gpios));
		if (rc < 0) {
			printk(KERN_ERR "%s: gpio enable failed: %d\n",
					__func__, rc);
		}
	} else {	/* off */
		gp = lcd_panel_gpios;
		for (i = 0; i < ARRAY_SIZE(lcd_panel_gpios); i++) {
			/* ouput low */
			gpio_set_value(GPIO_PIN(gp->gpio_cfg), 0);
			gp++;
		}
	}

	return rc;
}

/* Sharp LCDC panel power; same shape as the Toshiba variant above. */
static int lcdc_sharp_panel_power(int on)
{
	int rc, i;
	struct msm_gpio *gp;

	rc = display_common_power(on);
	if (rc < 0) {
		printk(KERN_ERR "%s display_common_power failed: %d\n",
				__func__, rc);
		return rc;
	}

	if (on) {
		rc = msm_gpios_enable(lcd_sharp_panel_gpios,
				ARRAY_SIZE(lcd_sharp_panel_gpios));
		if (rc < 0) {
			printk(KERN_ERR "%s: gpio enable failed: %d\n",
					__func__, rc);
		}
	} else {	/* off */
		gp = lcd_sharp_panel_gpios;
		for (i = 0; i < ARRAY_SIZE(lcd_sharp_panel_gpios); i++) {
			/* ouput low */
			gpio_set_value(GPIO_PIN(gp->gpio_cfg), 0);
			gp++;
		}
	}

	return rc;
}

/*
 * Board-level LCDC power hook: dispatches to the Sharp (fluid) or
 * Toshiba panel handler.  Idempotent per state; the regulators are
 * looked up lazily on the first call (QuickVX is never present on an
 * LCDC board).
 */
static int lcdc_panel_power(int on)
{
	int flag_on = !!on;
	static int lcdc_power_save_on, lcdc_power_initialized;

	if (lcdc_power_save_on == flag_on)
		return 0;

	lcdc_power_save_on = flag_on;

	if (unlikely(!lcdc_power_initialized)) {
		quickvx_mddi_client = 0;
		display_common_init();
lcdc_power_initialized = 1; } if (machine_is_msm7x30_fluid()) return lcdc_sharp_panel_power(on); else return lcdc_toshiba_panel_power(on); } static struct lcdc_platform_data lcdc_pdata = { .lcdc_power_save = lcdc_panel_power, }; static struct regulator *atv_s4, *atv_ldo9; static int __init atv_dac_power_init(void) { int rc; struct regulator_bulk_data regs[] = { { .supply = "smps4", .min_uV = 2200000, .max_uV = 2200000 }, { .supply = "ldo9", .min_uV = 2050000, .max_uV = 2050000 }, }; rc = regulator_bulk_get(NULL, ARRAY_SIZE(regs), regs); if (rc) { pr_err("%s: could not get regulators: %d\n", __func__, rc); goto bail; } rc = regulator_bulk_set_voltage(ARRAY_SIZE(regs), regs); if (rc) { pr_err("%s: could not set voltages: %d\n", __func__, rc); goto reg_free; } atv_s4 = regs[0].consumer; atv_ldo9 = regs[1].consumer; reg_free: regulator_bulk_free(ARRAY_SIZE(regs), regs); bail: return rc; } static int atv_dac_power(int on) { int rc = 0; if (on) { rc = regulator_enable(atv_s4); if (rc) { pr_err("%s: s4 vreg enable failed (%d)\n", __func__, rc); return rc; } rc = regulator_enable(atv_ldo9); if (rc) { pr_err("%s: ldo9 vreg enable failed (%d)\n", __func__, rc); return rc; } } else { rc = regulator_disable(atv_ldo9); if (rc) { pr_err("%s: ldo9 vreg disable failed (%d)\n", __func__, rc); return rc; } rc = regulator_disable(atv_s4); if (rc) { pr_err("%s: s4 vreg disable failed (%d)\n", __func__, rc); return rc; } } return rc; } static struct tvenc_platform_data atv_pdata = { .poll = 1, .pm_vid_en = atv_dac_power, }; static void __init msm_fb_add_devices(void) { msm_fb_register_device("mdp", &mdp_pdata); msm_fb_register_device("pmdh", &mddi_pdata); msm_fb_register_device("lcdc", &lcdc_pdata); msm_fb_register_device("dtv", &dtv_pdata); msm_fb_register_device("tvenc", &atv_pdata); #ifdef CONFIG_FB_MSM_TVOUT msm_fb_register_device("tvout_device", NULL); #endif } static struct msm_panel_common_pdata lcdc_toshiba_panel_data = { .gpio_num = lcd_panel_spi_gpio_num, }; static struct 
platform_device lcdc_toshiba_panel_device = {
	.name = "lcdc_toshiba_wvga",
	.id = 0,
	.dev = {
		.platform_data = &lcdc_toshiba_panel_data,
	}
};

#if defined(CONFIG_MARIMBA_CORE) && \
	(defined(CONFIG_MSM_BT_POWER) || defined(CONFIG_MSM_BT_POWER_MODULE))
static struct platform_device msm_bt_power_device = {
	.name = "bt_power",
	.id = -1
};

enum {
	BT_RFR,
	BT_CTS,
	BT_RX,
	BT_TX,
};

/* UART1DM pin states while BT is powered. */
static struct msm_gpio bt_config_power_on[] = {
	{ GPIO_CFG(134, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"UART1DM_RFR" },
	{ GPIO_CFG(135, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"UART1DM_CTS" },
	{ GPIO_CFG(136, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"UART1DM_Rx" },
	{ GPIO_CFG(137, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
		"UART1DM_Tx" }
};

/* Same pins pulled down while BT is off. */
static struct msm_gpio bt_config_power_off[] = {
	{ GPIO_CFG(134, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
		"UART1DM_RFR" },
	{ GPIO_CFG(135, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
		"UART1DM_CTS" },
	{ GPIO_CFG(136, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
		"UART1DM_Rx" },
	{ GPIO_CFG(137, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
		"UART1DM_Tx" }
};

/* Bahama chip revision cached by bluetooth_regs_init(). */
static u8 bahama_version;

static struct regulator_bulk_data regs_bt_marimba[] = {
	{ .supply = "smps3", .min_uV = 1800000, .max_uV = 1800000 },
	{ .supply = "smps2", .min_uV = 1300000, .max_uV = 1300000 },
	{ .supply = "ldo24", .min_uV = 1200000, .max_uV = 1200000 },
	{ .supply = "ldo13", .min_uV = 2900000, .max_uV = 3050000 },
};

static struct regulator_bulk_data regs_bt_bahama_v1[] = {
	{ .supply = "smps3", .min_uV = 1800000, .max_uV = 1800000 },
	{ .supply = "ldo7",  .min_uV = 1800000, .max_uV = 1800000 },
	{ .supply = "smps2", .min_uV = 1300000, .max_uV = 1300000 },
	{ .supply = "ldo13", .min_uV = 2900000, .max_uV = 3050000 },
};

static struct regulator_bulk_data regs_bt_bahama_v2[] = {
	{ .supply = "smps3", .min_uV = 1800000, .max_uV = 1800000 },
	{ .supply = "ldo7",  .min_uV = 1800000, .max_uV = 1800000 },
	{ .supply = "ldo13", .min_uV = 2900000, .max_uV = 3050000 },
};

/* Active regulator set, selected at runtime by bluetooth_regs_init(). */
static struct regulator_bulk_data *regs_bt;
static int regs_bt_count;

/*
 * Apply the Marimba BT on/off register sequence.  The chip version is
 * read from the part and indexes the [on][version] table below.
 */
static int marimba_bt(int on)
{
	int rc;
	int i;
	struct marimba config = { .mod_id = MARIMBA_SLAVE_ID_MARIMBA };

	struct marimba_config_register {
		u8 reg;
		u8 value;
		u8 mask;
	};

	struct marimba_variant_register {
		const size_t size;
		const struct marimba_config_register *set;
	};

	const struct marimba_config_register *p;

	u8 version;

	const struct marimba_config_register v10_bt_on[] = {
		{ 0xE5, 0x0B, 0x0F },
		{ 0x05, 0x02, 0x07 },
		{ 0x06, 0x88, 0xFF },
		{ 0xE7, 0x21, 0x21 },
		{ 0xE3, 0x38, 0xFF },
		{ 0xE4, 0x06, 0xFF },
	};

	const struct marimba_config_register v10_bt_off[] = {
		{ 0xE5, 0x0B, 0x0F },
		{ 0x05, 0x08, 0x0F },
		{ 0x06, 0x88, 0xFF },
		{ 0xE7, 0x00, 0x21 },
		{ 0xE3, 0x00, 0xFF },
		{ 0xE4, 0x00, 0xFF },
	};

	const struct marimba_config_register v201_bt_on[] = {
		{ 0x05, 0x08, 0x07 },
		{ 0x06, 0x88, 0xFF },
		{ 0xE7, 0x21, 0x21 },
		{ 0xE3, 0x38, 0xFF },
		{ 0xE4, 0x06, 0xFF },
	};

	const struct marimba_config_register v201_bt_off[] = {
		{ 0x05, 0x08, 0x07 },
		{ 0x06, 0x88, 0xFF },
		{ 0xE7, 0x00, 0x21 },
		{ 0xE3, 0x00, 0xFF },
		{ 0xE4, 0x00, 0xFF },
	};

	const struct marimba_config_register v210_bt_on[] = {
		{ 0xE9, 0x01, 0x01 },
		{ 0x06, 0x88, 0xFF },
		{ 0xE7, 0x21, 0x21 },
		{ 0xE3, 0x38, 0xFF },
		{ 0xE4, 0x06, 0xFF },
	};

	const struct marimba_config_register v210_bt_off[] = {
		{ 0x06, 0x88, 0xFF },
		{ 0xE7, 0x00, 0x21 },
		{ 0xE9, 0x00, 0x01 },
		{ 0xE3, 0x00, 0xFF },
		{ 0xE4, 0x00, 0xFF },
	};

	/* [off/on][version]; { 0, NULL } marks an unsupported version */
	const struct marimba_variant_register bt_marimba[2][4] = {
		{
			{ ARRAY_SIZE(v10_bt_off), v10_bt_off },
			{ 0, NULL },
			{ ARRAY_SIZE(v201_bt_off), v201_bt_off },
			{ ARRAY_SIZE(v210_bt_off), v210_bt_off }
		},
		{
			{ ARRAY_SIZE(v10_bt_on), v10_bt_on },
			{ 0, NULL },
			{ ARRAY_SIZE(v201_bt_on), v201_bt_on },
			{ ARRAY_SIZE(v210_bt_on), v210_bt_on }
		}
	};

	on = on ? 1 : 0;

	rc = marimba_read_bit_mask(&config, 0x11, &version, 1, 0x1F);
	if (rc < 0) {
		printk(KERN_ERR "%s: version read failed: %d\n",
			__func__, rc);
		return rc;
	}

	if ((version >= ARRAY_SIZE(bt_marimba[on])) ||
	    (bt_marimba[on][version].size == 0)) {
		printk(KERN_ERR "%s: unsupported version\n", __func__);
		return -EIO;
	}

	p = bt_marimba[on][version].set;

	printk(KERN_INFO "%s: found version %d\n", __func__, version);

	for (i = 0; i < bt_marimba[on][version].size; i++) {
		u8 value = (p+i)->value;
		rc = marimba_write_bit_mask(&config,
			(p+i)->reg,
			&value,
			sizeof((p+i)->value),
			(p+i)->mask);
		if (rc < 0) {
			printk(KERN_ERR "%s: reg %d write failed: %d\n",
				__func__, (p+i)->reg, rc);
			return rc;
		}
		printk(KERN_INFO "%s: reg 0x%02x value 0x%02x mask 0x%02x\n",
				__func__, (p+i)->reg, value, (p+i)->mask);
	}
	return 0;
}

/*
 * Apply the Bahama BT on/off register sequence; the table index also
 * depends on whether FM is currently active (v2.0 parts only).
 */
static int bahama_bt(int on)
{
	int rc;
	int i;
	struct marimba config = { .mod_id = SLAVE_ID_BAHAMA };

	struct bahama_variant_register {
		const size_t size;
		const struct bahama_config_register *set;
	};

	const struct bahama_config_register *p;

	const struct bahama_config_register v10_bt_on[] = {
		{ 0xE9, 0x00, 0xFF },
		{ 0xF4, 0x80, 0xFF },
		{ 0xF0, 0x06, 0xFF },
		{ 0xE4, 0x00, 0xFF },
		{ 0xE5, 0x00, 0x0F },
#ifdef CONFIG_WLAN
		{ 0xE6, 0x38, 0x7F },
		{ 0xE7, 0x06, 0xFF },
#endif
		{ 0x11, 0x13, 0xFF },
		{ 0xE9, 0x21, 0xFF },
		{ 0x01, 0x0C, 0x1F },
		{ 0x01, 0x08, 0x1F },
	};

	const struct bahama_config_register v20_bt_on_fm_off[] = {
		{ 0x11, 0x0C, 0xFF },
		{ 0x13, 0x01, 0xFF },
		{ 0xF4, 0x80, 0xFF },
		{ 0xF0, 0x00, 0xFF },
		{ 0xE9, 0x00, 0xFF },
#ifdef CONFIG_WLAN
		{ 0x81, 0x00, 0xFF },
		{ 0x82, 0x00, 0xFF },
		{ 0xE6, 0x38, 0x7F },
		{ 0xE7, 0x06, 0xFF },
#endif
		{ 0xE9, 0x21, 0xFF }
	};

	const struct bahama_config_register v20_bt_on_fm_on[] = {
		{ 0x11, 0x0C, 0xFF },
		{ 0x13, 0x01, 0xFF },
		{ 0xF4, 0x86, 0xFF },
		{ 0xF0, 0x06, 0xFF },
		{ 0xE9, 0x00, 0xFF },
#ifdef CONFIG_WLAN
		{ 0x81, 0x00, 0xFF },
		{ 0x82, 0x00, 0xFF },
		{ 0xE6, 0x38, 0x7F },
		{ 0xE7, 0x06, 0xFF },
#endif
		{ 0xE9, 0x21, 0xFF }
	};

	const struct
bahama_config_register v10_bt_off[] = {
		{ 0xE9, 0x00, 0xFF },
	};

	const struct bahama_config_register v20_bt_off_fm_off[] = {
		{ 0xF4, 0x84, 0xFF },
		{ 0xF0, 0x04, 0xFF },
		{ 0xE9, 0x00, 0xFF }
	};

	const struct bahama_config_register v20_bt_off_fm_on[] = {
		{ 0xF4, 0x86, 0xFF },
		{ 0xF0, 0x06, 0xFF },
		{ 0xE9, 0x00, 0xFF }
	};

	/* [off/on][version + fm offset] */
	const struct bahama_variant_register bt_bahama[2][3] = {
		{
			{ ARRAY_SIZE(v10_bt_off), v10_bt_off },
			{ ARRAY_SIZE(v20_bt_off_fm_off), v20_bt_off_fm_off },
			{ ARRAY_SIZE(v20_bt_off_fm_on), v20_bt_off_fm_on }
		},
		{
			{ ARRAY_SIZE(v10_bt_on), v10_bt_on },
			{ ARRAY_SIZE(v20_bt_on_fm_off), v20_bt_on_fm_off },
			{ ARRAY_SIZE(v20_bt_on_fm_on), v20_bt_on_fm_on }
		}
	};

	u8 offset = 0; /* index into bahama configs */

	on = on ? 1 : 0;

	if (bahama_version == VER_2_0) {
		if (marimba_get_fm_status(&config))
			offset = 0x01;
	}

	p = bt_bahama[on][bahama_version + offset].set;

	dev_info(&msm_bt_power_device.dev,
		"%s: found version %d\n", __func__, bahama_version);

	for (i = 0; i < bt_bahama[on][bahama_version + offset].size; i++) {
		u8 value = (p+i)->value;
		rc = marimba_write_bit_mask(&config,
			(p+i)->reg,
			&value,
			sizeof((p+i)->value),
			(p+i)->mask);
		if (rc < 0) {
			dev_err(&msm_bt_power_device.dev,
				"%s: reg %d write failed: %d\n",
				__func__, (p+i)->reg, rc);
			return rc;
		}
		dev_info(&msm_bt_power_device.dev,
			"%s: reg 0x%02x write value 0x%02x mask 0x%02x\n",
			__func__, (p+i)->reg, value, (p+i)->mask);
	}

	/* Update BT status */
	if (on)
		marimba_set_bt_status(&config, true);
	else
		marimba_set_bt_status(&config, false);

	return 0;
}

/*
 * Select and acquire the regulator set matching the connectivity chip
 * (Marimba, or Bahama v1/v2).  On failure regs_bt/regs_bt_count are
 * reset so a later call retries from scratch.
 */
static int bluetooth_regs_init(int bahama_not_marimba)
{
	int rc = 0;
	struct device *const dev = &msm_bt_power_device.dev;

	if (bahama_not_marimba) {
		bahama_version = read_bahama_ver();

		switch (bahama_version) {
		case VER_1_0:
			regs_bt = regs_bt_bahama_v1;
			regs_bt_count = ARRAY_SIZE(regs_bt_bahama_v1);
			break;
		case VER_2_0:
			regs_bt = regs_bt_bahama_v2;
			regs_bt_count = ARRAY_SIZE(regs_bt_bahama_v2);
			break;
		case VER_UNSUPPORTED:
		default:
			dev_err(dev,
				"%s: i2c failure or unsupported version: %d\n",
				__func__, bahama_version);
			rc = -EIO;
			goto out;
		}
	} else {
		regs_bt = regs_bt_marimba;
		regs_bt_count = ARRAY_SIZE(regs_bt_marimba);
	}

	rc = regulator_bulk_get(&msm_bt_power_device.dev,
			regs_bt_count, regs_bt);
	if (rc) {
		dev_err(dev, "%s: could not get regulators: %d\n",
				__func__, rc);
		goto out;
	}

	rc = regulator_bulk_set_voltage(regs_bt_count, regs_bt);
	if (rc) {
		dev_err(dev, "%s: could not set voltages: %d\n",
				__func__, rc);
		goto reg_free;
	}

	return 0;

reg_free:
	regulator_bulk_free(regs_bt_count, regs_bt);
out:
	regs_bt_count = 0;
	regs_bt = NULL;
	return rc;
}

/*
 * rfkill power hook: sequences regulators, the DO clock vote, the
 * Marimba/Bahama register writes and the UART pin states for both
 * power-up and power-down.
 */
static int bluetooth_power(int on)
{
	int rc;
	const char *id = "BTPW";

	int bahama_not_marimba = bahama_present();

	if (bahama_not_marimba < 0) {
		printk(KERN_WARNING "%s: bahama_present: %d\n",
				__func__, bahama_not_marimba);
		return -ENODEV;
	}

	/* regulators are fetched lazily on the first call */
	if (unlikely(regs_bt_count == 0)) {
		rc = bluetooth_regs_init(bahama_not_marimba);
		if (rc)
			return rc;
	}

	if (on) {
		rc = regulator_bulk_enable(regs_bt_count, regs_bt);
		if (rc)
			return rc;

		rc = pmapp_clock_vote(id, PMAPP_CLOCK_ID_DO,
					PMAPP_CLOCK_VOTE_ON);
		if (rc < 0)
			return -EIO;

		if (machine_is_msm8x55_svlte_surf() ||
				machine_is_msm8x55_svlte_ffa()) {
			rc = marimba_gpio_config(1);
			if (rc < 0)
				return -EIO;
		}

		rc = (bahama_not_marimba ? bahama_bt(on) : marimba_bt(on));
		if (rc < 0)
			return -EIO;

		msleep(10);

		rc = pmapp_clock_vote(id, PMAPP_CLOCK_ID_DO,
					PMAPP_CLOCK_VOTE_PIN_CTRL);
		if (rc < 0)
			return -EIO;

		if (machine_is_msm8x55_svlte_surf() ||
				machine_is_msm8x55_svlte_ffa()) {
			rc = marimba_gpio_config(0);
			if (rc < 0)
				return -EIO;
		}

		rc = msm_gpios_enable(bt_config_power_on,
					ARRAY_SIZE(bt_config_power_on));
		if (rc < 0)
			return rc;
	} else {
		rc = msm_gpios_enable(bt_config_power_off,
					ARRAY_SIZE(bt_config_power_off));
		if (rc < 0)
			return rc;

		/* check for initial RFKILL block (power off) */
		if (platform_get_drvdata(&msm_bt_power_device) == NULL)
			goto out;

		rc = (bahama_not_marimba ? bahama_bt(on) : marimba_bt(on));
		if (rc < 0)
			return -EIO;

		rc = pmapp_clock_vote(id, PMAPP_CLOCK_ID_DO,
					PMAPP_CLOCK_VOTE_OFF);
		if (rc < 0)
			return -EIO;

		rc = regulator_bulk_disable(regs_bt_count, regs_bt);
		if (rc)
			return rc;
	}

out:
	printk(KERN_DEBUG "Bluetooth power switch: %d\n", on);

	return 0;
}

static void __init bt_power_init(void)
{
	msm_bt_power_device.dev.platform_data = &bluetooth_power;
}
#else
#define bt_power_init(x) do {} while (0)
#endif

static struct msm_psy_batt_pdata msm_psy_batt_data = {
	.voltage_min_design	= 2800,
	.voltage_max_design	= 4300,
	.avail_chg_sources	= AC_CHG | USB_CHG ,
	.batt_technology	= POWER_SUPPLY_TECHNOLOGY_LION,
};

static struct platform_device msm_batt_device = {
	.name		= "msm-battery",
	.id		= -1,
	.dev.platform_data = &msm_psy_batt_data,
};

static char *msm_adc_fluid_device_names[] = {
	"LTC_ADC1",
	"LTC_ADC2",
	"LTC_ADC3",
};

static char *msm_adc_surf_device_names[] = {
	"XO_ADC",
};

static struct msm_adc_platform_data msm_adc_pdata;

static struct platform_device msm_adc_device = {
	.name = "msm_adc",
	.id = -1,
	.dev = {
		.platform_data = &msm_adc_pdata,
	},
};

#ifdef CONFIG_MSM_SDIO_AL
/* GPIO 77 carries the modem-to-apps status line for SDIO-AL. */
static struct msm_gpio mdm2ap_status = {
	GPIO_CFG(77, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
	"mdm2ap_status"
};

static int configure_mdm2ap_status(int on)
{
	if (on)
		return msm_gpios_request_enable(&mdm2ap_status, 1);
	else {
		msm_gpios_disable_free(&mdm2ap_status, 1);
		return 0;
	}
}

static int get_mdm2ap_status(void)
{
	return gpio_get_value(GPIO_PIN(mdm2ap_status.gpio_cfg));
}

static struct sdio_al_platform_data sdio_al_pdata = {
	.config_mdm2ap_status = configure_mdm2ap_status,
	.get_mdm2ap_status = get_mdm2ap_status,
	.allow_sdioc_version_major_2 = 1,
	.peer_sdioc_version_minor = 0x0001,
	.peer_sdioc_version_major = 0x0003,
	.peer_sdioc_boot_version_minor = 0x0001,
	.peer_sdioc_boot_version_major = 0x0003,
};

struct platform_device msm_device_sdio_al = {
	.name = "msm_sdio_al",
	.id = -1,
	.dev		= {
		.platform_data	= &sdio_al_pdata,
	},
};
#endif /* 
CONFIG_MSM_SDIO_AL */ static struct platform_device *devices[] __initdata = { #if defined(CONFIG_SERIAL_MSM) || defined(CONFIG_MSM_SERIAL_DEBUGGER) &msm_device_uart2, #endif #ifdef CONFIG_MSM_PROC_COMM_REGULATOR &msm_proccomm_regulator_dev, #endif &asoc_msm_pcm, &asoc_msm_dai0, &asoc_msm_dai1, #if defined (CONFIG_SND_MSM_MVS_DAI_SOC) &asoc_msm_mvs, &asoc_mvs_dai0, &asoc_mvs_dai1, #endif &msm_device_smd, &msm_device_dmov, &smc91x_device, &smsc911x_device, &msm_device_nand, #ifdef CONFIG_USB_MSM_OTG_72K &msm_device_otg, #ifdef CONFIG_USB_GADGET &msm_device_gadget_peripheral, #endif #endif #ifdef CONFIG_USB_G_ANDROID &android_usb_device, #endif &qsd_device_spi, #ifdef CONFIG_MSM_SSBI &msm_device_ssbi_pmic1, #endif #ifdef CONFIG_I2C_SSBI &msm_device_ssbi7, #endif &msm_fb_device, #ifdef CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE &msm_v4l2_video_overlay_device, #endif &msm_migrate_pages_device, &mddi_toshiba_device, &lcdc_toshiba_panel_device, #ifdef CONFIG_MSM_ROTATOR &msm_rotator_device, #endif &lcdc_sharp_panel_device, &msm_device_i2c, &msm_device_i2c_2, &msm_device_uart_dm1, &hs_device, #ifdef CONFIG_MSM7KV2_AUDIO &msm_aictl_device, &msm_mi2s_device, &msm_lpa_device, &msm_aux_pcm_device, #endif &msm_device_adspdec, &qup_device_i2c, #if defined(CONFIG_MARIMBA_CORE) && \ (defined(CONFIG_MSM_BT_POWER) || defined(CONFIG_MSM_BT_POWER_MODULE)) &msm_bt_power_device, #endif &msm_kgsl_3d0, &msm_kgsl_2d0, #ifndef CONFIG_MSM_CAMERA_V4L2 #ifdef CONFIG_MT9T013 &msm_camera_sensor_mt9t013, #endif #ifdef CONFIG_MT9D112 &msm_camera_sensor_mt9d112, #endif #ifdef CONFIG_WEBCAM_OV9726 &msm_camera_sensor_ov9726, #endif #ifdef CONFIG_S5K3E2FX &msm_camera_sensor_s5k3e2fx, #endif #ifdef CONFIG_MT9P012 &msm_camera_sensor_mt9p012, #endif #ifdef CONFIG_MT9E013 &msm_camera_sensor_mt9e013, #endif #ifdef CONFIG_VX6953 &msm_camera_sensor_vx6953, #endif #ifdef CONFIG_SN12M0PZ &msm_camera_sensor_sn12m0pz, #endif #endif &msm_device_vidc_720p, #ifdef CONFIG_MSM_GEMINI &msm_gemini_device, #endif #ifndef 
CONFIG_MSM_CAMERA_V4L2 #ifdef CONFIG_MSM_VPE &msm_vpe_device, #endif #endif #if defined(CONFIG_TSIF) || defined(CONFIG_TSIF_MODULE) &msm_device_tsif, #endif #ifdef CONFIG_MSM_SDIO_AL &msm_device_sdio_al, #endif #if defined(CONFIG_CRYPTO_DEV_QCRYPTO) || \ defined(CONFIG_CRYPTO_DEV_QCRYPTO_MODULE) &qcrypto_device, #endif #if defined(CONFIG_CRYPTO_DEV_QCEDEV) || \ defined(CONFIG_CRYPTO_DEV_QCEDEV_MODULE) &qcedev_device, #endif &msm_batt_device, &msm_adc_device, &msm_ebi0_thermal, &msm_ebi1_thermal, &msm_adsp_device, #ifdef CONFIG_ION_MSM &ion_dev, #endif }; static struct msm_gpio msm_i2c_gpios_hw[] = { { GPIO_CFG(70, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA), "i2c_scl" }, { GPIO_CFG(71, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA), "i2c_sda" }, }; static struct msm_gpio msm_i2c_gpios_io[] = { { GPIO_CFG(70, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA), "i2c_scl" }, { GPIO_CFG(71, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA), "i2c_sda" }, }; static struct msm_gpio qup_i2c_gpios_io[] = { { GPIO_CFG(16, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA), "qup_scl" }, { GPIO_CFG(17, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA), "qup_sda" }, }; static struct msm_gpio qup_i2c_gpios_hw[] = { { GPIO_CFG(16, 2, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA), "qup_scl" }, { GPIO_CFG(17, 2, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA), "qup_sda" }, }; static void msm_i2c_gpio_config(int adap_id, int config_type) { struct msm_gpio *msm_i2c_table; /* Each adapter gets 2 lines from the table */ if (adap_id > 0) return; if (config_type) msm_i2c_table = &msm_i2c_gpios_hw[adap_id*2]; else msm_i2c_table = &msm_i2c_gpios_io[adap_id*2]; msm_gpios_enable(msm_i2c_table, 2); } /*This needs to be enabled only for OEMS*/ #ifndef CONFIG_QUP_EXCLUSIVE_TO_CAMERA static struct regulator *qup_vreg; #endif static void qup_i2c_gpio_config(int adap_id, int config_type) { int rc = 0; struct msm_gpio *qup_i2c_table; /* Each adapter gets 2 lines from the 
table */ if (adap_id != 4) return; if (config_type) qup_i2c_table = qup_i2c_gpios_hw; else qup_i2c_table = qup_i2c_gpios_io; rc = msm_gpios_enable(qup_i2c_table, 2); if (rc < 0) printk(KERN_ERR "QUP GPIO enable failed: %d\n", rc); /*This needs to be enabled only for OEMS*/ #ifndef CONFIG_QUP_EXCLUSIVE_TO_CAMERA if (!IS_ERR_OR_NULL(qup_vreg)) { rc = regulator_enable(qup_vreg); if (rc) { pr_err("%s: regulator_enable failed: %d\n", __func__, rc); } } #endif } static struct msm_i2c_platform_data msm_i2c_pdata = { .clk_freq = 100000, .pri_clk = 70, .pri_dat = 71, .rmutex = 1, .rsl_id = "D:I2C02000021", .msm_i2c_config_gpio = msm_i2c_gpio_config, }; static void __init msm_device_i2c_init(void) { if (msm_gpios_request(msm_i2c_gpios_hw, ARRAY_SIZE(msm_i2c_gpios_hw))) pr_err("failed to request I2C gpios\n"); msm_device_i2c.dev.platform_data = &msm_i2c_pdata; } static struct msm_i2c_platform_data msm_i2c_2_pdata = { .clk_freq = 100000, .rmutex = 1, .rsl_id = "D:I2C02000022", .msm_i2c_config_gpio = msm_i2c_gpio_config, }; static void __init msm_device_i2c_2_init(void) { msm_device_i2c_2.dev.platform_data = &msm_i2c_2_pdata; } static struct msm_i2c_platform_data qup_i2c_pdata = { .clk_freq = 384000, .msm_i2c_config_gpio = qup_i2c_gpio_config, }; static void __init qup_device_i2c_init(void) { if (msm_gpios_request(qup_i2c_gpios_hw, ARRAY_SIZE(qup_i2c_gpios_hw))) pr_err("failed to request I2C gpios\n"); qup_device_i2c.dev.platform_data = &qup_i2c_pdata; /*This needs to be enabled only for OEMS*/ #ifndef CONFIG_QUP_EXCLUSIVE_TO_CAMERA qup_vreg = regulator_get(&qup_device_i2c.dev, "lvsw1"); if (IS_ERR(qup_vreg)) { dev_err(&qup_device_i2c.dev, "%s: regulator_get failed: %ld\n", __func__, PTR_ERR(qup_vreg)); } #endif } #ifdef CONFIG_I2C_SSBI static struct msm_i2c_ssbi_platform_data msm_i2c_ssbi7_pdata = { .rsl_id = "D:CODEC_SSBI", .controller_type = MSM_SBI_CTRL_SSBI, }; #endif static void __init msm7x30_init_irq(void) { msm_init_irq(); } static struct msm_gpio 
msm_nand_ebi2_cfg_data[] = { {GPIO_CFG(86, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "ebi2_cs1"}, {GPIO_CFG(115, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "ebi2_busy1"}, }; #if (defined(CONFIG_MMC_MSM_SDC1_SUPPORT)\ || defined(CONFIG_MMC_MSM_SDC2_SUPPORT)\ || defined(CONFIG_MMC_MSM_SDC3_SUPPORT)\ || defined(CONFIG_MMC_MSM_SDC4_SUPPORT)) struct sdcc_gpio { struct msm_gpio *cfg_data; uint32_t size; struct msm_gpio *sleep_cfg_data; }; #if defined(CONFIG_MMC_MSM_SDC1_SUPPORT) static struct msm_gpio sdc1_lvlshft_cfg_data[] = { {GPIO_CFG(35, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_16MA), "sdc1_lvlshft"}, }; #endif static struct msm_gpio sdc1_cfg_data[] = { {GPIO_CFG(38, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA), "sdc1_clk"}, {GPIO_CFG(39, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc1_cmd"}, {GPIO_CFG(40, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc1_dat_3"}, {GPIO_CFG(41, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc1_dat_2"}, {GPIO_CFG(42, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc1_dat_1"}, {GPIO_CFG(43, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc1_dat_0"}, }; static struct msm_gpio sdc2_cfg_data[] = { {GPIO_CFG(64, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA), "sdc2_clk"}, {GPIO_CFG(65, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc2_cmd"}, {GPIO_CFG(66, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc2_dat_3"}, {GPIO_CFG(67, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc2_dat_2"}, {GPIO_CFG(68, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc2_dat_1"}, {GPIO_CFG(69, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc2_dat_0"}, #ifdef CONFIG_MMC_MSM_SDC2_8_BIT_SUPPORT {GPIO_CFG(115, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc2_dat_4"}, {GPIO_CFG(114, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc2_dat_5"}, {GPIO_CFG(113, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), 
"sdc2_dat_6"}, {GPIO_CFG(112, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc2_dat_7"}, #endif }; static struct msm_gpio sdc3_cfg_data[] = { {GPIO_CFG(110, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA), "sdc3_clk"}, {GPIO_CFG(111, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc3_cmd"}, {GPIO_CFG(116, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc3_dat_3"}, {GPIO_CFG(117, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc3_dat_2"}, {GPIO_CFG(118, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc3_dat_1"}, {GPIO_CFG(119, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc3_dat_0"}, }; static struct msm_gpio sdc3_sleep_cfg_data[] = { {GPIO_CFG(110, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "sdc3_clk"}, {GPIO_CFG(111, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "sdc3_cmd"}, {GPIO_CFG(116, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "sdc3_dat_3"}, {GPIO_CFG(117, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "sdc3_dat_2"}, {GPIO_CFG(118, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "sdc3_dat_1"}, {GPIO_CFG(119, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "sdc3_dat_0"}, }; static struct msm_gpio sdc4_cfg_data[] = { {GPIO_CFG(58, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA), "sdc4_clk"}, {GPIO_CFG(59, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc4_cmd"}, {GPIO_CFG(60, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc4_dat_3"}, {GPIO_CFG(61, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc4_dat_2"}, {GPIO_CFG(62, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc4_dat_1"}, {GPIO_CFG(63, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc4_dat_0"}, }; static struct sdcc_gpio sdcc_cfg_data[] = { { .cfg_data = sdc1_cfg_data, .size = ARRAY_SIZE(sdc1_cfg_data), .sleep_cfg_data = NULL, }, { .cfg_data = sdc2_cfg_data, .size = ARRAY_SIZE(sdc2_cfg_data), .sleep_cfg_data = NULL, }, { .cfg_data = sdc3_cfg_data, .size = 
ARRAY_SIZE(sdc3_cfg_data), .sleep_cfg_data = sdc3_sleep_cfg_data, }, { .cfg_data = sdc4_cfg_data, .size = ARRAY_SIZE(sdc4_cfg_data), .sleep_cfg_data = NULL, }, }; static struct regulator *sdcc_vreg_data[ARRAY_SIZE(sdcc_cfg_data)]; static unsigned long vreg_sts, gpio_sts; static uint32_t msm_sdcc_setup_gpio(int dev_id, unsigned int enable) { int rc = 0; struct sdcc_gpio *curr; curr = &sdcc_cfg_data[dev_id - 1]; if (!(test_bit(dev_id, &gpio_sts)^enable)) return rc; if (enable) { set_bit(dev_id, &gpio_sts); rc = msm_gpios_request_enable(curr->cfg_data, curr->size); if (rc) printk(KERN_ERR "%s: Failed to turn on GPIOs for slot %d\n", __func__, dev_id); } else { clear_bit(dev_id, &gpio_sts); if (curr->sleep_cfg_data) { msm_gpios_enable(curr->sleep_cfg_data, curr->size); msm_gpios_free(curr->sleep_cfg_data, curr->size); } else { msm_gpios_disable_free(curr->cfg_data, curr->size); } } return rc; } static uint32_t msm_sdcc_setup_vreg(int dev_id, unsigned int enable) { int rc = 0; struct regulator *curr = sdcc_vreg_data[dev_id - 1]; static int enabled_once[] = {0, 0, 0, 0}; if (test_bit(dev_id, &vreg_sts) == enable) return rc; if (dev_id == 4) { if (enable) { pr_debug("Enable Vdd dev_%d\n", dev_id); gpio_set_value_cansleep( PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_SDC4_PWR_EN_N), 0); set_bit(dev_id, &vreg_sts); } else { pr_debug("Disable Vdd dev_%d\n", dev_id); gpio_set_value_cansleep( PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_SDC4_PWR_EN_N), 1); clear_bit(dev_id, &vreg_sts); } } if (!enable || enabled_once[dev_id - 1]) return 0; if (!curr) return -ENODEV; if (IS_ERR(curr)) return PTR_ERR(curr); if (enable) { set_bit(dev_id, &vreg_sts); rc = regulator_enable(curr); if (rc) pr_err("%s: could not enable regulator: %d\n", __func__, rc); enabled_once[dev_id - 1] = 1; } else { clear_bit(dev_id, &vreg_sts); rc = regulator_disable(curr); if (rc) pr_err("%s: could not disable regulator: %d\n", __func__, rc); } return rc; } static uint32_t msm_sdcc_setup_power(struct device *dv, unsigned int vdd) { 
int rc = 0; struct platform_device *pdev; pdev = container_of(dv, struct platform_device, dev); rc = msm_sdcc_setup_gpio(pdev->id, (vdd ? 1 : 0)); if (rc) goto out; if (pdev->id == 4) /* S3 is always ON and cannot be disabled */ rc = msm_sdcc_setup_vreg(pdev->id, (vdd ? 1 : 0)); out: return rc; } #if defined(CONFIG_MMC_MSM_SDC1_SUPPORT) && \ defined(CONFIG_CSDIO_VENDOR_ID) && \ defined(CONFIG_CSDIO_DEVICE_ID) && \ (CONFIG_CSDIO_VENDOR_ID == 0x70 && CONFIG_CSDIO_DEVICE_ID == 0x1117) #define MBP_ON 1 #define MBP_OFF 0 #define MBP_RESET_N \ GPIO_CFG(44, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA) #define MBP_INT0 \ GPIO_CFG(46, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA) #define MBP_MODE_CTRL_0 \ GPIO_CFG(35, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA) #define MBP_MODE_CTRL_1 \ GPIO_CFG(36, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA) #define MBP_MODE_CTRL_2 \ GPIO_CFG(34, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA) #define TSIF_EN \ GPIO_CFG(35, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA) #define TSIF_DATA \ GPIO_CFG(36, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA) #define TSIF_CLK \ GPIO_CFG(34, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA) static struct msm_gpio mbp_cfg_data[] = { {GPIO_CFG(44, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA), "mbp_reset"}, {GPIO_CFG(85, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA), "mbp_io_voltage"}, }; static int mbp_config_gpios_pre_init(int enable) { int rc = 0; if (enable) { rc = msm_gpios_request_enable(mbp_cfg_data, ARRAY_SIZE(mbp_cfg_data)); if (rc) { printk(KERN_ERR "%s: Failed to turnon GPIOs for mbp chip(%d)\n", __func__, rc); } } else msm_gpios_disable_free(mbp_cfg_data, ARRAY_SIZE(mbp_cfg_data)); return rc; } static struct regulator_bulk_data mbp_regs_io[2]; static struct regulator_bulk_data mbp_regs_rf[2]; static struct regulator_bulk_data mbp_regs_adc[1]; static struct regulator_bulk_data mbp_regs_core[1]; static int mbp_init_regs(struct device 
*dev) { struct regulator_bulk_data regs[] = { /* Analog and I/O regs */ { .supply = "gp4", .min_uV = 2600000, .max_uV = 2600000 }, { .supply = "s3", .min_uV = 1800000, .max_uV = 1800000 }, /* RF regs */ { .supply = "s2", .min_uV = 1300000, .max_uV = 1300000 }, { .supply = "rf", .min_uV = 2600000, .max_uV = 2600000 }, /* ADC regs */ { .supply = "s4", .min_uV = 2200000, .max_uV = 2200000 }, /* Core regs */ { .supply = "gp16", .min_uV = 1200000, .max_uV = 1200000 }, }; struct regulator_bulk_data *regptr = regs; int rc; rc = regulator_bulk_get(dev, ARRAY_SIZE(regs), regs); if (rc) { dev_err(dev, "%s: could not get regulators: %d\n", __func__, rc); goto out; } rc = regulator_bulk_set_voltage(ARRAY_SIZE(regs), regs); if (rc) { dev_err(dev, "%s: could not set voltages: %d\n", __func__, rc); goto reg_free; } memcpy(mbp_regs_io, regptr, sizeof(mbp_regs_io)); regptr += ARRAY_SIZE(mbp_regs_io); memcpy(mbp_regs_rf, regptr, sizeof(mbp_regs_rf)); regptr += ARRAY_SIZE(mbp_regs_rf); memcpy(mbp_regs_adc, regptr, sizeof(mbp_regs_adc)); regptr += ARRAY_SIZE(mbp_regs_adc); memcpy(mbp_regs_core, regptr, sizeof(mbp_regs_core)); return 0; reg_free: regulator_bulk_free(ARRAY_SIZE(regs), regs); out: return rc; } static int mbp_setup_rf_vregs(int state) { return state ? regulator_bulk_enable(ARRAY_SIZE(mbp_regs_rf), mbp_regs_rf) : regulator_bulk_disable(ARRAY_SIZE(mbp_regs_rf), mbp_regs_rf); } static int mbp_setup_vregs(int state) { return state ? regulator_bulk_enable(ARRAY_SIZE(mbp_regs_io), mbp_regs_io) : regulator_bulk_disable(ARRAY_SIZE(mbp_regs_io), mbp_regs_io); } static int mbp_set_tcxo_en(int enable) { int rc; const char *id = "UBMC"; struct vreg *vreg_analog = NULL; rc = pmapp_clock_vote(id, PMAPP_CLOCK_ID_A1, enable ? PMAPP_CLOCK_VOTE_ON : PMAPP_CLOCK_VOTE_OFF); if (rc < 0) { printk(KERN_ERR "%s: unable to %svote for a1 clk\n", __func__, enable ? 
"" : "de-"); return -EIO; } return rc; } static void mbp_set_freeze_io(int state) { if (state) gpio_set_value(85, 0); else gpio_set_value(85, 1); } static int mbp_set_core_voltage_en(int enable) { static bool is_enabled; int rc = 0; if (enable && !is_enabled) { rc = regulator_bulk_enable(ARRAY_SIZE(mbp_regs_core), mbp_regs_core); if (rc) { pr_err("%s: could not enable regulators: %d\n", __func__, rc); } else { is_enabled = true; } } return rc; } static void mbp_set_reset(int state) { if (state) gpio_set_value(GPIO_PIN(MBP_RESET_N), 0); else gpio_set_value(GPIO_PIN(MBP_RESET_N), 1); } static int mbp_config_interface_mode(int state) { if (state) { gpio_tlmm_config(MBP_MODE_CTRL_0, GPIO_CFG_ENABLE); gpio_tlmm_config(MBP_MODE_CTRL_1, GPIO_CFG_ENABLE); gpio_tlmm_config(MBP_MODE_CTRL_2, GPIO_CFG_ENABLE); gpio_set_value(GPIO_PIN(MBP_MODE_CTRL_0), 0); gpio_set_value(GPIO_PIN(MBP_MODE_CTRL_1), 1); gpio_set_value(GPIO_PIN(MBP_MODE_CTRL_2), 0); } else { gpio_tlmm_config(MBP_MODE_CTRL_0, GPIO_CFG_DISABLE); gpio_tlmm_config(MBP_MODE_CTRL_1, GPIO_CFG_DISABLE); gpio_tlmm_config(MBP_MODE_CTRL_2, GPIO_CFG_DISABLE); } return 0; } static int mbp_setup_adc_vregs(int state) { return state ? 
regulator_bulk_enable(ARRAY_SIZE(mbp_regs_adc), mbp_regs_adc) : regulator_bulk_disable(ARRAY_SIZE(mbp_regs_adc), mbp_regs_adc); } static int mbp_power_up(void) { int rc; rc = mbp_config_gpios_pre_init(MBP_ON); if (rc) goto exit; pr_debug("%s: mbp_config_gpios_pre_init() done\n", __func__); rc = mbp_setup_vregs(MBP_ON); if (rc) goto exit; pr_debug("%s: gp4 (2.6) and s3 (1.8) done\n", __func__); rc = mbp_set_tcxo_en(MBP_ON); if (rc) goto exit; pr_debug("%s: tcxo clock done\n", __func__); mbp_set_freeze_io(MBP_OFF); pr_debug("%s: set gpio 85 to 1 done\n", __func__); udelay(100); mbp_set_reset(MBP_ON); udelay(300); rc = mbp_config_interface_mode(MBP_ON); if (rc) goto exit; pr_debug("%s: mbp_config_interface_mode() done\n", __func__); udelay(100 + mbp_set_core_voltage_en(MBP_ON)); pr_debug("%s: power gp16 1.2V done\n", __func__); mbp_set_freeze_io(MBP_ON); pr_debug("%s: set gpio 85 to 0 done\n", __func__); udelay(100); rc = mbp_setup_rf_vregs(MBP_ON); if (rc) goto exit; pr_debug("%s: s2 1.3V and rf 2.6V done\n", __func__); rc = mbp_setup_adc_vregs(MBP_ON); if (rc) goto exit; pr_debug("%s: s4 2.2V done\n", __func__); udelay(200); mbp_set_reset(MBP_OFF); pr_debug("%s: close gpio 44 done\n", __func__); msleep(20); exit: return rc; } static int mbp_power_down(void) { int rc; mbp_set_reset(MBP_ON); pr_debug("%s: mbp_set_reset(MBP_ON) done\n", __func__); udelay(100); rc = mbp_setup_adc_vregs(MBP_OFF); if (rc) goto exit; pr_debug("%s: vreg_disable(vreg_adc) done\n", __func__); udelay(5); rc = mbp_setup_rf_vregs(MBP_OFF); if (rc) goto exit; pr_debug("%s: mbp_setup_rf_vregs(MBP_OFF) done\n", __func__); udelay(5); mbp_set_freeze_io(MBP_OFF); pr_debug("%s: mbp_set_freeze_io(MBP_OFF) done\n", __func__); udelay(100); rc = mbp_set_core_voltage_en(MBP_OFF); if (rc) goto exit; pr_debug("%s: mbp_set_core_voltage_en(MBP_OFF) done\n", __func__); rc = mbp_set_tcxo_en(MBP_OFF); if (rc) goto exit; pr_debug("%s: mbp_set_tcxo_en(MBP_OFF) done\n", __func__); rc = mbp_setup_vregs(MBP_OFF); if 
(rc) goto exit; pr_debug("%s: mbp_setup_vregs(MBP_OFF) done\n", __func__); rc = mbp_config_gpios_pre_init(MBP_OFF); if (rc) goto exit; exit: return rc; } static void (*mbp_status_notify_cb)(int card_present, void *dev_id); static void *mbp_status_notify_cb_devid; static int mbp_power_status; static int mbp_power_init_done; static uint32_t mbp_setup_power(struct device *dv, unsigned int power_status) { int rc = 0; struct platform_device *pdev; pdev = container_of(dv, struct platform_device, dev); if (power_status == mbp_power_status) goto exit; if (power_status) { pr_debug("turn on power of mbp slot"); rc = mbp_power_up(); mbp_power_status = 1; } else { pr_debug("turn off power of mbp slot"); rc = mbp_power_down(); mbp_power_status = 0; } exit: return rc; }; int mbp_register_status_notify(void (*callback)(int, void *), void *dev_id) { mbp_status_notify_cb = callback; mbp_status_notify_cb_devid = dev_id; return 0; } static unsigned int mbp_status(struct device *dev) { return mbp_power_status; } static uint32_t msm_sdcc_setup_power_mbp(struct device *dv, unsigned int vdd) { struct platform_device *pdev; uint32_t rc = 0; pdev = container_of(dv, struct platform_device, dev); rc = msm_sdcc_setup_power(dv, vdd); if (rc) { pr_err("%s: Failed to setup power (%d)\n", __func__, rc); goto out; } if (!mbp_power_init_done) { rc = mbp_init_regs(dv); if (rc) { dev_err(dv, "%s: regulator init failed: %d\n", __func__, rc); goto out; } mbp_setup_power(dv, 1); mbp_setup_power(dv, 0); mbp_power_init_done = 1; } if (vdd >= 0x8000) { rc = mbp_setup_power(dv, (0x8000 == vdd) ? 
0 : 1); if (rc) { pr_err("%s: Failed to config mbp chip power (%d)\n", __func__, rc); goto out; } if (mbp_status_notify_cb) { mbp_status_notify_cb(mbp_power_status, mbp_status_notify_cb_devid); } } out: /* should return 0 only */ return 0; } #endif #endif #ifdef CONFIG_MMC_MSM_SDC4_SUPPORT static unsigned int msm7x30_sdcc_slot_status(struct device *dev) { return (unsigned int) gpio_get_value_cansleep( PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_SD_DET - 1)); } static int msm_sdcc_get_wpswitch(struct device *dv) { void __iomem *wp_addr = 0; uint32_t ret = 0; struct platform_device *pdev; if (!(machine_is_msm7x30_surf())) return -1; pdev = container_of(dv, struct platform_device, dev); wp_addr = ioremap(FPGA_SDCC_STATUS, 4); if (!wp_addr) { pr_err("%s: Could not remap %x\n", __func__, FPGA_SDCC_STATUS); return -ENOMEM; } ret = (((readl(wp_addr) >> 4) >> (pdev->id-1)) & 0x01); pr_info("%s: WP Status for Slot %d = 0x%x \n", __func__, pdev->id, ret); iounmap(wp_addr); return ret; } #endif #if defined(CONFIG_MMC_MSM_SDC1_SUPPORT) #if defined(CONFIG_CSDIO_VENDOR_ID) && \ defined(CONFIG_CSDIO_DEVICE_ID) && \ (CONFIG_CSDIO_VENDOR_ID == 0x70 && CONFIG_CSDIO_DEVICE_ID == 0x1117) static struct mmc_platform_data msm7x30_sdc1_data = { .ocr_mask = MMC_VDD_165_195 | MMC_VDD_27_28 | MMC_VDD_28_29, .translate_vdd = msm_sdcc_setup_power_mbp, .mmc_bus_width = MMC_CAP_4_BIT_DATA, .status = mbp_status, .register_status_notify = mbp_register_status_notify, .msmsdcc_fmin = 144000, .msmsdcc_fmid = 24576000, .msmsdcc_fmax = 24576000, .nonremovable = 0, }; #else static struct mmc_platform_data msm7x30_sdc1_data = { .ocr_mask = MMC_VDD_165_195, .translate_vdd = msm_sdcc_setup_power, .mmc_bus_width = MMC_CAP_4_BIT_DATA, .msmsdcc_fmin = 144000, .msmsdcc_fmid = 24576000, .msmsdcc_fmax = 49152000, .nonremovable = 0, }; #endif #endif #ifdef CONFIG_MMC_MSM_SDC2_SUPPORT static struct mmc_platform_data msm7x30_sdc2_data = { .ocr_mask = MMC_VDD_165_195 | MMC_VDD_27_28, .translate_vdd = msm_sdcc_setup_power, 
#ifdef CONFIG_MMC_MSM_SDC2_8_BIT_SUPPORT .mmc_bus_width = MMC_CAP_8_BIT_DATA, #else .mmc_bus_width = MMC_CAP_4_BIT_DATA, #endif .msmsdcc_fmin = 144000, .msmsdcc_fmid = 24576000, .msmsdcc_fmax = 49152000, .nonremovable = 1, }; #endif #ifdef CONFIG_MMC_MSM_SDC3_SUPPORT static struct mmc_platform_data msm7x30_sdc3_data = { .ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29, .translate_vdd = msm_sdcc_setup_power, .mmc_bus_width = MMC_CAP_4_BIT_DATA, .sdiowakeup_irq = MSM_GPIO_TO_INT(118), .msmsdcc_fmin = 144000, .msmsdcc_fmid = 24576000, .msmsdcc_fmax = 49152000, .nonremovable = 0, }; #endif #ifdef CONFIG_MMC_MSM_SDC4_SUPPORT static struct mmc_platform_data msm7x30_sdc4_data = { .ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29, .translate_vdd = msm_sdcc_setup_power, .mmc_bus_width = MMC_CAP_4_BIT_DATA, .status = msm7x30_sdcc_slot_status, .status_irq = PM8058_GPIO_IRQ(PMIC8058_IRQ_BASE, PMIC_GPIO_SD_DET - 1), .irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, .wpswitch = msm_sdcc_get_wpswitch, .msmsdcc_fmin = 144000, .msmsdcc_fmid = 24576000, .msmsdcc_fmax = 49152000, .nonremovable = 0, }; #endif #ifdef CONFIG_MMC_MSM_SDC1_SUPPORT static int msm_sdc1_lvlshft_enable(void) { static struct regulator *ldo5; int rc; /* Enable LDO5, an input to the FET that powers slot 1 */ ldo5 = regulator_get(NULL, "ldo5"); if (IS_ERR(ldo5)) { rc = PTR_ERR(ldo5); pr_err("%s: could not get ldo5: %d\n", __func__, rc); goto out; } rc = regulator_set_voltage(ldo5, 2850000, 2850000); if (rc) { pr_err("%s: could not set ldo5 voltage: %d\n", __func__, rc); goto ldo5_free; } rc = regulator_enable(ldo5); if (rc) { pr_err("%s: could not enable ldo5: %d\n", __func__, rc); goto ldo5_free; } /* Enable GPIO 35, to turn on the FET that powers slot 1 */ rc = msm_gpios_request_enable(sdc1_lvlshft_cfg_data, ARRAY_SIZE(sdc1_lvlshft_cfg_data)); if (rc) printk(KERN_ERR "%s: Failed to enable GPIO 35\n", __func__); rc = gpio_direction_output(GPIO_PIN(sdc1_lvlshft_cfg_data[0].gpio_cfg), 1); if (rc) printk(KERN_ERR "%s: 
Failed to turn on GPIO 35\n", __func__); return 0; ldo5_free: regulator_put(ldo5); out: ldo5 = NULL; return rc; } #endif static int mmc_regulator_init(int sdcc_no, const char *supply, int uV) { int rc; BUG_ON(sdcc_no < 1 || sdcc_no > 4); sdcc_no--; sdcc_vreg_data[sdcc_no] = regulator_get(NULL, supply); if (IS_ERR(sdcc_vreg_data[sdcc_no])) { rc = PTR_ERR(sdcc_vreg_data[sdcc_no]); pr_err("%s: could not get regulator \"%s\": %d\n", __func__, supply, rc); goto out; } rc = regulator_set_voltage(sdcc_vreg_data[sdcc_no], uV, uV); if (rc) { pr_err("%s: could not set voltage for \"%s\" to %d uV: %d\n", __func__, supply, uV, rc); goto reg_free; } return rc; reg_free: regulator_put(sdcc_vreg_data[sdcc_no]); out: sdcc_vreg_data[sdcc_no] = NULL; return rc; } static void __init msm7x30_init_mmc(void) { #ifdef CONFIG_MMC_MSM_SDC1_SUPPORT if (mmc_regulator_init(1, "s3", 1800000)) goto out1; if (machine_is_msm7x30_fluid()) { msm7x30_sdc1_data.ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29; if (msm_sdc1_lvlshft_enable()) { pr_err("%s: could not enable level shift\n"); goto out1; } } msm_add_sdcc(1, &msm7x30_sdc1_data); out1: #endif #ifdef CONFIG_MMC_MSM_SDC2_SUPPORT if (mmc_regulator_init(2, "s3", 1800000)) goto out2; if (machine_is_msm8x55_svlte_surf()) msm7x30_sdc2_data.msmsdcc_fmax = 24576000; if (machine_is_msm8x55_svlte_surf() || machine_is_msm8x55_svlte_ffa()) { msm7x30_sdc2_data.sdiowakeup_irq = MSM_GPIO_TO_INT(68); msm7x30_sdc2_data.is_sdio_al_client = 1; } msm_add_sdcc(2, &msm7x30_sdc2_data); out2: #endif #ifdef CONFIG_MMC_MSM_SDC3_SUPPORT if (mmc_regulator_init(3, "s3", 1800000)) goto out3; msm_sdcc_setup_gpio(3, 1); msm_add_sdcc(3, &msm7x30_sdc3_data); out3: #endif #ifdef CONFIG_MMC_MSM_SDC4_SUPPORT if (mmc_regulator_init(4, "mmc", 2850000)) return; msm_add_sdcc(4, &msm7x30_sdc4_data); #endif } static void __init msm7x30_init_nand(void) { char *build_id; struct flash_platform_data *plat_data; build_id = socinfo_get_build_id(); if (build_id == NULL) { pr_err("%s: Build ID not 
available from socinfo\n", __func__); return; } if (build_id[8] == 'C' && !msm_gpios_request_enable(msm_nand_ebi2_cfg_data, ARRAY_SIZE(msm_nand_ebi2_cfg_data))) { plat_data = msm_device_nand.dev.platform_data; plat_data->interleave = 1; printk(KERN_INFO "%s: Interleave mode Build ID found\n", __func__); } } #ifdef CONFIG_SERIAL_MSM_CONSOLE static struct msm_gpio uart2_config_data[] = { { GPIO_CFG(49, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), "UART2_RFR"}, { GPIO_CFG(50, 2, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), "UART2_CTS"}, { GPIO_CFG(51, 2, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), "UART2_Rx"}, { GPIO_CFG(52, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), "UART2_Tx"}, }; static void msm7x30_init_uart2(void) { msm_gpios_request_enable(uart2_config_data, ARRAY_SIZE(uart2_config_data)); } #endif /* TSIF begin */ #if defined(CONFIG_TSIF) || defined(CONFIG_TSIF_MODULE) #define TSIF_B_SYNC GPIO_CFG(37, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA) #define TSIF_B_DATA GPIO_CFG(36, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA) #define TSIF_B_EN GPIO_CFG(35, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA) #define TSIF_B_CLK GPIO_CFG(34, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA) static const struct msm_gpio tsif_gpios[] = { { .gpio_cfg = TSIF_B_CLK, .label = "tsif_clk", }, { .gpio_cfg = TSIF_B_EN, .label = "tsif_en", }, { .gpio_cfg = TSIF_B_DATA, .label = "tsif_data", }, { .gpio_cfg = TSIF_B_SYNC, .label = "tsif_sync", }, }; static struct msm_tsif_platform_data tsif_platform_data = { .num_gpios = ARRAY_SIZE(tsif_gpios), .gpios = tsif_gpios, .tsif_pclk = "iface_clk", .tsif_ref_clk = "ref_clk", }; #endif /* defined(CONFIG_TSIF) || defined(CONFIG_TSIF_MODULE) */ /* TSIF end */ static void __init pmic8058_leds_init(void) { if (machine_is_msm7x30_surf()) pm8058_7x30_data.leds_pdata = &pm8058_surf_leds_data; else if (!machine_is_msm7x30_fluid()) pm8058_7x30_data.leds_pdata = &pm8058_ffa_leds_data; else if 
(machine_is_msm7x30_fluid()) pm8058_7x30_data.leds_pdata = &pm8058_fluid_leds_data; } static struct msm_spm_platform_data msm_spm_data __initdata = { .reg_base_addr = MSM_SAW0_BASE, .reg_init_values[MSM_SPM_REG_SAW_CFG] = 0x05, .reg_init_values[MSM_SPM_REG_SAW_SPM_CTL] = 0x18, .reg_init_values[MSM_SPM_REG_SAW_SPM_SLP_TMR_DLY] = 0x00006666, .reg_init_values[MSM_SPM_REG_SAW_SPM_WAKE_TMR_DLY] = 0xFF000666, .reg_init_values[MSM_SPM_REG_SAW_SLP_CLK_EN] = 0x01, .reg_init_values[MSM_SPM_REG_SAW_SLP_HSFS_PRECLMP_EN] = 0x03, .reg_init_values[MSM_SPM_REG_SAW_SLP_HSFS_POSTCLMP_EN] = 0x00, .reg_init_values[MSM_SPM_REG_SAW_SLP_CLMP_EN] = 0x01, .reg_init_values[MSM_SPM_REG_SAW_SLP_RST_EN] = 0x00, .reg_init_values[MSM_SPM_REG_SAW_SPM_MPM_CFG] = 0x00, .awake_vlevel = 0xF2, .retention_vlevel = 0xE0, .collapse_vlevel = 0x72, .retention_mid_vlevel = 0xE0, .collapse_mid_vlevel = 0xE0, .vctl_timeout_us = 50, }; #if defined(CONFIG_TOUCHSCREEN_TSC2007) || \ defined(CONFIG_TOUCHSCREEN_TSC2007_MODULE) #define TSC2007_TS_PEN_INT 20 static struct msm_gpio tsc2007_config_data[] = { { GPIO_CFG(TSC2007_TS_PEN_INT, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "tsc2007_irq" }, }; static struct regulator_bulk_data tsc2007_regs[] = { { .supply = "s3", .min_uV = 1800000, .max_uV = 1800000 }, { .supply = "s2", .min_uV = 1300000, .max_uV = 1300000 }, }; static int tsc2007_init(void) { int rc; rc = regulator_bulk_get(NULL, ARRAY_SIZE(tsc2007_regs), tsc2007_regs); if (rc) { pr_err("%s: could not get regulators: %d\n", __func__, rc); goto out; } rc = regulator_bulk_set_voltage(ARRAY_SIZE(tsc2007_regs), tsc2007_regs); if (rc) { pr_err("%s: could not set voltages: %d\n", __func__, rc); goto reg_free; } rc = regulator_bulk_enable(ARRAY_SIZE(tsc2007_regs), tsc2007_regs); if (rc) { pr_err("%s: could not enable regulators: %d\n", __func__, rc); goto reg_free; } rc = msm_gpios_request_enable(tsc2007_config_data, ARRAY_SIZE(tsc2007_config_data)); if (rc) { pr_err("%s: Unable to request gpios\n", 
__func__); goto reg_disable; } return 0; reg_disable: regulator_bulk_disable(ARRAY_SIZE(tsc2007_regs), tsc2007_regs); reg_free: regulator_bulk_free(ARRAY_SIZE(tsc2007_regs), tsc2007_regs); out: return rc; } static int tsc2007_get_pendown_state(void) { int rc; rc = gpio_get_value(TSC2007_TS_PEN_INT); if (rc < 0) { pr_err("%s: MSM GPIO %d read failed\n", __func__, TSC2007_TS_PEN_INT); return rc; } return (rc == 0 ? 1 : 0); } static void tsc2007_exit(void) { regulator_bulk_disable(ARRAY_SIZE(tsc2007_regs), tsc2007_regs); regulator_bulk_free(ARRAY_SIZE(tsc2007_regs), tsc2007_regs); msm_gpios_disable_free(tsc2007_config_data, ARRAY_SIZE(tsc2007_config_data)); } static int tsc2007_power_shutdown(bool enable) { int rc; rc = (enable == false) ? regulator_bulk_enable(ARRAY_SIZE(tsc2007_regs), tsc2007_regs) : regulator_bulk_disable(ARRAY_SIZE(tsc2007_regs), tsc2007_regs); if (rc) { pr_err("%s: could not %sable regulators: %d\n", __func__, enable ? "dis" : "en", rc); return rc; } if (enable == false) msleep(20); return 0; } static struct tsc2007_platform_data tsc2007_ts_data = { .model = 2007, .x_plate_ohms = 300, .min_x = 210, .max_x = 3832, .min_y = 150, .max_y = 3936, .irq_flags = IRQF_TRIGGER_LOW, .init_platform_hw = tsc2007_init, .exit_platform_hw = tsc2007_exit, .power_shutdown = tsc2007_power_shutdown, .invert_x = true, .invert_y = true, /* REVISIT: Temporary fix for reversed pressure */ .invert_z1 = true, .invert_z2 = true, .get_pendown_state = tsc2007_get_pendown_state, }; static struct i2c_board_info tsc_i2c_board_info[] = { { I2C_BOARD_INFO("tsc2007", 0x48), .irq = MSM_GPIO_TO_INT(TSC2007_TS_PEN_INT), .platform_data = &tsc2007_ts_data, }, }; #endif static struct regulator_bulk_data regs_isa1200[] = { { .supply = "gp7", .min_uV = 1800000, .max_uV = 1800000 }, { .supply = "gp10", .min_uV = 2600000, .max_uV = 2600000 }, }; static int isa1200_power(int vreg_on) { int rc = 0; rc = vreg_on ? 
regulator_bulk_enable(ARRAY_SIZE(regs_isa1200), regs_isa1200) : regulator_bulk_disable(ARRAY_SIZE(regs_isa1200), regs_isa1200); if (rc) { pr_err("%s: could not %sable regulators: %d\n", __func__, vreg_on ? "en" : "dis", rc); goto out; } /* vote for DO buffer */ rc = pmapp_clock_vote("VIBR", PMAPP_CLOCK_ID_DO, vreg_on ? PMAPP_CLOCK_VOTE_ON : PMAPP_CLOCK_VOTE_OFF); if (rc) { pr_err("%s: unable to %svote for d0 clk\n", __func__, vreg_on ? "" : "de-"); goto vreg_fail; } return 0; vreg_fail: if (vreg_on) regulator_bulk_disable(ARRAY_SIZE(regs_isa1200), regs_isa1200); else regulator_bulk_enable(ARRAY_SIZE(regs_isa1200), regs_isa1200); out: return rc; } static int isa1200_dev_setup(bool enable) { int rc; if (enable == true) { rc = regulator_bulk_get(NULL, ARRAY_SIZE(regs_isa1200), regs_isa1200); if (rc) { pr_err("%s: could not get regulators: %d\n", __func__, rc); goto out; } rc = regulator_bulk_set_voltage(ARRAY_SIZE(regs_isa1200), regs_isa1200); if (rc) { pr_err("%s: could not set voltages: %d\n", __func__, rc); goto reg_free; } rc = gpio_tlmm_config(GPIO_CFG(HAP_LVL_SHFT_MSM_GPIO, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG_ENABLE); if (rc) { pr_err("%s: Could not configure gpio %d\n", __func__, HAP_LVL_SHFT_MSM_GPIO); goto reg_free; } rc = gpio_request(HAP_LVL_SHFT_MSM_GPIO, "haptics_shft_lvl_oe"); if (rc) { pr_err("%s: unable to request gpio %d (%d)\n", __func__, HAP_LVL_SHFT_MSM_GPIO, rc); goto reg_free; } gpio_set_value(HAP_LVL_SHFT_MSM_GPIO, 1); } else { regulator_bulk_free(ARRAY_SIZE(regs_isa1200), regs_isa1200); gpio_free(HAP_LVL_SHFT_MSM_GPIO); } return 0; reg_free: regulator_bulk_free(ARRAY_SIZE(regs_isa1200), regs_isa1200); out: return rc; } static struct isa1200_platform_data isa1200_1_pdata = { .name = "vibrator", .power_on = isa1200_power, .dev_setup = isa1200_dev_setup, .pwm_ch_id = 1, /*channel id*/ /*gpio to enable haptic*/ .hap_en_gpio = PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_HAP_ENABLE), .hap_len_gpio = -1, .max_timeout = 15000, .mode_ctrl 
= PWM_GEN_MODE, .pwm_fd = { .pwm_div = 256, }, .is_erm = false, .smart_en = true, .ext_clk_en = true, .chip_en = 1, }; static struct i2c_board_info msm_isa1200_board_info[] = { { I2C_BOARD_INFO("isa1200_1", 0x90>>1), .platform_data = &isa1200_1_pdata, }, }; static int kp_flip_mpp_config(void) { struct pm8xxx_mpp_config_data kp_flip_mpp = { .type = PM8XXX_MPP_TYPE_D_INPUT, .level = PM8018_MPP_DIG_LEVEL_S3, .control = PM8XXX_MPP_DIN_TO_INT, }; return pm8xxx_mpp_config(PM8058_MPP_PM_TO_SYS(PM_FLIP_MPP), &kp_flip_mpp); } static struct flip_switch_pdata flip_switch_data = { .name = "kp_flip_switch", .flip_gpio = PM8058_GPIO_PM_TO_SYS(PM8058_GPIOS) + PM_FLIP_MPP, .left_key = KEY_OPEN, .right_key = KEY_CLOSE, .active_low = 0, .wakeup = 1, .flip_mpp_config = kp_flip_mpp_config, }; static struct platform_device flip_switch_device = { .name = "kp_flip_switch", .id = -1, .dev = { .platform_data = &flip_switch_data, } }; static struct regulator_bulk_data regs_tma300[] = { { .supply = "gp6", .min_uV = 3050000, .max_uV = 3100000 }, { .supply = "gp7", .min_uV = 1800000, .max_uV = 1800000 }, }; static int tma300_power(int vreg_on) { int rc; rc = vreg_on ? regulator_bulk_enable(ARRAY_SIZE(regs_tma300), regs_tma300) : regulator_bulk_disable(ARRAY_SIZE(regs_tma300), regs_tma300); if (rc) pr_err("%s: could not %sable regulators: %d\n", __func__, vreg_on ? 
"en" : "dis", rc); return rc; } #define TS_GPIO_IRQ 150 static int tma300_dev_setup(bool enable) { int rc; if (enable) { rc = regulator_bulk_get(NULL, ARRAY_SIZE(regs_tma300), regs_tma300); if (rc) { pr_err("%s: could not get regulators: %d\n", __func__, rc); goto out; } rc = regulator_bulk_set_voltage(ARRAY_SIZE(regs_tma300), regs_tma300); if (rc) { pr_err("%s: could not set voltages: %d\n", __func__, rc); goto reg_free; } /* enable interrupt gpio */ rc = gpio_tlmm_config(GPIO_CFG(TS_GPIO_IRQ, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_6MA), GPIO_CFG_ENABLE); if (rc) { pr_err("%s: Could not configure gpio %d\n", __func__, TS_GPIO_IRQ); goto reg_free; } /* virtual keys */ tma300_vkeys_attr.attr.name = "virtualkeys.msm_tma300_ts"; properties_kobj = kobject_create_and_add("board_properties", NULL); if (!properties_kobj) { pr_err("%s: failed to create a kobject " "for board_properties\n", __func__); rc = -ENOMEM; goto reg_free; } rc = sysfs_create_group(properties_kobj, &tma300_properties_attr_group); if (rc) { pr_err("%s: failed to create a sysfs entry %s\n", __func__, tma300_vkeys_attr.attr.name); goto kobj_free; } } else { regulator_bulk_free(ARRAY_SIZE(regs_tma300), regs_tma300); /* destroy virtual keys */ if (properties_kobj) { sysfs_remove_group(properties_kobj, &tma300_properties_attr_group); kobject_put(properties_kobj); } } return 0; kobj_free: kobject_put(properties_kobj); properties_kobj = NULL; reg_free: regulator_bulk_free(ARRAY_SIZE(regs_tma300), regs_tma300); out: return rc; } static struct cy8c_ts_platform_data cy8ctma300_pdata = { .power_on = tma300_power, .dev_setup = tma300_dev_setup, .ts_name = "msm_tma300_ts", .dis_min_x = 0, .dis_max_x = 479, .dis_min_y = 0, .dis_max_y = 799, .res_x = 479, .res_y = 1009, .min_tid = 1, .max_tid = 255, .min_touch = 0, .max_touch = 255, .min_width = 0, .max_width = 255, .invert_y = 1, .nfingers = 4, .irq_gpio = TS_GPIO_IRQ, .resout_gpio = -1, }; static struct i2c_board_info cy8ctma300_board_info[] = { { 
I2C_BOARD_INFO("cy8ctma300", 0x2), .platform_data = &cy8ctma300_pdata, } }; static void __init msm7x30_init(void) { int rc; unsigned smem_size; uint32_t usb_hub_gpio_cfg_value = GPIO_CFG(56, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA); uint32_t soc_version = 0; soc_version = socinfo_get_version(); msm_clock_init(&msm7x30_clock_init_data); #ifdef CONFIG_SERIAL_MSM_CONSOLE msm7x30_init_uart2(); #endif msm_spm_init(&msm_spm_data, 1); platform_device_register(&msm7x30_device_acpuclk); if (machine_is_msm7x30_surf() || machine_is_msm7x30_fluid()) msm7x30_cfg_smsc911x(); #ifdef CONFIG_USB_MSM_OTG_72K if (SOCINFO_VERSION_MAJOR(soc_version) >= 2 && SOCINFO_VERSION_MINOR(soc_version) >= 1) { pr_debug("%s: SOC Version:2.(1 or more)\n", __func__); msm_otg_pdata.ldo_set_voltage = 0; } msm_device_otg.dev.platform_data = &msm_otg_pdata; #ifdef CONFIG_USB_GADGET msm_otg_pdata.swfi_latency = msm_pm_data [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].latency; msm_device_gadget_peripheral.dev.platform_data = &msm_gadget_pdata; #endif #endif msm_uart_dm1_pdata.wakeup_irq = gpio_to_irq(136); msm_device_uart_dm1.dev.platform_data = &msm_uart_dm1_pdata; #if defined(CONFIG_TSIF) || defined(CONFIG_TSIF_MODULE) msm_device_tsif.dev.platform_data = &tsif_platform_data; #endif if (machine_is_msm7x30_fluid()) { msm_adc_pdata.dev_names = msm_adc_fluid_device_names; msm_adc_pdata.num_adc = ARRAY_SIZE(msm_adc_fluid_device_names); } else { msm_adc_pdata.dev_names = msm_adc_surf_device_names; msm_adc_pdata.num_adc = ARRAY_SIZE(msm_adc_surf_device_names); } pmic8058_leds_init(); buses_init(); #ifdef CONFIG_MSM_SSBI msm_device_ssbi_pmic1.dev.platform_data = &msm7x30_ssbi_pm8058_pdata; #endif platform_add_devices(msm_footswitch_devices, msm_num_footswitch_devices); platform_add_devices(devices, ARRAY_SIZE(devices)); #ifdef CONFIG_USB_EHCI_MSM_72K msm_add_host(0, &msm_usb_host_pdata); #endif #ifdef CONFIG_MSM_CAMERA_V4L2 msm7x30_init_cam(); #endif msm7x30_init_mmc(); msm7x30_init_nand(); 
msm_qsd_spi_init(); #ifdef CONFIG_SPI_QSD if (machine_is_msm7x30_fluid()) spi_register_board_info(lcdc_sharp_spi_board_info, ARRAY_SIZE(lcdc_sharp_spi_board_info)); else spi_register_board_info(lcdc_toshiba_spi_board_info, ARRAY_SIZE(lcdc_toshiba_spi_board_info)); #endif atv_dac_power_init(); sensors_ldo_init(); hdmi_init_regs(); msm_fb_add_devices(); msm_pm_set_platform_data(msm_pm_data, ARRAY_SIZE(msm_pm_data)); BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata)); msm_pm_register_irqs(); msm_device_i2c_init(); msm_device_i2c_2_init(); qup_device_i2c_init(); msm7x30_init_marimba(); #ifdef CONFIG_MSM7KV2_AUDIO snddev_poweramp_gpio_init(); snddev_hsed_voltage_init(); aux_pcm_gpio_init(); #endif i2c_register_board_info(0, msm_i2c_board_info, ARRAY_SIZE(msm_i2c_board_info)); if (!machine_is_msm8x55_svlte_ffa() && !machine_is_msm7x30_fluid()) marimba_pdata.tsadc = &marimba_tsadc_pdata; if (machine_is_msm7x30_fluid()) i2c_register_board_info(0, cy8info, ARRAY_SIZE(cy8info)); #ifdef CONFIG_BOSCH_BMA150 if (machine_is_msm7x30_fluid()) i2c_register_board_info(0, bma150_board_info, ARRAY_SIZE(bma150_board_info)); #endif i2c_register_board_info(2, msm_marimba_board_info, ARRAY_SIZE(msm_marimba_board_info)); i2c_register_board_info(2, msm_i2c_gsbi7_timpani_info, ARRAY_SIZE(msm_i2c_gsbi7_timpani_info)); i2c_register_board_info(4 /* QUP ID */, msm_camera_boardinfo, ARRAY_SIZE(msm_camera_boardinfo)); bt_power_init(); #ifdef CONFIG_I2C_SSBI msm_device_ssbi7.dev.platform_data = &msm_i2c_ssbi7_pdata; #endif if (machine_is_msm7x30_fluid()) i2c_register_board_info(0, msm_isa1200_board_info, ARRAY_SIZE(msm_isa1200_board_info)); #if defined(CONFIG_TOUCHSCREEN_TSC2007) || \ defined(CONFIG_TOUCHSCREEN_TSC2007_MODULE) if (machine_is_msm8x55_svlte_ffa()) i2c_register_board_info(2, tsc_i2c_board_info, ARRAY_SIZE(tsc_i2c_board_info)); #endif if (machine_is_msm7x30_surf()) platform_device_register(&flip_switch_device); pm8058_gpios_init(); if (machine_is_msm7x30_fluid()) { /* Initialize platform 
data for fluid v2 hardware */ if (SOCINFO_VERSION_MAJOR( socinfo_get_platform_version()) == 2) { cy8ctma300_pdata.res_y = 920; cy8ctma300_pdata.invert_y = 0; } i2c_register_board_info(0, cy8ctma300_board_info, ARRAY_SIZE(cy8ctma300_board_info)); } if (machine_is_msm8x55_svlte_surf() || machine_is_msm8x55_svlte_ffa()) { rc = gpio_tlmm_config(usb_hub_gpio_cfg_value, GPIO_CFG_ENABLE); if (rc) pr_err("%s: gpio_tlmm_config(%#x)=%d\n", __func__, usb_hub_gpio_cfg_value, rc); } boot_reason = *(unsigned int *) (smem_get_entry(SMEM_POWER_ON_STATUS_INFO, &smem_size)); printk(KERN_NOTICE "Boot Reason = 0x%02x\n", boot_reason); } static unsigned pmem_sf_size = MSM_PMEM_SF_SIZE; static int __init pmem_sf_size_setup(char *p) { pmem_sf_size = memparse(p, NULL); return 0; } early_param("pmem_sf_size", pmem_sf_size_setup); static unsigned fb_size; static int __init fb_size_setup(char *p) { fb_size = memparse(p, NULL); return 0; } early_param("fb_size", fb_size_setup); static unsigned pmem_adsp_size = MSM_PMEM_ADSP_SIZE; static int __init pmem_adsp_size_setup(char *p) { pmem_adsp_size = memparse(p, NULL); return 0; } early_param("pmem_adsp_size", pmem_adsp_size_setup); static unsigned fluid_pmem_adsp_size = MSM_FLUID_PMEM_ADSP_SIZE; static int __init fluid_pmem_adsp_size_setup(char *p) { fluid_pmem_adsp_size = memparse(p, NULL); return 0; } early_param("fluid_pmem_adsp_size", fluid_pmem_adsp_size_setup); static unsigned pmem_audio_size = MSM_PMEM_AUDIO_SIZE; static int __init pmem_audio_size_setup(char *p) { pmem_audio_size = memparse(p, NULL); return 0; } early_param("pmem_audio_size", pmem_audio_size_setup); static unsigned pmem_kernel_ebi0_size = PMEM_KERNEL_EBI0_SIZE; static int __init pmem_kernel_ebi0_size_setup(char *p) { pmem_kernel_ebi0_size = memparse(p, NULL); return 0; } early_param("pmem_kernel_ebi0_size", pmem_kernel_ebi0_size_setup); #ifdef CONFIG_ION_MSM #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION static struct ion_co_heap_pdata co_ion_pdata = { .adjacent_mem_id = 
INVALID_HEAP_ID, .align = PAGE_SIZE, }; #endif /** * These heaps are listed in the order they will be allocated. * Don't swap the order unless you know what you are doing! */ struct ion_platform_heap msm7x30_heaps[] = { { .id = ION_SYSTEM_HEAP_ID, .type = ION_HEAP_TYPE_SYSTEM, .name = ION_VMALLOC_HEAP_NAME, }, #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION /* PMEM_ADSP = CAMERA */ { .id = ION_CAMERA_HEAP_ID, .type = ION_HEAP_TYPE_CARVEOUT, .name = ION_CAMERA_HEAP_NAME, .memory_type = ION_EBI_TYPE, .extra_data = (void *)&co_ion_pdata, }, /* PMEM_AUDIO */ { .id = ION_AUDIO_HEAP_ID, .type = ION_HEAP_TYPE_CARVEOUT, .name = ION_AUDIO_HEAP_NAME, .memory_type = ION_EBI_TYPE, .extra_data = (void *)&co_ion_pdata, }, /* PMEM_MDP = SF */ { .id = ION_SF_HEAP_ID, .type = ION_HEAP_TYPE_CARVEOUT, .name = ION_SF_HEAP_NAME, .memory_type = ION_EBI_TYPE, .extra_data = (void *)&co_ion_pdata, }, #endif }; static struct ion_platform_data ion_pdata = { .nr = MSM_ION_HEAP_NUM, .heaps = msm7x30_heaps, }; static struct platform_device ion_dev = { .name = "ion-msm", .id = 1, .dev = { .platform_data = &ion_pdata }, }; #endif static struct memtype_reserve msm7x30_reserve_table[] __initdata = { [MEMTYPE_SMI] = { }, [MEMTYPE_EBI0] = { .flags = MEMTYPE_FLAGS_1M_ALIGN, }, [MEMTYPE_EBI1] = { .flags = MEMTYPE_FLAGS_1M_ALIGN, }, }; unsigned long size; unsigned long msm_ion_camera_size; static void fix_sizes(void) { if machine_is_msm7x30_fluid() size = fluid_pmem_adsp_size; else size = pmem_adsp_size; #ifdef CONFIG_ION_MSM msm_ion_camera_size = size; #endif } static void __init reserve_mdp_memory(void) { mdp_pdata.ov0_wb_size = MSM_FB_OVERLAY0_WRITEBACK_SIZE; msm7x30_reserve_table[mdp_pdata.mem_hid].size += mdp_pdata.ov0_wb_size; } static void __init size_ion_devices(void) { #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION ion_pdata.heaps[1].size = msm_ion_camera_size; ion_pdata.heaps[2].size = MSM_ION_AUDIO_SIZE; ion_pdata.heaps[3].size = MSM_ION_SF_SIZE; #endif } static void __init reserve_ion_memory(void) { #if 
defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION) msm7x30_reserve_table[MEMTYPE_EBI0].size += msm_ion_camera_size; msm7x30_reserve_table[MEMTYPE_EBI0].size += MSM_ION_AUDIO_SIZE; msm7x30_reserve_table[MEMTYPE_EBI0].size += MSM_ION_SF_SIZE; #endif } static void __init msm7x30_calculate_reserve_sizes(void) { fix_sizes(); reserve_mdp_memory(); size_ion_devices(); reserve_ion_memory(); } static int msm7x30_paddr_to_memtype(unsigned int paddr) { if (paddr < phys_add) return MEMTYPE_EBI0; if (paddr >= phys_add && paddr < 0x80000000) return MEMTYPE_EBI1; return MEMTYPE_NONE; } static struct reserve_info msm7x30_reserve_info __initdata = { .memtype_reserve_table = msm7x30_reserve_table, .calculate_reserve_sizes = msm7x30_calculate_reserve_sizes, .paddr_to_memtype = msm7x30_paddr_to_memtype, }; static void __init msm7x30_reserve(void) { reserve_info = &msm7x30_reserve_info; msm_reserve(); } static void __init msm7x30_allocate_memory_regions(void) { void *addr; unsigned long size; size = fb_size ? 
: MSM_FB_SIZE; addr = alloc_bootmem_align(size, 0x1000); msm_fb_resources[0].start = __pa(addr); msm_fb_resources[0].end = msm_fb_resources[0].start + size - 1; pr_info("allocating %lu bytes at %p (%lx physical) for fb\n", size, addr, __pa(addr)); #ifdef CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE size = MSM_V4L2_VIDEO_OVERLAY_BUF_SIZE; addr = alloc_bootmem_align(size, 0x1000); msm_v4l2_video_overlay_resources[0].start = __pa(addr); msm_v4l2_video_overlay_resources[0].end = msm_v4l2_video_overlay_resources[0].start + size - 1; pr_debug("allocating %lu bytes at %p (%lx physical) for v4l2\n", size, addr, __pa(addr)); #endif } static void __init msm7x30_map_io(void) { msm_shared_ram_phys = 0x00100000; msm_map_msm7x30_io(); if (socinfo_init() < 0) pr_err("socinfo_init() failed!\n"); } static void __init msm7x30_init_early(void) { msm7x30_allocate_memory_regions(); } static void __init msm7x30_fixup(struct tag *tags, char **cmdline, struct meminfo *mi) { for (; tags->hdr.size; tags = tag_next(tags)) { if (tags->hdr.tag == ATAG_MEM && tags->u.mem.start == DDR1_BANK_BASE) { ebi1_phys_offset = DDR1_BANK_BASE; phys_add = DDR1_BANK_BASE; break; } } } MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF") .atag_offset = 0x100, .map_io = msm7x30_map_io, .reserve = msm7x30_reserve, .init_irq = msm7x30_init_irq, .init_machine = msm7x30_init, .timer = &msm_timer, .init_early = msm7x30_init_early, .handle_irq = vic_handle_irq, .fixup = msm7x30_fixup, MACHINE_END MACHINE_START(MSM7X30_FFA, "QCT MSM7X30 FFA") .atag_offset = 0x100, .map_io = msm7x30_map_io, .reserve = msm7x30_reserve, .init_irq = msm7x30_init_irq, .init_machine = msm7x30_init, .timer = &msm_timer, .init_early = msm7x30_init_early, .handle_irq = vic_handle_irq, .fixup = msm7x30_fixup, MACHINE_END MACHINE_START(MSM7X30_FLUID, "QCT MSM7X30 FLUID") .atag_offset = 0x100, .map_io = msm7x30_map_io, .reserve = msm7x30_reserve, .init_irq = msm7x30_init_irq, .init_machine = msm7x30_init, .timer = &msm_timer, .init_early = 
msm7x30_init_early, .handle_irq = vic_handle_irq, .fixup = msm7x30_fixup, MACHINE_END MACHINE_START(MSM8X55_SURF, "QCT MSM8X55 SURF") .atag_offset = 0x100, .map_io = msm7x30_map_io, .reserve = msm7x30_reserve, .init_irq = msm7x30_init_irq, .init_machine = msm7x30_init, .timer = &msm_timer, .init_early = msm7x30_init_early, .handle_irq = vic_handle_irq, .fixup = msm7x30_fixup, MACHINE_END MACHINE_START(MSM8X55_FFA, "QCT MSM8X55 FFA") .atag_offset = 0x100, .map_io = msm7x30_map_io, .reserve = msm7x30_reserve, .init_irq = msm7x30_init_irq, .init_machine = msm7x30_init, .timer = &msm_timer, .init_early = msm7x30_init_early, .handle_irq = vic_handle_irq, .fixup = msm7x30_fixup, MACHINE_END MACHINE_START(MSM8X55_SVLTE_SURF, "QCT MSM8X55 SVLTE SURF") .atag_offset = 0x100, .map_io = msm7x30_map_io, .reserve = msm7x30_reserve, .init_irq = msm7x30_init_irq, .init_machine = msm7x30_init, .timer = &msm_timer, .init_early = msm7x30_init_early, .handle_irq = vic_handle_irq, .fixup = msm7x30_fixup, MACHINE_END MACHINE_START(MSM8X55_SVLTE_FFA, "QCT MSM8X55 SVLTE FFA") .atag_offset = 0x100, .map_io = msm7x30_map_io, .reserve = msm7x30_reserve, .init_irq = msm7x30_init_irq, .init_machine = msm7x30_init, .timer = &msm_timer, .init_early = msm7x30_init_early, .handle_irq = vic_handle_irq, .fixup = msm7x30_fixup, MACHINE_END
gpl-2.0
yu-aosp-staging/android_kernel_yu_msm8916
arch/powerpc/kernel/udbg.c
1940
4297
/* * polling mode stateless debugging stuff, originally for NS16550 Serial Ports * * c 2001 PPC 64 Team, IBM Corp * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <stdarg.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/console.h> #include <linux/init.h> #include <asm/processor.h> #include <asm/udbg.h> void (*udbg_putc)(char c); void (*udbg_flush)(void); int (*udbg_getc)(void); int (*udbg_getc_poll)(void); /* * Early debugging facilities. You can enable _one_ of these via .config, * if you do so your kernel _will not boot_ on anything else. Be careful. */ void __init udbg_early_init(void) { #if defined(CONFIG_PPC_EARLY_DEBUG_LPAR) /* For LPAR machines that have an HVC console on vterm 0 */ udbg_init_debug_lpar(); #elif defined(CONFIG_PPC_EARLY_DEBUG_LPAR_HVSI) /* For LPAR machines that have an HVSI console on vterm 0 */ udbg_init_debug_lpar_hvsi(); #elif defined(CONFIG_PPC_EARLY_DEBUG_G5) /* For use on Apple G5 machines */ udbg_init_pmac_realmode(); #elif defined(CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL) /* RTAS panel debug */ udbg_init_rtas_panel(); #elif defined(CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE) /* RTAS console debug */ udbg_init_rtas_console(); #elif defined(CONFIG_PPC_EARLY_DEBUG_MAPLE) /* Maple real mode debug */ udbg_init_maple_realmode(); #elif defined(CONFIG_PPC_EARLY_DEBUG_BEAT) udbg_init_debug_beat(); #elif defined(CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE) udbg_init_pas_realmode(); #elif defined(CONFIG_BOOTX_TEXT) udbg_init_btext(); #elif defined(CONFIG_PPC_EARLY_DEBUG_44x) /* PPC44x debug */ udbg_init_44x_as1(); #elif defined(CONFIG_PPC_EARLY_DEBUG_40x) /* PPC40x debug */ udbg_init_40x_realmode(); #elif defined(CONFIG_PPC_EARLY_DEBUG_CPM) udbg_init_cpm(); #elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO) udbg_init_usbgecko(); #elif 
defined(CONFIG_PPC_EARLY_DEBUG_WSP) udbg_init_wsp(); #elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS) /* In memory console */ udbg_init_memcons(); #elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC) udbg_init_ehv_bc(); #elif defined(CONFIG_PPC_EARLY_DEBUG_PS3GELIC) udbg_init_ps3gelic(); #elif defined(CONFIG_PPC_EARLY_DEBUG_OPAL_RAW) udbg_init_debug_opal_raw(); #elif defined(CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI) udbg_init_debug_opal_hvsi(); #endif #ifdef CONFIG_PPC_EARLY_DEBUG console_loglevel = 10; register_early_udbg_console(); #endif } /* udbg library, used by xmon et al */ void udbg_puts(const char *s) { if (udbg_putc) { char c; if (s && *s != '\0') { while ((c = *s++) != '\0') udbg_putc(c); } if (udbg_flush) udbg_flush(); } #if 0 else { printk("%s", s); } #endif } int udbg_write(const char *s, int n) { int remain = n; char c; if (!udbg_putc) return 0; if (s && *s != '\0') { while (((c = *s++) != '\0') && (remain-- > 0)) { udbg_putc(c); } } if (udbg_flush) udbg_flush(); return n - remain; } #define UDBG_BUFSIZE 256 void udbg_printf(const char *fmt, ...) { char buf[UDBG_BUFSIZE]; va_list args; va_start(args, fmt); vsnprintf(buf, UDBG_BUFSIZE, fmt, args); udbg_puts(buf); va_end(args); } void __init udbg_progress(char *s, unsigned short hex) { udbg_puts(s); udbg_puts("\n"); } /* * Early boot console based on udbg */ static void udbg_console_write(struct console *con, const char *s, unsigned int n) { udbg_write(s, n); } static struct console udbg_console = { .name = "udbg", .write = udbg_console_write, .flags = CON_PRINTBUFFER | CON_ENABLED | CON_BOOT | CON_ANYTIME, .index = 0, }; /* * Called by setup_system after ppc_md->probe and ppc_md->early_init. * Call it again after setting udbg_putc in ppc_md->setup_arch. 
*/ void __init register_early_udbg_console(void) { if (early_console) return; if (!udbg_putc) return; if (strstr(boot_command_line, "udbg-immortal")) { printk(KERN_INFO "early console immortal !\n"); udbg_console.flags &= ~CON_BOOT; } early_console = &udbg_console; register_console(&udbg_console); } #if 0 /* if you want to use this as a regular output console */ console_initcall(register_udbg_console); #endif
gpl-2.0
Project-Elite/elite_kernel_jf_tw
arch/parisc/kernel/process.c
2964
10763
/* * PARISC Architecture-dependent parts of process handling * based on the work for i386 * * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org> * Copyright (C) 2000 Martin K Petersen <mkp at mkp.net> * Copyright (C) 2000 John Marvin <jsm at parisc-linux.org> * Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org> * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org> * Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org> * Copyright (C) 2000 David Kennedy <dkennedy with linuxcare.com> * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org> * Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org> * Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org> * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org> * Copyright (C) 2001-2007 Helge Deller <deller at parisc-linux.org> * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <stdarg.h> #include <linux/elf.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/module.h> #include <linux/personality.h> #include <linux/ptrace.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/kallsyms.h> #include <linux/uaccess.h> #include <asm/io.h> #include <asm/asm-offsets.h> #include <asm/pdc.h> #include <asm/pdc_chassis.h> #include <asm/pgalloc.h> #include <asm/unwind.h> #include <asm/sections.h> /* * The idle thread. There's no useful work to be * done, so just try to conserve power and have a * low exit latency (ie sit in a loop waiting for * somebody to say that they'd like to reschedule) */ void cpu_idle(void) { set_thread_flag(TIF_POLLING_NRFLAG); /* endless idle loop with no priority at all */ while (1) { while (!need_resched()) barrier(); schedule_preempt_disabled(); check_pgt_cache(); } } #define COMMAND_GLOBAL F_EXTEND(0xfffe0030) #define CMD_RESET 5 /* reset any module */ /* ** The Wright Brothers and Gecko systems have a H/W problem ** (Lasi...'nuf said) may cause a broadcast reset to lockup ** the system. An HVERSION dependent PDC call was developed ** to perform a "safe", platform specific broadcast reset instead ** of kludging up all the code. ** ** Older machines which do not implement PDC_BROADCAST_RESET will ** return (with an error) and the regular broadcast reset can be ** issued. Obviously, if the PDC does implement PDC_BROADCAST_RESET ** the PDC call will not return (the system will be reset). 
*/ void machine_restart(char *cmd) { #ifdef FASTBOOT_SELFTEST_SUPPORT /* ** If user has modified the Firmware Selftest Bitmap, ** run the tests specified in the bitmap after the ** system is rebooted w/PDC_DO_RESET. ** ** ftc_bitmap = 0x1AUL "Skip destructive memory tests" ** ** Using "directed resets" at each processor with the MEM_TOC ** vector cleared will also avoid running destructive ** memory self tests. (Not implemented yet) */ if (ftc_bitmap) { pdc_do_firm_test_reset(ftc_bitmap); } #endif /* set up a new led state on systems shipped with a LED State panel */ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN); /* "Normal" system reset */ pdc_do_reset(); /* Nope...box should reset with just CMD_RESET now */ gsc_writel(CMD_RESET, COMMAND_GLOBAL); /* Wait for RESET to lay us to rest. */ while (1) ; } void machine_halt(void) { /* ** The LED/ChassisCodes are updated by the led_halt() ** function, called by the reboot notifier chain. */ } void (*chassis_power_off)(void); /* * This routine is called from sys_reboot to actually turn off the * machine */ void machine_power_off(void) { /* If there is a registered power off handler, call it. */ if (chassis_power_off) chassis_power_off(); /* Put the soft power button back under hardware control. * If the user had already pressed the power button, the * following call will immediately power off. */ pdc_soft_power_button(0); pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN); /* It seems we have no way to power the system off via * software. The user has to press the button himself. 
*/ printk(KERN_EMERG "System shut down completed.\n" "Please power this system off now."); } void (*pm_power_off)(void) = machine_power_off; EXPORT_SYMBOL(pm_power_off); /* * Create a kernel thread */ extern pid_t __kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) { /* * FIXME: Once we are sure we don't need any debug here, * kernel_thread can become a #define. */ return __kernel_thread(fn, arg, flags); } EXPORT_SYMBOL(kernel_thread); /* * Free current thread data structures etc.. */ void exit_thread(void) { } void flush_thread(void) { /* Only needs to handle fpu stuff or perf monitors. ** REVISIT: several arches implement a "lazy fpu state". */ } void release_thread(struct task_struct *dead_task) { } /* * Fill in the FPU structure for a core dump. */ int dump_fpu (struct pt_regs * regs, elf_fpregset_t *r) { if (regs == NULL) return 0; memcpy(r, regs->fr, sizeof *r); return 1; } int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r) { memcpy(r, tsk->thread.regs.fr, sizeof(*r)); return 1; } /* Note that "fork()" is implemented in terms of clone, with parameters (SIGCHLD, regs->gr[30], regs). */ int sys_clone(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs) { /* Arugments from userspace are: r26 = Clone flags. r25 = Child stack. r24 = parent_tidptr. r23 = Is the TLS storage descriptor r22 = child_tidptr However, these last 3 args are only examined if the proper flags are set. */ int __user *parent_tidptr = (int __user *)regs->gr[24]; int __user *child_tidptr = (int __user *)regs->gr[22]; /* usp must be word aligned. 
This also prevents users from * passing in the value 1 (which is the signal for a special * return for a kernel thread) */ usp = ALIGN(usp, 4); /* A zero value for usp means use the current stack */ if (usp == 0) usp = regs->gr[30]; return do_fork(clone_flags, usp, regs, 0, parent_tidptr, child_tidptr); } int sys_vfork(struct pt_regs *regs) { return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gr[30], regs, 0, NULL, NULL); } int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long unused, /* in ia64 this is "user_stack_size" */ struct task_struct * p, struct pt_regs * pregs) { struct pt_regs * cregs = &(p->thread.regs); void *stack = task_stack_page(p); /* We have to use void * instead of a function pointer, because * function pointers aren't a pointer to the function on 64-bit. * Make them const so the compiler knows they live in .text */ extern void * const ret_from_kernel_thread; extern void * const child_return; #ifdef CONFIG_HPUX extern void * const hpux_child_return; #endif *cregs = *pregs; /* Set the return value for the child. Note that this is not actually restored by the syscall exit path, but we put it here for consistency in case of signals. */ cregs->gr[28] = 0; /* child */ /* * We need to differentiate between a user fork and a * kernel fork. We can't use user_mode, because the * the syscall path doesn't save iaoq. Right now * We rely on the fact that kernel_thread passes * in zero for usp. */ if (usp == 1) { /* kernel thread */ cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN; /* Must exit via ret_from_kernel_thread in order * to call schedule_tail() */ cregs->kpc = (unsigned long) &ret_from_kernel_thread; /* * Copy function and argument to be called from * ret_from_kernel_thread. */ #ifdef CONFIG_64BIT cregs->gr[27] = pregs->gr[27]; #endif cregs->gr[26] = pregs->gr[26]; cregs->gr[25] = pregs->gr[25]; } else { /* user thread */ /* * Note that the fork wrappers are responsible * for setting gr[21]. 
*/ /* Use same stack depth as parent */ cregs->ksp = (unsigned long)stack + (pregs->gr[21] & (THREAD_SIZE - 1)); cregs->gr[30] = usp; if (p->personality == PER_HPUX) { #ifdef CONFIG_HPUX cregs->kpc = (unsigned long) &hpux_child_return; #else BUG(); #endif } else { cregs->kpc = (unsigned long) &child_return; } /* Setup thread TLS area from the 4th parameter in clone */ if (clone_flags & CLONE_SETTLS) cregs->cr27 = pregs->gr[23]; } return 0; } unsigned long thread_saved_pc(struct task_struct *t) { return t->thread.regs.kpc; } /* * sys_execve() executes a new program. */ asmlinkage int sys_execve(struct pt_regs *regs) { int error; char *filename; filename = getname((const char __user *) regs->gr[26]); error = PTR_ERR(filename); if (IS_ERR(filename)) goto out; error = do_execve(filename, (const char __user *const __user *) regs->gr[25], (const char __user *const __user *) regs->gr[24], regs); putname(filename); out: return error; } extern int __execve(const char *filename, const char *const argv[], const char *const envp[], struct task_struct *task); int kernel_execve(const char *filename, const char *const argv[], const char *const envp[]) { return __execve(filename, argv, envp, current); } unsigned long get_wchan(struct task_struct *p) { struct unwind_frame_info info; unsigned long ip; int count = 0; if (!p || p == current || p->state == TASK_RUNNING) return 0; /* * These bracket the sleeping functions.. */ unwind_frame_init_from_blocked_task(&info, p); do { if (unwind_once(&info) < 0) return 0; ip = info.ip; if (!in_sched_functions(ip)) return ip; } while (count++ < 16); return 0; } #ifdef CONFIG_64BIT void *dereference_function_descriptor(void *ptr) { Elf64_Fdesc *desc = ptr; void *p; if (!probe_kernel_address(&desc->addr, p)) ptr = p; return ptr; } #endif
gpl-2.0
mmontuori/tegra-olympus
drivers/pci/hotplug/pciehp_core.c
3220
10045
/* * PCI Express Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001 IBM Corp. * Copyright (C) 2003-2004 Intel Corporation * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com> * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/pci.h> #include "pciehp.h" #include <linux/interrupt.h> #include <linux/time.h> /* Global variables */ int pciehp_debug; int pciehp_poll_mode; int pciehp_poll_time; int pciehp_force; struct workqueue_struct *pciehp_wq; struct workqueue_struct *pciehp_ordered_wq; #define DRIVER_VERSION "0.4" #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" #define DRIVER_DESC "PCI Express Hot Plug Controller Driver" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(pciehp_debug, bool, 0644); module_param(pciehp_poll_mode, bool, 0644); module_param(pciehp_poll_time, int, 0644); module_param(pciehp_force, bool, 0644); MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not"); 
MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not"); MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds"); MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if OSHP is missing"); #define PCIE_MODULE_NAME "pciehp" static int set_attention_status (struct hotplug_slot *slot, u8 value); static int enable_slot (struct hotplug_slot *slot); static int disable_slot (struct hotplug_slot *slot); static int get_power_status (struct hotplug_slot *slot, u8 *value); static int get_attention_status (struct hotplug_slot *slot, u8 *value); static int get_latch_status (struct hotplug_slot *slot, u8 *value); static int get_adapter_status (struct hotplug_slot *slot, u8 *value); /** * release_slot - free up the memory used by a slot * @hotplug_slot: slot to free */ static void release_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot)); kfree(hotplug_slot->ops); kfree(hotplug_slot->info); kfree(hotplug_slot); } static int init_slot(struct controller *ctrl) { struct slot *slot = ctrl->slot; struct hotplug_slot *hotplug = NULL; struct hotplug_slot_info *info = NULL; struct hotplug_slot_ops *ops = NULL; char name[SLOT_NAME_SIZE]; int retval = -ENOMEM; hotplug = kzalloc(sizeof(*hotplug), GFP_KERNEL); if (!hotplug) goto out; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) goto out; /* Setup hotplug slot ops */ ops = kzalloc(sizeof(*ops), GFP_KERNEL); if (!ops) goto out; ops->enable_slot = enable_slot; ops->disable_slot = disable_slot; ops->get_power_status = get_power_status; ops->get_adapter_status = get_adapter_status; if (MRL_SENS(ctrl)) ops->get_latch_status = get_latch_status; if (ATTN_LED(ctrl)) { ops->get_attention_status = get_attention_status; ops->set_attention_status = set_attention_status; } /* register this slot with the hotplug pci core */ hotplug->info = info; hotplug->private = slot; 
hotplug->release = &release_slot; hotplug->ops = ops; slot->hotplug_slot = hotplug; snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl)); ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:00 sun=%x\n", pci_domain_nr(ctrl->pcie->port->subordinate), ctrl->pcie->port->subordinate->number, PSN(ctrl)); retval = pci_hp_register(hotplug, ctrl->pcie->port->subordinate, 0, name); if (retval) ctrl_err(ctrl, "pci_hp_register failed with error %d\n", retval); out: if (retval) { kfree(ops); kfree(info); kfree(hotplug); } return retval; } static void cleanup_slot(struct controller *ctrl) { pci_hp_deregister(ctrl->slot->hotplug_slot); } /* * set_attention_status - Turns the Amber LED for a slot on, off or blink */ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) { struct slot *slot = hotplug_slot->private; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); return pciehp_set_attention_status(slot, status); } static int enable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); return pciehp_sysfs_enable_slot(slot); } static int disable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); return pciehp_sysfs_disable_slot(slot); } static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = hotplug_slot->private; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); return pciehp_get_power_status(slot, value); } static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = hotplug_slot->private; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); return pciehp_get_attention_status(slot, value); } static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = 
hotplug_slot->private; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); return pciehp_get_latch_status(slot, value); } static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = hotplug_slot->private; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); return pciehp_get_adapter_status(slot, value); } static int pciehp_probe(struct pcie_device *dev) { int rc; struct controller *ctrl; struct slot *slot; u8 occupied, poweron; if (pciehp_force) dev_info(&dev->device, "Bypassing BIOS check for pciehp use on %s\n", pci_name(dev->port)); else if (pciehp_acpi_slot_detection_check(dev->port)) goto err_out_none; ctrl = pcie_init(dev); if (!ctrl) { dev_err(&dev->device, "Controller initialization failed\n"); goto err_out_none; } set_service_data(dev, ctrl); /* Setup the slot information structures */ rc = init_slot(ctrl); if (rc) { if (rc == -EBUSY) ctrl_warn(ctrl, "Slot already registered by another " "hotplug driver\n"); else ctrl_err(ctrl, "Slot initialization failed\n"); goto err_out_release_ctlr; } /* Enable events after we have setup the data structures */ rc = pcie_init_notification(ctrl); if (rc) { ctrl_err(ctrl, "Notification initialization failed\n"); goto err_out_free_ctrl_slot; } /* Check if slot is occupied */ slot = ctrl->slot; pciehp_get_adapter_status(slot, &occupied); pciehp_get_power_status(slot, &poweron); if (occupied && pciehp_force) pciehp_enable_slot(slot); /* If empty slot's power status is on, turn power off */ if (!occupied && poweron && POWER_CTRL(ctrl)) pciehp_power_off_slot(slot); return 0; err_out_free_ctrl_slot: cleanup_slot(ctrl); err_out_release_ctlr: pciehp_release_ctrl(ctrl); err_out_none: return -ENODEV; } static void pciehp_remove(struct pcie_device *dev) { struct controller *ctrl = get_service_data(dev); cleanup_slot(ctrl); pciehp_release_ctrl(ctrl); } #ifdef CONFIG_PM static int pciehp_suspend (struct pcie_device *dev) { dev_info(&dev->device, "%s 
ENTRY\n", __func__); return 0; } static int pciehp_resume (struct pcie_device *dev) { dev_info(&dev->device, "%s ENTRY\n", __func__); if (pciehp_force) { struct controller *ctrl = get_service_data(dev); struct slot *slot; u8 status; /* reinitialize the chipset's event detection logic */ pcie_enable_notification(ctrl); slot = ctrl->slot; /* Check if slot is occupied */ pciehp_get_adapter_status(slot, &status); if (status) pciehp_enable_slot(slot); else pciehp_disable_slot(slot); } return 0; } #endif /* PM */ static struct pcie_port_service_driver hpdriver_portdrv = { .name = PCIE_MODULE_NAME, .port_type = PCIE_ANY_PORT, .service = PCIE_PORT_SERVICE_HP, .probe = pciehp_probe, .remove = pciehp_remove, #ifdef CONFIG_PM .suspend = pciehp_suspend, .resume = pciehp_resume, #endif /* PM */ }; static int __init pcied_init(void) { int retval = 0; pciehp_wq = alloc_workqueue("pciehp", 0, 0); if (!pciehp_wq) return -ENOMEM; pciehp_ordered_wq = alloc_ordered_workqueue("pciehp_ordered", 0); if (!pciehp_ordered_wq) { destroy_workqueue(pciehp_wq); return -ENOMEM; } pciehp_firmware_init(); retval = pcie_port_service_register(&hpdriver_portdrv); dbg("pcie_port_service_register = %d\n", retval); info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); if (retval) { destroy_workqueue(pciehp_ordered_wq); destroy_workqueue(pciehp_wq); dbg("Failure to register service\n"); } return retval; } static void __exit pcied_cleanup(void) { dbg("unload_pciehpd()\n"); destroy_workqueue(pciehp_ordered_wq); destroy_workqueue(pciehp_wq); pcie_port_service_unregister(&hpdriver_portdrv); info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); } module_init(pcied_init); module_exit(pcied_cleanup);
gpl-2.0
sakindia123/android_kernel_samsung_j700F
sound/soc/fsl/fsl_utils.c
4244
2633
/** * Freescale ALSA SoC Machine driver utility * * Author: Timur Tabi <timur@freescale.com> * * Copyright 2010 Freescale Semiconductor, Inc. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/module.h> #include <linux/of_address.h> #include <sound/soc.h> #include "fsl_utils.h" /** * fsl_asoc_get_dma_channel - determine the dma channel for a SSI node * * @ssi_np: pointer to the SSI device tree node * @name: name of the phandle pointing to the dma channel * @dai: ASoC DAI link pointer to be filled with platform_name * @dma_channel_id: dma channel id to be returned * @dma_id: dma id to be returned * * This function determines the dma and channel id for given SSI node. It * also discovers the platform_name for the ASoC DAI link. */ int fsl_asoc_get_dma_channel(struct device_node *ssi_np, const char *name, struct snd_soc_dai_link *dai, unsigned int *dma_channel_id, unsigned int *dma_id) { struct resource res; struct device_node *dma_channel_np, *dma_np; const u32 *iprop; int ret; dma_channel_np = of_parse_phandle(ssi_np, name, 0); if (!dma_channel_np) return -EINVAL; if (!of_device_is_compatible(dma_channel_np, "fsl,ssi-dma-channel")) { of_node_put(dma_channel_np); return -EINVAL; } /* Determine the dev_name for the device_node. This code mimics the * behavior of of_device_make_bus_id(). We need this because ASoC uses * the dev_name() of the device to match the platform (DMA) device with * the CPU (SSI) device. It's all ugly and hackish, but it works (for * now). * * dai->platform name should already point to an allocated buffer. 
*/ ret = of_address_to_resource(dma_channel_np, 0, &res); if (ret) { of_node_put(dma_channel_np); return ret; } snprintf((char *)dai->platform_name, DAI_NAME_SIZE, "%llx.%s", (unsigned long long) res.start, dma_channel_np->name); iprop = of_get_property(dma_channel_np, "cell-index", NULL); if (!iprop) { of_node_put(dma_channel_np); return -EINVAL; } *dma_channel_id = be32_to_cpup(iprop); dma_np = of_get_parent(dma_channel_np); iprop = of_get_property(dma_np, "cell-index", NULL); if (!iprop) { of_node_put(dma_np); return -EINVAL; } *dma_id = be32_to_cpup(iprop); of_node_put(dma_np); of_node_put(dma_channel_np); return 0; } EXPORT_SYMBOL(fsl_asoc_get_dma_channel); MODULE_AUTHOR("Timur Tabi <timur@freescale.com>"); MODULE_DESCRIPTION("Freescale ASoC utility code"); MODULE_LICENSE("GPL v2");
gpl-2.0
EnJens/android-tegra-nv-2.6.39
drivers/hwmon/lm80.c
4244
20116
/* * lm80.c - From lm_sensors, Linux kernel modules for hardware * monitoring * Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl> * and Philip Edelbrock <phil@netroedge.com> * * Ported to Linux 2.6 by Tiago Sousa <mirage@kaotik.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; /* Many LM80 constants specified below */ /* The LM80 registers */ #define LM80_REG_IN_MAX(nr) (0x2a + (nr) * 2) #define LM80_REG_IN_MIN(nr) (0x2b + (nr) * 2) #define LM80_REG_IN(nr) (0x20 + (nr)) #define LM80_REG_FAN1 0x28 #define LM80_REG_FAN2 0x29 #define LM80_REG_FAN_MIN(nr) (0x3b + (nr)) #define LM80_REG_TEMP 0x27 #define LM80_REG_TEMP_HOT_MAX 0x38 #define LM80_REG_TEMP_HOT_HYST 0x39 #define LM80_REG_TEMP_OS_MAX 0x3a #define LM80_REG_TEMP_OS_HYST 0x3b #define LM80_REG_CONFIG 0x00 #define LM80_REG_ALARM1 0x01 #define LM80_REG_ALARM2 0x02 #define LM80_REG_MASK1 0x03 #define LM80_REG_MASK2 0x04 #define LM80_REG_FANDIV 0x05 #define LM80_REG_RES 0x06 /* Conversions. 
Rounding and limit checking is only done on the TO_REG variants. Note that you should be a bit careful with which arguments these macros are called: arguments may be evaluated more than once. Fixing this is just not worth it. */ #define IN_TO_REG(val) (SENSORS_LIMIT(((val)+5)/10,0,255)) #define IN_FROM_REG(val) ((val)*10) static inline unsigned char FAN_TO_REG(unsigned rpm, unsigned div) { if (rpm == 0) return 255; rpm = SENSORS_LIMIT(rpm, 1, 1000000); return SENSORS_LIMIT((1350000 + rpm*div / 2) / (rpm*div), 1, 254); } #define FAN_FROM_REG(val,div) ((val)==0?-1:\ (val)==255?0:1350000/((div)*(val))) static inline long TEMP_FROM_REG(u16 temp) { long res; temp >>= 4; if (temp < 0x0800) res = 625 * (long) temp; else res = ((long) temp - 0x01000) * 625; return res / 10; } #define TEMP_LIMIT_FROM_REG(val) (((val)>0x80?(val)-0x100:(val))*1000) #define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val)<0?\ ((val)-500)/1000:((val)+500)/1000,0,255) #define DIV_FROM_REG(val) (1 << (val)) /* * Client data (each client gets its own) */ struct lm80_data { struct device *hwmon_dev; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ u8 in[7]; /* Register value */ u8 in_max[7]; /* Register value */ u8 in_min[7]; /* Register value */ u8 fan[2]; /* Register value */ u8 fan_min[2]; /* Register value */ u8 fan_div[2]; /* Register encoding, shifted right */ u16 temp; /* Register values, shifted right */ u8 temp_hot_max; /* Register value */ u8 temp_hot_hyst; /* Register value */ u8 temp_os_max; /* Register value */ u8 temp_os_hyst; /* Register value */ u16 alarms; /* Register encoding, combined */ }; /* * Functions declaration */ static int lm80_probe(struct i2c_client *client, const struct i2c_device_id *id); static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info); static void lm80_init_client(struct i2c_client *client); static int lm80_remove(struct i2c_client *client); static struct lm80_data 
*lm80_update_device(struct device *dev); static int lm80_read_value(struct i2c_client *client, u8 reg); static int lm80_write_value(struct i2c_client *client, u8 reg, u8 value); /* * Driver data (common to all clients) */ static const struct i2c_device_id lm80_id[] = { { "lm80", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, lm80_id); static struct i2c_driver lm80_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "lm80", }, .probe = lm80_probe, .remove = lm80_remove, .id_table = lm80_id, .detect = lm80_detect, .address_list = normal_i2c, }; /* * Sysfs stuff */ #define show_in(suffix, value) \ static ssize_t show_in_##suffix(struct device *dev, struct device_attribute *attr, char *buf) \ { \ int nr = to_sensor_dev_attr(attr)->index; \ struct lm80_data *data = lm80_update_device(dev); \ return sprintf(buf, "%d\n", IN_FROM_REG(data->value[nr])); \ } show_in(min, in_min) show_in(max, in_max) show_in(input, in) #define set_in(suffix, value, reg) \ static ssize_t set_in_##suffix(struct device *dev, struct device_attribute *attr, const char *buf, \ size_t count) \ { \ int nr = to_sensor_dev_attr(attr)->index; \ struct i2c_client *client = to_i2c_client(dev); \ struct lm80_data *data = i2c_get_clientdata(client); \ long val = simple_strtol(buf, NULL, 10); \ \ mutex_lock(&data->update_lock);\ data->value[nr] = IN_TO_REG(val); \ lm80_write_value(client, reg(nr), data->value[nr]); \ mutex_unlock(&data->update_lock);\ return count; \ } set_in(min, in_min, LM80_REG_IN_MIN) set_in(max, in_max, LM80_REG_IN_MAX) #define show_fan(suffix, value) \ static ssize_t show_fan_##suffix(struct device *dev, struct device_attribute *attr, char *buf) \ { \ int nr = to_sensor_dev_attr(attr)->index; \ struct lm80_data *data = lm80_update_device(dev); \ return sprintf(buf, "%d\n", FAN_FROM_REG(data->value[nr], \ DIV_FROM_REG(data->fan_div[nr]))); \ } show_fan(min, fan_min) show_fan(input, fan) static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr, char *buf) { int nr = 
to_sensor_dev_attr(attr)->index; struct lm80_data *data = lm80_update_device(dev); return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr])); } static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = to_sensor_dev_attr(attr)->index; struct i2c_client *client = to_i2c_client(dev); struct lm80_data *data = i2c_get_clientdata(client); long val = simple_strtoul(buf, NULL, 10); mutex_lock(&data->update_lock); data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr])); lm80_write_value(client, LM80_REG_FAN_MIN(nr + 1), data->fan_min[nr]); mutex_unlock(&data->update_lock); return count; } /* Note: we save and restore the fan minimum here, because its value is determined in part by the fan divisor. This follows the principle of least surprise; the user doesn't expect the fan minimum to change just because the divisor changed. */ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = to_sensor_dev_attr(attr)->index; struct i2c_client *client = to_i2c_client(dev); struct lm80_data *data = i2c_get_clientdata(client); unsigned long min, val = simple_strtoul(buf, NULL, 10); u8 reg; /* Save fan_min */ mutex_lock(&data->update_lock); min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr])); switch (val) { case 1: data->fan_div[nr] = 0; break; case 2: data->fan_div[nr] = 1; break; case 4: data->fan_div[nr] = 2; break; case 8: data->fan_div[nr] = 3; break; default: dev_err(&client->dev, "fan_div value %ld not " "supported. 
Choose one of 1, 2, 4 or 8!\n", val); mutex_unlock(&data->update_lock); return -EINVAL; } reg = (lm80_read_value(client, LM80_REG_FANDIV) & ~(3 << (2 * (nr + 1)))) | (data->fan_div[nr] << (2 * (nr + 1))); lm80_write_value(client, LM80_REG_FANDIV, reg); /* Restore fan_min */ data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr])); lm80_write_value(client, LM80_REG_FAN_MIN(nr + 1), data->fan_min[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_input1(struct device *dev, struct device_attribute *attr, char *buf) { struct lm80_data *data = lm80_update_device(dev); return sprintf(buf, "%ld\n", TEMP_FROM_REG(data->temp)); } #define show_temp(suffix, value) \ static ssize_t show_temp_##suffix(struct device *dev, struct device_attribute *attr, char *buf) \ { \ struct lm80_data *data = lm80_update_device(dev); \ return sprintf(buf, "%d\n", TEMP_LIMIT_FROM_REG(data->value)); \ } show_temp(hot_max, temp_hot_max); show_temp(hot_hyst, temp_hot_hyst); show_temp(os_max, temp_os_max); show_temp(os_hyst, temp_os_hyst); #define set_temp(suffix, value, reg) \ static ssize_t set_temp_##suffix(struct device *dev, struct device_attribute *attr, const char *buf, \ size_t count) \ { \ struct i2c_client *client = to_i2c_client(dev); \ struct lm80_data *data = i2c_get_clientdata(client); \ long val = simple_strtoul(buf, NULL, 10); \ \ mutex_lock(&data->update_lock); \ data->value = TEMP_LIMIT_TO_REG(val); \ lm80_write_value(client, reg, data->value); \ mutex_unlock(&data->update_lock); \ return count; \ } set_temp(hot_max, temp_hot_max, LM80_REG_TEMP_HOT_MAX); set_temp(hot_hyst, temp_hot_hyst, LM80_REG_TEMP_HOT_HYST); set_temp(os_max, temp_os_max, LM80_REG_TEMP_OS_MAX); set_temp(os_hyst, temp_os_hyst, LM80_REG_TEMP_OS_HYST); static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, char *buf) { struct lm80_data *data = lm80_update_device(dev); return sprintf(buf, "%u\n", data->alarms); } static ssize_t show_alarm(struct device 
*dev, struct device_attribute *attr, char *buf) { int bitnr = to_sensor_dev_attr(attr)->index; struct lm80_data *data = lm80_update_device(dev); return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1); } static SENSOR_DEVICE_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 0); static SENSOR_DEVICE_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 1); static SENSOR_DEVICE_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 2); static SENSOR_DEVICE_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 3); static SENSOR_DEVICE_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 4); static SENSOR_DEVICE_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 5); static SENSOR_DEVICE_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 6); static SENSOR_DEVICE_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 0); static SENSOR_DEVICE_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 1); static SENSOR_DEVICE_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 2); static SENSOR_DEVICE_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 3); static SENSOR_DEVICE_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 4); static SENSOR_DEVICE_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 5); static SENSOR_DEVICE_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 6); static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_in_input, NULL, 0); static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in_input, NULL, 1); static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_in_input, NULL, 2); static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_in_input, NULL, 3); static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_in_input, NULL, 4); static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_in_input, NULL, 5); static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_in_input, NULL, 6); static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min, set_fan_min, 0); static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | 
S_IRUGO, show_fan_min, set_fan_min, 1); static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0); static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1); static SENSOR_DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO, show_fan_div, set_fan_div, 0); static SENSOR_DEVICE_ATTR(fan2_div, S_IWUSR | S_IRUGO, show_fan_div, set_fan_div, 1); static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input1, NULL); static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_hot_max, set_temp_hot_max); static DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, show_temp_hot_hyst, set_temp_hot_hyst); static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_os_max, set_temp_os_max); static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp_os_hyst, set_temp_os_hyst); static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 4); static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 5); static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 6); static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 10); static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 11); static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 8); static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 13); /* * Real code */ static struct attribute *lm80_attributes[] = { &sensor_dev_attr_in0_min.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, &sensor_dev_attr_in2_min.dev_attr.attr, &sensor_dev_attr_in3_min.dev_attr.attr, &sensor_dev_attr_in4_min.dev_attr.attr, &sensor_dev_attr_in5_min.dev_attr.attr, &sensor_dev_attr_in6_min.dev_attr.attr, &sensor_dev_attr_in0_max.dev_attr.attr, 
&sensor_dev_attr_in1_max.dev_attr.attr, &sensor_dev_attr_in2_max.dev_attr.attr, &sensor_dev_attr_in3_max.dev_attr.attr, &sensor_dev_attr_in4_max.dev_attr.attr, &sensor_dev_attr_in5_max.dev_attr.attr, &sensor_dev_attr_in6_max.dev_attr.attr, &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, &sensor_dev_attr_fan1_min.dev_attr.attr, &sensor_dev_attr_fan2_min.dev_attr.attr, &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan1_div.dev_attr.attr, &sensor_dev_attr_fan2_div.dev_attr.attr, &dev_attr_temp1_input.attr, &dev_attr_temp1_max.attr, &dev_attr_temp1_max_hyst.attr, &dev_attr_temp1_crit.attr, &dev_attr_temp1_crit_hyst.attr, &dev_attr_alarms.attr, &sensor_dev_attr_in0_alarm.dev_attr.attr, &sensor_dev_attr_in1_alarm.dev_attr.attr, &sensor_dev_attr_in2_alarm.dev_attr.attr, &sensor_dev_attr_in3_alarm.dev_attr.attr, &sensor_dev_attr_in4_alarm.dev_attr.attr, &sensor_dev_attr_in5_alarm.dev_attr.attr, &sensor_dev_attr_in6_alarm.dev_attr.attr, &sensor_dev_attr_fan1_alarm.dev_attr.attr, &sensor_dev_attr_fan2_alarm.dev_attr.attr, &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, NULL }; static const struct attribute_group lm80_group = { .attrs = lm80_attributes, }; /* Return 0 if detection is successful, -ENODEV otherwise */ static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int i, cur; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; /* Now, we do the remaining detection. It is lousy. 
*/ if (lm80_read_value(client, LM80_REG_ALARM2) & 0xc0) return -ENODEV; for (i = 0x2a; i <= 0x3d; i++) { cur = i2c_smbus_read_byte_data(client, i); if ((i2c_smbus_read_byte_data(client, i + 0x40) != cur) || (i2c_smbus_read_byte_data(client, i + 0x80) != cur) || (i2c_smbus_read_byte_data(client, i + 0xc0) != cur)) return -ENODEV; } strlcpy(info->type, "lm80", I2C_NAME_SIZE); return 0; } static int lm80_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct lm80_data *data; int err; data = kzalloc(sizeof(struct lm80_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } i2c_set_clientdata(client, data); mutex_init(&data->update_lock); /* Initialize the LM80 chip */ lm80_init_client(client); /* A few vars need to be filled upon startup */ data->fan_min[0] = lm80_read_value(client, LM80_REG_FAN_MIN(1)); data->fan_min[1] = lm80_read_value(client, LM80_REG_FAN_MIN(2)); /* Register sysfs hooks */ if ((err = sysfs_create_group(&client->dev.kobj, &lm80_group))) goto error_free; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto error_remove; } return 0; error_remove: sysfs_remove_group(&client->dev.kobj, &lm80_group); error_free: kfree(data); exit: return err; } static int lm80_remove(struct i2c_client *client) { struct lm80_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &lm80_group); kfree(data); return 0; } static int lm80_read_value(struct i2c_client *client, u8 reg) { return i2c_smbus_read_byte_data(client, reg); } static int lm80_write_value(struct i2c_client *client, u8 reg, u8 value) { return i2c_smbus_write_byte_data(client, reg, value); } /* Called when we have found a new LM80. */ static void lm80_init_client(struct i2c_client *client) { /* Reset all except Watchdog values and last conversion values This sets fan-divs to 2, among others. 
This makes most other initializations unnecessary */ lm80_write_value(client, LM80_REG_CONFIG, 0x80); /* Set 11-bit temperature resolution */ lm80_write_value(client, LM80_REG_RES, 0x08); /* Start monitoring */ lm80_write_value(client, LM80_REG_CONFIG, 0x01); } static struct lm80_data *lm80_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct lm80_data *data = i2c_get_clientdata(client); int i; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) { dev_dbg(&client->dev, "Starting lm80 update\n"); for (i = 0; i <= 6; i++) { data->in[i] = lm80_read_value(client, LM80_REG_IN(i)); data->in_min[i] = lm80_read_value(client, LM80_REG_IN_MIN(i)); data->in_max[i] = lm80_read_value(client, LM80_REG_IN_MAX(i)); } data->fan[0] = lm80_read_value(client, LM80_REG_FAN1); data->fan_min[0] = lm80_read_value(client, LM80_REG_FAN_MIN(1)); data->fan[1] = lm80_read_value(client, LM80_REG_FAN2); data->fan_min[1] = lm80_read_value(client, LM80_REG_FAN_MIN(2)); data->temp = (lm80_read_value(client, LM80_REG_TEMP) << 8) | (lm80_read_value(client, LM80_REG_RES) & 0xf0); data->temp_os_max = lm80_read_value(client, LM80_REG_TEMP_OS_MAX); data->temp_os_hyst = lm80_read_value(client, LM80_REG_TEMP_OS_HYST); data->temp_hot_max = lm80_read_value(client, LM80_REG_TEMP_HOT_MAX); data->temp_hot_hyst = lm80_read_value(client, LM80_REG_TEMP_HOT_HYST); i = lm80_read_value(client, LM80_REG_FANDIV); data->fan_div[0] = (i >> 2) & 0x03; data->fan_div[1] = (i >> 4) & 0x03; data->alarms = lm80_read_value(client, LM80_REG_ALARM1) + (lm80_read_value(client, LM80_REG_ALARM2) << 8); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } static int __init sensors_lm80_init(void) { return i2c_add_driver(&lm80_driver); } static void __exit sensors_lm80_exit(void) { i2c_del_driver(&lm80_driver); } MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and " "Philip Edelbrock 
<phil@netroedge.com>"); MODULE_DESCRIPTION("LM80 driver"); MODULE_LICENSE("GPL"); module_init(sensors_lm80_init); module_exit(sensors_lm80_exit);
gpl-2.0
SamueleCiprietti/nova_kernel
drivers/hwmon/lm80.c
4244
20116
/* * lm80.c - From lm_sensors, Linux kernel modules for hardware * monitoring * Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl> * and Philip Edelbrock <phil@netroedge.com> * * Ported to Linux 2.6 by Tiago Sousa <mirage@kaotik.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; /* Many LM80 constants specified below */ /* The LM80 registers */ #define LM80_REG_IN_MAX(nr) (0x2a + (nr) * 2) #define LM80_REG_IN_MIN(nr) (0x2b + (nr) * 2) #define LM80_REG_IN(nr) (0x20 + (nr)) #define LM80_REG_FAN1 0x28 #define LM80_REG_FAN2 0x29 #define LM80_REG_FAN_MIN(nr) (0x3b + (nr)) #define LM80_REG_TEMP 0x27 #define LM80_REG_TEMP_HOT_MAX 0x38 #define LM80_REG_TEMP_HOT_HYST 0x39 #define LM80_REG_TEMP_OS_MAX 0x3a #define LM80_REG_TEMP_OS_HYST 0x3b #define LM80_REG_CONFIG 0x00 #define LM80_REG_ALARM1 0x01 #define LM80_REG_ALARM2 0x02 #define LM80_REG_MASK1 0x03 #define LM80_REG_MASK2 0x04 #define LM80_REG_FANDIV 0x05 #define LM80_REG_RES 0x06 /* Conversions. 
Rounding and limit checking is only done on the TO_REG variants. Note that you should be a bit careful with which arguments these macros are called: arguments may be evaluated more than once. Fixing this is just not worth it. */ #define IN_TO_REG(val) (SENSORS_LIMIT(((val)+5)/10,0,255)) #define IN_FROM_REG(val) ((val)*10) static inline unsigned char FAN_TO_REG(unsigned rpm, unsigned div) { if (rpm == 0) return 255; rpm = SENSORS_LIMIT(rpm, 1, 1000000); return SENSORS_LIMIT((1350000 + rpm*div / 2) / (rpm*div), 1, 254); } #define FAN_FROM_REG(val,div) ((val)==0?-1:\ (val)==255?0:1350000/((div)*(val))) static inline long TEMP_FROM_REG(u16 temp) { long res; temp >>= 4; if (temp < 0x0800) res = 625 * (long) temp; else res = ((long) temp - 0x01000) * 625; return res / 10; } #define TEMP_LIMIT_FROM_REG(val) (((val)>0x80?(val)-0x100:(val))*1000) #define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val)<0?\ ((val)-500)/1000:((val)+500)/1000,0,255) #define DIV_FROM_REG(val) (1 << (val)) /* * Client data (each client gets its own) */ struct lm80_data { struct device *hwmon_dev; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ u8 in[7]; /* Register value */ u8 in_max[7]; /* Register value */ u8 in_min[7]; /* Register value */ u8 fan[2]; /* Register value */ u8 fan_min[2]; /* Register value */ u8 fan_div[2]; /* Register encoding, shifted right */ u16 temp; /* Register values, shifted right */ u8 temp_hot_max; /* Register value */ u8 temp_hot_hyst; /* Register value */ u8 temp_os_max; /* Register value */ u8 temp_os_hyst; /* Register value */ u16 alarms; /* Register encoding, combined */ }; /* * Functions declaration */ static int lm80_probe(struct i2c_client *client, const struct i2c_device_id *id); static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info); static void lm80_init_client(struct i2c_client *client); static int lm80_remove(struct i2c_client *client); static struct lm80_data 
*lm80_update_device(struct device *dev); static int lm80_read_value(struct i2c_client *client, u8 reg); static int lm80_write_value(struct i2c_client *client, u8 reg, u8 value); /* * Driver data (common to all clients) */ static const struct i2c_device_id lm80_id[] = { { "lm80", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, lm80_id); static struct i2c_driver lm80_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "lm80", }, .probe = lm80_probe, .remove = lm80_remove, .id_table = lm80_id, .detect = lm80_detect, .address_list = normal_i2c, }; /* * Sysfs stuff */ #define show_in(suffix, value) \ static ssize_t show_in_##suffix(struct device *dev, struct device_attribute *attr, char *buf) \ { \ int nr = to_sensor_dev_attr(attr)->index; \ struct lm80_data *data = lm80_update_device(dev); \ return sprintf(buf, "%d\n", IN_FROM_REG(data->value[nr])); \ } show_in(min, in_min) show_in(max, in_max) show_in(input, in) #define set_in(suffix, value, reg) \ static ssize_t set_in_##suffix(struct device *dev, struct device_attribute *attr, const char *buf, \ size_t count) \ { \ int nr = to_sensor_dev_attr(attr)->index; \ struct i2c_client *client = to_i2c_client(dev); \ struct lm80_data *data = i2c_get_clientdata(client); \ long val = simple_strtol(buf, NULL, 10); \ \ mutex_lock(&data->update_lock);\ data->value[nr] = IN_TO_REG(val); \ lm80_write_value(client, reg(nr), data->value[nr]); \ mutex_unlock(&data->update_lock);\ return count; \ } set_in(min, in_min, LM80_REG_IN_MIN) set_in(max, in_max, LM80_REG_IN_MAX) #define show_fan(suffix, value) \ static ssize_t show_fan_##suffix(struct device *dev, struct device_attribute *attr, char *buf) \ { \ int nr = to_sensor_dev_attr(attr)->index; \ struct lm80_data *data = lm80_update_device(dev); \ return sprintf(buf, "%d\n", FAN_FROM_REG(data->value[nr], \ DIV_FROM_REG(data->fan_div[nr]))); \ } show_fan(min, fan_min) show_fan(input, fan) static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr, char *buf) { int nr = 
to_sensor_dev_attr(attr)->index; struct lm80_data *data = lm80_update_device(dev); return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr])); } static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = to_sensor_dev_attr(attr)->index; struct i2c_client *client = to_i2c_client(dev); struct lm80_data *data = i2c_get_clientdata(client); long val = simple_strtoul(buf, NULL, 10); mutex_lock(&data->update_lock); data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr])); lm80_write_value(client, LM80_REG_FAN_MIN(nr + 1), data->fan_min[nr]); mutex_unlock(&data->update_lock); return count; } /* Note: we save and restore the fan minimum here, because its value is determined in part by the fan divisor. This follows the principle of least surprise; the user doesn't expect the fan minimum to change just because the divisor changed. */ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = to_sensor_dev_attr(attr)->index; struct i2c_client *client = to_i2c_client(dev); struct lm80_data *data = i2c_get_clientdata(client); unsigned long min, val = simple_strtoul(buf, NULL, 10); u8 reg; /* Save fan_min */ mutex_lock(&data->update_lock); min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr])); switch (val) { case 1: data->fan_div[nr] = 0; break; case 2: data->fan_div[nr] = 1; break; case 4: data->fan_div[nr] = 2; break; case 8: data->fan_div[nr] = 3; break; default: dev_err(&client->dev, "fan_div value %ld not " "supported. 
Choose one of 1, 2, 4 or 8!\n", val); mutex_unlock(&data->update_lock); return -EINVAL; } reg = (lm80_read_value(client, LM80_REG_FANDIV) & ~(3 << (2 * (nr + 1)))) | (data->fan_div[nr] << (2 * (nr + 1))); lm80_write_value(client, LM80_REG_FANDIV, reg); /* Restore fan_min */ data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr])); lm80_write_value(client, LM80_REG_FAN_MIN(nr + 1), data->fan_min[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_input1(struct device *dev, struct device_attribute *attr, char *buf) { struct lm80_data *data = lm80_update_device(dev); return sprintf(buf, "%ld\n", TEMP_FROM_REG(data->temp)); } #define show_temp(suffix, value) \ static ssize_t show_temp_##suffix(struct device *dev, struct device_attribute *attr, char *buf) \ { \ struct lm80_data *data = lm80_update_device(dev); \ return sprintf(buf, "%d\n", TEMP_LIMIT_FROM_REG(data->value)); \ } show_temp(hot_max, temp_hot_max); show_temp(hot_hyst, temp_hot_hyst); show_temp(os_max, temp_os_max); show_temp(os_hyst, temp_os_hyst); #define set_temp(suffix, value, reg) \ static ssize_t set_temp_##suffix(struct device *dev, struct device_attribute *attr, const char *buf, \ size_t count) \ { \ struct i2c_client *client = to_i2c_client(dev); \ struct lm80_data *data = i2c_get_clientdata(client); \ long val = simple_strtoul(buf, NULL, 10); \ \ mutex_lock(&data->update_lock); \ data->value = TEMP_LIMIT_TO_REG(val); \ lm80_write_value(client, reg, data->value); \ mutex_unlock(&data->update_lock); \ return count; \ } set_temp(hot_max, temp_hot_max, LM80_REG_TEMP_HOT_MAX); set_temp(hot_hyst, temp_hot_hyst, LM80_REG_TEMP_HOT_HYST); set_temp(os_max, temp_os_max, LM80_REG_TEMP_OS_MAX); set_temp(os_hyst, temp_os_hyst, LM80_REG_TEMP_OS_HYST); static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, char *buf) { struct lm80_data *data = lm80_update_device(dev); return sprintf(buf, "%u\n", data->alarms); } static ssize_t show_alarm(struct device 
*dev, struct device_attribute *attr, char *buf) { int bitnr = to_sensor_dev_attr(attr)->index; struct lm80_data *data = lm80_update_device(dev); return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1); } static SENSOR_DEVICE_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 0); static SENSOR_DEVICE_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 1); static SENSOR_DEVICE_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 2); static SENSOR_DEVICE_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 3); static SENSOR_DEVICE_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 4); static SENSOR_DEVICE_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 5); static SENSOR_DEVICE_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 6); static SENSOR_DEVICE_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 0); static SENSOR_DEVICE_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 1); static SENSOR_DEVICE_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 2); static SENSOR_DEVICE_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 3); static SENSOR_DEVICE_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 4); static SENSOR_DEVICE_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 5); static SENSOR_DEVICE_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 6); static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_in_input, NULL, 0); static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in_input, NULL, 1); static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_in_input, NULL, 2); static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_in_input, NULL, 3); static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_in_input, NULL, 4); static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_in_input, NULL, 5); static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_in_input, NULL, 6); static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min, set_fan_min, 0); static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | 
S_IRUGO, show_fan_min, set_fan_min, 1); static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0); static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1); static SENSOR_DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO, show_fan_div, set_fan_div, 0); static SENSOR_DEVICE_ATTR(fan2_div, S_IWUSR | S_IRUGO, show_fan_div, set_fan_div, 1); static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input1, NULL); static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_hot_max, set_temp_hot_max); static DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, show_temp_hot_hyst, set_temp_hot_hyst); static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_os_max, set_temp_os_max); static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp_os_hyst, set_temp_os_hyst); static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 4); static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 5); static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 6); static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 10); static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 11); static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 8); static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 13); /* * Real code */ static struct attribute *lm80_attributes[] = { &sensor_dev_attr_in0_min.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, &sensor_dev_attr_in2_min.dev_attr.attr, &sensor_dev_attr_in3_min.dev_attr.attr, &sensor_dev_attr_in4_min.dev_attr.attr, &sensor_dev_attr_in5_min.dev_attr.attr, &sensor_dev_attr_in6_min.dev_attr.attr, &sensor_dev_attr_in0_max.dev_attr.attr, 
&sensor_dev_attr_in1_max.dev_attr.attr, &sensor_dev_attr_in2_max.dev_attr.attr, &sensor_dev_attr_in3_max.dev_attr.attr, &sensor_dev_attr_in4_max.dev_attr.attr, &sensor_dev_attr_in5_max.dev_attr.attr, &sensor_dev_attr_in6_max.dev_attr.attr, &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, &sensor_dev_attr_fan1_min.dev_attr.attr, &sensor_dev_attr_fan2_min.dev_attr.attr, &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan1_div.dev_attr.attr, &sensor_dev_attr_fan2_div.dev_attr.attr, &dev_attr_temp1_input.attr, &dev_attr_temp1_max.attr, &dev_attr_temp1_max_hyst.attr, &dev_attr_temp1_crit.attr, &dev_attr_temp1_crit_hyst.attr, &dev_attr_alarms.attr, &sensor_dev_attr_in0_alarm.dev_attr.attr, &sensor_dev_attr_in1_alarm.dev_attr.attr, &sensor_dev_attr_in2_alarm.dev_attr.attr, &sensor_dev_attr_in3_alarm.dev_attr.attr, &sensor_dev_attr_in4_alarm.dev_attr.attr, &sensor_dev_attr_in5_alarm.dev_attr.attr, &sensor_dev_attr_in6_alarm.dev_attr.attr, &sensor_dev_attr_fan1_alarm.dev_attr.attr, &sensor_dev_attr_fan2_alarm.dev_attr.attr, &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, NULL }; static const struct attribute_group lm80_group = { .attrs = lm80_attributes, }; /* Return 0 if detection is successful, -ENODEV otherwise */ static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int i, cur; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; /* Now, we do the remaining detection. It is lousy. 
*/ if (lm80_read_value(client, LM80_REG_ALARM2) & 0xc0) return -ENODEV; for (i = 0x2a; i <= 0x3d; i++) { cur = i2c_smbus_read_byte_data(client, i); if ((i2c_smbus_read_byte_data(client, i + 0x40) != cur) || (i2c_smbus_read_byte_data(client, i + 0x80) != cur) || (i2c_smbus_read_byte_data(client, i + 0xc0) != cur)) return -ENODEV; } strlcpy(info->type, "lm80", I2C_NAME_SIZE); return 0; } static int lm80_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct lm80_data *data; int err; data = kzalloc(sizeof(struct lm80_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } i2c_set_clientdata(client, data); mutex_init(&data->update_lock); /* Initialize the LM80 chip */ lm80_init_client(client); /* A few vars need to be filled upon startup */ data->fan_min[0] = lm80_read_value(client, LM80_REG_FAN_MIN(1)); data->fan_min[1] = lm80_read_value(client, LM80_REG_FAN_MIN(2)); /* Register sysfs hooks */ if ((err = sysfs_create_group(&client->dev.kobj, &lm80_group))) goto error_free; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto error_remove; } return 0; error_remove: sysfs_remove_group(&client->dev.kobj, &lm80_group); error_free: kfree(data); exit: return err; } static int lm80_remove(struct i2c_client *client) { struct lm80_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &lm80_group); kfree(data); return 0; } static int lm80_read_value(struct i2c_client *client, u8 reg) { return i2c_smbus_read_byte_data(client, reg); } static int lm80_write_value(struct i2c_client *client, u8 reg, u8 value) { return i2c_smbus_write_byte_data(client, reg, value); } /* Called when we have found a new LM80. */ static void lm80_init_client(struct i2c_client *client) { /* Reset all except Watchdog values and last conversion values This sets fan-divs to 2, among others. 
This makes most other initializations unnecessary */ lm80_write_value(client, LM80_REG_CONFIG, 0x80); /* Set 11-bit temperature resolution */ lm80_write_value(client, LM80_REG_RES, 0x08); /* Start monitoring */ lm80_write_value(client, LM80_REG_CONFIG, 0x01); } static struct lm80_data *lm80_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct lm80_data *data = i2c_get_clientdata(client); int i; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) { dev_dbg(&client->dev, "Starting lm80 update\n"); for (i = 0; i <= 6; i++) { data->in[i] = lm80_read_value(client, LM80_REG_IN(i)); data->in_min[i] = lm80_read_value(client, LM80_REG_IN_MIN(i)); data->in_max[i] = lm80_read_value(client, LM80_REG_IN_MAX(i)); } data->fan[0] = lm80_read_value(client, LM80_REG_FAN1); data->fan_min[0] = lm80_read_value(client, LM80_REG_FAN_MIN(1)); data->fan[1] = lm80_read_value(client, LM80_REG_FAN2); data->fan_min[1] = lm80_read_value(client, LM80_REG_FAN_MIN(2)); data->temp = (lm80_read_value(client, LM80_REG_TEMP) << 8) | (lm80_read_value(client, LM80_REG_RES) & 0xf0); data->temp_os_max = lm80_read_value(client, LM80_REG_TEMP_OS_MAX); data->temp_os_hyst = lm80_read_value(client, LM80_REG_TEMP_OS_HYST); data->temp_hot_max = lm80_read_value(client, LM80_REG_TEMP_HOT_MAX); data->temp_hot_hyst = lm80_read_value(client, LM80_REG_TEMP_HOT_HYST); i = lm80_read_value(client, LM80_REG_FANDIV); data->fan_div[0] = (i >> 2) & 0x03; data->fan_div[1] = (i >> 4) & 0x03; data->alarms = lm80_read_value(client, LM80_REG_ALARM1) + (lm80_read_value(client, LM80_REG_ALARM2) << 8); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } static int __init sensors_lm80_init(void) { return i2c_add_driver(&lm80_driver); } static void __exit sensors_lm80_exit(void) { i2c_del_driver(&lm80_driver); } MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and " "Philip Edelbrock 
<phil@netroedge.com>"); MODULE_DESCRIPTION("LM80 driver"); MODULE_LICENSE("GPL"); module_init(sensors_lm80_init); module_exit(sensors_lm80_exit);
gpl-2.0
skyem123/android_kernel_oneplus_one
drivers/edac/amd64_edac_inj.c
8084
5121
#include "amd64_edac.h"

/* Show the currently staged cacheline section used for error injection. */
static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	return sprintf(buf, "0x%x\n", pvt->injection.section);
}

/*
 * store error injection section value which refers to one of 4 16-byte sections
 * within a 64-byte cacheline
 *
 * range: 0..3
 */
static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
					  const char *data, size_t count)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int ret = 0;

	ret = strict_strtoul(data, 10, &value);
	if (ret != -EINVAL) {

		if (value > 3) {
			amd64_warn("%s: invalid section 0x%lx\n",
				   __func__, value);
			return -EINVAL;
		}

		pvt->injection.section = (u32) value;
		return count;
	}
	return ret;
}

/* Show the currently staged 16-bit word index used for error injection. */
static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	return sprintf(buf, "0x%x\n", pvt->injection.word);
}

/*
 * store error injection word value which refers to one of 9 16-bit words of the
 * 16-byte (128-bit + ECC bits) section
 *
 * range: 0..8
 */
static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
				       const char *data, size_t count)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int ret = 0;

	ret = strict_strtoul(data, 10, &value);
	if (ret != -EINVAL) {

		if (value > 8) {
			amd64_warn("%s: invalid word 0x%lx\n",
				   __func__, value);
			return -EINVAL;
		}

		pvt->injection.word = (u32) value;
		return count;
	}
	return ret;
}

/* Show the currently staged 16-bit error-injection bit vector. */
static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
}

/*
 * store 16 bit error injection vector which enables injecting errors to the
 * corresponding bit within the error injection word above. When used during a
 * DRAM ECC read, it holds the contents of the DRAM ECC bits.
 */
static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
					     const char *data, size_t count)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int ret = 0;

	ret = strict_strtoul(data, 16, &value);
	if (ret != -EINVAL) {

		if (value & 0xFFFF0000) {
			amd64_warn("%s: invalid EccVector: 0x%lx\n",
				   __func__, value);
			return -EINVAL;
		}

		pvt->injection.bit_map = (u32) value;
		return count;
	}
	return ret;
}

/*
 * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
 * fields needed by the injection registers and read the NB Array Data Port.
 */
static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
				       const char *data, size_t count)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	u32 section, word_bits;
	int ret = 0;

	ret = strict_strtoul(data, 10, &value);
	if (ret != -EINVAL) {

		/* Form value to choose 16-byte section of cacheline */
		section = F10_NB_ARRAY_DRAM_ECC |
			  SET_NB_ARRAY_ADDRESS(pvt->injection.section);
		amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);

		word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word,
						       pvt->injection.bit_map);

		/* Issue 'word' and 'bit' along with the READ request */
		amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);

		debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);

		return count;
	}
	return ret;
}

/*
 * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
 * fields needed by the injection registers.
 */
static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
					const char *data, size_t count)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	u32 section, word_bits;
	int ret = 0;

	ret = strict_strtoul(data, 10, &value);
	if (ret != -EINVAL) {

		/* Form value to choose 16-byte section of cacheline */
		section = F10_NB_ARRAY_DRAM_ECC |
			  SET_NB_ARRAY_ADDRESS(pvt->injection.section);
		amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);

		word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word,
							pvt->injection.bit_map);

		/* Issue 'word' and 'bit' along with the WRITE request */
		amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);

		debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);

		return count;
	}
	return ret;
}

/*
 * update NUM_INJ_ATTRS in case you add new members
 */
struct mcidev_sysfs_attribute amd64_inj_attrs[] = {

	{
		.attr = {
			.name = "inject_section",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show = amd64_inject_section_show,
		.store = amd64_inject_section_store,
	},
	{
		.attr = {
			.name = "inject_word",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show = amd64_inject_word_show,
		.store = amd64_inject_word_store,
	},
	{
		.attr = {
			.name = "inject_ecc_vector",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show = amd64_inject_ecc_vector_show,
		.store = amd64_inject_ecc_vector_store,
	},
	{
		.attr = {
			.name = "inject_write",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show = NULL,
		.store = amd64_inject_write_store,
	},
	{
		.attr = {
			.name = "inject_read",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show = NULL,
		.store = amd64_inject_read_store,
	},
};
gpl-2.0
defconoi/L-Kernel-Mako
drivers/scsi/scsi_trace.c
10900
6723
/* * Copyright (C) 2010 FUJITSU LIMITED * Copyright (C) 2010 Tomohiro Kusumi <kusumi.tomohiro@jp.fujitsu.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/kernel.h> #include <linux/trace_seq.h> #include <trace/events/scsi.h> #define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f) #define SERVICE_ACTION32(cdb) ((cdb[8] << 8) | cdb[9]) static const char * scsi_trace_misc(struct trace_seq *, unsigned char *, int); static const char * scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = p->buffer + p->len; sector_t lba = 0, txlen = 0; lba |= ((cdb[1] & 0x1F) << 16); lba |= (cdb[2] << 8); lba |= cdb[3]; txlen = cdb[4]; trace_seq_printf(p, "lba=%llu txlen=%llu", (unsigned long long)lba, (unsigned long long)txlen); trace_seq_putc(p, 0); return ret; } static const char * scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = p->buffer + p->len; sector_t lba = 0, txlen = 0; lba |= (cdb[2] << 24); lba |= (cdb[3] << 16); lba |= (cdb[4] << 8); lba |= cdb[5]; txlen |= (cdb[7] << 8); txlen |= cdb[8]; trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u", (unsigned long long)lba, (unsigned long long)txlen, cdb[1] >> 5); if (cdb[0] == WRITE_SAME) trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1); trace_seq_putc(p, 0); return ret; } static const char * scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len) { const char 
*ret = p->buffer + p->len; sector_t lba = 0, txlen = 0; lba |= (cdb[2] << 24); lba |= (cdb[3] << 16); lba |= (cdb[4] << 8); lba |= cdb[5]; txlen |= (cdb[6] << 24); txlen |= (cdb[7] << 16); txlen |= (cdb[8] << 8); txlen |= cdb[9]; trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u", (unsigned long long)lba, (unsigned long long)txlen, cdb[1] >> 5); trace_seq_putc(p, 0); return ret; } static const char * scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = p->buffer + p->len; sector_t lba = 0, txlen = 0; lba |= ((u64)cdb[2] << 56); lba |= ((u64)cdb[3] << 48); lba |= ((u64)cdb[4] << 40); lba |= ((u64)cdb[5] << 32); lba |= (cdb[6] << 24); lba |= (cdb[7] << 16); lba |= (cdb[8] << 8); lba |= cdb[9]; txlen |= (cdb[10] << 24); txlen |= (cdb[11] << 16); txlen |= (cdb[12] << 8); txlen |= cdb[13]; trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u", (unsigned long long)lba, (unsigned long long)txlen, cdb[1] >> 5); if (cdb[0] == WRITE_SAME_16) trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1); trace_seq_putc(p, 0); return ret; } static const char * scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = p->buffer + p->len, *cmd; sector_t lba = 0, txlen = 0; u32 ei_lbrt = 0; switch (SERVICE_ACTION32(cdb)) { case READ_32: cmd = "READ"; break; case VERIFY_32: cmd = "VERIFY"; break; case WRITE_32: cmd = "WRITE"; break; case WRITE_SAME_32: cmd = "WRITE_SAME"; break; default: trace_seq_printf(p, "UNKNOWN"); goto out; } lba |= ((u64)cdb[12] << 56); lba |= ((u64)cdb[13] << 48); lba |= ((u64)cdb[14] << 40); lba |= ((u64)cdb[15] << 32); lba |= (cdb[16] << 24); lba |= (cdb[17] << 16); lba |= (cdb[18] << 8); lba |= cdb[19]; ei_lbrt |= (cdb[20] << 24); ei_lbrt |= (cdb[21] << 16); ei_lbrt |= (cdb[22] << 8); ei_lbrt |= cdb[23]; txlen |= (cdb[28] << 24); txlen |= (cdb[29] << 16); txlen |= (cdb[30] << 8); txlen |= cdb[31]; trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u", cmd, (unsigned long long)lba, 
(unsigned long long)txlen, cdb[10] >> 5, ei_lbrt); if (SERVICE_ACTION32(cdb) == WRITE_SAME_32) trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1); out: trace_seq_putc(p, 0); return ret; } static const char * scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = p->buffer + p->len; unsigned int regions = cdb[7] << 8 | cdb[8]; trace_seq_printf(p, "regions=%u", (regions - 8) / 16); trace_seq_putc(p, 0); return ret; } static const char * scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = p->buffer + p->len, *cmd; sector_t lba = 0; u32 alloc_len = 0; switch (SERVICE_ACTION16(cdb)) { case SAI_READ_CAPACITY_16: cmd = "READ_CAPACITY_16"; break; case SAI_GET_LBA_STATUS: cmd = "GET_LBA_STATUS"; break; default: trace_seq_printf(p, "UNKNOWN"); goto out; } lba |= ((u64)cdb[2] << 56); lba |= ((u64)cdb[3] << 48); lba |= ((u64)cdb[4] << 40); lba |= ((u64)cdb[5] << 32); lba |= (cdb[6] << 24); lba |= (cdb[7] << 16); lba |= (cdb[8] << 8); lba |= cdb[9]; alloc_len |= (cdb[10] << 24); alloc_len |= (cdb[11] << 16); alloc_len |= (cdb[12] << 8); alloc_len |= cdb[13]; trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd, (unsigned long long)lba, alloc_len); out: trace_seq_putc(p, 0); return ret; } static const char * scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len) { switch (SERVICE_ACTION32(cdb)) { case READ_32: case VERIFY_32: case WRITE_32: case WRITE_SAME_32: return scsi_trace_rw32(p, cdb, len); default: return scsi_trace_misc(p, cdb, len); } } static const char * scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = p->buffer + p->len; trace_seq_printf(p, "-"); trace_seq_putc(p, 0); return ret; } const char * scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len) { switch (cdb[0]) { case READ_6: case WRITE_6: return scsi_trace_rw6(p, cdb, len); case READ_10: case VERIFY: case WRITE_10: case WRITE_SAME: return scsi_trace_rw10(p, cdb, len); 
case READ_12: case VERIFY_12: case WRITE_12: return scsi_trace_rw12(p, cdb, len); case READ_16: case VERIFY_16: case WRITE_16: case WRITE_SAME_16: return scsi_trace_rw16(p, cdb, len); case UNMAP: return scsi_trace_unmap(p, cdb, len); case SERVICE_ACTION_IN: return scsi_trace_service_action_in(p, cdb, len); case VARIABLE_LENGTH_CMD: return scsi_trace_varlen(p, cdb, len); default: return scsi_trace_misc(p, cdb, len); } }
gpl-2.0
AOKP/kernel_asus_tf101
drivers/gpu/drm/radeon/radeon_atombios.c
149
100055
/* * Copyright 2007-8 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Dave Airlie * Alex Deucher */ #include "drmP.h" #include "radeon_drm.h" #include "radeon.h" #include "atom.h" #include "atom-bits.h" /* from radeon_encoder.c */ extern uint32_t radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac); extern void radeon_link_encoder_connector(struct drm_device *dev); extern void radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device, u16 caps); /* from radeon_connector.c */ extern void radeon_add_atom_connector(struct drm_device *dev, uint32_t connector_id, uint32_t supported_device, int connector_type, struct radeon_i2c_bus_rec *i2c_bus, uint32_t igp_lane_info, uint16_t connector_object_id, struct radeon_hpd *hpd, struct radeon_router *router); /* from radeon_legacy_encoder.c */ extern void radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device); union atom_supported_devices { struct _ATOM_SUPPORTED_DEVICES_INFO info; struct _ATOM_SUPPORTED_DEVICES_INFO_2 info_2; struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; }; static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev, uint8_t id) { struct atom_context *ctx = rdev->mode_info.atom_context; ATOM_GPIO_I2C_ASSIGMENT *gpio; struct radeon_i2c_bus_rec i2c; int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info); struct _ATOM_GPIO_I2C_INFO *i2c_info; uint16_t data_offset, size; int i, num_indices; memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); i2c.valid = false; if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_I2C_ASSIGMENT); for (i = 0; i < num_indices; i++) { gpio = &i2c_info->asGPIO_Info[i]; /* some evergreen boards have bad data for this entry */ if (ASIC_IS_DCE4(rdev)) { if ((i == 7) && (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && 
(gpio->sucI2cId.ucAccess == 0)) { gpio->sucI2cId.ucAccess = 0x97; gpio->ucDataMaskShift = 8; gpio->ucDataEnShift = 8; gpio->ucDataY_Shift = 8; gpio->ucDataA_Shift = 8; } } /* some DCE3 boards have bad data for this entry */ if (ASIC_IS_DCE3(rdev)) { if ((i == 4) && (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && (gpio->sucI2cId.ucAccess == 0x94)) gpio->sucI2cId.ucAccess = 0x14; } if (gpio->sucI2cId.ucAccess == id) { i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); i2c.en_clk_mask = (1 << gpio->ucClkEnShift); i2c.en_data_mask = (1 << gpio->ucDataEnShift); i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); i2c.y_data_mask = (1 << gpio->ucDataY_Shift); i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); i2c.a_data_mask = (1 << gpio->ucDataA_Shift); if (gpio->sucI2cId.sbfAccess.bfHW_Capable) i2c.hw_capable = true; else i2c.hw_capable = false; if (gpio->sucI2cId.ucAccess == 0xa0) i2c.mm_i2c = true; else i2c.mm_i2c = false; i2c.i2c_id = gpio->sucI2cId.ucAccess; if (i2c.mask_clk_reg) i2c.valid = true; break; } } } return i2c; } void radeon_atombios_i2c_init(struct radeon_device *rdev) { struct atom_context *ctx = rdev->mode_info.atom_context; ATOM_GPIO_I2C_ASSIGMENT *gpio; struct radeon_i2c_bus_rec i2c; int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info); struct _ATOM_GPIO_I2C_INFO *i2c_info; uint16_t data_offset, size; int i, num_indices; char stmp[32]; memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); if 
(atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_I2C_ASSIGMENT); for (i = 0; i < num_indices; i++) { gpio = &i2c_info->asGPIO_Info[i]; i2c.valid = false; /* some evergreen boards have bad data for this entry */ if (ASIC_IS_DCE4(rdev)) { if ((i == 7) && (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && (gpio->sucI2cId.ucAccess == 0)) { gpio->sucI2cId.ucAccess = 0x97; gpio->ucDataMaskShift = 8; gpio->ucDataEnShift = 8; gpio->ucDataY_Shift = 8; gpio->ucDataA_Shift = 8; } } /* some DCE3 boards have bad data for this entry */ if (ASIC_IS_DCE3(rdev)) { if ((i == 4) && (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && (gpio->sucI2cId.ucAccess == 0x94)) gpio->sucI2cId.ucAccess = 0x14; } i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); i2c.en_clk_mask = (1 << gpio->ucClkEnShift); i2c.en_data_mask = (1 << gpio->ucDataEnShift); i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); i2c.y_data_mask = (1 << gpio->ucDataY_Shift); i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); i2c.a_data_mask = (1 << gpio->ucDataA_Shift); if (gpio->sucI2cId.sbfAccess.bfHW_Capable) i2c.hw_capable = true; else i2c.hw_capable = false; if (gpio->sucI2cId.ucAccess == 0xa0) i2c.mm_i2c = true; else i2c.mm_i2c = false; i2c.i2c_id = gpio->sucI2cId.ucAccess; if (i2c.mask_clk_reg) { i2c.valid = 
true; sprintf(stmp, "0x%x", i2c.i2c_id); rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp); } } } } static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev, u8 id) { struct atom_context *ctx = rdev->mode_info.atom_context; struct radeon_gpio_rec gpio; int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT); struct _ATOM_GPIO_PIN_LUT *gpio_info; ATOM_GPIO_PIN_ASSIGNMENT *pin; u16 data_offset, size; int i, num_indices; memset(&gpio, 0, sizeof(struct radeon_gpio_rec)); gpio.valid = false; if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset); num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT); for (i = 0; i < num_indices; i++) { pin = &gpio_info->asGPIO_Pin[i]; if (id == pin->ucGPIO_ID) { gpio.id = pin->ucGPIO_ID; gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4; gpio.mask = (1 << pin->ucGpioPinBitShift); gpio.valid = true; break; } } } return gpio; } static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device *rdev, struct radeon_gpio_rec *gpio) { struct radeon_hpd hpd; u32 reg; memset(&hpd, 0, sizeof(struct radeon_hpd)); if (ASIC_IS_DCE4(rdev)) reg = EVERGREEN_DC_GPIO_HPD_A; else reg = AVIVO_DC_GPIO_HPD_A; hpd.gpio = *gpio; if (gpio->reg == reg) { switch(gpio->mask) { case (1 << 0): hpd.hpd = RADEON_HPD_1; break; case (1 << 8): hpd.hpd = RADEON_HPD_2; break; case (1 << 16): hpd.hpd = RADEON_HPD_3; break; case (1 << 24): hpd.hpd = RADEON_HPD_4; break; case (1 << 26): hpd.hpd = RADEON_HPD_5; break; case (1 << 28): hpd.hpd = RADEON_HPD_6; break; default: hpd.hpd = RADEON_HPD_NONE; break; } } else hpd.hpd = RADEON_HPD_NONE; return hpd; } static bool radeon_atom_apply_quirks(struct drm_device *dev, uint32_t supported_device, int *connector_type, struct radeon_i2c_bus_rec *i2c_bus, uint16_t *line_mux, struct radeon_hpd *hpd) { /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ if 
((dev->pdev->device == 0x791e) && (dev->pdev->subsystem_vendor == 0x1043) && (dev->pdev->subsystem_device == 0x826d)) { if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) && (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) *connector_type = DRM_MODE_CONNECTOR_DVID; } /* Asrock RS600 board lists the DVI port as HDMI */ if ((dev->pdev->device == 0x7941) && (dev->pdev->subsystem_vendor == 0x1849) && (dev->pdev->subsystem_device == 0x7941)) { if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) && (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) *connector_type = DRM_MODE_CONNECTOR_DVID; } /* MSI K9A2GM V2/V3 board has no HDMI or DVI */ if ((dev->pdev->device == 0x796e) && (dev->pdev->subsystem_vendor == 0x1462) && (dev->pdev->subsystem_device == 0x7302)) { if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) || (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) return false; } /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ if ((dev->pdev->device == 0x7941) && (dev->pdev->subsystem_vendor == 0x147b) && (dev->pdev->subsystem_device == 0x2412)) { if (*connector_type == DRM_MODE_CONNECTOR_DVII) return false; } /* Falcon NW laptop lists vga ddc line for LVDS */ if ((dev->pdev->device == 0x5653) && (dev->pdev->subsystem_vendor == 0x1462) && (dev->pdev->subsystem_device == 0x0291)) { if (*connector_type == DRM_MODE_CONNECTOR_LVDS) { i2c_bus->valid = false; *line_mux = 53; } } /* HIS X1300 is DVI+VGA, not DVI+DVI */ if ((dev->pdev->device == 0x7146) && (dev->pdev->subsystem_vendor == 0x17af) && (dev->pdev->subsystem_device == 0x2058)) { if (supported_device == ATOM_DEVICE_DFP1_SUPPORT) return false; } /* Gigabyte X1300 is DVI+VGA, not DVI+DVI */ if ((dev->pdev->device == 0x7142) && (dev->pdev->subsystem_vendor == 0x1458) && (dev->pdev->subsystem_device == 0x2134)) { if (supported_device == ATOM_DEVICE_DFP1_SUPPORT) return false; } /* Funky macbooks */ if ((dev->pdev->device == 0x71C5) && (dev->pdev->subsystem_vendor == 0x106b) && (dev->pdev->subsystem_device == 
0x0080)) { if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) || (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) return false; if (supported_device == ATOM_DEVICE_CRT2_SUPPORT) *line_mux = 0x90; } /* mac rv630, rv730, others */ if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) && (*connector_type == DRM_MODE_CONNECTOR_DVII)) { *connector_type = DRM_MODE_CONNECTOR_9PinDIN; *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1; } /* ASUS HD 3600 XT board lists the DVI port as HDMI */ if ((dev->pdev->device == 0x9598) && (dev->pdev->subsystem_vendor == 0x1043) && (dev->pdev->subsystem_device == 0x01da)) { if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) { *connector_type = DRM_MODE_CONNECTOR_DVII; } } /* ASUS HD 3600 board lists the DVI port as HDMI */ if ((dev->pdev->device == 0x9598) && (dev->pdev->subsystem_vendor == 0x1043) && (dev->pdev->subsystem_device == 0x01e4)) { if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) { *connector_type = DRM_MODE_CONNECTOR_DVII; } } /* ASUS HD 3450 board lists the DVI port as HDMI */ if ((dev->pdev->device == 0x95C5) && (dev->pdev->subsystem_vendor == 0x1043) && (dev->pdev->subsystem_device == 0x01e2)) { if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) { *connector_type = DRM_MODE_CONNECTOR_DVII; } } /* some BIOSes seem to report DAC on HDMI - usually this is a board with * HDMI + VGA reporting as HDMI */ if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) { if (supported_device & (ATOM_DEVICE_CRT_SUPPORT)) { *connector_type = DRM_MODE_CONNECTOR_VGA; *line_mux = 0; } } /* Acer laptop (Acer TravelMate 5730/5730G) has an HDMI port * on the laptop and a DVI port on the docking station and * both share the same encoder, hpd pin, and ddc line. * So while the bios table is technically correct, * we drop the DVI port here since xrandr has no concept of * encoders and will try and drive both connectors * with different crtcs which isn't possible on the hardware * side and leaves no crtcs for LVDS or VGA. 
*/ if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) && (dev->pdev->subsystem_vendor == 0x1025) && (dev->pdev->subsystem_device == 0x013c)) { if ((*connector_type == DRM_MODE_CONNECTOR_DVII) && (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) { /* actually it's a DVI-D port not DVI-I */ *connector_type = DRM_MODE_CONNECTOR_DVID; return false; } } /* XFX Pine Group device rv730 reports no VGA DDC lines * even though they are wired up to record 0x93 */ if ((dev->pdev->device == 0x9498) && (dev->pdev->subsystem_vendor == 0x1682) && (dev->pdev->subsystem_device == 0x2452)) { struct radeon_device *rdev = dev->dev_private; *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93); } return true; } const int supported_devices_connector_convert[] = { DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_VGA, DRM_MODE_CONNECTOR_DVII, DRM_MODE_CONNECTOR_DVID, DRM_MODE_CONNECTOR_DVIA, DRM_MODE_CONNECTOR_SVIDEO, DRM_MODE_CONNECTOR_Composite, DRM_MODE_CONNECTOR_LVDS, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_HDMIA, DRM_MODE_CONNECTOR_HDMIB, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_9PinDIN, DRM_MODE_CONNECTOR_DisplayPort }; const uint16_t supported_devices_connector_object_id_convert[] = { CONNECTOR_OBJECT_ID_NONE, CONNECTOR_OBJECT_ID_VGA, CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, /* not all boards support DL */ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D, /* not all boards support DL */ CONNECTOR_OBJECT_ID_VGA, /* technically DVI-A */ CONNECTOR_OBJECT_ID_COMPOSITE, CONNECTOR_OBJECT_ID_SVIDEO, CONNECTOR_OBJECT_ID_LVDS, CONNECTOR_OBJECT_ID_9PIN_DIN, CONNECTOR_OBJECT_ID_9PIN_DIN, CONNECTOR_OBJECT_ID_DISPLAYPORT, CONNECTOR_OBJECT_ID_HDMI_TYPE_A, CONNECTOR_OBJECT_ID_HDMI_TYPE_B, CONNECTOR_OBJECT_ID_SVIDEO }; const int object_connector_convert[] = { DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_DVII, DRM_MODE_CONNECTOR_DVII, DRM_MODE_CONNECTOR_DVID, DRM_MODE_CONNECTOR_DVID, DRM_MODE_CONNECTOR_VGA, DRM_MODE_CONNECTOR_Composite, 
DRM_MODE_CONNECTOR_SVIDEO, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_9PinDIN, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_HDMIA, DRM_MODE_CONNECTOR_HDMIB, DRM_MODE_CONNECTOR_LVDS, DRM_MODE_CONNECTOR_9PinDIN, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_DisplayPort, DRM_MODE_CONNECTOR_eDP, DRM_MODE_CONNECTOR_Unknown }; bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; struct radeon_mode_info *mode_info = &rdev->mode_info; struct atom_context *ctx = mode_info->atom_context; int index = GetIndexIntoMasterTable(DATA, Object_Header); u16 size, data_offset; u8 frev, crev; ATOM_CONNECTOR_OBJECT_TABLE *con_obj; ATOM_ENCODER_OBJECT_TABLE *enc_obj; ATOM_OBJECT_TABLE *router_obj; ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj; ATOM_OBJECT_HEADER *obj_header; int i, j, k, path_size, device_support; int connector_type; u16 igp_lane_info, conn_id, connector_object_id; struct radeon_i2c_bus_rec ddc_bus; struct radeon_router router; struct radeon_gpio_rec gpio; struct radeon_hpd hpd; if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) return false; if (crev < 2) return false; obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset); path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *) (ctx->bios + data_offset + le16_to_cpu(obj_header->usDisplayPathTableOffset)); con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *) (ctx->bios + data_offset + le16_to_cpu(obj_header->usConnectorObjectTableOffset)); enc_obj = (ATOM_ENCODER_OBJECT_TABLE *) (ctx->bios + data_offset + le16_to_cpu(obj_header->usEncoderObjectTableOffset)); router_obj = (ATOM_OBJECT_TABLE *) (ctx->bios + data_offset + le16_to_cpu(obj_header->usRouterObjectTableOffset)); device_support = le16_to_cpu(obj_header->usDeviceSupport); path_size = 0; for (i = 0; i < path_obj->ucNumOfDispPath; i++) { uint8_t *addr = (uint8_t *) path_obj->asDispPath; 
ATOM_DISPLAY_OBJECT_PATH *path; addr += path_size; path = (ATOM_DISPLAY_OBJECT_PATH *) addr; path_size += le16_to_cpu(path->usSize); if (device_support & le16_to_cpu(path->usDeviceTag)) { uint8_t con_obj_id, con_obj_num, con_obj_type; con_obj_id = (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; con_obj_num = (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK) >> ENUM_ID_SHIFT; con_obj_type = (le16_to_cpu(path->usConnObjectId) & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; /* TODO CV support */ if (le16_to_cpu(path->usDeviceTag) == ATOM_DEVICE_CV_SUPPORT) continue; /* IGP chips */ if ((rdev->flags & RADEON_IS_IGP) && (con_obj_id == CONNECTOR_OBJECT_ID_PCIE_CONNECTOR)) { uint16_t igp_offset = 0; ATOM_INTEGRATED_SYSTEM_INFO_V2 *igp_obj; index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); if (atom_parse_data_header(ctx, index, &size, &frev, &crev, &igp_offset)) { if (crev >= 2) { igp_obj = (ATOM_INTEGRATED_SYSTEM_INFO_V2 *) (ctx->bios + igp_offset); if (igp_obj) { uint32_t slot_config, ct; if (con_obj_num == 1) slot_config = igp_obj-> ulDDISlot1Config; else slot_config = igp_obj-> ulDDISlot2Config; ct = (slot_config >> 16) & 0xff; connector_type = object_connector_convert [ct]; connector_object_id = ct; igp_lane_info = slot_config & 0xffff; } else continue; } else continue; } else { igp_lane_info = 0; connector_type = object_connector_convert[con_obj_id]; connector_object_id = con_obj_id; } } else { igp_lane_info = 0; connector_type = object_connector_convert[con_obj_id]; connector_object_id = con_obj_id; } if (connector_type == DRM_MODE_CONNECTOR_Unknown) continue; router.ddc_valid = false; router.cd_valid = false; for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) { uint8_t grph_obj_id, grph_obj_num, grph_obj_type; grph_obj_id = (le16_to_cpu(path->usGraphicObjIds[j]) & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; grph_obj_num = (le16_to_cpu(path->usGraphicObjIds[j]) & ENUM_ID_MASK) >> ENUM_ID_SHIFT; grph_obj_type = 
(le16_to_cpu(path->usGraphicObjIds[j]) & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) { for (k = 0; k < enc_obj->ucNumberOfObjects; k++) { u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID); if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) { ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) (ctx->bios + data_offset + le16_to_cpu(enc_obj->asObjects[k].usRecordOffset)); ATOM_ENCODER_CAP_RECORD *cap_record; u16 caps = 0; while (record->ucRecordSize > 0 && record->ucRecordType > 0 && record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { switch (record->ucRecordType) { case ATOM_ENCODER_CAP_RECORD_TYPE: cap_record =(ATOM_ENCODER_CAP_RECORD *) record; caps = le16_to_cpu(cap_record->usEncoderCap); break; } record = (ATOM_COMMON_RECORD_HEADER *) ((char *)record + record->ucRecordSize); } radeon_add_atom_encoder(dev, encoder_obj, le16_to_cpu (path-> usDeviceTag), caps); } } } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) { for (k = 0; k < router_obj->ucNumberOfObjects; k++) { u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID); if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) { ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) (ctx->bios + data_offset + le16_to_cpu(router_obj->asObjects[k].usRecordOffset)); ATOM_I2C_RECORD *i2c_record; ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path; ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path; ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table = (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) (ctx->bios + data_offset + le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset)); int enum_id; router.router_id = router_obj_id; for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst; enum_id++) { if (le16_to_cpu(path->usConnObjectId) == le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id])) break; } while (record->ucRecordSize > 0 && record->ucRecordType > 0 && 
record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { switch (record->ucRecordType) { case ATOM_I2C_RECORD_TYPE: i2c_record = (ATOM_I2C_RECORD *) record; i2c_config = (ATOM_I2C_ID_CONFIG_ACCESS *) &i2c_record->sucI2cId; router.i2c_info = radeon_lookup_i2c_gpio(rdev, i2c_config-> ucAccess); router.i2c_addr = i2c_record->ucI2CAddr >> 1; break; case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE: ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *) record; router.ddc_valid = true; router.ddc_mux_type = ddc_path->ucMuxType; router.ddc_mux_control_pin = ddc_path->ucMuxControlPin; router.ddc_mux_state = ddc_path->ucMuxState[enum_id]; break; case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE: cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *) record; router.cd_valid = true; router.cd_mux_type = cd_path->ucMuxType; router.cd_mux_control_pin = cd_path->ucMuxControlPin; router.cd_mux_state = cd_path->ucMuxState[enum_id]; break; } record = (ATOM_COMMON_RECORD_HEADER *) ((char *)record + record->ucRecordSize); } } } } } /* look up gpio for ddc, hpd */ ddc_bus.valid = false; hpd.hpd = RADEON_HPD_NONE; if ((le16_to_cpu(path->usDeviceTag) & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) { for (j = 0; j < con_obj->ucNumberOfObjects; j++) { if (le16_to_cpu(path->usConnObjectId) == le16_to_cpu(con_obj->asObjects[j]. usObjectID)) { ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) (ctx->bios + data_offset + le16_to_cpu(con_obj-> asObjects[j]. 
usRecordOffset)); ATOM_I2C_RECORD *i2c_record; ATOM_HPD_INT_RECORD *hpd_record; ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; while (record->ucRecordSize > 0 && record->ucRecordType > 0 && record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { switch (record->ucRecordType) { case ATOM_I2C_RECORD_TYPE: i2c_record = (ATOM_I2C_RECORD *) record; i2c_config = (ATOM_I2C_ID_CONFIG_ACCESS *) &i2c_record->sucI2cId; ddc_bus = radeon_lookup_i2c_gpio(rdev, i2c_config-> ucAccess); break; case ATOM_HPD_INT_RECORD_TYPE: hpd_record = (ATOM_HPD_INT_RECORD *) record; gpio = radeon_lookup_gpio(rdev, hpd_record->ucHPDIntGPIOID); hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio); hpd.plugged_state = hpd_record->ucPlugged_PinState; break; } record = (ATOM_COMMON_RECORD_HEADER *) ((char *)record + record-> ucRecordSize); } break; } } } /* needed for aux chan transactions */ ddc_bus.hpd = hpd.hpd; conn_id = le16_to_cpu(path->usConnObjectId); if (!radeon_atom_apply_quirks (dev, le16_to_cpu(path->usDeviceTag), &connector_type, &ddc_bus, &conn_id, &hpd)) continue; radeon_add_atom_connector(dev, conn_id, le16_to_cpu(path-> usDeviceTag), connector_type, &ddc_bus, igp_lane_info, connector_object_id, &hpd, &router); } } radeon_link_encoder_connector(dev); return true; } static uint16_t atombios_get_connector_object_id(struct drm_device *dev, int connector_type, uint16_t devices) { struct radeon_device *rdev = dev->dev_private; if (rdev->flags & RADEON_IS_IGP) { return supported_devices_connector_object_id_convert [connector_type]; } else if (((connector_type == DRM_MODE_CONNECTOR_DVII) || (connector_type == DRM_MODE_CONNECTOR_DVID)) && (devices & ATOM_DEVICE_DFP2_SUPPORT)) { struct radeon_mode_info *mode_info = &rdev->mode_info; struct atom_context *ctx = mode_info->atom_context; int index = GetIndexIntoMasterTable(DATA, XTMDS_Info); uint16_t size, data_offset; uint8_t frev, crev; ATOM_XTMDS_INFO *xtmds; if (atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) { xtmds = 
(ATOM_XTMDS_INFO *)(ctx->bios + data_offset); if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) { if (connector_type == DRM_MODE_CONNECTOR_DVII) return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I; else return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D; } else { if (connector_type == DRM_MODE_CONNECTOR_DVII) return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; else return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D; } } else return supported_devices_connector_object_id_convert [connector_type]; } else { return supported_devices_connector_object_id_convert [connector_type]; } } struct bios_connector { bool valid; uint16_t line_mux; uint16_t devices; int connector_type; struct radeon_i2c_bus_rec ddc_bus; struct radeon_hpd hpd; }; bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; struct radeon_mode_info *mode_info = &rdev->mode_info; struct atom_context *ctx = mode_info->atom_context; int index = GetIndexIntoMasterTable(DATA, SupportedDevicesInfo); uint16_t size, data_offset; uint8_t frev, crev; uint16_t device_support; uint8_t dac; union atom_supported_devices *supported_devices; int i, j, max_device; struct bios_connector *bios_connectors; size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE; struct radeon_router router; router.ddc_valid = false; router.cd_valid = false; bios_connectors = kzalloc(bc_size, GFP_KERNEL); if (!bios_connectors) return false; if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) { kfree(bios_connectors); return false; } supported_devices = (union atom_supported_devices *)(ctx->bios + data_offset); device_support = le16_to_cpu(supported_devices->info.usDeviceSupport); if (frev > 1) max_device = ATOM_MAX_SUPPORTED_DEVICE; else max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO; for (i = 0; i < max_device; i++) { ATOM_CONNECTOR_INFO_I2C ci = supported_devices->info.asConnInfo[i]; bios_connectors[i].valid = false; if (!(device_support & (1 << 
i))) { continue; } if (i == ATOM_DEVICE_CV_INDEX) { DRM_DEBUG_KMS("Skipping Component Video\n"); continue; } bios_connectors[i].connector_type = supported_devices_connector_convert[ci.sucConnectorInfo. sbfAccess. bfConnectorType]; if (bios_connectors[i].connector_type == DRM_MODE_CONNECTOR_Unknown) continue; dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC; bios_connectors[i].line_mux = ci.sucI2cId.ucAccess; /* give tv unique connector ids */ if (i == ATOM_DEVICE_TV1_INDEX) { bios_connectors[i].ddc_bus.valid = false; bios_connectors[i].line_mux = 50; } else if (i == ATOM_DEVICE_TV2_INDEX) { bios_connectors[i].ddc_bus.valid = false; bios_connectors[i].line_mux = 51; } else if (i == ATOM_DEVICE_CV_INDEX) { bios_connectors[i].ddc_bus.valid = false; bios_connectors[i].line_mux = 52; } else bios_connectors[i].ddc_bus = radeon_lookup_i2c_gpio(rdev, bios_connectors[i].line_mux); if ((crev > 1) && (frev > 1)) { u8 isb = supported_devices->info_2d1.asIntSrcInfo[i].ucIntSrcBitmap; switch (isb) { case 0x4: bios_connectors[i].hpd.hpd = RADEON_HPD_1; break; case 0xa: bios_connectors[i].hpd.hpd = RADEON_HPD_2; break; default: bios_connectors[i].hpd.hpd = RADEON_HPD_NONE; break; } } else { if (i == ATOM_DEVICE_DFP1_INDEX) bios_connectors[i].hpd.hpd = RADEON_HPD_1; else if (i == ATOM_DEVICE_DFP2_INDEX) bios_connectors[i].hpd.hpd = RADEON_HPD_2; else bios_connectors[i].hpd.hpd = RADEON_HPD_NONE; } /* Always set the connector type to VGA for CRT1/CRT2. if they are * shared with a DVI port, we'll pick up the DVI connector when we * merge the outputs. Some bioses incorrectly list VGA ports as DVI. 
*/ if (i == ATOM_DEVICE_CRT1_INDEX || i == ATOM_DEVICE_CRT2_INDEX) bios_connectors[i].connector_type = DRM_MODE_CONNECTOR_VGA; if (!radeon_atom_apply_quirks (dev, (1 << i), &bios_connectors[i].connector_type, &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux, &bios_connectors[i].hpd)) continue; bios_connectors[i].valid = true; bios_connectors[i].devices = (1 << i); if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) radeon_add_atom_encoder(dev, radeon_get_encoder_enum(dev, (1 << i), dac), (1 << i), 0); else radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, (1 << i), dac), (1 << i)); } /* combine shared connectors */ for (i = 0; i < max_device; i++) { if (bios_connectors[i].valid) { for (j = 0; j < max_device; j++) { if (bios_connectors[j].valid && (i != j)) { if (bios_connectors[i].line_mux == bios_connectors[j].line_mux) { /* make sure not to combine LVDS */ if (bios_connectors[i].devices & (ATOM_DEVICE_LCD_SUPPORT)) { bios_connectors[i].line_mux = 53; bios_connectors[i].ddc_bus.valid = false; continue; } if (bios_connectors[j].devices & (ATOM_DEVICE_LCD_SUPPORT)) { bios_connectors[j].line_mux = 53; bios_connectors[j].ddc_bus.valid = false; continue; } /* combine analog and digital for DVI-I */ if (((bios_connectors[i].devices & (ATOM_DEVICE_DFP_SUPPORT)) && (bios_connectors[j].devices & (ATOM_DEVICE_CRT_SUPPORT))) || ((bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) && (bios_connectors[i].devices & (ATOM_DEVICE_CRT_SUPPORT)))) { bios_connectors[i].devices |= bios_connectors[j].devices; bios_connectors[i].connector_type = DRM_MODE_CONNECTOR_DVII; if (bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) bios_connectors[i].hpd = bios_connectors[j].hpd; bios_connectors[j].valid = false; } } } } } } /* add the connectors */ for (i = 0; i < max_device; i++) { if (bios_connectors[i].valid) { uint16_t connector_object_id = atombios_get_connector_object_id(dev, bios_connectors[i].connector_type, bios_connectors[i].devices); 
radeon_add_atom_connector(dev, bios_connectors[i].line_mux, bios_connectors[i].devices, bios_connectors[i]. connector_type, &bios_connectors[i].ddc_bus, 0, connector_object_id, &bios_connectors[i].hpd, &router); } } radeon_link_encoder_connector(dev); kfree(bios_connectors); return true; } union firmware_info { ATOM_FIRMWARE_INFO info; ATOM_FIRMWARE_INFO_V1_2 info_12; ATOM_FIRMWARE_INFO_V1_3 info_13; ATOM_FIRMWARE_INFO_V1_4 info_14; ATOM_FIRMWARE_INFO_V2_1 info_21; ATOM_FIRMWARE_INFO_V2_2 info_22; }; bool radeon_atom_get_clock_info(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); union firmware_info *firmware_info; uint8_t frev, crev; struct radeon_pll *p1pll = &rdev->clock.p1pll; struct radeon_pll *p2pll = &rdev->clock.p2pll; struct radeon_pll *dcpll = &rdev->clock.dcpll; struct radeon_pll *spll = &rdev->clock.spll; struct radeon_pll *mpll = &rdev->clock.mpll; uint16_t data_offset; if (atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) { firmware_info = (union firmware_info *)(mode_info->atom_context->bios + data_offset); /* pixel clocks */ p1pll->reference_freq = le16_to_cpu(firmware_info->info.usReferenceClock); p1pll->reference_div = 0; if (crev < 2) p1pll->pll_out_min = le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); else p1pll->pll_out_min = le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output); p1pll->pll_out_max = le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); if (crev >= 4) { p1pll->lcd_pll_out_min = le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; if (p1pll->lcd_pll_out_min == 0) p1pll->lcd_pll_out_min = p1pll->pll_out_min; p1pll->lcd_pll_out_max = le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100; if (p1pll->lcd_pll_out_max == 0) p1pll->lcd_pll_out_max = p1pll->pll_out_max; } else { p1pll->lcd_pll_out_min = 
p1pll->pll_out_min; p1pll->lcd_pll_out_max = p1pll->pll_out_max; } if (p1pll->pll_out_min == 0) { if (ASIC_IS_AVIVO(rdev)) p1pll->pll_out_min = 64800; else p1pll->pll_out_min = 20000; } p1pll->pll_in_min = le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Input); p1pll->pll_in_max = le16_to_cpu(firmware_info->info.usMaxPixelClockPLL_Input); *p2pll = *p1pll; /* system clock */ if (ASIC_IS_DCE4(rdev)) spll->reference_freq = le16_to_cpu(firmware_info->info_21.usCoreReferenceClock); else spll->reference_freq = le16_to_cpu(firmware_info->info.usReferenceClock); spll->reference_div = 0; spll->pll_out_min = le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Output); spll->pll_out_max = le32_to_cpu(firmware_info->info.ulMaxEngineClockPLL_Output); /* ??? */ if (spll->pll_out_min == 0) { if (ASIC_IS_AVIVO(rdev)) spll->pll_out_min = 64800; else spll->pll_out_min = 20000; } spll->pll_in_min = le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Input); spll->pll_in_max = le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input); /* memory clock */ if (ASIC_IS_DCE4(rdev)) mpll->reference_freq = le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock); else mpll->reference_freq = le16_to_cpu(firmware_info->info.usReferenceClock); mpll->reference_div = 0; mpll->pll_out_min = le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Output); mpll->pll_out_max = le32_to_cpu(firmware_info->info.ulMaxMemoryClockPLL_Output); /* ??? 
 */
		if (mpll->pll_out_min == 0) {
			if (ASIC_IS_AVIVO(rdev))
				mpll->pll_out_min = 64800;
			else
				mpll->pll_out_min = 20000;
		}
		mpll->pll_in_min =
			le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Input);
		mpll->pll_in_max =
			le16_to_cpu(firmware_info->info.usMaxMemoryClockPLL_Input);
		rdev->clock.default_sclk =
			le32_to_cpu(firmware_info->info.ulDefaultEngineClock);
		rdev->clock.default_mclk =
			le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);
		if (ASIC_IS_DCE4(rdev)) {
			rdev->clock.default_dispclk =
				le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
			/* some tables leave the display clock at 0; pick a sane default */
			if (rdev->clock.default_dispclk == 0) {
				if (ASIC_IS_DCE5(rdev))
					rdev->clock.default_dispclk = 54000; /* 540 Mhz */
				else
					rdev->clock.default_dispclk = 60000; /* 600 Mhz */
			}
			rdev->clock.dp_extclk =
				le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
		}
		/* display clock PLL inherits the pixel PLL 1 limits */
		*dcpll = *p1pll;
		return true;
	}
	return false;
}

/* Overlays of the IntegratedSystemInfo data table for the two
 * table revisions (crev) this function understands.
 */
union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
};

/*
 * radeon_atombios_sideport_present - check for IGP sideport memory
 * @rdev: radeon device
 *
 * Parses the IntegratedSystemInfo data table and reports whether the
 * BIOS advertises a non-zero sideport memory clock (table crev 1 uses
 * ulBootUpMemoryClock, crev 2 uses ulBootUpSidePortClock).
 *
 * Returns true if sideport memory is present, false otherwise or if
 * the table is missing/unsupported.
 */
bool radeon_atombios_sideport_present(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;

	/* sideport is AMD only */
	if (rdev->family == CHIP_RS600)
		return false;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
			      data_offset);
		switch (crev) {
		case 1:
			if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock))
				return true;
			break;
		case 2:
			if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock))
				return true;
			break;
		default:
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			break;
		}
	}
	return false;
}

bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
				   struct radeon_encoder_int_tmds *tmds)
{
	struct drm_device *dev = encoder->base.dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, TMDS_Info); uint16_t data_offset; struct _ATOM_TMDS_INFO *tmds_info; uint8_t frev, crev; uint16_t maxfreq; int i; if (atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) { tmds_info = (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios + data_offset); maxfreq = le16_to_cpu(tmds_info->usMaxFrequency); for (i = 0; i < 4; i++) { tmds->tmds_pll[i].freq = le16_to_cpu(tmds_info->asMiscInfo[i].usFrequency); tmds->tmds_pll[i].value = tmds_info->asMiscInfo[i].ucPLL_ChargePump & 0x3f; tmds->tmds_pll[i].value |= (tmds_info->asMiscInfo[i]. ucPLL_VCO_Gain & 0x3f) << 6; tmds->tmds_pll[i].value |= (tmds_info->asMiscInfo[i]. ucPLL_DutyCycle & 0xf) << 12; tmds->tmds_pll[i].value |= (tmds_info->asMiscInfo[i]. ucPLL_VoltageSwing & 0xf) << 16; DRM_DEBUG_KMS("TMDS PLL From ATOMBIOS %u %x\n", tmds->tmds_pll[i].freq, tmds->tmds_pll[i].value); if (maxfreq == tmds->tmds_pll[i].freq) { tmds->tmds_pll[i].freq = 0xffffffff; break; } } return true; } return false; } bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, struct radeon_atom_ss *ss, int id) { struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info); uint16_t data_offset, size; struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info; uint8_t frev, crev; int i, num_indices; memset(ss, 0, sizeof(struct radeon_atom_ss)); if (atom_parse_data_header(mode_info->atom_context, index, &size, &frev, &crev, &data_offset)) { ss_info = (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset); num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT); for (i = 0; i < num_indices; i++) { if (ss_info->asSS_Info[i].ucSS_Id == id) { ss->percentage = le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage); ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType; ss->step = ss_info->asSS_Info[i].ucSS_Step; ss->delay = 
					ss_info->asSS_Info[i].ucSS_Delay;
				ss->range = ss_info->asSS_Info[i].ucSS_Range;
				ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
				return true;
			}
		}
	}
	return false;
}

/*
 * radeon_atombios_get_igp_ss_overrides - apply IGP spread-spectrum overrides
 * @rdev: radeon device
 * @ss: spread spectrum parameters to update in place
 * @id: internal SS id (ASIC_INTERNAL_SS_ON_TMDS/HDMI/LVDS)
 *
 * On IGP parts the IntegratedSystemInfo table (V6 layout assumed here) can
 * carry per-output spread-spectrum percentage/rate values that override the
 * generic ASIC SS table.  Only non-zero table values replace the fields
 * already present in @ss; zeroes leave @ss untouched.
 */
static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
						 struct radeon_atom_ss *ss,
						 int id)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	u16 data_offset, size;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info;
	u8 frev, crev;
	u16 percentage = 0, rate = 0;

	/* get any igp specific overrides */
	if (atom_parse_data_header(mode_info->atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *)
			(mode_info->atom_context->bios + data_offset);
		switch (id) {
		case ASIC_INTERNAL_SS_ON_TMDS:
			percentage = le16_to_cpu(igp_info->usDVISSPercentage);
			rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz);
			break;
		case ASIC_INTERNAL_SS_ON_HDMI:
			percentage = le16_to_cpu(igp_info->usHDMISSPercentage);
			rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz);
			break;
		case ASIC_INTERNAL_SS_ON_LVDS:
			percentage = le16_to_cpu(igp_info->usLvdsSSPercentage);
			rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz);
			break;
		}
		/* only override fields the table actually provides */
		if (percentage)
			ss->percentage = percentage;
		if (rate)
			ss->rate = rate;
	}
}

/* Overlays of the ASIC_InternalSS_Info data table for the three
 * table format revisions (frev) handled below.
 */
union asic_ss_info {
	struct _ATOM_ASIC_INTERNAL_SS_INFO info;
	struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
	struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
};

bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
				      struct radeon_atom_ss *ss,
				      int id, u32 clock)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	uint16_t data_offset, size;
	union asic_ss_info *ss_info;
	uint8_t frev, crev;
	int i, num_indices;

	memset(ss, 0, sizeof(struct radeon_atom_ss));
	if (atom_parse_data_header(mode_info->atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		ss_info =
			(union asic_ss_info *)(mode_info->atom_context->bios +
data_offset); switch (frev) { case 1: num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_ASIC_SS_ASSIGNMENT); for (i = 0; i < num_indices; i++) { if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) { ss->percentage = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz); return true; } } break; case 2: num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); for (i = 0; i < num_indices; i++) { if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) { ss->percentage = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz); return true; } } break; case 3: num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); for (i = 0; i < num_indices; i++) { if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) { ss->percentage = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz); if (rdev->flags & RADEON_IS_IGP) radeon_atombios_get_igp_ss_overrides(rdev, ss, id); return true; } } break; default: DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev); break; } } return false; } union lvds_info { struct _ATOM_LVDS_INFO info; struct _ATOM_LVDS_INFO_V12 info_12; }; struct radeon_encoder_atom_dig 
*radeon_atombios_get_lvds_info(struct radeon_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, LVDS_Info); uint16_t data_offset, misc; union lvds_info *lvds_info; uint8_t frev, crev; struct radeon_encoder_atom_dig *lvds = NULL; int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; if (atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) { lvds_info = (union lvds_info *)(mode_info->atom_context->bios + data_offset); lvds = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); if (!lvds) return NULL; lvds->native_mode.clock = le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10; lvds->native_mode.hdisplay = le16_to_cpu(lvds_info->info.sLCDTiming.usHActive); lvds->native_mode.vdisplay = le16_to_cpu(lvds_info->info.sLCDTiming.usVActive); lvds->native_mode.htotal = lvds->native_mode.hdisplay + le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time); lvds->native_mode.hsync_start = lvds->native_mode.hdisplay + le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset); lvds->native_mode.hsync_end = lvds->native_mode.hsync_start + le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth); lvds->native_mode.vtotal = lvds->native_mode.vdisplay + le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time); lvds->native_mode.vsync_start = lvds->native_mode.vdisplay + le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset); lvds->native_mode.vsync_end = lvds->native_mode.vsync_start + le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); lvds->panel_pwr_delay = le16_to_cpu(lvds_info->info.usOffDelayInMs); lvds->lcd_misc = lvds_info->info.ucLVDS_Misc; misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess); if (misc & ATOM_VSYNC_POLARITY) lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC; if (misc & ATOM_HSYNC_POLARITY) lvds->native_mode.flags |= 
DRM_MODE_FLAG_NHSYNC; if (misc & ATOM_COMPOSITESYNC) lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC; if (misc & ATOM_INTERLACE) lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE; if (misc & ATOM_DOUBLE_CLOCK_MODE) lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize); lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize); /* set crtc values */ drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); lvds->lcd_ss_id = lvds_info->info.ucSS_Id; encoder->native_mode = lvds->native_mode; if (encoder_enum == 2) lvds->linkb = true; else lvds->linkb = false; /* parse the lcd record table */ if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) { ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; bool bad_record = false; u8 *record; if ((frev == 1) && (crev < 2)) /* absolute */ record = (u8 *)(mode_info->atom_context->bios + le16_to_cpu(lvds_info->info.usModePatchTableOffset)); else /* relative */ record = (u8 *)(mode_info->atom_context->bios + data_offset + le16_to_cpu(lvds_info->info.usModePatchTableOffset)); while (*record != ATOM_RECORD_END_TYPE) { switch (*record) { case LCD_MODE_PATCH_RECORD_MODE_TYPE: record += sizeof(ATOM_PATCH_RECORD_MODE); break; case LCD_RTS_RECORD_TYPE: record += sizeof(ATOM_LCD_RTS_RECORD); break; case LCD_CAP_RECORD_TYPE: record += sizeof(ATOM_LCD_MODE_CONTROL_CAP); break; case LCD_FAKE_EDID_PATCH_RECORD_TYPE: fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record; if (fake_edid_record->ucFakeEDIDLength) { struct edid *edid; int edid_size = max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength); edid = kmalloc(edid_size, GFP_KERNEL); if (edid) { memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0], fake_edid_record->ucFakeEDIDLength); if (drm_edid_is_valid(edid)) { rdev->mode_info.bios_hardcoded_edid = edid; rdev->mode_info.bios_hardcoded_edid_size = 
edid_size; } else kfree(edid); } } record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD); break; case LCD_PANEL_RESOLUTION_RECORD_TYPE: panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; lvds->native_mode.width_mm = panel_res_record->usHSize; lvds->native_mode.height_mm = panel_res_record->usVSize; record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD); break; default: DRM_ERROR("Bad LCD record %d\n", *record); bad_record = true; break; } if (bad_record) break; } } } return lvds; } struct radeon_encoder_primary_dac * radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, CompassionateData); uint16_t data_offset; struct _COMPASSIONATE_DATA *dac_info; uint8_t frev, crev; uint8_t bg, dac; struct radeon_encoder_primary_dac *p_dac = NULL; if (atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) { dac_info = (struct _COMPASSIONATE_DATA *) (mode_info->atom_context->bios + data_offset); p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL); if (!p_dac) return NULL; bg = dac_info->ucDAC1_BG_Adjustment; dac = dac_info->ucDAC1_DAC_Adjustment; p_dac->ps2_pdac_adj = (bg << 8) | (dac); } return p_dac; } bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, struct drm_display_mode *mode) { struct radeon_mode_info *mode_info = &rdev->mode_info; ATOM_ANALOG_TV_INFO *tv_info; ATOM_ANALOG_TV_INFO_V1_2 *tv_info_v1_2; ATOM_DTD_FORMAT *dtd_timings; int data_index = GetIndexIntoMasterTable(DATA, AnalogTV_Info); u8 frev, crev; u16 data_offset, misc; if (!atom_parse_data_header(mode_info->atom_context, data_index, NULL, &frev, &crev, &data_offset)) return false; switch (crev) { case 1: tv_info = (ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset); if (index >= MAX_SUPPORTED_TV_TIMING) return false; 
mode->crtc_htotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total); mode->crtc_hdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Disp); mode->crtc_hsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart); mode->crtc_hsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart) + le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncWidth); mode->crtc_vtotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Total); mode->crtc_vdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Disp); mode->crtc_vsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart); mode->crtc_vsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart) + le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncWidth); mode->flags = 0; misc = le16_to_cpu(tv_info->aModeTimings[index].susModeMiscInfo.usAccess); if (misc & ATOM_VSYNC_POLARITY) mode->flags |= DRM_MODE_FLAG_NVSYNC; if (misc & ATOM_HSYNC_POLARITY) mode->flags |= DRM_MODE_FLAG_NHSYNC; if (misc & ATOM_COMPOSITESYNC) mode->flags |= DRM_MODE_FLAG_CSYNC; if (misc & ATOM_INTERLACE) mode->flags |= DRM_MODE_FLAG_INTERLACE; if (misc & ATOM_DOUBLE_CLOCK_MODE) mode->flags |= DRM_MODE_FLAG_DBLSCAN; mode->clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10; if (index == 1) { /* PAL timings appear to have wrong values for totals */ mode->crtc_htotal -= 1; mode->crtc_vtotal -= 1; } break; case 2: tv_info_v1_2 = (ATOM_ANALOG_TV_INFO_V1_2 *)(mode_info->atom_context->bios + data_offset); if (index >= MAX_SUPPORTED_TV_TIMING_V1_2) return false; dtd_timings = &tv_info_v1_2->aModeTimings[index]; mode->crtc_htotal = le16_to_cpu(dtd_timings->usHActive) + le16_to_cpu(dtd_timings->usHBlanking_Time); mode->crtc_hdisplay = le16_to_cpu(dtd_timings->usHActive); mode->crtc_hsync_start = le16_to_cpu(dtd_timings->usHActive) + le16_to_cpu(dtd_timings->usHSyncOffset); mode->crtc_hsync_end = mode->crtc_hsync_start + le16_to_cpu(dtd_timings->usHSyncWidth); 
mode->crtc_vtotal = le16_to_cpu(dtd_timings->usVActive) + le16_to_cpu(dtd_timings->usVBlanking_Time); mode->crtc_vdisplay = le16_to_cpu(dtd_timings->usVActive); mode->crtc_vsync_start = le16_to_cpu(dtd_timings->usVActive) + le16_to_cpu(dtd_timings->usVSyncOffset); mode->crtc_vsync_end = mode->crtc_vsync_start + le16_to_cpu(dtd_timings->usVSyncWidth); mode->flags = 0; misc = le16_to_cpu(dtd_timings->susModeMiscInfo.usAccess); if (misc & ATOM_VSYNC_POLARITY) mode->flags |= DRM_MODE_FLAG_NVSYNC; if (misc & ATOM_HSYNC_POLARITY) mode->flags |= DRM_MODE_FLAG_NHSYNC; if (misc & ATOM_COMPOSITESYNC) mode->flags |= DRM_MODE_FLAG_CSYNC; if (misc & ATOM_INTERLACE) mode->flags |= DRM_MODE_FLAG_INTERLACE; if (misc & ATOM_DOUBLE_CLOCK_MODE) mode->flags |= DRM_MODE_FLAG_DBLSCAN; mode->clock = le16_to_cpu(dtd_timings->usPixClk) * 10; break; } return true; } enum radeon_tv_std radeon_atombios_get_tv_info(struct radeon_device *rdev) { struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, AnalogTV_Info); uint16_t data_offset; uint8_t frev, crev; struct _ATOM_ANALOG_TV_INFO *tv_info; enum radeon_tv_std tv_std = TV_STD_NTSC; if (atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) { tv_info = (struct _ATOM_ANALOG_TV_INFO *) (mode_info->atom_context->bios + data_offset); switch (tv_info->ucTV_BootUpDefaultStandard) { case ATOM_TV_NTSC: tv_std = TV_STD_NTSC; DRM_DEBUG_KMS("Default TV standard: NTSC\n"); break; case ATOM_TV_NTSCJ: tv_std = TV_STD_NTSC_J; DRM_DEBUG_KMS("Default TV standard: NTSC-J\n"); break; case ATOM_TV_PAL: tv_std = TV_STD_PAL; DRM_DEBUG_KMS("Default TV standard: PAL\n"); break; case ATOM_TV_PALM: tv_std = TV_STD_PAL_M; DRM_DEBUG_KMS("Default TV standard: PAL-M\n"); break; case ATOM_TV_PALN: tv_std = TV_STD_PAL_N; DRM_DEBUG_KMS("Default TV standard: PAL-N\n"); break; case ATOM_TV_PALCN: tv_std = TV_STD_PAL_CN; DRM_DEBUG_KMS("Default TV standard: PAL-CN\n"); break; case ATOM_TV_PAL60: tv_std = 
			TV_STD_PAL_60;
		DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
		break;
	case ATOM_TV_SECAM:
		tv_std = TV_STD_SECAM;
		DRM_DEBUG_KMS("Default TV standard: SECAM\n");
		break;
	default:
		tv_std = TV_STD_NTSC;
		DRM_DEBUG_KMS("Unknown TV standard; defaulting to NTSC\n");
		break;
	}
	}
	return tv_std;
}

/*
 * radeon_atombios_get_tv_dac_info - fetch TV DAC tuning values
 * @encoder: radeon encoder
 *
 * Reads the CompassionateData table and packs the DAC2 background/DAC
 * adjustment byte pairs for PS2(CRT2), PAL and NTSC into the
 * corresponding *_tvdac_adj fields as (bg << 16) | (dac << 20), then
 * records the BIOS default TV standard.
 *
 * Returns a kzalloc'd radeon_encoder_tv_dac (caller frees), or NULL if
 * the table is absent or allocation fails.
 */
struct radeon_encoder_tv_dac *
radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, CompassionateData);
	uint16_t data_offset;
	struct _COMPASSIONATE_DATA *dac_info;
	uint8_t frev, crev;
	uint8_t bg, dac;
	struct radeon_encoder_tv_dac *tv_dac = NULL;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		dac_info = (struct _COMPASSIONATE_DATA *)
			(mode_info->atom_context->bios + data_offset);

		tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);

		if (!tv_dac)
			return NULL;

		bg = dac_info->ucDAC2_CRT2_BG_Adjustment;
		dac = dac_info->ucDAC2_CRT2_DAC_Adjustment;
		tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);

		bg = dac_info->ucDAC2_PAL_BG_Adjustment;
		dac = dac_info->ucDAC2_PAL_DAC_Adjustment;
		tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);

		bg = dac_info->ucDAC2_NTSC_BG_Adjustment;
		dac = dac_info->ucDAC2_NTSC_DAC_Adjustment;
		tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);

		tv_dac->tv_std = radeon_atombios_get_tv_info(rdev);
	}
	return tv_dac;
}

/* Human-readable names for the legacy power-table thermal controller ids;
 * indexed by the controller type byte from the BIOS power table.
 */
static const char *thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"asc7xxx",
};

/* Human-readable names for the PPLib power-table thermal controller ids. */
static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
};

/* Overlays of the PowerPlayInfo data table across its revisions. */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct
_ATOM_PPLIB_POWERPLAYTABLE pplib; struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; }; union pplib_clock_info { struct _ATOM_PPLIB_R600_CLOCK_INFO r600; struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; }; union pplib_power_state { struct _ATOM_PPLIB_STATE v1; struct _ATOM_PPLIB_STATE_V2 v2; }; static void radeon_atombios_parse_misc_flags_1_3(struct radeon_device *rdev, int state_index, u32 misc, u32 misc2) { rdev->pm.power_state[state_index].misc = misc; rdev->pm.power_state[state_index].misc2 = misc2; /* order matters! */ if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_POWERSAVE; if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BATTERY; if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BATTERY; if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BALANCED; if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_PERFORMANCE; rdev->pm.power_state[state_index].flags &= ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; } if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BALANCED; if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_DEFAULT; rdev->pm.default_power_state_index = state_index; rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; } else if (state_index == 0) { rdev->pm.power_state[state_index].clock_info[0].flags |= RADEON_PM_MODE_NO_DISPLAY; } } static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) { struct radeon_mode_info *mode_info = &rdev->mode_info; u32 misc, misc2 = 
0; int num_modes = 0, i; int state_index = 0; struct radeon_i2c_bus_rec i2c_bus; union power_info *power_info; int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); u16 data_offset; u8 frev, crev; if (!atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) return state_index; power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); /* add the i2c bus for thermal/fan chip */ if (power_info->info.ucOverdriveThermalController > 0) { DRM_INFO("Possible %s thermal controller at 0x%02x\n", thermal_controller_names[power_info->info.ucOverdriveThermalController], power_info->info.ucOverdriveControllerAddress >> 1); i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine); rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); if (rdev->pm.i2c_bus) { struct i2c_board_info info = { }; const char *name = thermal_controller_names[power_info->info. ucOverdriveThermalController]; info.addr = power_info->info.ucOverdriveControllerAddress >> 1; strlcpy(info.type, name, sizeof(info.type)); i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); } } num_modes = power_info->info.ucNumOfPowerModeEntries; if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL); if (!rdev->pm.power_state) return state_index; /* last mode is usually default, array is low to high */ for (i = 0; i < num_modes; i++) { rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; switch (frev) { case 1: rdev->pm.power_state[state_index].num_clock_modes = 1; rdev->pm.power_state[state_index].clock_info[0].mclk = le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); rdev->pm.power_state[state_index].clock_info[0].sclk = le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock); /* skip invalid modes */ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || 
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) continue; rdev->pm.power_state[state_index].pcie_lanes = power_info->info.asPowerPlayInfo[i].ucNumPciELanes; misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO; rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = radeon_lookup_gpio(rdev, power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex); if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = true; else rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = false; } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_VDDC; rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex; } rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, 0); state_index++; break; case 2: rdev->pm.power_state[state_index].num_clock_modes = 1; rdev->pm.power_state[state_index].clock_info[0].mclk = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); rdev->pm.power_state[state_index].clock_info[0].sclk = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock); /* skip invalid modes */ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) continue; rdev->pm.power_state[state_index].pcie_lanes = power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || (misc & 
ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO; rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = radeon_lookup_gpio(rdev, power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex); if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = true; else rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = false; } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_VDDC; rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex; } rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2); state_index++; break; case 3: rdev->pm.power_state[state_index].num_clock_modes = 1; rdev->pm.power_state[state_index].clock_info[0].mclk = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); rdev->pm.power_state[state_index].clock_info[0].sclk = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock); /* skip invalid modes */ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) continue; rdev->pm.power_state[state_index].pcie_lanes = power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO; rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = radeon_lookup_gpio(rdev, power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex); if (misc & 
ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = true; else rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = false; } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_VDDC; rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex; if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) { rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled = true; rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id = power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex; } } rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2); state_index++; break; } } /* last mode is usually default */ if (rdev->pm.default_power_state_index == -1) { rdev->pm.power_state[state_index - 1].type = POWER_STATE_TYPE_DEFAULT; rdev->pm.default_power_state_index = state_index - 1; rdev->pm.power_state[state_index - 1].default_clock_mode = &rdev->pm.power_state[state_index - 1].clock_info[0]; rdev->pm.power_state[state_index].flags &= ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; rdev->pm.power_state[state_index].misc = 0; rdev->pm.power_state[state_index].misc2 = 0; } return state_index; } static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *rdev, ATOM_PPLIB_THERMALCONTROLLER *controller) { struct radeon_i2c_bus_rec i2c_bus; /* add the i2c bus for thermal/fan chip */ if (controller->ucType > 0) { if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { DRM_INFO("Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? 
/* (continued: RV6xx internal-controller message argument) */
				 "without" : "with");
			rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ?
				 "without" : "with");
			rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ?
				 "without" : "with");
			rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ?
				 "without" : "with");
			rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ?
				 "without" : "with");
			rdev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if ((controller->ucType ==
			    ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
			   (controller->ucType ==
			    ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
			   (controller->ucType ==
			    ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
			DRM_INFO("Special thermal controller config\n");
		} else {
			/* external i2c thermal chip: register it on its bus */
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ?
				 "without" : "with");
			i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
			rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
			if (rdev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
			}
		}
	}
}

/*
 * radeon_atombios_get_default_voltages - boot-up VDDC/VDDCI from FirmwareInfo.
 * @rdev: radeon device
 * @vddc: out, boot-up core voltage (0 if the table is missing)
 * @vddci: out, boot-up I/O voltage (0 unless table frev 2, crev >= 2)
 */
static void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
						 u16 *vddc, u16 *vddci)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	u8 frev, crev;
	u16 data_offset;
	union firmware_info *firmware_info;

	*vddc = 0;
	*vddci = 0;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);
		*vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
		if ((frev == 2) && (crev >= 2))
			*vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
	}
}

/*
 * radeon_atombios_parse_pplib_non_clock_info - fill state-wide attributes.
 * @rdev: radeon device
 * @state_index: power state being filled
 * @mode_index: number of valid clock modes already parsed for this state
 * @non_clock_info: pplib non-clock descriptor for this state
 *
 * Derives the state type from the classification bits, records pcie lane
 * count and flags, and - for the BOOT state - makes it the default and
 * patches its clocks with the firmware boot-up values.
 */
static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
						       int state_index, int mode_index,
						       struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info)
{
	int j;
	u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
	u16 vddc, vddci;

	radeon_atombios_get_default_voltages(rdev, &vddc, &vddci);

	rdev->pm.power_state[state_index].misc = misc;
	rdev->pm.power_state[state_index].misc2 = misc2;
	rdev->pm.power_state[state_index].pcie_lanes =
		((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
		 ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
	switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		rdev->pm.power_state[state_index].type =
			POWER_STATE_TYPE_BATTERY;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		rdev->pm.power_state[state_index].type =
			POWER_STATE_TYPE_BALANCED;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		rdev->pm.power_state[state_index].type =
			POWER_STATE_TYPE_PERFORMANCE;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
		if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			rdev->pm.power_state[state_index].type =
				POWER_STATE_TYPE_PERFORMANCE;
		break;
	}
	rdev->pm.power_state[state_index].flags = 0;
	if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		rdev->pm.power_state[state_index].flags |=
			RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
	if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		rdev->pm.power_state[state_index].type =
			POWER_STATE_TYPE_DEFAULT;
		rdev->pm.default_power_state_index = state_index;
		rdev->pm.power_state[state_index].default_clock_mode =
			&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
		if (ASIC_IS_DCE5(rdev)) {
			/* NI chips post without MC ucode, so default clocks are strobe mode only */
			rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
			rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
			rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
			rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci;
		} else {
			/* patch the table values with the default sclk/mclk from firmware info */
			for (j = 0; j < mode_index; j++) {
				rdev->pm.power_state[state_index].clock_info[j].mclk =
					rdev->clock.default_mclk;
				rdev->pm.power_state[state_index].clock_info[j].sclk =
					rdev->clock.default_sclk;
				if (vddc)
					rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
						vddc;
			}
		}
	}
}

/*
 * radeon_atombios_parse_pplib_clock_info - decode one pplib clock mode.
 * @rdev: radeon device
 * @state_index/@mode_index: destination slot in rdev->pm.power_state
 * @clock_info: raw clock-info entry (layout depends on the ASIC family)
 *
 * Reassembles the 24-bit clock values (low u16 + high u8) and the voltage
 * for the appropriate per-family layout.  Returns false for invalid
 * entries (zero sclk, or zero mclk on non-IGP parts) so the caller can
 * skip them.
 */
static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
						   int state_index, int mode_index,
						   union pplib_clock_info *clock_info)
{
	u32 sclk, mclk;

	if (rdev->flags & RADEON_IS_IGP) {
		if (rdev->family >= CHIP_PALM) {
			sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
			sclk |= clock_info->sumo.ucEngineClockHigh << 16;
			rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
		} else {
			sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow);
			sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
			rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
		}
	} else if (ASIC_IS_DCE4(rdev)) {
		sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
		sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
		mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;
		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
			VOLTAGE_SW;
		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
			le16_to_cpu(clock_info->evergreen.usVDDC);
		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
			le16_to_cpu(clock_info->evergreen.usVDDCI);
	} else {
		sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
		sclk |= clock_info->r600.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
		mclk |= clock_info->r600.ucMemoryClockHigh << 16;
		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
			VOLTAGE_SW;
		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
			le16_to_cpu(clock_info->r600.usVDDC);
	}

	if (rdev->flags & RADEON_IS_IGP) {
		/* skip invalid modes */
		if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
			return false;
	} else {
		/* skip invalid modes */
		if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
		    (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
			return false;
	}
	return true;
}

/*
 * radeon_atombios_parse_power_table_4_5 - parse pplib v1 power tables
 * (PowerPlayInfo frev 4/5).  Continues on the next chunk.
 */
static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
union pplib_power_state *power_state; int i, j; int state_index = 0, mode_index = 0; union pplib_clock_info *clock_info; bool valid; union power_info *power_info; int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); u16 data_offset; u8 frev, crev; if (!atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) return state_index; power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * power_info->pplib.ucNumStates, GFP_KERNEL); if (!rdev->pm.power_state) return state_index; /* first mode is usually default, followed by low to high */ for (i = 0; i < power_info->pplib.ucNumStates; i++) { mode_index = 0; power_state = (union pplib_power_state *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usStateArrayOffset) + i * power_info->pplib.ucStateEntrySize); non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) + (power_state->v1.ucNonClockStateIndex * power_info->pplib.ucNonClockSize)); for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { clock_info = (union pplib_clock_info *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + (power_state->v1.ucClockStateIndices[j] * power_info->pplib.ucClockInfoSize)); valid = radeon_atombios_parse_pplib_clock_info(rdev, state_index, mode_index, clock_info); if (valid) mode_index++; } rdev->pm.power_state[state_index].num_clock_modes = mode_index; if (mode_index) { radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index, non_clock_info); state_index++; } } /* if multiple clock modes, mark the lowest as no display */ for (i = 0; i < state_index; i++) { if (rdev->pm.power_state[i].num_clock_modes > 1) 
rdev->pm.power_state[i].clock_info[0].flags |= RADEON_PM_MODE_NO_DISPLAY; } /* first mode is usually default */ if (rdev->pm.default_power_state_index == -1) { rdev->pm.power_state[0].type = POWER_STATE_TYPE_DEFAULT; rdev->pm.default_power_state_index = 0; rdev->pm.power_state[0].default_clock_mode = &rdev->pm.power_state[0].clock_info[0]; } return state_index; } static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) { struct radeon_mode_info *mode_info = &rdev->mode_info; struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; union pplib_power_state *power_state; int i, j, non_clock_array_index, clock_array_index; int state_index = 0, mode_index = 0; union pplib_clock_info *clock_info; struct StateArray *state_array; struct ClockInfoArray *clock_info_array; struct NonClockInfoArray *non_clock_info_array; bool valid; union power_info *power_info; int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); u16 data_offset; u8 frev, crev; if (!atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) return state_index; power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); state_array = (struct StateArray *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usStateArrayOffset)); clock_info_array = (struct ClockInfoArray *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); non_clock_info_array = (struct NonClockInfoArray *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * state_array->ucNumEntries, GFP_KERNEL); if (!rdev->pm.power_state) return state_index; for (i = 0; i < state_array->ucNumEntries; i++) { mode_index = 0; power_state = (union pplib_power_state *)&state_array->states[i]; /* XXX this might be 
an inagua bug... */ non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) &non_clock_info_array->nonClockInfo[non_clock_array_index]; for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { clock_array_index = power_state->v2.clockInfoIndex[j]; /* XXX this might be an inagua bug... */ if (clock_array_index >= clock_info_array->ucNumEntries) continue; clock_info = (union pplib_clock_info *) &clock_info_array->clockInfo[clock_array_index]; valid = radeon_atombios_parse_pplib_clock_info(rdev, state_index, mode_index, clock_info); if (valid) mode_index++; } rdev->pm.power_state[state_index].num_clock_modes = mode_index; if (mode_index) { radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index, non_clock_info); state_index++; } } /* if multiple clock modes, mark the lowest as no display */ for (i = 0; i < state_index; i++) { if (rdev->pm.power_state[i].num_clock_modes > 1) rdev->pm.power_state[i].clock_info[0].flags |= RADEON_PM_MODE_NO_DISPLAY; } /* first mode is usually default */ if (rdev->pm.default_power_state_index == -1) { rdev->pm.power_state[0].type = POWER_STATE_TYPE_DEFAULT; rdev->pm.default_power_state_index = 0; rdev->pm.power_state[0].default_clock_mode = &rdev->pm.power_state[0].clock_info[0]; } return state_index; } void radeon_atombios_get_power_modes(struct radeon_device *rdev) { struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); u16 data_offset; u8 frev, crev; int state_index = 0; rdev->pm.default_power_state_index = -1; if (atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) { switch (frev) { case 1: case 2: case 3: state_index = radeon_atombios_parse_power_table_1_3(rdev); break; case 4: case 5: state_index = radeon_atombios_parse_power_table_4_5(rdev); break; case 6: state_index = radeon_atombios_parse_power_table_6(rdev); break; default: break; } } else { 
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); if (rdev->pm.power_state) { /* add the default mode */ rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_DEFAULT; rdev->pm.power_state[state_index].num_clock_modes = 1; rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; rdev->pm.power_state[state_index].pcie_lanes = 16; rdev->pm.default_power_state_index = state_index; rdev->pm.power_state[state_index].flags = 0; state_index++; } } rdev->pm.num_power_states = state_index; rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; rdev->pm.current_clock_mode_index = 0; rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; } void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) { DYNAMIC_CLOCK_GATING_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating); args.ucEnable = enable; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev) { GET_ENGINE_CLOCK_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock); atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); return le32_to_cpu(args.ulReturnEngineClock); } uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev) { GET_MEMORY_CLOCK_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock); atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); return le32_to_cpu(args.ulReturnMemoryClock); } void radeon_atom_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock) { 
	/* (continued: radeon_atom_set_engine_clock body) */
	SET_ENGINE_CLOCK_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);

	args.ulTargetEngineClock = cpu_to_le32(eng_clock);	/* 10 khz */

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/* radeon_atom_set_memory_clock - program the memory clock (10 kHz units).
 * No-op on IGP parts, which use system memory. */
void radeon_atom_set_memory_clock(struct radeon_device *rdev,
				  uint32_t mem_clock)
{
	SET_MEMORY_CLOCK_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);

	if (rdev->flags & RADEON_IS_IGP)
		return;

	args.ulTargetMemoryClock = cpu_to_le32(mem_clock);	/* 10 khz */

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/* overlay of the SetVoltage command parameter revisions */
union set_voltage {
	struct _SET_VOLTAGE_PS_ALLOCATION alloc;
	struct _SET_VOLTAGE_PARAMETERS v1;
	struct _SET_VOLTAGE_PARAMETERS_V2 v2;
};

/* radeon_atom_set_voltage - program a voltage rail via the SetVoltage
 * command table.  crev 1 takes a voltage index; crev 2 a raw level. */
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type)
{
	union set_voltage args;
	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
	u8 frev, crev, volt_index = voltage_level;

	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
		return;

	switch (crev) {
	case 1:
		args.v1.ucVoltageType = voltage_type;
		args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
		args.v1.ucVoltageIndex = volt_index;
		break;
	case 2:
		args.v2.ucVoltageType = voltage_type;
		args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
		args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
		break;
	default:
		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
		return;
	}

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/* radeon_atom_initialize_bios_scratch_regs - set up the BIOS scratch
 * registers so the driver (not the BIOS) handles mode switches while the
 * BIOS keeps backlight control. */
void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	uint32_t bios_2_scratch, bios_6_scratch;

	/* scratch registers moved on R600+ */
	if (rdev->family >= CHIP_R600) {
		bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
		bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
	} else {
		bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
		bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
	}

	/* let the bios control the backlight */
	bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;

	/* tell the bios not to handle mode switching */
	bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;

	if (rdev->family >= CHIP_R600) {
		WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
	} else {
		WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
		WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
	}
}

/* radeon_save_bios_scratch_regs - snapshot all BIOS scratch registers
 * into rdev->bios_scratch (e.g. around suspend/VT switch). */
void radeon_save_bios_scratch_regs(struct radeon_device *rdev)
{
	uint32_t scratch_reg;
	int i;

	if (rdev->family >= CHIP_R600)
		scratch_reg = R600_BIOS_0_SCRATCH;
	else
		scratch_reg = RADEON_BIOS_0_SCRATCH;

	for (i = 0; i < RADEON_BIOS_NUM_SCRATCH; i++)
		rdev->bios_scratch[i] = RREG32(scratch_reg + (i * 4));
}

/* radeon_restore_bios_scratch_regs - write the saved snapshot back. */
void radeon_restore_bios_scratch_regs(struct radeon_device *rdev)
{
	uint32_t scratch_reg;
	int i;

	if (rdev->family >= CHIP_R600)
		scratch_reg = R600_BIOS_0_SCRATCH;
	else
		scratch_reg = RADEON_BIOS_0_SCRATCH;

	for (i = 0; i < RADEON_BIOS_NUM_SCRATCH; i++)
		WREG32(scratch_reg + (i * 4), rdev->bios_scratch[i]);
}

/* radeon_atom_output_lock - toggle the critical-state/ACC-mode bits so the
 * BIOS leaves the outputs alone while the driver reprograms them. */
void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t bios_6_scratch;

	if (rdev->family >= CHIP_R600)
		bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
	else
		bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);

	if (lock) {
		bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
		bios_6_scratch &= ~ATOM_S6_ACC_MODE;
	} else {
		bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
		bios_6_scratch |= ATOM_S6_ACC_MODE;
	}

	if (rdev->family >= CHIP_R600)
		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
	else
		WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
}

/* at some point we may want to break this out into individual functions */
/* radeon_atombios_connected_scratch_regs - mirror per-device connection
 * status into the BIOS scratch registers (S0 = detected, S3 = active,
 * S6 = access requested) for every device both the encoder and connector
 * support. */
void
radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
				       struct drm_encoder *encoder,
				       bool connected)
{
	struct drm_device *dev = connector->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	uint32_t bios_0_scratch, bios_3_scratch, bios_6_scratch;

	if (rdev->family >= CHIP_R600) {
		bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
		bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
		bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
	} else {
		bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
		bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH);
		bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
	}

	if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("TV1 connected\n");
			bios_3_scratch |= ATOM_S3_TV1_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_TV1;
		} else {
			DRM_DEBUG_KMS("TV1 disconnected\n");
			bios_0_scratch &= ~ATOM_S0_TV1_MASK;
			bios_3_scratch &= ~ATOM_S3_TV1_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_TV1;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("CV connected\n");
			bios_3_scratch |= ATOM_S3_CV_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_CV;
		} else {
			DRM_DEBUG_KMS("CV disconnected\n");
			bios_0_scratch &= ~ATOM_S0_CV_MASK;
			bios_3_scratch &= ~ATOM_S3_CV_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_CV;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("LCD1 connected\n");
			bios_0_scratch |= ATOM_S0_LCD1;
			bios_3_scratch |= ATOM_S3_LCD1_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_LCD1;
		} else {
			DRM_DEBUG_KMS("LCD1 disconnected\n");
			bios_0_scratch &= ~ATOM_S0_LCD1;
			bios_3_scratch &= ~ATOM_S3_LCD1_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_LCD1;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("CRT1 connected\n");
			bios_0_scratch |= ATOM_S0_CRT1_COLOR;
			bios_3_scratch |= ATOM_S3_CRT1_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_CRT1;
		} else {
			DRM_DEBUG_KMS("CRT1 disconnected\n");
			bios_0_scratch &= ~ATOM_S0_CRT1_MASK;
			bios_3_scratch &= ~ATOM_S3_CRT1_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT1;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("CRT2 connected\n");
			bios_0_scratch |= ATOM_S0_CRT2_COLOR;
			bios_3_scratch |= ATOM_S3_CRT2_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_CRT2;
		} else {
			DRM_DEBUG_KMS("CRT2 disconnected\n");
			bios_0_scratch &= ~ATOM_S0_CRT2_MASK;
			bios_3_scratch &= ~ATOM_S3_CRT2_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT2;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("DFP1 connected\n");
			bios_0_scratch |= ATOM_S0_DFP1;
			bios_3_scratch |= ATOM_S3_DFP1_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP1;
		} else {
			DRM_DEBUG_KMS("DFP1 disconnected\n");
			bios_0_scratch &= ~ATOM_S0_DFP1;
			bios_3_scratch &= ~ATOM_S3_DFP1_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP1;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("DFP2 connected\n");
			bios_0_scratch |= ATOM_S0_DFP2;
			bios_3_scratch |= ATOM_S3_DFP2_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP2;
		} else {
			DRM_DEBUG_KMS("DFP2 disconnected\n");
			bios_0_scratch &= ~ATOM_S0_DFP2;
			bios_3_scratch &= ~ATOM_S3_DFP2_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP2;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_DFP3_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("DFP3 connected\n");
			bios_0_scratch |= ATOM_S0_DFP3;
			bios_3_scratch |= ATOM_S3_DFP3_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP3;
		} else {
			DRM_DEBUG_KMS("DFP3 disconnected\n");
			bios_0_scratch &= ~ATOM_S0_DFP3;
			bios_3_scratch &= ~ATOM_S3_DFP3_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP3;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_DFP4_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("DFP4 connected\n");
			bios_0_scratch |= ATOM_S0_DFP4;
			bios_3_scratch |= ATOM_S3_DFP4_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP4;
		} else {
			DRM_DEBUG_KMS("DFP4 disconnected\n");
			bios_0_scratch &= ~ATOM_S0_DFP4;
			bios_3_scratch &= ~ATOM_S3_DFP4_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP4;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_DFP5_SUPPORT)) {
		if (connected) {
			DRM_DEBUG_KMS("DFP5 connected\n");
			bios_0_scratch |= ATOM_S0_DFP5;
			bios_3_scratch |= ATOM_S3_DFP5_ACTIVE;
			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP5;
		} else {
			DRM_DEBUG_KMS("DFP5 disconnected\n");
			bios_0_scratch &= ~ATOM_S0_DFP5;
			bios_3_scratch &= ~ATOM_S3_DFP5_ACTIVE;
			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
		}
	}

	if (rdev->family >= CHIP_R600) {
		WREG32(R600_BIOS_0_SCRATCH, bios_0_scratch);
		WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch);
		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
	} else {
		WREG32(RADEON_BIOS_0_SCRATCH, bios_0_scratch);
		WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch);
		WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
	}
}

/* radeon_atombios_encoder_crtc_scratch_regs - record which CRTC drives
 * each device supported by this encoder in BIOS scratch register 3
 * (fixed per-device bit positions). */
void
radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	uint32_t bios_3_scratch;

	if (rdev->family >= CHIP_R600)
		bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
	else
		bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH);

	if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
		bios_3_scratch &= ~ATOM_S3_TV1_CRTC_ACTIVE;
		bios_3_scratch |= (crtc << 18);
	}
	if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
		bios_3_scratch &= ~ATOM_S3_CV_CRTC_ACTIVE;
		bios_3_scratch |= (crtc << 24);
	}
	if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
		bios_3_scratch &= ~ATOM_S3_CRT1_CRTC_ACTIVE;
		bios_3_scratch |= (crtc << 16);
	}
	if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
		bios_3_scratch &= ~ATOM_S3_CRT2_CRTC_ACTIVE;
		bios_3_scratch |= (crtc << 20);
	}
	if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
		bios_3_scratch &= ~ATOM_S3_LCD1_CRTC_ACTIVE;
		bios_3_scratch |= (crtc << 17);
	}
	if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
		bios_3_scratch &= ~ATOM_S3_DFP1_CRTC_ACTIVE;
		bios_3_scratch |= (crtc << 19);
	}
	if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
		bios_3_scratch &= ~ATOM_S3_DFP2_CRTC_ACTIVE;
		bios_3_scratch |= (crtc << 23);
	}
	if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) {
		bios_3_scratch &= ~ATOM_S3_DFP3_CRTC_ACTIVE;
		bios_3_scratch |= (crtc << 25);
	}

	if (rdev->family >= CHIP_R600)
		WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch);
	else
		WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch);
}

/* radeon_atombios_encoder_dpms_scratch_regs - reflect per-device DPMS
 * state in BIOS scratch register 2.  NOTE: a set DPMS_STATE bit means
 * the device is OFF; the function continues past this chunk. */
void
radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	uint32_t bios_2_scratch;

	if (rdev->family >= CHIP_R600)
		bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
	else
		bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);

	if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
		if (on)
			bios_2_scratch &= ~ATOM_S2_TV1_DPMS_STATE;
		else
			bios_2_scratch |= ATOM_S2_TV1_DPMS_STATE;
	}
	if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
		if (on)
			bios_2_scratch &= ~ATOM_S2_CV_DPMS_STATE;
		else
			bios_2_scratch |= ATOM_S2_CV_DPMS_STATE;
	}
	if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
		if (on)
			bios_2_scratch &= ~ATOM_S2_CRT1_DPMS_STATE;
		else
			bios_2_scratch |= ATOM_S2_CRT1_DPMS_STATE;
	}
	if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
		if (on)
			bios_2_scratch &= ~ATOM_S2_CRT2_DPMS_STATE;
		else
			bios_2_scratch |= ATOM_S2_CRT2_DPMS_STATE;
	}
	if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
		if (on)
			bios_2_scratch &= ~ATOM_S2_LCD1_DPMS_STATE;
		else
			bios_2_scratch |= ATOM_S2_LCD1_DPMS_STATE;
	}
	if (radeon_encoder->devices
& ATOM_DEVICE_DFP1_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_DFP1_DPMS_STATE; else bios_2_scratch |= ATOM_S2_DFP1_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_DFP2_DPMS_STATE; else bios_2_scratch |= ATOM_S2_DFP2_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_DFP3_DPMS_STATE; else bios_2_scratch |= ATOM_S2_DFP3_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_DFP4_DPMS_STATE; else bios_2_scratch |= ATOM_S2_DFP4_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_DFP5_DPMS_STATE; else bios_2_scratch |= ATOM_S2_DFP5_DPMS_STATE; } if (rdev->family >= CHIP_R600) WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); else WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch); }
gpl-2.0
xin3liang/device_ti_bootloader_uboot
drivers/video/sed13806.c
149
9928
/*
 * (C) Copyright 2002
 * Stäubli Faverges - <www.staubli.com>
 * Pierre AUBERT  p.aubert@staubli.com
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

/* Video support for Epson SED13806 chipset                                  */

#include <common.h>

#include <video_fb.h>
#include <sed13806.h>

/* 8-bit MMIO accessors relative to the chip's base address. */
#define readByte(ptrReg)                \
    *(volatile unsigned char *)(sed13806.isaBase + ptrReg)

#define writeByte(ptrReg,value) \
    *(volatile unsigned char *)(sed13806.isaBase + ptrReg) = value

#ifdef CONFIG_TOTAL5200
#define writeWord(ptrReg,value) \
    (*(volatile unsigned short *)(sed13806.isaBase + ptrReg) = value)
#else
/*
 * 16-bit store with the two bytes swapped — presumably compensating for a
 * big-endian host writing to the little-endian chip; confirm against the
 * board's bus wiring.
 */
#define writeWord(ptrReg,value) \
    (*(volatile unsigned short *)(sed13806.isaBase + ptrReg) = ((value >> 8 ) & 0xff) | ((value << 8) & 0xff00))
#endif

/* Single global device descriptor; this driver supports one controller. */
GraphicDevice sed13806;

/*-----------------------------------------------------------------------------
 * EpsonSetRegs -- program the board-specific init table into the chipset
 *-----------------------------------------------------------------------------
 */
static void EpsonSetRegs (void)
{
    /* the content of the chipset register depends on the board (clocks, ...)*/
    const S1D_REGS *preg = board_get_regs ();

    /* Table is terminated by an entry whose Index is 0. */
    while (preg -> Index) {
	writeByte (preg -> Index, preg -> Value);
	preg ++;
    }
}

/*-----------------------------------------------------------------------------
 * video_hw_init -- map the controller, program it, and clear the frame buffer
 *
 * Returns a pointer to the filled-in GraphicDevice, or NULL when the board
 * code cannot provide a base address.
 *-----------------------------------------------------------------------------
 */
void *video_hw_init (void)
{
    unsigned int *vm, i;

    memset (&sed13806, 0, sizeof (GraphicDevice));

    /* Initialization of the access to the graphic chipset
       Retreive base address of the chipset
       (see board/RPXClassic/eccx.c) */
    if ((sed13806.isaBase = board_video_init ()) == 0) {
	return (NULL);
    }

    sed13806.frameAdrs = sed13806.isaBase + FRAME_BUFFER_OFFSET;
    sed13806.winSizeX = board_get_width ();
    sed13806.winSizeY = board_get_height ();

    /* Pixel format is a compile-time choice. */
#if defined(CONFIG_VIDEO_SED13806_8BPP)
    sed13806.gdfIndex = GDF__8BIT_INDEX;
    sed13806.gdfBytesPP = 1;

#elif defined(CONFIG_VIDEO_SED13806_16BPP)
    sed13806.gdfIndex = GDF_16BIT_565RGB;
    sed13806.gdfBytesPP = 2;
#else
#error Unsupported SED13806 BPP
#endif

    sed13806.memSize = sed13806.winSizeX * sed13806.winSizeY * sed13806.gdfBytesPP;

    /* Load SED registers */
    EpsonSetRegs ();

    /* (see board/RPXClassic/RPXClassic.c) */
    board_validate_screen (sed13806.isaBase);

    /* Clear video memory (word-at-a-time; memSize assumed 4-byte multiple) */
    i = sed13806.memSize/4;
    vm = (unsigned int *)sed13806.frameAdrs;
    while(i--)
	*vm++ = 0;

    return (&sed13806);
}
/*-----------------------------------------------------------------------------
 * Epson_wait_idle -- Wait for hardware to become idle
 *
 * Busy-polls the BitBLT busy bit (bit 7 of BLT_CTRL0); no timeout.
 *-----------------------------------------------------------------------------
 */
static void Epson_wait_idle (void)
{
    while (readByte (BLT_CTRL0) & 0x80);

    /* Read a word in the BitBLT memory area to shutdown the BitBLT engine */
    *(volatile unsigned short *)(sed13806.isaBase + BLT_REG);
}

/*-----------------------------------------------------------------------------
 * video_hw_bitblt -- copy a rectangle within the frame buffer using the
 * controller's BitBLT engine (positive-direction move with ROP 0x0C = copy
 * source — NOTE(review): ROP meaning taken from the inline comment, verify
 * against the S1D13806 datasheet).
 *-----------------------------------------------------------------------------
 */
void video_hw_bitblt (
    unsigned int bpp,             /* bytes per pixel */
    unsigned int src_x,           /* source pos x */
    unsigned int src_y,           /* source pos y */
    unsigned int dst_x,           /* dest pos x */
    unsigned int dst_y,           /* dest pos y */
    unsigned int dim_x,           /* frame width */
    unsigned int dim_y            /* frame height */
    )
{
    register GraphicDevice *pGD = (GraphicDevice *)&sed13806;
    unsigned long srcAddr, dstAddr;
    unsigned int stride = bpp * pGD -> winSizeX;

    /* Linear frame-buffer offsets of the two corners. */
    srcAddr = (src_y * stride) + (src_x * bpp);
    dstAddr = (dst_y * stride) + (dst_x * bpp);

    Epson_wait_idle ();

    writeByte(BLT_ROP,0x0C);     /* source */
    writeByte(BLT_OP,0x02);/* move blit in positive direction with ROP */
    writeWord(BLT_MEM_OFF0, stride / 2);

    /* Color depth select: 0 = 8 bpp, 1 = 16 bpp. */
    if (pGD -> gdfIndex == GDF__8BIT_INDEX) {
	writeByte(BLT_CTRL1,0x00);
    }
    else {
	writeByte(BLT_CTRL1,0x01);
    }

    writeWord(BLT_WIDTH0,(dim_x - 1));
    writeWord(BLT_HEIGHT0,(dim_y - 1));

    /* set up blit registers (24-bit addresses split into 3 bytes) */
    writeByte(BLT_SRC_ADDR0,srcAddr);
    writeByte(BLT_SRC_ADDR1,srcAddr>>8);
    writeByte(BLT_SRC_ADDR2,srcAddr>>16);

    writeByte(BLT_DST_ADDR0,dstAddr);
    writeByte(BLT_DST_ADDR1,dstAddr>>8);
    writeByte(BLT_DST_ADDR2,dstAddr>>16);

    /* Engage the blt engine */
    /* rectangular region for src and dst */
    writeByte(BLT_CTRL0,0x80);

    /* wait untill current blits finished */
    Epson_wait_idle ();
}
/*-----------------------------------------------------------------------------
 * video_hw_rectfill -- solid-fill a rectangle using the BitBLT engine
 * (operation 0x0C = solid fill per the inline comment).
 *-----------------------------------------------------------------------------
 */
void video_hw_rectfill (
    unsigned int bpp,             /* bytes per pixel */
    unsigned int dst_x,           /* dest pos x */
    unsigned int dst_y,           /* dest pos y */
    unsigned int dim_x,           /* frame width */
    unsigned int dim_y,           /* frame height */
    unsigned int color            /* fill color */
     )
{
    register GraphicDevice *pGD = (GraphicDevice *)&sed13806;
    unsigned long dstAddr;
    unsigned int stride = bpp * pGD -> winSizeX;

    dstAddr = (dst_y * stride) + (dst_x * bpp);

    Epson_wait_idle ();

    /* set up blit registers */
    writeByte(BLT_DST_ADDR0,dstAddr);
    writeByte(BLT_DST_ADDR1,dstAddr>>8);
    writeByte(BLT_DST_ADDR2,dstAddr>>16);

    writeWord(BLT_WIDTH0,(dim_x - 1));
    writeWord(BLT_HEIGHT0,(dim_y - 1));
    writeWord(BLT_FGCOLOR0,color);

    writeByte(BLT_OP,0x0C); /* solid fill */
    writeWord(BLT_MEM_OFF0,stride / 2);

    /* Color depth select: 0 = 8 bpp, 1 = 16 bpp. */
    if (pGD -> gdfIndex == GDF__8BIT_INDEX) {
	writeByte(BLT_CTRL1,0x00);
    }
    else {
	writeByte(BLT_CTRL1,0x01);
    }

    /* Engage the blt engine */
    /* rectangular region for src and dst */
    writeByte(BLT_CTRL0,0x80);

    /* wait untill current blits finished */
    Epson_wait_idle ();
}
/*-----------------------------------------------------------------------------
 * video_set_lut -- program one palette entry (auto-incrementing data port:
 * three successive writes deliver R, G, B).
 *-----------------------------------------------------------------------------
 */
void video_set_lut (
    unsigned int index,           /* color number */
    unsigned char r,              /* red */
    unsigned char g,              /* green */
    unsigned char b               /* blue */
    )
{
    writeByte(REG_LUT_ADDR, index );
    writeByte(REG_LUT_DATA, r);
    writeByte(REG_LUT_DATA, g);
    writeByte(REG_LUT_DATA, b);
}
#ifdef CONFIG_VIDEO_HW_CURSOR
/*-----------------------------------------------------------------------------
 * video_set_hw_cursor -- position the hardware cursor (16-bit x/y split into
 * low/high register bytes).
 *-----------------------------------------------------------------------------
 */
void video_set_hw_cursor (int x, int y)
{
    writeByte (LCD_CURSOR_XL, (x & 0xff));
    writeByte (LCD_CURSOR_XM, (x >> 8));
    writeByte (LCD_CURSOR_YL, (y & 0xff));
    writeByte (LCD_CURSOR_YM, (y >> 8));
}

/*-----------------------------------------------------------------------------
 * video_init_hw_cursor -- build a font_width x font_height cursor bitmap
 * in the 64x64, 2-bits-per-pixel cursor memory and enable cursor mode.
 *-----------------------------------------------------------------------------
 */
void video_init_hw_cursor (int font_width, int font_height)
{
    volatile unsigned char *ptr;
    unsigned char pattern;
    int i;


    /* Init cursor content
       Cursor size is 64x64 pixels
       Start of the cursor memory depends on panel type (dual panel ...)    */
    /* LCD_CURSOR_START == 0 selects the default top-of-memory location;
       otherwise it is a block index in units of 8 KiB from the end. */
    if ((i = readByte (LCD_CURSOR_START)) == 0) {
	ptr = (unsigned char *)(sed13806.frameAdrs + DEFAULT_VIDEO_MEMORY_SIZE - HWCURSORSIZE);
    }
    else {
	ptr = (unsigned char *)(sed13806.frameAdrs + DEFAULT_VIDEO_MEMORY_SIZE - (i * 8192));
    }

    /* Fill the first line and the first empty line after cursor     */
    /* Pixels are packed 4-per-byte (2 bits each); 0x3 presumably means
       "inverted", 0x2 "background", and 0xaa a transparent row —
       TODO(review): confirm codes against the S1D13806 cursor format. */
    for (i = 0, pattern = 0; i < 64; i++) {
	if (i < font_width) {
	    /* Invert background */
	    pattern |= 0x3;
	}
	else {
	    /* Background */
	    pattern |= 0x2;
	}
	if ((i & 3) == 3) {
	    /* Byte is full: store it and the matching byte of the line
	       just below the glyph (each cursor line is 16 bytes). */
	    *ptr = pattern;
	    *(ptr + font_height * 16) = 0xaa;
	    ptr ++;
	    pattern = 0;
	}
	pattern <<= 2;
    }

    /* Duplicate this line */
    for (i = 1; i < font_height; i++) {
	memcpy ((void *)ptr, (void *)(ptr - 16), 16);
	ptr += 16;
    }

    /* Propagate the "empty" line down to row 63. */
    for (; i < 64; i++) {
	memcpy ((void *)(ptr + 16), (void *)ptr, 16);
	ptr += 16;
    }

    /* Select cursor mode */
    writeByte (LCD_CURSOR_CNTL, 1);
}
#endif
gpl-2.0
titanxxh/xengt-ha-kernel
drivers/usb/dwc2/hcd.c
149
85523
/* * hcd.c - DesignWare HS OTG Controller host-mode routines * * Copyright (C) 2004-2013 Synopsys, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The names of the above-listed copyright holders may not be used * to endorse or promote products derived from this software without * specific prior written permission. * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any * later version. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * This file contains the core HCD code, and implements the Linux hc_driver * API */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/hcd.h> #include <linux/usb/ch11.h> #include "core.h" #include "hcd.h" /** * dwc2_dump_channel_info() - Prints the state of a host channel * * @hsotg: Programming view of DWC_otg controller * @chan: Pointer to the channel to dump * * Must be called with interrupt disabled and spinlock held * * NOTE: This function will be removed once the peripheral controller code * is integrated and the driver is stable */ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) { #ifdef VERBOSE_DEBUG int num_channels = hsotg->core_params->host_channels; struct dwc2_qh *qh; u32 hcchar; u32 hcsplt; u32 hctsiz; u32 hc_dma; int i; if (chan == NULL) return; hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num)); hcsplt = readl(hsotg->regs + HCSPLT(chan->hc_num)); hctsiz = readl(hsotg->regs + HCTSIZ(chan->hc_num)); hc_dma = readl(hsotg->regs + HCDMA(chan->hc_num)); dev_dbg(hsotg->dev, " Assigned to channel %p:\n", chan); dev_dbg(hsotg->dev, " hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt); dev_dbg(hsotg->dev, " hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma); dev_dbg(hsotg->dev, " dev_addr: %d, ep_num: %d, ep_is_in: %d\n", chan->dev_addr, chan->ep_num, chan->ep_is_in); dev_dbg(hsotg->dev, " ep_type: %d\n", chan->ep_type); dev_dbg(hsotg->dev, " max_packet: %d\n", chan->max_packet); dev_dbg(hsotg->dev, " data_pid_start: %d\n", chan->data_pid_start); dev_dbg(hsotg->dev, " xfer_started: %d\n", chan->xfer_started); dev_dbg(hsotg->dev, " halt_status: %d\n", chan->halt_status); dev_dbg(hsotg->dev, " xfer_buf: %p\n", chan->xfer_buf); dev_dbg(hsotg->dev, " xfer_dma: %08lx\n", (unsigned long)chan->xfer_dma); dev_dbg(hsotg->dev, 
" xfer_len: %d\n", chan->xfer_len); dev_dbg(hsotg->dev, " qh: %p\n", chan->qh); dev_dbg(hsotg->dev, " NP inactive sched:\n"); list_for_each_entry(qh, &hsotg->non_periodic_sched_inactive, qh_list_entry) dev_dbg(hsotg->dev, " %p\n", qh); dev_dbg(hsotg->dev, " NP active sched:\n"); list_for_each_entry(qh, &hsotg->non_periodic_sched_active, qh_list_entry) dev_dbg(hsotg->dev, " %p\n", qh); dev_dbg(hsotg->dev, " Channels:\n"); for (i = 0; i < num_channels; i++) { struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i]; dev_dbg(hsotg->dev, " %2d: %p\n", i, chan); } #endif /* VERBOSE_DEBUG */ } /* * Processes all the URBs in a single list of QHs. Completes them with * -ETIMEDOUT and frees the QTD. * * Must be called with interrupt disabled and spinlock held */ static void dwc2_kill_urbs_in_qh_list(struct dwc2_hsotg *hsotg, struct list_head *qh_list) { struct dwc2_qh *qh, *qh_tmp; struct dwc2_qtd *qtd, *qtd_tmp; list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) { list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) { dwc2_host_complete(hsotg, qtd, -ETIMEDOUT); dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); } } } static void dwc2_qh_list_free(struct dwc2_hsotg *hsotg, struct list_head *qh_list) { struct dwc2_qtd *qtd, *qtd_tmp; struct dwc2_qh *qh, *qh_tmp; unsigned long flags; if (!qh_list->next) /* The list hasn't been initialized yet */ return; spin_lock_irqsave(&hsotg->lock, flags); /* Ensure there are no QTDs or URBs left */ dwc2_kill_urbs_in_qh_list(hsotg, qh_list); list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) { dwc2_hcd_qh_unlink(hsotg, qh); /* Free each QTD in the QH's QTD list */ list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); spin_unlock_irqrestore(&hsotg->lock, flags); dwc2_hcd_qh_free(hsotg, qh); spin_lock_irqsave(&hsotg->lock, flags); } spin_unlock_irqrestore(&hsotg->lock, flags); } /* * Responds with an error status of -ETIMEDOUT to all URBs in the 
non-periodic * and periodic schedules. The QTD associated with each URB is removed from * the schedule and freed. This function may be called when a disconnect is * detected or when the HCD is being stopped. * * Must be called with interrupt disabled and spinlock held */ static void dwc2_kill_all_urbs(struct dwc2_hsotg *hsotg) { dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_inactive); dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_active); dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_inactive); dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_ready); dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_assigned); dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_queued); } /** * dwc2_hcd_start() - Starts the HCD when switching to Host mode * * @hsotg: Pointer to struct dwc2_hsotg */ void dwc2_hcd_start(struct dwc2_hsotg *hsotg) { u32 hprt0; if (hsotg->op_state == OTG_STATE_B_HOST) { /* * Reset the port. During a HNP mode switch the reset * needs to occur within 1ms and have a duration of at * least 50ms. 
*/ hprt0 = dwc2_read_hprt0(hsotg); hprt0 |= HPRT0_RST; writel(hprt0, hsotg->regs + HPRT0); } queue_delayed_work(hsotg->wq_otg, &hsotg->start_work, msecs_to_jiffies(50)); } /* Must be called with interrupt disabled and spinlock held */ static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg) { int num_channels = hsotg->core_params->host_channels; struct dwc2_host_chan *channel; u32 hcchar; int i; if (hsotg->core_params->dma_enable <= 0) { /* Flush out any channel requests in slave mode */ for (i = 0; i < num_channels; i++) { channel = hsotg->hc_ptr_array[i]; if (!list_empty(&channel->hc_list_entry)) continue; hcchar = readl(hsotg->regs + HCCHAR(i)); if (hcchar & HCCHAR_CHENA) { hcchar &= ~(HCCHAR_CHENA | HCCHAR_EPDIR); hcchar |= HCCHAR_CHDIS; writel(hcchar, hsotg->regs + HCCHAR(i)); } } } for (i = 0; i < num_channels; i++) { channel = hsotg->hc_ptr_array[i]; if (!list_empty(&channel->hc_list_entry)) continue; hcchar = readl(hsotg->regs + HCCHAR(i)); if (hcchar & HCCHAR_CHENA) { /* Halt the channel */ hcchar |= HCCHAR_CHDIS; writel(hcchar, hsotg->regs + HCCHAR(i)); } dwc2_hc_cleanup(hsotg, channel); list_add_tail(&channel->hc_list_entry, &hsotg->free_hc_list); /* * Added for Descriptor DMA to prevent channel double cleanup in * release_channel_ddma(), which is called from ep_disable when * device disconnects */ channel->qh = NULL; } } /** * dwc2_hcd_disconnect() - Handles disconnect of the HCD * * @hsotg: Pointer to struct dwc2_hsotg * * Must be called with interrupt disabled and spinlock held */ void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg) { u32 intr; /* Set status flags for the hub driver */ hsotg->flags.b.port_connect_status_change = 1; hsotg->flags.b.port_connect_status = 0; /* * Shutdown any transfers in process by clearing the Tx FIFO Empty * interrupt mask and status bits and disabling subsequent host * channel interrupts. 
*/ intr = readl(hsotg->regs + GINTMSK); intr &= ~(GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT); writel(intr, hsotg->regs + GINTMSK); intr = GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT; writel(intr, hsotg->regs + GINTSTS); /* * Turn off the vbus power only if the core has transitioned to device * mode. If still in host mode, need to keep power on to detect a * reconnection. */ if (dwc2_is_device_mode(hsotg)) { if (hsotg->op_state != OTG_STATE_A_SUSPEND) { dev_dbg(hsotg->dev, "Disconnect: PortPower off\n"); writel(0, hsotg->regs + HPRT0); } dwc2_disable_host_interrupts(hsotg); } /* Respond with an error status to all URBs in the schedule */ dwc2_kill_all_urbs(hsotg); if (dwc2_is_host_mode(hsotg)) /* Clean up any host channels that were in use */ dwc2_hcd_cleanup_channels(hsotg); dwc2_host_disconnect(hsotg); } /** * dwc2_hcd_rem_wakeup() - Handles Remote Wakeup * * @hsotg: Pointer to struct dwc2_hsotg */ static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg) { if (hsotg->lx_state == DWC2_L2) hsotg->flags.b.port_suspend_change = 1; else hsotg->flags.b.port_l1_change = 1; } /** * dwc2_hcd_stop() - Halts the DWC_otg host mode operations in a clean manner * * @hsotg: Pointer to struct dwc2_hsotg * * Must be called with interrupt disabled and spinlock held */ void dwc2_hcd_stop(struct dwc2_hsotg *hsotg) { dev_dbg(hsotg->dev, "DWC OTG HCD STOP\n"); /* * The root hub should be disconnected before this function is called. * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue) * and the QH lists (via ..._hcd_endpoint_disable). 
*/ /* Turn off all host-specific interrupts */ dwc2_disable_host_interrupts(hsotg); /* Turn off the vbus power */ dev_dbg(hsotg->dev, "PortPower off\n"); writel(0, hsotg->regs + HPRT0); } static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg, struct dwc2_hcd_urb *urb, void **ep_handle, gfp_t mem_flags) { struct dwc2_qtd *qtd; unsigned long flags; u32 intr_mask; int retval; int dev_speed; if (!hsotg->flags.b.port_connect_status) { /* No longer connected */ dev_err(hsotg->dev, "Not connected\n"); return -ENODEV; } dev_speed = dwc2_host_get_speed(hsotg, urb->priv); /* Some configurations cannot support LS traffic on a FS root port */ if ((dev_speed == USB_SPEED_LOW) && (hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) && (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI)) { u32 hprt0 = readl(hsotg->regs + HPRT0); u32 prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT; if (prtspd == HPRT0_SPD_FULL_SPEED) return -ENODEV; } qtd = kzalloc(sizeof(*qtd), mem_flags); if (!qtd) return -ENOMEM; dwc2_hcd_qtd_init(qtd, urb); retval = dwc2_hcd_qtd_add(hsotg, qtd, (struct dwc2_qh **)ep_handle, mem_flags); if (retval) { dev_err(hsotg->dev, "DWC OTG HCD URB Enqueue failed adding QTD. 
Error status %d\n", retval); kfree(qtd); return retval; } intr_mask = readl(hsotg->regs + GINTMSK); if (!(intr_mask & GINTSTS_SOF)) { enum dwc2_transaction_type tr_type; if (qtd->qh->ep_type == USB_ENDPOINT_XFER_BULK && !(qtd->urb->flags & URB_GIVEBACK_ASAP)) /* * Do not schedule SG transactions until qtd has * URB_GIVEBACK_ASAP set */ return 0; spin_lock_irqsave(&hsotg->lock, flags); tr_type = dwc2_hcd_select_transactions(hsotg); if (tr_type != DWC2_TRANSACTION_NONE) dwc2_hcd_queue_transactions(hsotg, tr_type); spin_unlock_irqrestore(&hsotg->lock, flags); } return 0; } /* Must be called with interrupt disabled and spinlock held */ static int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg, struct dwc2_hcd_urb *urb) { struct dwc2_qh *qh; struct dwc2_qtd *urb_qtd; urb_qtd = urb->qtd; if (!urb_qtd) { dev_dbg(hsotg->dev, "## Urb QTD is NULL ##\n"); return -EINVAL; } qh = urb_qtd->qh; if (!qh) { dev_dbg(hsotg->dev, "## Urb QTD QH is NULL ##\n"); return -EINVAL; } urb->priv = NULL; if (urb_qtd->in_process && qh->channel) { dwc2_dump_channel_info(hsotg, qh->channel); /* The QTD is in process (it has been assigned to a channel) */ if (hsotg->flags.b.port_connect_status) /* * If still connected (i.e. in host mode), halt the * channel so it can be used for other transfers. If * no longer connected, the host registers can't be * written to halt the channel since the core is in * device mode. */ dwc2_hc_halt(hsotg, qh->channel, DWC2_HC_XFER_URB_DEQUEUE); } /* * Free the QTD and clean up the associated QH. Leave the QH in the * schedule if it has any remaining QTDs. 
*/ if (hsotg->core_params->dma_desc_enable <= 0) { u8 in_process = urb_qtd->in_process; dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh); if (in_process) { dwc2_hcd_qh_deactivate(hsotg, qh, 0); qh->channel = NULL; } else if (list_empty(&qh->qtd_list)) { dwc2_hcd_qh_unlink(hsotg, qh); } } else { dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh); } return 0; } /* Must NOT be called with interrupt disabled or spinlock held */ static int dwc2_hcd_endpoint_disable(struct dwc2_hsotg *hsotg, struct usb_host_endpoint *ep, int retry) { struct dwc2_qtd *qtd, *qtd_tmp; struct dwc2_qh *qh; unsigned long flags; int rc; spin_lock_irqsave(&hsotg->lock, flags); qh = ep->hcpriv; if (!qh) { rc = -EINVAL; goto err; } while (!list_empty(&qh->qtd_list) && retry--) { if (retry == 0) { dev_err(hsotg->dev, "## timeout in dwc2_hcd_endpoint_disable() ##\n"); rc = -EBUSY; goto err; } spin_unlock_irqrestore(&hsotg->lock, flags); usleep_range(20000, 40000); spin_lock_irqsave(&hsotg->lock, flags); qh = ep->hcpriv; if (!qh) { rc = -EINVAL; goto err; } } dwc2_hcd_qh_unlink(hsotg, qh); /* Free each QTD in the QH's QTD list */ list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); ep->hcpriv = NULL; spin_unlock_irqrestore(&hsotg->lock, flags); dwc2_hcd_qh_free(hsotg, qh); return 0; err: ep->hcpriv = NULL; spin_unlock_irqrestore(&hsotg->lock, flags); return rc; } /* Must be called with interrupt disabled and spinlock held */ static int dwc2_hcd_endpoint_reset(struct dwc2_hsotg *hsotg, struct usb_host_endpoint *ep) { struct dwc2_qh *qh = ep->hcpriv; if (!qh) return -EINVAL; qh->data_toggle = DWC2_HC_PID_DATA0; return 0; } /* * Initializes dynamic portions of the DWC_otg HCD state * * Must be called with interrupt disabled and spinlock held */ static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg) { struct dwc2_host_chan *chan, *chan_tmp; int num_channels; int i; hsotg->flags.d32 = 0; hsotg->non_periodic_qh_ptr = 
&hsotg->non_periodic_sched_active; if (hsotg->core_params->uframe_sched > 0) { hsotg->available_host_channels = hsotg->core_params->host_channels; } else { hsotg->non_periodic_channels = 0; hsotg->periodic_channels = 0; } /* * Put all channels in the free channel list and clean up channel * states */ list_for_each_entry_safe(chan, chan_tmp, &hsotg->free_hc_list, hc_list_entry) list_del_init(&chan->hc_list_entry); num_channels = hsotg->core_params->host_channels; for (i = 0; i < num_channels; i++) { chan = hsotg->hc_ptr_array[i]; list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list); dwc2_hc_cleanup(hsotg, chan); } /* Initialize the DWC core for host mode operation */ dwc2_core_host_init(hsotg); } static void dwc2_hc_init_split(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb) { int hub_addr, hub_port; chan->do_split = 1; chan->xact_pos = qtd->isoc_split_pos; chan->complete_split = qtd->complete_split; dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port); chan->hub_addr = (u8)hub_addr; chan->hub_port = (u8)hub_port; } static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, struct dwc2_qtd *qtd, void *bufptr) { struct dwc2_hcd_urb *urb = qtd->urb; struct dwc2_hcd_iso_packet_desc *frame_desc; switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) { case USB_ENDPOINT_XFER_CONTROL: chan->ep_type = USB_ENDPOINT_XFER_CONTROL; switch (qtd->control_phase) { case DWC2_CONTROL_SETUP: dev_vdbg(hsotg->dev, " Control setup transaction\n"); chan->do_ping = 0; chan->ep_is_in = 0; chan->data_pid_start = DWC2_HC_PID_SETUP; if (hsotg->core_params->dma_enable > 0) chan->xfer_dma = urb->setup_dma; else chan->xfer_buf = urb->setup_packet; chan->xfer_len = 8; bufptr = NULL; break; case DWC2_CONTROL_DATA: dev_vdbg(hsotg->dev, " Control data transaction\n"); chan->data_pid_start = qtd->data_toggle; break; case DWC2_CONTROL_STATUS: /* * Direction is opposite of data direction or IN if no * data */ 
			/*
			 * Status stage: direction is the opposite of the data
			 * stage, or IN when there was no data stage at all
			 */
			dev_vdbg(hsotg->dev, "  Control status transaction\n");

			if (urb->length == 0)
				chan->ep_is_in = 1;
			else
				chan->ep_is_in =
					dwc2_hcd_is_pipe_out(&urb->pipe_info);
			if (chan->ep_is_in)
				chan->do_ping = 0;
			/* Status stage always starts on DATA1 per USB 2.0 */
			chan->data_pid_start = DWC2_HC_PID_DATA1;
			chan->xfer_len = 0;
			/* Zero-length status uses the shared status buffer */
			if (hsotg->core_params->dma_enable > 0)
				chan->xfer_dma = hsotg->status_buf_dma;
			else
				chan->xfer_buf = hsotg->status_buf;
			bufptr = NULL;
			break;
		}
		break;

	case USB_ENDPOINT_XFER_BULK:
		chan->ep_type = USB_ENDPOINT_XFER_BULK;
		break;
	case USB_ENDPOINT_XFER_INT:
		chan->ep_type = USB_ENDPOINT_XFER_INT;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		chan->ep_type = USB_ENDPOINT_XFER_ISOC;
		/* Descriptor DMA sets up isoc transfers elsewhere */
		if (hsotg->core_params->dma_desc_enable > 0)
			break;

		frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
		frame_desc->status = 0;

		if (hsotg->core_params->dma_enable > 0) {
			chan->xfer_dma = urb->dma;
			chan->xfer_dma += frame_desc->offset +
					qtd->isoc_split_offset;
		} else {
			chan->xfer_buf = urb->buf;
			chan->xfer_buf += frame_desc->offset +
					qtd->isoc_split_offset;
		}

		chan->xfer_len = frame_desc->length - qtd->isoc_split_offset;

		/* For non-dword aligned buffers */
		if (hsotg->core_params->dma_enable > 0 &&
		    (chan->xfer_dma & 0x3))
			bufptr = (u8 *)urb->buf + frame_desc->offset +
					qtd->isoc_split_offset;
		else
			bufptr = NULL;

		if (chan->xact_pos == DWC2_HCSPLT_XACTPOS_ALL) {
			/*
			 * 188 bytes is the most that fits in a single
			 * start-split microframe; larger payloads must be
			 * split into begin/mid/end transactions
			 */
			if (chan->xfer_len <= 188)
				chan->xact_pos = DWC2_HCSPLT_XACTPOS_ALL;
			else
				chan->xact_pos = DWC2_HCSPLT_XACTPOS_BEGIN;
		}
		break;
	}

	return bufptr;
}

/*
 * Sets up an intermediate DWORD-aligned DMA bounce buffer on the QH for
 * transfers whose buffer is not 4-byte aligned (the core requires DWORD
 * alignment). Allocates the buffer on first use, copies OUT data into it,
 * and points chan->align_buf at its DMA address.
 *
 * Returns 0 on success or -ENOMEM if the bounce buffer cannot be allocated.
 */
static int dwc2_hc_setup_align_buf(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				   struct dwc2_host_chan *chan,
				   struct dwc2_hcd_urb *urb, void *bufptr)
{
	u32 buf_size;
	struct urb *usb_urb;
	struct usb_hcd *hcd;

	if (!qh->dw_align_buf) {
		if (chan->ep_type != USB_ENDPOINT_XFER_ISOC)
			buf_size = hsotg->core_params->max_transfer_size;
		else
			/* 3072 = 3 max-size Isoc packets */
			buf_size = 3072;

		/* GFP_ATOMIC: may be called from interrupt context */
		qh->dw_align_buf = dma_alloc_coherent(hsotg->dev, buf_size,
						      &qh->dw_align_buf_dma,
						      GFP_ATOMIC);
		if (!qh->dw_align_buf)
			return -ENOMEM;
		qh->dw_align_buf_size = buf_size;
	}

	if (chan->xfer_len) {
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		usb_urb = urb->priv;

		if (usb_urb) {
			/*
			 * Undo any core-layer DMA mapping so the CPU copy
			 * below sees coherent data
			 */
			if (usb_urb->transfer_flags &
			    (URB_SETUP_MAP_SINGLE | URB_DMA_MAP_SG |
			     URB_DMA_MAP_PAGE | URB_DMA_MAP_SINGLE)) {
				hcd = dwc2_hsotg_to_hcd(hsotg);
				usb_hcd_unmap_urb_for_dma(hcd, usb_urb);
			}

			/* For OUT, stage the payload into the bounce buffer */
			if (!chan->ep_is_in)
				memcpy(qh->dw_align_buf, bufptr,
				       chan->xfer_len);
		} else {
			dev_warn(hsotg->dev, "no URB in dwc2_urb\n");
		}
	}

	chan->align_buf = qh->dw_align_buf_dma;
	return 0;
}

/**
 * dwc2_assign_and_init_hc() - Assigns transactions from a QTD to a free host
 * channel and initializes the host channel to perform the transactions. The
 * host channel is removed from the free list.
 *
 * @hsotg: The HCD state structure
 * @qh:    Transactions from the first QTD for this QH are selected and
 *         assigned to a free host channel
 */
static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	struct dwc2_host_chan *chan;
	struct dwc2_hcd_urb *urb;
	struct dwc2_qtd *qtd;
	void *bufptr = NULL;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s(%p,%p)\n", __func__, hsotg, qh);

	if (list_empty(&qh->qtd_list)) {
		dev_dbg(hsotg->dev, "No QTDs in QH list\n");
		return -ENOMEM;
	}

	if (list_empty(&hsotg->free_hc_list)) {
		dev_dbg(hsotg->dev, "No free channel to assign\n");
		return -ENOMEM;
	}

	chan = list_first_entry(&hsotg->free_hc_list, struct dwc2_host_chan,
				hc_list_entry);

	/* Remove host channel from free list */
	list_del_init(&chan->hc_list_entry);

	qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
	urb = qtd->urb;
	qh->channel = chan;
	qtd->in_process = 1;

	/*
	 * Use usb_pipedevice to determine device address. This address is
	 * 0 before the SET_ADDRESS command and the correct address afterward.
	 */
	chan->dev_addr = dwc2_hcd_get_dev_addr(&urb->pipe_info);
	chan->ep_num = dwc2_hcd_get_ep_num(&urb->pipe_info);
	chan->speed = qh->dev_speed;
	chan->max_packet = dwc2_max_packet(qh->maxp);

	chan->xfer_started = 0;
	chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
	chan->error_state = (qtd->error_count > 0);
	chan->halt_on_queue = 0;
	chan->halt_pending = 0;
	chan->requests = 0;

	/*
	 * The following values may be modified in the transfer type section
	 * below. The xfer_len value may be reduced when the transfer is
	 * started to accommodate the max widths of the XferSize and PktCnt
	 * fields in the HCTSIZn register.
	 */

	chan->ep_is_in = (dwc2_hcd_is_pipe_in(&urb->pipe_info) != 0);
	if (chan->ep_is_in)
		chan->do_ping = 0;
	else
		chan->do_ping = qh->ping_state;

	chan->data_pid_start = qh->data_toggle;
	chan->multi_count = 1;

	/* Clamp actual_length for OUT so the remaining length can't go
	 * negative below */
	if (urb->actual_length > urb->length &&
	    !dwc2_hcd_is_pipe_in(&urb->pipe_info))
		urb->actual_length = urb->length;

	if (hsotg->core_params->dma_enable > 0) {
		chan->xfer_dma = urb->dma + urb->actual_length;

		/* For non-dword aligned case */
		if (hsotg->core_params->dma_desc_enable <= 0 &&
		    (chan->xfer_dma & 0x3))
			bufptr = (u8 *)urb->buf + urb->actual_length;
	} else {
		chan->xfer_buf = (u8 *)urb->buf + urb->actual_length;
	}

	chan->xfer_len = urb->length - urb->actual_length;
	chan->xfer_count = 0;

	/* Set the split attributes if required */
	if (qh->do_split)
		dwc2_hc_init_split(hsotg, chan, qtd, urb);
	else
		chan->do_split = 0;

	/* Set the transfer attributes */
	bufptr = dwc2_hc_init_xfer(hsotg, chan, qtd, bufptr);

	/* Non DWORD-aligned buffer case */
	if (bufptr) {
		dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
		if (dwc2_hc_setup_align_buf(hsotg, qh, chan, urb, bufptr)) {
			dev_err(hsotg->dev,
				"%s: Failed to allocate memory to handle non-dword aligned buffer\n",
				__func__);
			/* Add channel back to free list */
			chan->align_buf = 0;
			chan->multi_count = 0;
			list_add_tail(&chan->hc_list_entry,
				      &hsotg->free_hc_list);
			qtd->in_process = 0;
			qh->channel = NULL;
			return -ENOMEM;
		}
	} else {
		chan->align_buf = 0;
	}

	if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
	    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
		/*
		 * This value may be modified when the transfer is started
		 * to reflect the actual transfer length
		 */
		chan->multi_count = dwc2_hb_mult(qh->maxp);

	if (hsotg->core_params->dma_desc_enable > 0)
		chan->desc_list_addr = qh->desc_list_dma;

	/* Program the channel registers from the fields set up above */
	dwc2_hc_init(hsotg, chan);
	chan->qh = qh;

	return 0;
}

/**
 * dwc2_hcd_select_transactions() - Selects transactions from the HCD transfer
 * schedule and assigns them to available host channels. Called from the HCD
 * interrupt handler functions.
 *
 * @hsotg: The HCD state structure
 *
 * Return: The types of new transactions that were assigned to host channels
 */
enum dwc2_transaction_type dwc2_hcd_select_transactions(
		struct dwc2_hsotg *hsotg)
{
	enum dwc2_transaction_type ret_val = DWC2_TRANSACTION_NONE;
	struct list_head *qh_ptr;
	struct dwc2_qh *qh;
	int num_channels;

#ifdef DWC2_DEBUG_SOF
	dev_vdbg(hsotg->dev, "  Select Transactions\n");
#endif

	/* Process entries in the periodic ready list */
	qh_ptr = hsotg->periodic_sched_ready.next;
	while (qh_ptr != &hsotg->periodic_sched_ready) {
		if (list_empty(&hsotg->free_hc_list))
			break;
		if (hsotg->core_params->uframe_sched > 0) {
			/*
			 * <= 1 (not < 1): periodic transfers always leave one
			 * channel available for non-periodic traffic
			 */
			if (hsotg->available_host_channels <= 1)
				break;
			hsotg->available_host_channels--;
		}
		qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
		if (dwc2_assign_and_init_hc(hsotg, qh))
			break;

		/*
		 * Move the QH from the periodic ready schedule to the
		 * periodic assigned schedule
		 */
		qh_ptr = qh_ptr->next;
		list_move(&qh->qh_list_entry, &hsotg->periodic_sched_assigned);
		ret_val = DWC2_TRANSACTION_PERIODIC;
	}

	/*
	 * Process entries in the inactive portion of the non-periodic
	 * schedule. Some free host channels may not be used if they are
	 * reserved for periodic transfers.
	 */
	num_channels = hsotg->core_params->host_channels;
	qh_ptr = hsotg->non_periodic_sched_inactive.next;
	while (qh_ptr != &hsotg->non_periodic_sched_inactive) {
		/*
		 * Without the microframe scheduler, cap non-periodic usage at
		 * (total channels - channels claimed by periodic transfers)
		 */
		if (hsotg->core_params->uframe_sched <= 0 &&
		    hsotg->non_periodic_channels >= num_channels -
						hsotg->periodic_channels)
			break;
		if (list_empty(&hsotg->free_hc_list))
			break;
		qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
		if (hsotg->core_params->uframe_sched > 0) {
			if (hsotg->available_host_channels < 1)
				break;
			hsotg->available_host_channels--;
		}

		if (dwc2_assign_and_init_hc(hsotg, qh))
			break;

		/*
		 * Move the QH from the non-periodic inactive schedule to the
		 * non-periodic active schedule
		 */
		qh_ptr = qh_ptr->next;
		list_move(&qh->qh_list_entry,
			  &hsotg->non_periodic_sched_active);

		if (ret_val == DWC2_TRANSACTION_NONE)
			ret_val = DWC2_TRANSACTION_NON_PERIODIC;
		else
			ret_val = DWC2_TRANSACTION_ALL;

		if (hsotg->core_params->uframe_sched <= 0)
			hsotg->non_periodic_channels++;
	}

	return ret_val;
}

/**
 * dwc2_queue_transaction() - Attempts to queue a single transaction request for
 * a host channel associated with either a periodic or non-periodic transfer
 *
 * @hsotg: The HCD state structure
 * @chan:  Host channel descriptor associated with either a periodic or
 *         non-periodic transfer
 * @fifo_dwords_avail: Number of DWORDs available in the periodic Tx FIFO
 *                     for periodic transfers or the non-periodic Tx FIFO
 *                     for non-periodic transfers
 *
 * Return: 1 if a request is queued and more requests may be needed to
 * complete the transfer, 0 if no more requests are required for this
 * transfer, -1 if there is insufficient space in the Tx FIFO
 *
 * This function assumes that there is space available in the appropriate
 * request queue. For an OUT transfer or SETUP transaction in Slave mode,
 * it checks whether space is available in the appropriate Tx FIFO.
 *
 * Must be called with interrupt disabled and spinlock held
 */
static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg,
				  struct dwc2_host_chan *chan,
				  u16 fifo_dwords_avail)
{
	int retval = 0;

	if (hsotg->core_params->dma_enable > 0) {
		/* DMA mode: the controller moves the data itself */
		if (hsotg->core_params->dma_desc_enable > 0) {
			if (!chan->xfer_started ||
			    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
				dwc2_hcd_start_xfer_ddma(hsotg, chan->qh);
				chan->qh->ping_state = 0;
			}
		} else if (!chan->xfer_started) {
			dwc2_hc_start_transfer(hsotg, chan);
			chan->qh->ping_state = 0;
		}
	} else if (chan->halt_pending) {
		/* Don't queue a request if the channel has been halted */
	} else if (chan->halt_on_queue) {
		dwc2_hc_halt(hsotg, chan, chan->halt_status);
	} else if (chan->do_ping) {
		if (!chan->xfer_started)
			dwc2_hc_start_transfer(hsotg, chan);
	} else if (!chan->ep_is_in ||
		   chan->data_pid_start == DWC2_HC_PID_SETUP) {
		/*
		 * Slave-mode OUT/SETUP: CPU must push data, so a full
		 * max-packet must fit in the Tx FIFO first
		 */
		if ((fifo_dwords_avail * 4) >= chan->max_packet) {
			if (!chan->xfer_started) {
				dwc2_hc_start_transfer(hsotg, chan);
				retval = 1;
			} else {
				retval = dwc2_hc_continue_transfer(hsotg,
								   chan);
			}
		} else {
			retval = -1;
		}
	} else {
		/* Slave-mode IN: no Tx FIFO space needed */
		if (!chan->xfer_started) {
			dwc2_hc_start_transfer(hsotg, chan);
			retval = 1;
		} else {
			retval = dwc2_hc_continue_transfer(hsotg, chan);
		}
	}

	return retval;
}

/*
 * Processes periodic channels for the next frame and queues transactions for
 * these channels to the DWC_otg controller. After queueing transactions, the
 * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions
 * to queue as Periodic Tx FIFO or request queue space becomes available.
 * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled.
 *
 * Must be called with interrupt disabled and spinlock held
 */
static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
{
	struct list_head *qh_ptr;
	struct dwc2_qh *qh;
	u32 tx_status;
	u32 fspcavail;
	u32 gintmsk;
	int status;
	int no_queue_space = 0;
	int no_fifo_space = 0;
	u32 qspcavail;

	if (dbg_perio())
		dev_vdbg(hsotg->dev, "Queue periodic transactions\n");

	/* Snapshot periodic request-queue and FIFO space from HPTXSTS */
	tx_status = readl(hsotg->regs + HPTXSTS);
	qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
		    TXSTS_QSPCAVAIL_SHIFT;
	fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
		    TXSTS_FSPCAVAIL_SHIFT;

	if (dbg_perio()) {
		dev_vdbg(hsotg->dev, "  P Tx Req Queue Space Avail (before queue): %d\n",
			 qspcavail);
		dev_vdbg(hsotg->dev, "  P Tx FIFO Space Avail (before queue): %d\n",
			 fspcavail);
	}

	qh_ptr = hsotg->periodic_sched_assigned.next;
	while (qh_ptr != &hsotg->periodic_sched_assigned) {
		/* Re-read: queue space shrinks as requests are posted */
		tx_status = readl(hsotg->regs + HPTXSTS);
		qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
			    TXSTS_QSPCAVAIL_SHIFT;
		if (qspcavail == 0) {
			no_queue_space = 1;
			break;
		}

		qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
		if (!qh->channel) {
			qh_ptr = qh_ptr->next;
			continue;
		}

		/* Make sure EP's TT buffer is clean before queueing qtds */
		if (qh->tt_buffer_dirty) {
			qh_ptr = qh_ptr->next;
			continue;
		}

		/*
		 * Set a flag if we're queuing high-bandwidth in slave mode.
		 * The flag prevents any halts to get into the request queue in
		 * the middle of multiple high-bandwidth packets getting queued.
		 */
		if (hsotg->core_params->dma_enable <= 0 &&
		    qh->channel->multi_count > 1)
			hsotg->queuing_high_bandwidth = 1;

		fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
			    TXSTS_FSPCAVAIL_SHIFT;
		status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);
		if (status < 0) {
			no_fifo_space = 1;
			break;
		}

		/*
		 * In Slave mode, stay on the current transfer until there is
		 * nothing more to do or the high-bandwidth request count is
		 * reached. In DMA mode, only need to queue one request. The
		 * controller automatically handles multiple packets for
		 * high-bandwidth transfers.
		 */
		if (hsotg->core_params->dma_enable > 0 || status == 0 ||
		    qh->channel->requests == qh->channel->multi_count) {
			qh_ptr = qh_ptr->next;
			/*
			 * Move the QH from the periodic assigned schedule to
			 * the periodic queued schedule
			 */
			list_move(&qh->qh_list_entry,
				  &hsotg->periodic_sched_queued);

			/* done queuing high bandwidth */
			hsotg->queuing_high_bandwidth = 0;
		}
	}

	/* Slave mode only: manage the periodic Tx FIFO empty interrupt */
	if (hsotg->core_params->dma_enable <= 0) {
		tx_status = readl(hsotg->regs + HPTXSTS);
		qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
			    TXSTS_QSPCAVAIL_SHIFT;
		fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
			    TXSTS_FSPCAVAIL_SHIFT;
		if (dbg_perio()) {
			dev_vdbg(hsotg->dev,
				 "  P Tx Req Queue Space Avail (after queue): %d\n",
				 qspcavail);
			dev_vdbg(hsotg->dev,
				 "  P Tx FIFO Space Avail (after queue): %d\n",
				 fspcavail);
		}

		if (!list_empty(&hsotg->periodic_sched_assigned) ||
		    no_queue_space || no_fifo_space) {
			/*
			 * May need to queue more transactions as the request
			 * queue or Tx FIFO empties. Enable the periodic Tx
			 * FIFO empty interrupt. (Always use the half-empty
			 * level to ensure that new requests are loaded as
			 * soon as possible.)
			 */
			gintmsk = readl(hsotg->regs + GINTMSK);
			gintmsk |= GINTSTS_PTXFEMP;
			writel(gintmsk, hsotg->regs + GINTMSK);
		} else {
			/*
			 * Disable the Tx FIFO empty interrupt since there are
			 * no more transactions that need to be queued right
			 * now. This function is called from interrupt
			 * handlers to queue more transactions as transfer
			 * states change.
			 */
			gintmsk = readl(hsotg->regs + GINTMSK);
			gintmsk &= ~GINTSTS_PTXFEMP;
			writel(gintmsk, hsotg->regs + GINTMSK);
		}
	}
}

/*
 * Processes active non-periodic channels and queues transactions for these
 * channels to the DWC_otg controller. After queueing transactions, the NP Tx
 * FIFO Empty interrupt is enabled if there are more transactions to queue as
 * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx
 * FIFO Empty interrupt is disabled.
 *
 * Must be called with interrupt disabled and spinlock held
 */
static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
{
	struct list_head *orig_qh_ptr;
	struct dwc2_qh *qh;
	u32 tx_status;
	u32 qspcavail;
	u32 fspcavail;
	u32 gintmsk;
	int status;
	int no_queue_space = 0;
	int no_fifo_space = 0;
	int more_to_do = 0;

	dev_vdbg(hsotg->dev, "Queue non-periodic transactions\n");

	/* Snapshot non-periodic request-queue and FIFO space from GNPTXSTS */
	tx_status = readl(hsotg->regs + GNPTXSTS);
	qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
		    TXSTS_QSPCAVAIL_SHIFT;
	fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
		    TXSTS_FSPCAVAIL_SHIFT;
	dev_vdbg(hsotg->dev, "  NP Tx Req Queue Space Avail (before queue): %d\n",
		 qspcavail);
	dev_vdbg(hsotg->dev, "  NP Tx FIFO Space Avail (before queue): %d\n",
		 fspcavail);

	/*
	 * Keep track of the starting point. Skip over the start-of-list
	 * entry.
	 */
	if (hsotg->non_periodic_qh_ptr == &hsotg->non_periodic_sched_active)
		hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next;
	orig_qh_ptr = hsotg->non_periodic_qh_ptr;

	/*
	 * Process once through the active list or until no more space is
	 * available in the request queue or the Tx FIFO.
	 * non_periodic_qh_ptr persists across calls, giving round-robin
	 * fairness between the active QHs.
	 */
	do {
		tx_status = readl(hsotg->regs + GNPTXSTS);
		qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
			    TXSTS_QSPCAVAIL_SHIFT;
		if (hsotg->core_params->dma_enable <= 0 && qspcavail == 0) {
			no_queue_space = 1;
			break;
		}

		qh = list_entry(hsotg->non_periodic_qh_ptr, struct dwc2_qh,
				qh_list_entry);
		if (!qh->channel)
			goto next;

		/* Make sure EP's TT buffer is clean before queueing qtds */
		if (qh->tt_buffer_dirty)
			goto next;

		fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
			    TXSTS_FSPCAVAIL_SHIFT;
		status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);

		if (status > 0) {
			more_to_do = 1;
		} else if (status < 0) {
			no_fifo_space = 1;
			break;
		}
next:
		/* Advance to next QH, skipping start-of-list entry */
		hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next;
		if (hsotg->non_periodic_qh_ptr ==
				&hsotg->non_periodic_sched_active)
			hsotg->non_periodic_qh_ptr =
					hsotg->non_periodic_qh_ptr->next;
	} while (hsotg->non_periodic_qh_ptr != orig_qh_ptr);

	/* Slave mode only: manage the NP Tx FIFO empty interrupt */
	if (hsotg->core_params->dma_enable <= 0) {
		tx_status = readl(hsotg->regs + GNPTXSTS);
		qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
			    TXSTS_QSPCAVAIL_SHIFT;
		fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
			    TXSTS_FSPCAVAIL_SHIFT;
		dev_vdbg(hsotg->dev,
			 "  NP Tx Req Queue Space Avail (after queue): %d\n",
			 qspcavail);
		dev_vdbg(hsotg->dev,
			 "  NP Tx FIFO Space Avail (after queue): %d\n",
			 fspcavail);

		if (more_to_do || no_queue_space || no_fifo_space) {
			/*
			 * May need to queue more transactions as the request
			 * queue or Tx FIFO empties. Enable the non-periodic
			 * Tx FIFO empty interrupt. (Always use the half-empty
			 * level to ensure that new requests are loaded as
			 * soon as possible.)
			 */
			gintmsk = readl(hsotg->regs + GINTMSK);
			gintmsk |= GINTSTS_NPTXFEMP;
			writel(gintmsk, hsotg->regs + GINTMSK);
		} else {
			/*
			 * Disable the Tx FIFO empty interrupt since there are
			 * no more transactions that need to be queued right
			 * now. This function is called from interrupt
			 * handlers to queue more transactions as transfer
			 * states change.
			 */
			gintmsk = readl(hsotg->regs + GINTMSK);
			gintmsk &= ~GINTSTS_NPTXFEMP;
			writel(gintmsk, hsotg->regs + GINTMSK);
		}
	}
}

/**
 * dwc2_hcd_queue_transactions() - Processes the currently active host channels
 * and queues transactions for these channels to the DWC_otg controller. Called
 * from the HCD interrupt handler functions.
 *
 * @hsotg:   The HCD state structure
 * @tr_type: The type(s) of transactions to queue (non-periodic, periodic,
 *           or both)
 *
 * Must be called with interrupt disabled and spinlock held
 */
void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
				 enum dwc2_transaction_type tr_type)
{
#ifdef DWC2_DEBUG_SOF
	dev_vdbg(hsotg->dev, "Queue Transactions\n");
#endif
	/* Process host channels associated with periodic transfers */
	if ((tr_type == DWC2_TRANSACTION_PERIODIC ||
	     tr_type == DWC2_TRANSACTION_ALL) &&
	    !list_empty(&hsotg->periodic_sched_assigned))
		dwc2_process_periodic_channels(hsotg);

	/* Process host channels associated with non-periodic transfers */
	if (tr_type == DWC2_TRANSACTION_NON_PERIODIC ||
	    tr_type == DWC2_TRANSACTION_ALL) {
		if (!list_empty(&hsotg->non_periodic_sched_active)) {
			dwc2_process_non_periodic_channels(hsotg);
		} else {
			/*
			 * Ensure NP Tx FIFO empty interrupt is disabled when
			 * there are no non-periodic transfers to process
			 */
			u32 gintmsk = readl(hsotg->regs + GINTMSK);

			gintmsk &= ~GINTSTS_NPTXFEMP;
			writel(gintmsk, hsotg->regs + GINTMSK);
		}
	}
}

/*
 * Work-queue handler for an OTG connector-ID status change: waits for the
 * core to settle into the new role (A-host or B-peripheral), then
 * re-initializes the core for that role. Sleeps, so it must run from
 * process context (scheduled on hsotg->wf_otg).
 */
static void dwc2_conn_id_status_change(struct work_struct *work)
{
	struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
						wf_otg);
	u32 count = 0;
	u32 gotgctl;

	dev_dbg(hsotg->dev, "%s()\n", __func__);

	gotgctl = readl(hsotg->regs + GOTGCTL);
	dev_dbg(hsotg->dev, "gotgctl=%0x\n", gotgctl);
	dev_dbg(hsotg->dev, "gotgctl.b.conidsts=%d\n",
		!!(gotgctl & GOTGCTL_CONID_B));

	/* B-Device connector (Device Mode) */
	if (gotgctl & GOTGCTL_CONID_B) {
		/*
		 * Wait for switch to device mode; poll every ~20-40 ms,
		 * give up after 250 iterations
		 */
		dev_dbg(hsotg->dev, "connId B\n");
		while (!dwc2_is_device_mode(hsotg)) {
			dev_info(hsotg->dev,
				 "Waiting for Peripheral Mode, Mode=%s\n",
				 dwc2_is_host_mode(hsotg) ?
				 "Host" : "Peripheral");
			usleep_range(20000, 40000);
			if (++count > 250)
				break;
		}
		if (count > 250)
			dev_err(hsotg->dev,
				"Connection id status change timed out\n");
		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
		dwc2_core_init(hsotg, false, -1);
		dwc2_enable_global_interrupts(hsotg);
	} else {
		/* A-Device connector (Host Mode) */
		dev_dbg(hsotg->dev, "connId A\n");
		while (!dwc2_is_host_mode(hsotg)) {
			dev_info(hsotg->dev, "Waiting for Host Mode, Mode=%s\n",
				 dwc2_is_host_mode(hsotg) ?
				 "Host" : "Peripheral");
			usleep_range(20000, 40000);
			if (++count > 250)
				break;
		}
		if (count > 250)
			dev_err(hsotg->dev,
				"Connection id status change timed out\n");
		hsotg->op_state = OTG_STATE_A_HOST;

		/* Initialize the Core for Host mode */
		dwc2_core_init(hsotg, false, -1);
		dwc2_enable_global_interrupts(hsotg);
		dwc2_hcd_start(hsotg);
	}
}

/*
 * Timer callback (data is the dwc2_hsotg pointer) that ends resume
 * signaling on the root port after remote wakeup.
 */
static void dwc2_wakeup_detected(unsigned long data)
{
	struct dwc2_hsotg *hsotg = (struct dwc2_hsotg *)data;
	u32 hprt0;

	dev_dbg(hsotg->dev, "%s()\n", __func__);

	/*
	 * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms
	 * so that OPT tests pass with all PHYs.)
	 */
	hprt0 = dwc2_read_hprt0(hsotg);
	dev_dbg(hsotg->dev, "Resume: HPRT0=%0x\n", hprt0);
	hprt0 &= ~HPRT0_RES;
	writel(hprt0, hsotg->regs + HPRT0);
	dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n",
		readl(hsotg->regs + HPRT0));

	dwc2_hcd_rem_wakeup(hsotg);

	/* Change to L0 state */
	hsotg->lx_state = DWC2_L0;
}

/* Returns true if the USB core has HNP enabled for our root hub */
static int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg *hsotg)
{
	struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);

	return hcd->self.b_hnp_enable;
}

/* Must NOT be called with interrupt disabled or spinlock held */
static void dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
{
	unsigned long flags;
	u32 hprt0;
	u32 pcgctl;
	u32 gotgctl;

	dev_dbg(hsotg->dev, "%s()\n", __func__);

	spin_lock_irqsave(&hsotg->lock, flags);

	/* If this suspend starts HNP, tell the core and note the state */
	if (windex == hsotg->otg_port && dwc2_host_is_b_hnp_enabled(hsotg)) {
		gotgctl = readl(hsotg->regs + GOTGCTL);
		gotgctl |= GOTGCTL_HSTSETHNPEN;
		writel(gotgctl, hsotg->regs + GOTGCTL);
		hsotg->op_state = OTG_STATE_A_SUSPEND;
	}

	hprt0 = dwc2_read_hprt0(hsotg);
	hprt0 |= HPRT0_SUSP;
	writel(hprt0, hsotg->regs + HPRT0);

	/* Update lx_state */
	hsotg->lx_state = DWC2_L2;

	/* Suspend the Phy Clock */
	pcgctl = readl(hsotg->regs + PCGCTL);
	pcgctl |= PCGCTL_STOPPCLK;
	writel(pcgctl, hsotg->regs + PCGCTL);
	udelay(10);

	/* For HNP the bus must be suspended for at least 200ms */
	if (dwc2_host_is_b_hnp_enabled(hsotg)) {
		/* Restart the PHY clock before sleeping */
		pcgctl = readl(hsotg->regs + PCGCTL);
		pcgctl &= ~PCGCTL_STOPPCLK;
		writel(pcgctl, hsotg->regs + PCGCTL);

		/* Drop the lock before the long sleep */
		spin_unlock_irqrestore(&hsotg->lock, flags);

		usleep_range(200000, 250000);
	} else {
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}
}

/* Handles hub class-specific requests */
static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
				u16 wvalue, u16 windex, char *buf, u16 wlength)
{
	struct usb_hub_descriptor *hub_desc;
	int retval = 0;
	u32 hprt0;
	u32 port_status;
	u32 speed;
	u32 pcgctl;

	switch (typereq) {
	case ClearHubFeature:
		dev_dbg(hsotg->dev, "ClearHubFeature %1xh\n", wvalue);

		switch (wvalue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* Nothing required here */
			break;

		default:
			retval = -EINVAL;
			dev_err(hsotg->dev,
				"ClearHubFeature request %1xh unknown\n",
				wvalue);
		}
		break;

	case ClearPortFeature:
		/* Single root port: only wIndex 1 is valid (except L1) */
		if (wvalue != USB_PORT_FEAT_L1)
			if (!windex || windex > 1)
				goto error;
		switch (wvalue) {
		case USB_PORT_FEAT_ENABLE:
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_ENABLE\n");
			/*
			 * NOTE(review): setting HPRT0_ENA looks inverted but
			 * presumably the core treats a write of 1 as
			 * "disable port" — confirm against the databook
			 */
			hprt0 = dwc2_read_hprt0(hsotg);
			hprt0 |= HPRT0_ENA;
			writel(hprt0, hsotg->regs + HPRT0);
			break;

		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_SUSPEND\n");

			/* Restart the PHY clock, then drive resume signaling */
			writel(0, hsotg->regs + PCGCTL);
			usleep_range(20000, 40000);

			hprt0 = dwc2_read_hprt0(hsotg);
			hprt0 |= HPRT0_RES;
			writel(hprt0, hsotg->regs + HPRT0);
			hprt0 &= ~HPRT0_SUSP;
			usleep_range(100000, 150000);

			hprt0 &= ~HPRT0_RES;
			writel(hprt0, hsotg->regs + HPRT0);
			break;

		case USB_PORT_FEAT_POWER:
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_POWER\n");
			hprt0 = dwc2_read_hprt0(hsotg);
			hprt0 &= ~HPRT0_PWR;
			writel(hprt0, hsotg->regs + HPRT0);
			break;

		case USB_PORT_FEAT_INDICATOR:
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
			/* Port indicator not supported */
			break;

		case USB_PORT_FEAT_C_CONNECTION:
			/*
			 * Clears driver's internal Connect Status Change flag
			 */
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
			hsotg->flags.b.port_connect_status_change = 0;
			break;

		case USB_PORT_FEAT_C_RESET:
			/* Clears driver's internal Port Reset Change flag */
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_C_RESET\n");
			hsotg->flags.b.port_reset_change = 0;
			break;

		case USB_PORT_FEAT_C_ENABLE:
			/*
			 * Clears the driver's internal Port Enable/Disable
			 * Change flag
			 */
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
			hsotg->flags.b.port_enable_change = 0;
			break;

		case USB_PORT_FEAT_C_SUSPEND:
			/*
			 * Clears the driver's internal Port Suspend Change
			 * flag, which is set when resume signaling on the host
			 * port is complete
			 */
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
			hsotg->flags.b.port_suspend_change = 0;
			break;

		case USB_PORT_FEAT_C_PORT_L1:
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_C_PORT_L1\n");
			hsotg->flags.b.port_l1_change = 0;
			break;

		case USB_PORT_FEAT_C_OVER_CURRENT:
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
			hsotg->flags.b.port_over_current_change = 0;
			break;

		default:
			retval = -EINVAL;
			dev_err(hsotg->dev,
				"ClearPortFeature request %1xh unknown or unsupported\n",
				wvalue);
		}
		break;

	case GetHubDescriptor:
		dev_dbg(hsotg->dev, "GetHubDescriptor\n");
		hub_desc = (struct usb_hub_descriptor *)buf;
		hub_desc->bDescLength = 9;
		/* 0x29 = hub descriptor type */
		hub_desc->bDescriptorType = 0x29;
		hub_desc->bNbrPorts = 1;
		hub_desc->wHubCharacteristics = cpu_to_le16(0x08);
		hub_desc->bPwrOn2PwrGood = 1;
		hub_desc->bHubContrCurrent = 0;
		hub_desc->u.hs.DeviceRemovable[0] = 0;
		hub_desc->u.hs.DeviceRemovable[1] = 0xff;
		break;

	case GetHubStatus:
		dev_dbg(hsotg->dev, "GetHubStatus\n");
		/* Hub status/change: nothing to report */
		memset(buf, 0, 4);
		break;

	case GetPortStatus:
		dev_vdbg(hsotg->dev,
			 "GetPortStatus wIndex=0x%04x flags=0x%08x\n", windex,
			 hsotg->flags.d32);
		if (!windex || windex > 1)
			goto error;

		/* Change bits (upper 16) come from driver-internal flags */
		port_status = 0;
		if (hsotg->flags.b.port_connect_status_change)
			port_status |= USB_PORT_STAT_C_CONNECTION << 16;
		if (hsotg->flags.b.port_enable_change)
			port_status |= USB_PORT_STAT_C_ENABLE << 16;
		if (hsotg->flags.b.port_suspend_change)
			port_status |= USB_PORT_STAT_C_SUSPEND << 16;
		if (hsotg->flags.b.port_l1_change)
			port_status |= USB_PORT_STAT_C_L1 << 16;
		if (hsotg->flags.b.port_reset_change)
			port_status |= USB_PORT_STAT_C_RESET << 16;
		if (hsotg->flags.b.port_over_current_change) {
			dev_warn(hsotg->dev, "Overcurrent change detected\n");
			port_status |= USB_PORT_STAT_C_OVERCURRENT << 16;
		}

		if (!hsotg->flags.b.port_connect_status) {
			/*
			 * The port is disconnected, which means the core is
			 * either in device mode or it soon will be. Just
			 * return 0's for the remainder of the port status
			 * since the port register can't be read if the core
			 * is in device mode.
			 */
			*(__le32 *)buf = cpu_to_le32(port_status);
			break;
		}

		/* Status bits (lower 16) come from the HPRT0 register */
		hprt0 = readl(hsotg->regs + HPRT0);
		dev_vdbg(hsotg->dev, "  HPRT0: 0x%08x\n", hprt0);

		if (hprt0 & HPRT0_CONNSTS)
			port_status |= USB_PORT_STAT_CONNECTION;
		if (hprt0 & HPRT0_ENA)
			port_status |= USB_PORT_STAT_ENABLE;
		if (hprt0 & HPRT0_SUSP)
			port_status |= USB_PORT_STAT_SUSPEND;
		if (hprt0 & HPRT0_OVRCURRACT)
			port_status |= USB_PORT_STAT_OVERCURRENT;
		if (hprt0 & HPRT0_RST)
			port_status |= USB_PORT_STAT_RESET;
		if (hprt0 & HPRT0_PWR)
			port_status |= USB_PORT_STAT_POWER;

		speed = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
		if (speed == HPRT0_SPD_HIGH_SPEED)
			port_status |= USB_PORT_STAT_HIGH_SPEED;
		else if (speed == HPRT0_SPD_LOW_SPEED)
			port_status |= USB_PORT_STAT_LOW_SPEED;

		if (hprt0 & HPRT0_TSTCTL_MASK)
			port_status |= USB_PORT_STAT_TEST;
		/* USB_PORT_FEAT_INDICATOR unsupported always 0 */

		dev_vdbg(hsotg->dev, "port_status=%08x\n", port_status);
		*(__le32 *)buf = cpu_to_le32(port_status);
		break;

	case SetHubFeature:
		dev_dbg(hsotg->dev, "SetHubFeature\n");
		/* No HUB features supported */
		break;

	case SetPortFeature:
		dev_dbg(hsotg->dev, "SetPortFeature\n");
		if (wvalue != USB_PORT_FEAT_TEST && (!windex || windex > 1))
			goto error;

		if (!hsotg->flags.b.port_connect_status) {
			/*
			 * The port is disconnected, which means the core is
			 * either in device mode or it soon will be. Just
			 * return without doing anything since the port
			 * register can't be written if the core is in device
			 * mode.
			 */
			break;
		}

		switch (wvalue) {
		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(hsotg->dev,
				"SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
			if (windex != hsotg->otg_port)
				goto error;
			/* dwc2_port_suspend() sleeps; no lock is held here */
			dwc2_port_suspend(hsotg, windex);
			break;

		case USB_PORT_FEAT_POWER:
			dev_dbg(hsotg->dev,
				"SetPortFeature - USB_PORT_FEAT_POWER\n");
			hprt0 = dwc2_read_hprt0(hsotg);
			hprt0 |= HPRT0_PWR;
			writel(hprt0, hsotg->regs + HPRT0);
			break;

		case USB_PORT_FEAT_RESET:
			hprt0 = dwc2_read_hprt0(hsotg);
			dev_dbg(hsotg->dev,
				"SetPortFeature - USB_PORT_FEAT_RESET\n");
			/* Ungate the PHY clock before driving reset */
			pcgctl = readl(hsotg->regs + PCGCTL);
			pcgctl &= ~(PCGCTL_ENBL_SLEEP_GATING | PCGCTL_STOPPCLK);
			writel(pcgctl, hsotg->regs + PCGCTL);
			/* ??? Original driver does this */
			writel(0, hsotg->regs + PCGCTL);

			hprt0 = dwc2_read_hprt0(hsotg);
			/* Clear suspend bit if resetting from suspend state */
			hprt0 &= ~HPRT0_SUSP;

			/*
			 * When B-Host the Port reset bit is set in the Start
			 * HCD Callback function, so that the reset is started
			 * within 1ms of the HNP success interrupt
			 */
			if (!dwc2_hcd_is_b_host(hsotg)) {
				hprt0 |= HPRT0_PWR | HPRT0_RST;
				dev_dbg(hsotg->dev,
					"In host mode, hprt0=%08x\n", hprt0);
				writel(hprt0, hsotg->regs + HPRT0);
			}

			/* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
			usleep_range(50000, 70000);
			hprt0 &= ~HPRT0_RST;
			writel(hprt0, hsotg->regs + HPRT0);
			hsotg->lx_state = DWC2_L0; /* Now back to On state */
			break;

		case USB_PORT_FEAT_INDICATOR:
			dev_dbg(hsotg->dev,
				"SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
			/* Not supported */
			break;

		default:
			retval = -EINVAL;
			dev_err(hsotg->dev,
				"SetPortFeature %1xh unknown or unsupported\n",
				wvalue);
			break;
		}
		break;

	default:
error:
		retval = -EINVAL;
		dev_dbg(hsotg->dev,
			"Unknown hub control request: %1xh wIndex: %1xh wValue: %1xh\n",
			typereq, windex, wvalue);
		break;
	}

	return retval;
}

/*
 * Reports whether any of the driver-internal root-port change flags are
 * set for the given port (only port 1 exists). Returns a boolean-ish int,
 * or -EINVAL for any other port number.
 */
static int dwc2_hcd_is_status_changed(struct dwc2_hsotg *hsotg, int port)
{
	int retval;

	if (port != 1)
		return -EINVAL;

	retval = (hsotg->flags.b.port_connect_status_change ||
		  hsotg->flags.b.port_reset_change ||
		  hsotg->flags.b.port_enable_change ||
		  hsotg->flags.b.port_suspend_change ||
		  hsotg->flags.b.port_over_current_change);

	if (retval) {
		dev_dbg(hsotg->dev,
			"DWC OTG HCD HUB STATUS DATA: Root port status changed\n");
		dev_dbg(hsotg->dev, "  port_connect_status_change: %d\n",
			hsotg->flags.b.port_connect_status_change);
		dev_dbg(hsotg->dev, "  port_reset_change: %d\n",
			hsotg->flags.b.port_reset_change);
		dev_dbg(hsotg->dev, "  port_enable_change: %d\n",
			hsotg->flags.b.port_enable_change);
		dev_dbg(hsotg->dev, "  port_suspend_change: %d\n",
			hsotg->flags.b.port_suspend_change);
		dev_dbg(hsotg->dev, "  port_over_current_change: %d\n",
			hsotg->flags.b.port_over_current_change);
	}

	return retval;
}

/* Returns the current frame number from the HFNUM register */
int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
{
	u32 hfnum = readl(hsotg->regs + HFNUM);

#ifdef DWC2_DEBUG_SOF
	dev_vdbg(hsotg->dev, "DWC OTG HCD GET FRAME NUMBER %d\n",
		 (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT);
#endif
	return (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
}

/* Returns true when operating as B-Host after HNP */
int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg)
{
	return hsotg->op_state == OTG_STATE_B_HOST;
}

/*
 * Allocates a dwc2_hcd_urb with room for iso_desc_count isochronous packet
 * descriptors in its trailing flexible array. Returns NULL on allocation
 * failure; the caller owns (and must free) the returned urb.
 */
static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg,
					       int iso_desc_count,
					       gfp_t mem_flags)
{
	struct dwc2_hcd_urb *urb;
	u32 size = sizeof(*urb) +
		iso_desc_count * sizeof(struct dwc2_hcd_iso_packet_desc);

	urb = kzalloc(size, mem_flags);
	if (urb)
		urb->packet_count = iso_desc_count;
	return urb;
}

/* Fills in the endpoint/pipe description of a dwc2_hcd_urb */
static void dwc2_hcd_urb_set_pipeinfo(struct dwc2_hsotg *hsotg,
				      struct dwc2_hcd_urb *urb, u8 dev_addr,
				      u8 ep_num, u8 ep_type, u8 ep_dir,
				      u16 mps)
{
	if (dbg_perio() ||
	    ep_type == USB_ENDPOINT_XFER_BULK ||
	    ep_type == USB_ENDPOINT_XFER_CONTROL)
		dev_vdbg(hsotg->dev,
			 "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, mps=%d\n",
			 dev_addr, ep_num, ep_dir, ep_type, mps);
	urb->pipe_info.dev_addr = dev_addr;
	urb->pipe_info.ep_num = ep_num;
	urb->pipe_info.pipe_type = ep_type;
	urb->pipe_info.pipe_dir = ep_dir;
	urb->pipe_info.mps = mps;
}

/*
 * NOTE: This function will be removed once the peripheral controller code
 * is integrated and the driver is stable
 */
void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg) { #ifdef DEBUG struct dwc2_host_chan *chan; struct dwc2_hcd_urb *urb; struct dwc2_qtd *qtd; int num_channels; u32 np_tx_status; u32 p_tx_status; int i; num_channels = hsotg->core_params->host_channels; dev_dbg(hsotg->dev, "\n"); dev_dbg(hsotg->dev, "************************************************************\n"); dev_dbg(hsotg->dev, "HCD State:\n"); dev_dbg(hsotg->dev, " Num channels: %d\n", num_channels); for (i = 0; i < num_channels; i++) { chan = hsotg->hc_ptr_array[i]; dev_dbg(hsotg->dev, " Channel %d:\n", i); dev_dbg(hsotg->dev, " dev_addr: %d, ep_num: %d, ep_is_in: %d\n", chan->dev_addr, chan->ep_num, chan->ep_is_in); dev_dbg(hsotg->dev, " speed: %d\n", chan->speed); dev_dbg(hsotg->dev, " ep_type: %d\n", chan->ep_type); dev_dbg(hsotg->dev, " max_packet: %d\n", chan->max_packet); dev_dbg(hsotg->dev, " data_pid_start: %d\n", chan->data_pid_start); dev_dbg(hsotg->dev, " multi_count: %d\n", chan->multi_count); dev_dbg(hsotg->dev, " xfer_started: %d\n", chan->xfer_started); dev_dbg(hsotg->dev, " xfer_buf: %p\n", chan->xfer_buf); dev_dbg(hsotg->dev, " xfer_dma: %08lx\n", (unsigned long)chan->xfer_dma); dev_dbg(hsotg->dev, " xfer_len: %d\n", chan->xfer_len); dev_dbg(hsotg->dev, " xfer_count: %d\n", chan->xfer_count); dev_dbg(hsotg->dev, " halt_on_queue: %d\n", chan->halt_on_queue); dev_dbg(hsotg->dev, " halt_pending: %d\n", chan->halt_pending); dev_dbg(hsotg->dev, " halt_status: %d\n", chan->halt_status); dev_dbg(hsotg->dev, " do_split: %d\n", chan->do_split); dev_dbg(hsotg->dev, " complete_split: %d\n", chan->complete_split); dev_dbg(hsotg->dev, " hub_addr: %d\n", chan->hub_addr); dev_dbg(hsotg->dev, " hub_port: %d\n", chan->hub_port); dev_dbg(hsotg->dev, " xact_pos: %d\n", chan->xact_pos); dev_dbg(hsotg->dev, " requests: %d\n", chan->requests); dev_dbg(hsotg->dev, " qh: %p\n", chan->qh); if (chan->xfer_started) { u32 hfnum, hcchar, hctsiz, hcint, hcintmsk; hfnum = readl(hsotg->regs + HFNUM); hcchar = 
readl(hsotg->regs + HCCHAR(i)); hctsiz = readl(hsotg->regs + HCTSIZ(i)); hcint = readl(hsotg->regs + HCINT(i)); hcintmsk = readl(hsotg->regs + HCINTMSK(i)); dev_dbg(hsotg->dev, " hfnum: 0x%08x\n", hfnum); dev_dbg(hsotg->dev, " hcchar: 0x%08x\n", hcchar); dev_dbg(hsotg->dev, " hctsiz: 0x%08x\n", hctsiz); dev_dbg(hsotg->dev, " hcint: 0x%08x\n", hcint); dev_dbg(hsotg->dev, " hcintmsk: 0x%08x\n", hcintmsk); } if (!(chan->xfer_started && chan->qh)) continue; list_for_each_entry(qtd, &chan->qh->qtd_list, qtd_list_entry) { if (!qtd->in_process) break; urb = qtd->urb; dev_dbg(hsotg->dev, " URB Info:\n"); dev_dbg(hsotg->dev, " qtd: %p, urb: %p\n", qtd, urb); if (urb) { dev_dbg(hsotg->dev, " Dev: %d, EP: %d %s\n", dwc2_hcd_get_dev_addr(&urb->pipe_info), dwc2_hcd_get_ep_num(&urb->pipe_info), dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT"); dev_dbg(hsotg->dev, " Max packet size: %d\n", dwc2_hcd_get_mps(&urb->pipe_info)); dev_dbg(hsotg->dev, " transfer_buffer: %p\n", urb->buf); dev_dbg(hsotg->dev, " transfer_dma: %08lx\n", (unsigned long)urb->dma); dev_dbg(hsotg->dev, " transfer_buffer_length: %d\n", urb->length); dev_dbg(hsotg->dev, " actual_length: %d\n", urb->actual_length); } } } dev_dbg(hsotg->dev, " non_periodic_channels: %d\n", hsotg->non_periodic_channels); dev_dbg(hsotg->dev, " periodic_channels: %d\n", hsotg->periodic_channels); dev_dbg(hsotg->dev, " periodic_usecs: %d\n", hsotg->periodic_usecs); np_tx_status = readl(hsotg->regs + GNPTXSTS); dev_dbg(hsotg->dev, " NP Tx Req Queue Space Avail: %d\n", (np_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT); dev_dbg(hsotg->dev, " NP Tx FIFO Space Avail: %d\n", (np_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT); p_tx_status = readl(hsotg->regs + HPTXSTS); dev_dbg(hsotg->dev, " P Tx Req Queue Space Avail: %d\n", (p_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT); dev_dbg(hsotg->dev, " P Tx FIFO Space Avail: %d\n", (p_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT); 
dwc2_hcd_dump_frrem(hsotg); dwc2_dump_global_registers(hsotg); dwc2_dump_host_registers(hsotg); dev_dbg(hsotg->dev, "************************************************************\n"); dev_dbg(hsotg->dev, "\n"); #endif } /* * NOTE: This function will be removed once the peripheral controller code * is integrated and the driver is stable */ void dwc2_hcd_dump_frrem(struct dwc2_hsotg *hsotg) { #ifdef DWC2_DUMP_FRREM dev_dbg(hsotg->dev, "Frame remaining at SOF:\n"); dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", hsotg->frrem_samples, hsotg->frrem_accum, hsotg->frrem_samples > 0 ? hsotg->frrem_accum / hsotg->frrem_samples : 0); dev_dbg(hsotg->dev, "\n"); dev_dbg(hsotg->dev, "Frame remaining at start_transfer (uframe 7):\n"); dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", hsotg->hfnum_7_samples, hsotg->hfnum_7_frrem_accum, hsotg->hfnum_7_samples > 0 ? hsotg->hfnum_7_frrem_accum / hsotg->hfnum_7_samples : 0); dev_dbg(hsotg->dev, "Frame remaining at start_transfer (uframe 0):\n"); dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", hsotg->hfnum_0_samples, hsotg->hfnum_0_frrem_accum, hsotg->hfnum_0_samples > 0 ? hsotg->hfnum_0_frrem_accum / hsotg->hfnum_0_samples : 0); dev_dbg(hsotg->dev, "Frame remaining at start_transfer (uframe 1-6):\n"); dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", hsotg->hfnum_other_samples, hsotg->hfnum_other_frrem_accum, hsotg->hfnum_other_samples > 0 ? hsotg->hfnum_other_frrem_accum / hsotg->hfnum_other_samples : 0); dev_dbg(hsotg->dev, "\n"); dev_dbg(hsotg->dev, "Frame remaining at sample point A (uframe 7):\n"); dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", hsotg->hfnum_7_samples_a, hsotg->hfnum_7_frrem_accum_a, hsotg->hfnum_7_samples_a > 0 ? 
hsotg->hfnum_7_frrem_accum_a / hsotg->hfnum_7_samples_a : 0); dev_dbg(hsotg->dev, "Frame remaining at sample point A (uframe 0):\n"); dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", hsotg->hfnum_0_samples_a, hsotg->hfnum_0_frrem_accum_a, hsotg->hfnum_0_samples_a > 0 ? hsotg->hfnum_0_frrem_accum_a / hsotg->hfnum_0_samples_a : 0); dev_dbg(hsotg->dev, "Frame remaining at sample point A (uframe 1-6):\n"); dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", hsotg->hfnum_other_samples_a, hsotg->hfnum_other_frrem_accum_a, hsotg->hfnum_other_samples_a > 0 ? hsotg->hfnum_other_frrem_accum_a / hsotg->hfnum_other_samples_a : 0); dev_dbg(hsotg->dev, "\n"); dev_dbg(hsotg->dev, "Frame remaining at sample point B (uframe 7):\n"); dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", hsotg->hfnum_7_samples_b, hsotg->hfnum_7_frrem_accum_b, hsotg->hfnum_7_samples_b > 0 ? hsotg->hfnum_7_frrem_accum_b / hsotg->hfnum_7_samples_b : 0); dev_dbg(hsotg->dev, "Frame remaining at sample point B (uframe 0):\n"); dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", hsotg->hfnum_0_samples_b, hsotg->hfnum_0_frrem_accum_b, (hsotg->hfnum_0_samples_b > 0) ? hsotg->hfnum_0_frrem_accum_b / hsotg->hfnum_0_samples_b : 0); dev_dbg(hsotg->dev, "Frame remaining at sample point B (uframe 1-6):\n"); dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", hsotg->hfnum_other_samples_b, hsotg->hfnum_other_frrem_accum_b, (hsotg->hfnum_other_samples_b > 0) ? 
hsotg->hfnum_other_frrem_accum_b / hsotg->hfnum_other_samples_b : 0); #endif } struct wrapper_priv_data { struct dwc2_hsotg *hsotg; }; /* Gets the dwc2_hsotg from a usb_hcd */ static struct dwc2_hsotg *dwc2_hcd_to_hsotg(struct usb_hcd *hcd) { struct wrapper_priv_data *p; p = (struct wrapper_priv_data *) &hcd->hcd_priv; return p->hsotg; } static int _dwc2_hcd_start(struct usb_hcd *hcd); void dwc2_host_start(struct dwc2_hsotg *hsotg) { struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg); hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg); _dwc2_hcd_start(hcd); } void dwc2_host_disconnect(struct dwc2_hsotg *hsotg) { struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg); hcd->self.is_b_host = 0; } void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr, int *hub_port) { struct urb *urb = context; if (urb->dev->tt) *hub_addr = urb->dev->tt->hub->devnum; else *hub_addr = 0; *hub_port = urb->dev->ttport; } int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context) { struct urb *urb = context; return urb->dev->speed; } static void dwc2_allocate_bus_bandwidth(struct usb_hcd *hcd, u16 bw, struct urb *urb) { struct usb_bus *bus = hcd_to_bus(hcd); if (urb->interval) bus->bandwidth_allocated += bw / urb->interval; if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) bus->bandwidth_isoc_reqs++; else bus->bandwidth_int_reqs++; } static void dwc2_free_bus_bandwidth(struct usb_hcd *hcd, u16 bw, struct urb *urb) { struct usb_bus *bus = hcd_to_bus(hcd); if (urb->interval) bus->bandwidth_allocated -= bw / urb->interval; if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) bus->bandwidth_isoc_reqs--; else bus->bandwidth_int_reqs--; } /* * Sets the final status of an URB and returns it to the upper layer. Any * required cleanup of the URB is performed. 
* * Must be called with interrupt disabled and spinlock held */ void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, int status) { struct urb *urb; int i; if (!qtd) { dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__); return; } if (!qtd->urb) { dev_dbg(hsotg->dev, "## %s: qtd->urb is NULL ##\n", __func__); return; } urb = qtd->urb->priv; if (!urb) { dev_dbg(hsotg->dev, "## %s: urb->priv is NULL ##\n", __func__); return; } urb->actual_length = dwc2_hcd_urb_get_actual_length(qtd->urb); if (dbg_urb(urb)) dev_vdbg(hsotg->dev, "%s: urb %p device %d ep %d-%s status %d actual %d\n", __func__, urb, usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe), usb_pipein(urb->pipe) ? "IN" : "OUT", status, urb->actual_length); if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS && dbg_perio()) { for (i = 0; i < urb->number_of_packets; i++) dev_vdbg(hsotg->dev, " ISO Desc %d status %d\n", i, urb->iso_frame_desc[i].status); } if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { urb->error_count = dwc2_hcd_urb_get_error_count(qtd->urb); for (i = 0; i < urb->number_of_packets; ++i) { urb->iso_frame_desc[i].actual_length = dwc2_hcd_urb_get_iso_desc_actual_length( qtd->urb, i); urb->iso_frame_desc[i].status = dwc2_hcd_urb_get_iso_desc_status(qtd->urb, i); } } urb->status = status; if (!status) { if ((urb->transfer_flags & URB_SHORT_NOT_OK) && urb->actual_length < urb->transfer_buffer_length) urb->status = -EREMOTEIO; } if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS || usb_pipetype(urb->pipe) == PIPE_INTERRUPT) { struct usb_host_endpoint *ep = urb->ep; if (ep) dwc2_free_bus_bandwidth(dwc2_hsotg_to_hcd(hsotg), dwc2_hcd_get_ep_bandwidth(hsotg, ep), urb); } usb_hcd_unlink_urb_from_ep(dwc2_hsotg_to_hcd(hsotg), urb); urb->hcpriv = NULL; kfree(qtd->urb); qtd->urb = NULL; spin_unlock(&hsotg->lock); usb_hcd_giveback_urb(dwc2_hsotg_to_hcd(hsotg), urb, status); spin_lock(&hsotg->lock); } /* * Work queue function for starting the HCD when A-Cable is connected */ static void 
dwc2_hcd_start_func(struct work_struct *work) { struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg, start_work.work); dev_dbg(hsotg->dev, "%s() %p\n", __func__, hsotg); dwc2_host_start(hsotg); } /* * Reset work queue function */ static void dwc2_hcd_reset_func(struct work_struct *work) { struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg, reset_work.work); u32 hprt0; dev_dbg(hsotg->dev, "USB RESET function called\n"); hprt0 = dwc2_read_hprt0(hsotg); hprt0 &= ~HPRT0_RST; writel(hprt0, hsotg->regs + HPRT0); hsotg->flags.b.port_reset_change = 1; } /* * ========================================================================= * Linux HC Driver Functions * ========================================================================= */ /* * Initializes the DWC_otg controller and its root hub and prepares it for host * mode operation. Activates the root port. Returns 0 on success and a negative * error code on failure. */ static int _dwc2_hcd_start(struct usb_hcd *hcd) { struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); struct usb_bus *bus = hcd_to_bus(hcd); unsigned long flags; dev_dbg(hsotg->dev, "DWC OTG HCD START\n"); spin_lock_irqsave(&hsotg->lock, flags); hcd->state = HC_STATE_RUNNING; if (dwc2_is_device_mode(hsotg)) { spin_unlock_irqrestore(&hsotg->lock, flags); return 0; /* why 0 ?? */ } dwc2_hcd_reinit(hsotg); /* Initialize and connect root hub if one is not already attached */ if (bus->root_hub) { dev_dbg(hsotg->dev, "DWC OTG HCD Has Root Hub\n"); /* Inform the HUB driver to resume */ usb_hcd_resume_root_hub(hcd); } spin_unlock_irqrestore(&hsotg->lock, flags); return 0; } /* * Halts the DWC_otg host mode operations in a clean manner. USB transfers are * stopped. 
*/ static void _dwc2_hcd_stop(struct usb_hcd *hcd) { struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); unsigned long flags; spin_lock_irqsave(&hsotg->lock, flags); dwc2_hcd_stop(hsotg); spin_unlock_irqrestore(&hsotg->lock, flags); usleep_range(1000, 3000); } /* Returns the current frame number */ static int _dwc2_hcd_get_frame_number(struct usb_hcd *hcd) { struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); return dwc2_hcd_get_frame_number(hsotg); } static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb, char *fn_name) { #ifdef VERBOSE_DEBUG struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); char *pipetype; char *speed; dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb); dev_vdbg(hsotg->dev, " Device address: %d\n", usb_pipedevice(urb->pipe)); dev_vdbg(hsotg->dev, " Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe), usb_pipein(urb->pipe) ? "IN" : "OUT"); switch (usb_pipetype(urb->pipe)) { case PIPE_CONTROL: pipetype = "CONTROL"; break; case PIPE_BULK: pipetype = "BULK"; break; case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break; case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break; default: pipetype = "UNKNOWN"; break; } dev_vdbg(hsotg->dev, " Endpoint type: %s %s (%s)\n", pipetype, usb_urb_dir_in(urb) ? "IN" : "OUT", usb_pipein(urb->pipe) ? 
"IN" : "OUT"); switch (urb->dev->speed) { case USB_SPEED_HIGH: speed = "HIGH"; break; case USB_SPEED_FULL: speed = "FULL"; break; case USB_SPEED_LOW: speed = "LOW"; break; default: speed = "UNKNOWN"; break; } dev_vdbg(hsotg->dev, " Speed: %s\n", speed); dev_vdbg(hsotg->dev, " Max packet size: %d\n", usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); dev_vdbg(hsotg->dev, " Data buffer length: %d\n", urb->transfer_buffer_length); dev_vdbg(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n", urb->transfer_buffer, (unsigned long)urb->transfer_dma); dev_vdbg(hsotg->dev, " Setup buffer: %p, Setup DMA: %08lx\n", urb->setup_packet, (unsigned long)urb->setup_dma); dev_vdbg(hsotg->dev, " Interval: %d\n", urb->interval); if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { int i; for (i = 0; i < urb->number_of_packets; i++) { dev_vdbg(hsotg->dev, " ISO Desc %d:\n", i); dev_vdbg(hsotg->dev, " offset: %d, length %d\n", urb->iso_frame_desc[i].offset, urb->iso_frame_desc[i].length); } } #endif } /* * Starts processing a USB transfer request specified by a USB Request Block * (URB). mem_flags indicates the type of memory allocation to use while * processing this URB. 
*/ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) { struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); struct usb_host_endpoint *ep = urb->ep; struct dwc2_hcd_urb *dwc2_urb; int i; int retval; int alloc_bandwidth = 0; u8 ep_type = 0; u32 tflags = 0; void *buf; unsigned long flags; if (dbg_urb(urb)) { dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n"); dwc2_dump_urb_info(hcd, urb, "urb_enqueue"); } if (ep == NULL) return -EINVAL; if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS || usb_pipetype(urb->pipe) == PIPE_INTERRUPT) { spin_lock_irqsave(&hsotg->lock, flags); if (!dwc2_hcd_is_bandwidth_allocated(hsotg, ep)) alloc_bandwidth = 1; spin_unlock_irqrestore(&hsotg->lock, flags); } switch (usb_pipetype(urb->pipe)) { case PIPE_CONTROL: ep_type = USB_ENDPOINT_XFER_CONTROL; break; case PIPE_ISOCHRONOUS: ep_type = USB_ENDPOINT_XFER_ISOC; break; case PIPE_BULK: ep_type = USB_ENDPOINT_XFER_BULK; break; case PIPE_INTERRUPT: ep_type = USB_ENDPOINT_XFER_INT; break; default: dev_warn(hsotg->dev, "Wrong ep type\n"); } dwc2_urb = dwc2_hcd_urb_alloc(hsotg, urb->number_of_packets, mem_flags); if (!dwc2_urb) return -ENOMEM; dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe), ep_type, usb_pipein(urb->pipe), usb_maxpacket(urb->dev, urb->pipe, !(usb_pipein(urb->pipe)))); buf = urb->transfer_buffer; if (hcd->self.uses_dma) { if (!buf && (urb->transfer_dma & 3)) { dev_err(hsotg->dev, "%s: unaligned transfer with no transfer_buffer", __func__); retval = -EINVAL; goto fail1; } } if (!(urb->transfer_flags & URB_NO_INTERRUPT)) tflags |= URB_GIVEBACK_ASAP; if (urb->transfer_flags & URB_ZERO_PACKET) tflags |= URB_SEND_ZERO_PACKET; dwc2_urb->priv = urb; dwc2_urb->buf = buf; dwc2_urb->dma = urb->transfer_dma; dwc2_urb->length = urb->transfer_buffer_length; dwc2_urb->setup_packet = urb->setup_packet; dwc2_urb->setup_dma = urb->setup_dma; dwc2_urb->flags = tflags; dwc2_urb->interval = urb->interval; 
dwc2_urb->status = -EINPROGRESS; for (i = 0; i < urb->number_of_packets; ++i) dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i, urb->iso_frame_desc[i].offset, urb->iso_frame_desc[i].length); urb->hcpriv = dwc2_urb; spin_lock_irqsave(&hsotg->lock, flags); retval = usb_hcd_link_urb_to_ep(hcd, urb); spin_unlock_irqrestore(&hsotg->lock, flags); if (retval) goto fail1; retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, &ep->hcpriv, mem_flags); if (retval) goto fail2; if (alloc_bandwidth) { spin_lock_irqsave(&hsotg->lock, flags); dwc2_allocate_bus_bandwidth(hcd, dwc2_hcd_get_ep_bandwidth(hsotg, ep), urb); spin_unlock_irqrestore(&hsotg->lock, flags); } return 0; fail2: spin_lock_irqsave(&hsotg->lock, flags); dwc2_urb->priv = NULL; usb_hcd_unlink_urb_from_ep(hcd, urb); spin_unlock_irqrestore(&hsotg->lock, flags); fail1: urb->hcpriv = NULL; kfree(dwc2_urb); return retval; } /* * Aborts/cancels a USB transfer request. Always returns 0 to indicate success. */ static int _dwc2_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) { struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); int rc; unsigned long flags; dev_dbg(hsotg->dev, "DWC OTG HCD URB Dequeue\n"); dwc2_dump_urb_info(hcd, urb, "urb_dequeue"); spin_lock_irqsave(&hsotg->lock, flags); rc = usb_hcd_check_unlink_urb(hcd, urb, status); if (rc) goto out; if (!urb->hcpriv) { dev_dbg(hsotg->dev, "## urb->hcpriv is NULL ##\n"); goto out; } rc = dwc2_hcd_urb_dequeue(hsotg, urb->hcpriv); usb_hcd_unlink_urb_from_ep(hcd, urb); kfree(urb->hcpriv); urb->hcpriv = NULL; /* Higher layer software sets URB status */ spin_unlock(&hsotg->lock); usb_hcd_giveback_urb(hcd, urb, status); spin_lock(&hsotg->lock); dev_dbg(hsotg->dev, "Called usb_hcd_giveback_urb()\n"); dev_dbg(hsotg->dev, " urb->status = %d\n", urb->status); out: spin_unlock_irqrestore(&hsotg->lock, flags); return rc; } /* * Frees resources in the DWC_otg controller related to a given endpoint. Also * clears state in the HCD related to the endpoint. 
Any URBs for the endpoint * must already be dequeued. */ static void _dwc2_hcd_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep) { struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); dev_dbg(hsotg->dev, "DWC OTG HCD EP DISABLE: bEndpointAddress=0x%02x, ep->hcpriv=%p\n", ep->desc.bEndpointAddress, ep->hcpriv); dwc2_hcd_endpoint_disable(hsotg, ep, 250); } /* * Resets endpoint specific parameter values, in current version used to reset * the data toggle (as a WA). This function can be called from usb_clear_halt * routine. */ static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep) { struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); unsigned long flags; dev_dbg(hsotg->dev, "DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n", ep->desc.bEndpointAddress); spin_lock_irqsave(&hsotg->lock, flags); dwc2_hcd_endpoint_reset(hsotg, ep); spin_unlock_irqrestore(&hsotg->lock, flags); } /* * Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid * interrupt. * * This function is called by the USB core when an interrupt occurs */ static irqreturn_t _dwc2_hcd_irq(struct usb_hcd *hcd) { struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); return dwc2_handle_hcd_intr(hsotg); } /* * Creates Status Change bitmap for the root hub and root port. The bitmap is * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1 * is the status change indicator for the single root port. Returns 1 if either * change indicator is 1, otherwise returns 0. 
*/ static int _dwc2_hcd_hub_status_data(struct usb_hcd *hcd, char *buf) { struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); buf[0] = dwc2_hcd_is_status_changed(hsotg, 1) << 1; return buf[0] != 0; } /* Handles hub class-specific requests */ static int _dwc2_hcd_hub_control(struct usb_hcd *hcd, u16 typereq, u16 wvalue, u16 windex, char *buf, u16 wlength) { int retval = dwc2_hcd_hub_control(dwc2_hcd_to_hsotg(hcd), typereq, wvalue, windex, buf, wlength); return retval; } /* Handles hub TT buffer clear completions */ static void _dwc2_hcd_clear_tt_buffer_complete(struct usb_hcd *hcd, struct usb_host_endpoint *ep) { struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); struct dwc2_qh *qh; unsigned long flags; qh = ep->hcpriv; if (!qh) return; spin_lock_irqsave(&hsotg->lock, flags); qh->tt_buffer_dirty = 0; if (hsotg->flags.b.port_connect_status) dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_ALL); spin_unlock_irqrestore(&hsotg->lock, flags); } static struct hc_driver dwc2_hc_driver = { .description = "dwc2_hsotg", .product_desc = "DWC OTG Controller", .hcd_priv_size = sizeof(struct wrapper_priv_data), .irq = _dwc2_hcd_irq, .flags = HCD_MEMORY | HCD_USB2, .start = _dwc2_hcd_start, .stop = _dwc2_hcd_stop, .urb_enqueue = _dwc2_hcd_urb_enqueue, .urb_dequeue = _dwc2_hcd_urb_dequeue, .endpoint_disable = _dwc2_hcd_endpoint_disable, .endpoint_reset = _dwc2_hcd_endpoint_reset, .get_frame_number = _dwc2_hcd_get_frame_number, .hub_status_data = _dwc2_hcd_hub_status_data, .hub_control = _dwc2_hcd_hub_control, .clear_tt_buffer_complete = _dwc2_hcd_clear_tt_buffer_complete, }; /* * Frees secondary storage associated with the dwc2_hsotg structure contained * in the struct usb_hcd field */ static void dwc2_hcd_free(struct dwc2_hsotg *hsotg) { u32 ahbcfg; u32 dctl; int i; dev_dbg(hsotg->dev, "DWC OTG HCD FREE\n"); /* Free memory for QH/QTD lists */ dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_inactive); dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_active); 
dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_inactive); dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_ready); dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_assigned); dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_queued); /* Free memory for the host channels */ for (i = 0; i < MAX_EPS_CHANNELS; i++) { struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i]; if (chan != NULL) { dev_dbg(hsotg->dev, "HCD Free channel #%i, chan=%p\n", i, chan); hsotg->hc_ptr_array[i] = NULL; kfree(chan); } } if (hsotg->core_params->dma_enable > 0) { if (hsotg->status_buf) { dma_free_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE, hsotg->status_buf, hsotg->status_buf_dma); hsotg->status_buf = NULL; } } else { kfree(hsotg->status_buf); hsotg->status_buf = NULL; } ahbcfg = readl(hsotg->regs + GAHBCFG); /* Disable all interrupts */ ahbcfg &= ~GAHBCFG_GLBL_INTR_EN; writel(ahbcfg, hsotg->regs + GAHBCFG); writel(0, hsotg->regs + GINTMSK); if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a) { dctl = readl(hsotg->regs + DCTL); dctl |= DCTL_SFTDISCON; writel(dctl, hsotg->regs + DCTL); } if (hsotg->wq_otg) { if (!cancel_work_sync(&hsotg->wf_otg)) flush_workqueue(hsotg->wq_otg); destroy_workqueue(hsotg->wq_otg); } kfree(hsotg->core_params); hsotg->core_params = NULL; del_timer(&hsotg->wkp_timer); } static void dwc2_hcd_release(struct dwc2_hsotg *hsotg) { /* Turn off all host-specific interrupts */ dwc2_disable_host_interrupts(hsotg); dwc2_hcd_free(hsotg); } /* * Sets all parameters to the given value. * * Assumes that the dwc2_core_params struct contains only integers. */ void dwc2_set_all_params(struct dwc2_core_params *params, int value) { int *p = (int *)params; size_t size = sizeof(*params) / sizeof(*p); int i; for (i = 0; i < size; i++) p[i] = value; } EXPORT_SYMBOL_GPL(dwc2_set_all_params); /* * Initializes the HCD. This function allocates memory for and initializes the * static parts of the usb_hcd and dwc2_hsotg structures. 
It also registers the * USB bus with the core and calls the hc_driver->start() function. It returns * a negative error on failure. */ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq, const struct dwc2_core_params *params) { struct usb_hcd *hcd; struct dwc2_host_chan *channel; u32 hcfg; int i, num_channels; int retval; dev_dbg(hsotg->dev, "DWC OTG HCD INIT\n"); /* Detect config values from hardware */ retval = dwc2_get_hwparams(hsotg); if (retval) return retval; retval = -ENOMEM; hcfg = readl(hsotg->regs + HCFG); dev_dbg(hsotg->dev, "hcfg=%08x\n", hcfg); #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS hsotg->frame_num_array = kzalloc(sizeof(*hsotg->frame_num_array) * FRAME_NUM_ARRAY_SIZE, GFP_KERNEL); if (!hsotg->frame_num_array) goto error1; hsotg->last_frame_num_array = kzalloc( sizeof(*hsotg->last_frame_num_array) * FRAME_NUM_ARRAY_SIZE, GFP_KERNEL); if (!hsotg->last_frame_num_array) goto error1; hsotg->last_frame_num = HFNUM_MAX_FRNUM; #endif hsotg->core_params = kzalloc(sizeof(*hsotg->core_params), GFP_KERNEL); if (!hsotg->core_params) goto error1; dwc2_set_all_params(hsotg->core_params, -1); /* Validate parameter values */ dwc2_set_parameters(hsotg, params); /* Check if the bus driver or platform code has setup a dma_mask */ if (hsotg->core_params->dma_enable > 0 && hsotg->dev->dma_mask == NULL) { dev_warn(hsotg->dev, "dma_mask not set, disabling DMA\n"); hsotg->core_params->dma_enable = 0; hsotg->core_params->dma_desc_enable = 0; } /* Set device flags indicating whether the HCD supports DMA */ if (hsotg->core_params->dma_enable > 0) { if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0) dev_warn(hsotg->dev, "can't set DMA mask\n"); if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0) dev_warn(hsotg->dev, "can't set coherent DMA mask\n"); } hcd = usb_create_hcd(&dwc2_hc_driver, hsotg->dev, dev_name(hsotg->dev)); if (!hcd) goto error1; if (hsotg->core_params->dma_enable <= 0) hcd->self.uses_dma = 0; hcd->has_tt = 1; spin_lock_init(&hsotg->lock); ((struct 
wrapper_priv_data *) &hcd->hcd_priv)->hsotg = hsotg; hsotg->priv = hcd; /* * Disable the global interrupt until all the interrupt handlers are * installed */ dwc2_disable_global_interrupts(hsotg); /* Initialize the DWC_otg core, and select the Phy type */ retval = dwc2_core_init(hsotg, true, irq); if (retval) goto error2; /* Create new workqueue and init work */ retval = -ENOMEM; hsotg->wq_otg = create_singlethread_workqueue("dwc2"); if (!hsotg->wq_otg) { dev_err(hsotg->dev, "Failed to create workqueue\n"); goto error2; } INIT_WORK(&hsotg->wf_otg, dwc2_conn_id_status_change); setup_timer(&hsotg->wkp_timer, dwc2_wakeup_detected, (unsigned long)hsotg); /* Initialize the non-periodic schedule */ INIT_LIST_HEAD(&hsotg->non_periodic_sched_inactive); INIT_LIST_HEAD(&hsotg->non_periodic_sched_active); /* Initialize the periodic schedule */ INIT_LIST_HEAD(&hsotg->periodic_sched_inactive); INIT_LIST_HEAD(&hsotg->periodic_sched_ready); INIT_LIST_HEAD(&hsotg->periodic_sched_assigned); INIT_LIST_HEAD(&hsotg->periodic_sched_queued); /* * Create a host channel descriptor for each host channel implemented * in the controller. Initialize the channel descriptor array. */ INIT_LIST_HEAD(&hsotg->free_hc_list); num_channels = hsotg->core_params->host_channels; memset(&hsotg->hc_ptr_array[0], 0, sizeof(hsotg->hc_ptr_array)); for (i = 0; i < num_channels; i++) { channel = kzalloc(sizeof(*channel), GFP_KERNEL); if (channel == NULL) goto error3; channel->hc_num = i; hsotg->hc_ptr_array[i] = channel; } if (hsotg->core_params->uframe_sched > 0) dwc2_hcd_init_usecs(hsotg); /* Initialize hsotg start work */ INIT_DELAYED_WORK(&hsotg->start_work, dwc2_hcd_start_func); /* Initialize port reset work */ INIT_DELAYED_WORK(&hsotg->reset_work, dwc2_hcd_reset_func); /* * Allocate space for storing data on status transactions. Normally no * data is sent, but this space acts as a bit bucket. This must be * done after usb_add_hcd since that function allocates the DMA buffer * pool. 
*/ if (hsotg->core_params->dma_enable > 0) hsotg->status_buf = dma_alloc_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE, &hsotg->status_buf_dma, GFP_KERNEL); else hsotg->status_buf = kzalloc(DWC2_HCD_STATUS_BUF_SIZE, GFP_KERNEL); if (!hsotg->status_buf) goto error3; hsotg->otg_port = 1; hsotg->frame_list = NULL; hsotg->frame_list_dma = 0; hsotg->periodic_qh_count = 0; /* Initiate lx_state to L3 disconnected state */ hsotg->lx_state = DWC2_L3; hcd->self.otg_port = hsotg->otg_port; /* Don't support SG list at this point */ hcd->self.sg_tablesize = 0; /* * Finish generic HCD initialization and start the HCD. This function * allocates the DMA buffer pool, registers the USB bus, requests the * IRQ line, and calls hcd_start method. */ retval = usb_add_hcd(hcd, irq, IRQF_SHARED); if (retval < 0) goto error3; device_wakeup_enable(hcd->self.controller); dwc2_hcd_dump_state(hsotg); dwc2_enable_global_interrupts(hsotg); return 0; error3: dwc2_hcd_release(hsotg); error2: usb_put_hcd(hcd); error1: kfree(hsotg->core_params); #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS kfree(hsotg->last_frame_num_array); kfree(hsotg->frame_num_array); #endif dev_err(hsotg->dev, "%s() FAILED, returning %d\n", __func__, retval); return retval; } EXPORT_SYMBOL_GPL(dwc2_hcd_init); /* * Removes the HCD. * Frees memory and resources associated with the HCD and deregisters the bus. */ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg) { struct usb_hcd *hcd; dev_dbg(hsotg->dev, "DWC OTG HCD REMOVE\n"); hcd = dwc2_hsotg_to_hcd(hsotg); dev_dbg(hsotg->dev, "hsotg->hcd = %p\n", hcd); if (!hcd) { dev_dbg(hsotg->dev, "%s: dwc2_hsotg_to_hcd(hsotg) NULL!\n", __func__); return; } usb_remove_hcd(hcd); hsotg->priv = NULL; dwc2_hcd_release(hsotg); usb_put_hcd(hcd); #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS kfree(hsotg->last_frame_num_array); kfree(hsotg->frame_num_array); #endif } EXPORT_SYMBOL_GPL(dwc2_hcd_remove);
gpl-2.0
sh95119/linux
drivers/gpu/drm/i915/dvo_ns2501.c
405
22566
/* * * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter * * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */ #include "dvo.h" #include "i915_reg.h" #include "i915_drv.h" #define NS2501_VID 0x1305 #define NS2501_DID 0x6726 #define NS2501_VID_LO 0x00 #define NS2501_VID_HI 0x01 #define NS2501_DID_LO 0x02 #define NS2501_DID_HI 0x03 #define NS2501_REV 0x04 #define NS2501_RSVD 0x05 #define NS2501_FREQ_LO 0x06 #define NS2501_FREQ_HI 0x07 #define NS2501_REG8 0x08 #define NS2501_8_VEN (1<<5) #define NS2501_8_HEN (1<<4) #define NS2501_8_DSEL (1<<3) #define NS2501_8_BPAS (1<<2) #define NS2501_8_RSVD (1<<1) #define NS2501_8_PD (1<<0) #define NS2501_REG9 0x09 #define NS2501_9_VLOW (1<<7) #define NS2501_9_MSEL_MASK (0x7<<4) #define NS2501_9_TSEL (1<<3) #define NS2501_9_RSEN (1<<2) #define NS2501_9_RSVD (1<<1) #define NS2501_9_MDI (1<<0) #define NS2501_REGC 0x0c /* * The following registers are not part of the official datasheet * and are the result of reverse engineering. */ /* * Register c0 controls how the DVO synchronizes with * its input. */ #define NS2501_REGC0 0xc0 #define NS2501_C0_ENABLE (1<<0) /* enable the DVO sync in general */ #define NS2501_C0_HSYNC (1<<1) /* synchronize horizontal with input */ #define NS2501_C0_VSYNC (1<<2) /* synchronize vertical with input */ #define NS2501_C0_RESET (1<<7) /* reset the synchronization flip/flops */ /* * Register 41 is somehow related to the sync register and sync * configuration. It should be 0x32 whenever regC0 is 0x05 (hsync off) * and 0x00 otherwise. */ #define NS2501_REG41 0x41 /* * this register controls the dithering of the DVO * One bit enables it, the other define the dithering depth. * The higher the value, the lower the dithering depth. */ #define NS2501_F9_REG 0xf9 #define NS2501_F9_ENABLE (1<<0) /* if set, dithering is enabled */ #define NS2501_F9_DITHER_MASK (0x7f<<1) /* controls the dither depth */ #define NS2501_F9_DITHER_SHIFT 1 /* shifts the dither mask */ /* * PLL configuration register. This is a pair of registers, * one single byte register at 1B, and a pair at 1C,1D. * These registers are counters/dividers. 
*/ #define NS2501_REG1B 0x1b /* one byte PLL control register */ #define NS2501_REG1C 0x1c /* low-part of the second register */ #define NS2501_REG1D 0x1d /* high-part of the second register */ /* * Scaler control registers. Horizontal at b8,b9, * vertical at 10,11. The scale factor is computed as * 2^16/control-value. The low-byte comes first. */ #define NS2501_REG10 0x10 /* low-byte vertical scaler */ #define NS2501_REG11 0x11 /* high-byte vertical scaler */ #define NS2501_REGB8 0xb8 /* low-byte horizontal scaler */ #define NS2501_REGB9 0xb9 /* high-byte horizontal scaler */ /* * Display window definition. This consists of four registers * per dimension. One register pair defines the start of the * display, one the end. * As far as I understand, this defines the window within which * the scaler samples the input. */ #define NS2501_REGC1 0xc1 /* low-byte horizontal display start */ #define NS2501_REGC2 0xc2 /* high-byte horizontal display start */ #define NS2501_REGC3 0xc3 /* low-byte horizontal display stop */ #define NS2501_REGC4 0xc4 /* high-byte horizontal display stop */ #define NS2501_REGC5 0xc5 /* low-byte vertical display start */ #define NS2501_REGC6 0xc6 /* high-byte vertical display start */ #define NS2501_REGC7 0xc7 /* low-byte vertical display stop */ #define NS2501_REGC8 0xc8 /* high-byte vertical display stop */ /* * The following register pair seems to define the start of * the vertical sync. If automatic syncing is enabled, and the * register value defines a sync pulse that is later than the * incoming sync, then the register value is ignored and the * external hsync triggers the synchronization. */ #define NS2501_REG80 0x80 /* low-byte vsync-start */ #define NS2501_REG81 0x81 /* high-byte vsync-start */ /* * The following register pair seems to define the total number * of lines created at the output side of the scaler. * This is again a low-high register pair. 
*/ #define NS2501_REG82 0x82 /* output display height, low byte */ #define NS2501_REG83 0x83 /* output display height, high byte */ /* * The following registers define the end of the front-porch * in horizontal and vertical position and hence allow to shift * the image left/right or up/down. */ #define NS2501_REG98 0x98 /* horizontal start of display + 256, low */ #define NS2501_REG99 0x99 /* horizontal start of display + 256, high */ #define NS2501_REG8E 0x8e /* vertical start of the display, low byte */ #define NS2501_REG8F 0x8f /* vertical start of the display, high byte */ /* * The following register pair control the function of the * backlight and the DVO output. To enable the corresponding * function, the corresponding bit must be set in both registers. */ #define NS2501_REG34 0x34 /* DVO enable functions, first register */ #define NS2501_REG35 0x35 /* DVO enable functions, second register */ #define NS2501_34_ENABLE_OUTPUT (1<<0) /* enable DVO output */ #define NS2501_34_ENABLE_BACKLIGHT (1<<1) /* enable backlight */ /* * Registers 9C and 9D define the vertical output offset * of the visible region. */ #define NS2501_REG9C 0x9c #define NS2501_REG9D 0x9d /* * The register 9F defines the dithering. This requires the * scaler to be ON. Bit 0 enables dithering, the remaining * bits control the depth of the dither. The higher the value, * the LOWER the dithering amplitude. A good value seems to be * 15 (total register value). */ #define NS2501_REGF9 0xf9 #define NS2501_F9_ENABLE_DITHER (1<<0) /* enable dithering */ #define NS2501_F9_DITHER_MASK (0x7f<<1) /* dither masking */ #define NS2501_F9_DITHER_SHIFT 1 /* upshift of the dither mask */ enum { MODE_640x480, MODE_800x600, MODE_1024x768, }; struct ns2501_reg { uint8_t offset; uint8_t value; }; /* * The following structure keeps the complete configuration of * the DVO, given a specific output configuration. * This is pretty much guess-work from reverse-engineering, so * read all this with a grain of salt. 
*/ struct ns2501_configuration { uint8_t sync; /* configuration of the C0 register */ uint8_t conf; /* configuration register 8 */ uint8_t syncb; /* configuration register 41 */ uint8_t dither; /* configuration of the dithering */ uint8_t pll_a; /* PLL configuration, register A, 1B */ uint16_t pll_b; /* PLL configuration, register B, 1C/1D */ uint16_t hstart; /* horizontal start, registers C1/C2 */ uint16_t hstop; /* horizontal total, registers C3/C4 */ uint16_t vstart; /* vertical start, registers C5/C6 */ uint16_t vstop; /* vertical total, registers C7/C8 */ uint16_t vsync; /* manual vertical sync start, 80/81 */ uint16_t vtotal; /* number of lines generated, 82/83 */ uint16_t hpos; /* horizontal position + 256, 98/99 */ uint16_t vpos; /* vertical position, 8e/8f */ uint16_t voffs; /* vertical output offset, 9c/9d */ uint16_t hscale; /* horizontal scaling factor, b8/b9 */ uint16_t vscale; /* vertical scaling factor, 10/11 */ }; /* * DVO configuration values, partially based on what the BIOS * of the Fujitsu Lifebook S6010 writes into registers, * partially found by manual tweaking. These configurations assume * a 1024x768 panel. 
*/ static const struct ns2501_configuration ns2501_modes[] = { [MODE_640x480] = { .sync = NS2501_C0_ENABLE | NS2501_C0_VSYNC, .conf = NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD, .syncb = 0x32, .dither = 0x0f, .pll_a = 17, .pll_b = 852, .hstart = 144, .hstop = 783, .vstart = 22, .vstop = 514, .vsync = 2047, /* actually, ignored with this config */ .vtotal = 1341, .hpos = 0, .vpos = 16, .voffs = 36, .hscale = 40960, .vscale = 40960 }, [MODE_800x600] = { .sync = NS2501_C0_ENABLE | NS2501_C0_HSYNC | NS2501_C0_VSYNC, .conf = NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD, .syncb = 0x00, .dither = 0x0f, .pll_a = 25, .pll_b = 612, .hstart = 215, .hstop = 1016, .vstart = 26, .vstop = 627, .vsync = 807, .vtotal = 1341, .hpos = 0, .vpos = 4, .voffs = 35, .hscale = 51248, .vscale = 51232 }, [MODE_1024x768] = { .sync = NS2501_C0_ENABLE | NS2501_C0_VSYNC, .conf = NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD, .syncb = 0x32, .dither = 0x0f, .pll_a = 11, .pll_b = 1350, .hstart = 276, .hstop = 1299, .vstart = 15, .vstop = 1056, .vsync = 2047, .vtotal = 1341, .hpos = 0, .vpos = 7, .voffs = 27, .hscale = 65535, .vscale = 65535 } }; /* * Other configuration values left by the BIOS of the * Fujitsu S6010 in the DVO control registers. Their * value does not depend on the BIOS and their meaning * is unknown. 
*/ static const struct ns2501_reg mode_agnostic_values[] = { /* 08 is mode specific */ [0] = { .offset = 0x0a, .value = 0x81, }, /* 10,11 are part of the mode specific configuration */ [1] = { .offset = 0x12, .value = 0x02, }, [2] = { .offset = 0x18, .value = 0x07, }, [3] = { .offset = 0x19, .value = 0x00, }, [4] = { .offset = 0x1a, .value = 0x00, }, /* PLL?, ignored */ /* 1b,1c,1d are part of the mode specific configuration */ [5] = { .offset = 0x1e, .value = 0x02, }, [6] = { .offset = 0x1f, .value = 0x40, }, [7] = { .offset = 0x20, .value = 0x00, }, [8] = { .offset = 0x21, .value = 0x00, }, [9] = { .offset = 0x22, .value = 0x00, }, [10] = { .offset = 0x23, .value = 0x00, }, [11] = { .offset = 0x24, .value = 0x00, }, [12] = { .offset = 0x25, .value = 0x00, }, [13] = { .offset = 0x26, .value = 0x00, }, [14] = { .offset = 0x27, .value = 0x00, }, [15] = { .offset = 0x7e, .value = 0x18, }, /* 80-84 are part of the mode-specific configuration */ [16] = { .offset = 0x84, .value = 0x00, }, [17] = { .offset = 0x85, .value = 0x00, }, [18] = { .offset = 0x86, .value = 0x00, }, [19] = { .offset = 0x87, .value = 0x00, }, [20] = { .offset = 0x88, .value = 0x00, }, [21] = { .offset = 0x89, .value = 0x00, }, [22] = { .offset = 0x8a, .value = 0x00, }, [23] = { .offset = 0x8b, .value = 0x00, }, [24] = { .offset = 0x8c, .value = 0x10, }, [25] = { .offset = 0x8d, .value = 0x02, }, /* 8e,8f are part of the mode-specific configuration */ [26] = { .offset = 0x90, .value = 0xff, }, [27] = { .offset = 0x91, .value = 0x07, }, [28] = { .offset = 0x92, .value = 0xa0, }, [29] = { .offset = 0x93, .value = 0x02, }, [30] = { .offset = 0x94, .value = 0x00, }, [31] = { .offset = 0x95, .value = 0x00, }, [32] = { .offset = 0x96, .value = 0x05, }, [33] = { .offset = 0x97, .value = 0x00, }, /* 98,99 are part of the mode-specific configuration */ [34] = { .offset = 0x9a, .value = 0x88, }, [35] = { .offset = 0x9b, .value = 0x00, }, /* 9c,9d are part of the mode-specific configuration */ [36] = { 
.offset = 0x9e, .value = 0x25, }, [37] = { .offset = 0x9f, .value = 0x03, }, [38] = { .offset = 0xa0, .value = 0x28, }, [39] = { .offset = 0xa1, .value = 0x01, }, [40] = { .offset = 0xa2, .value = 0x28, }, [41] = { .offset = 0xa3, .value = 0x05, }, /* register 0xa4 is mode specific, but 0x80..0x84 works always */ [42] = { .offset = 0xa4, .value = 0x84, }, [43] = { .offset = 0xa5, .value = 0x00, }, [44] = { .offset = 0xa6, .value = 0x00, }, [45] = { .offset = 0xa7, .value = 0x00, }, [46] = { .offset = 0xa8, .value = 0x00, }, /* 0xa9 to 0xab are mode specific, but have no visible effect */ [47] = { .offset = 0xa9, .value = 0x04, }, [48] = { .offset = 0xaa, .value = 0x70, }, [49] = { .offset = 0xab, .value = 0x4f, }, [50] = { .offset = 0xac, .value = 0x00, }, [51] = { .offset = 0xad, .value = 0x00, }, [52] = { .offset = 0xb6, .value = 0x09, }, [53] = { .offset = 0xb7, .value = 0x03, }, /* b8,b9 are part of the mode-specific configuration */ [54] = { .offset = 0xba, .value = 0x00, }, [55] = { .offset = 0xbb, .value = 0x20, }, [56] = { .offset = 0xf3, .value = 0x90, }, [57] = { .offset = 0xf4, .value = 0x00, }, [58] = { .offset = 0xf7, .value = 0x88, }, /* f8 is mode specific, but the value does not matter */ [59] = { .offset = 0xf8, .value = 0x0a, }, [60] = { .offset = 0xf9, .value = 0x00, } }; static const struct ns2501_reg regs_init[] = { [0] = { .offset = 0x35, .value = 0xff, }, [1] = { .offset = 0x34, .value = 0x00, }, [2] = { .offset = 0x08, .value = 0x30, }, }; struct ns2501_priv { bool quiet; const struct ns2501_configuration *conf; }; #define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr)) /* ** Read a register from the ns2501. ** Returns true if successful, false otherwise. ** If it returns false, it might be wise to enable the ** DVO with the above function. 
*/ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch) { struct ns2501_priv *ns = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; u8 out_buf[2]; u8 in_buf[2]; struct i2c_msg msgs[] = { { .addr = dvo->slave_addr, .flags = 0, .len = 1, .buf = out_buf, }, { .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, } }; out_buf[0] = addr; out_buf[1] = 0; if (i2c_transfer(adapter, msgs, 2) == 2) { *ch = in_buf[0]; return true; } if (!ns->quiet) { DRM_DEBUG_KMS ("Unable to read register 0x%02x from %s:0x%02x.\n", addr, adapter->name, dvo->slave_addr); } return false; } /* ** Write a register to the ns2501. ** Returns true if successful, false otherwise. ** If it returns false, it might be wise to enable the ** DVO with the above function. */ static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) { struct ns2501_priv *ns = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; uint8_t out_buf[2]; struct i2c_msg msg = { .addr = dvo->slave_addr, .flags = 0, .len = 2, .buf = out_buf, }; out_buf[0] = addr; out_buf[1] = ch; if (i2c_transfer(adapter, &msg, 1) == 1) { return true; } if (!ns->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n", addr, adapter->name, dvo->slave_addr); } return false; } /* National Semiconductor 2501 driver for chip on i2c bus * scan for the chip on the bus. * Hope the VBIOS initialized the PLL correctly so we can * talk to it. If not, it will not be seen and not detected. * Bummer! 
*/ static bool ns2501_init(struct intel_dvo_device *dvo, struct i2c_adapter *adapter) { /* this will detect the NS2501 chip on the specified i2c bus */ struct ns2501_priv *ns; unsigned char ch; ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL); if (ns == NULL) return false; dvo->i2c_bus = adapter; dvo->dev_priv = ns; ns->quiet = true; if (!ns2501_readb(dvo, NS2501_VID_LO, &ch)) goto out; if (ch != (NS2501_VID & 0xff)) { DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n", ch, adapter->name, dvo->slave_addr); goto out; } if (!ns2501_readb(dvo, NS2501_DID_LO, &ch)) goto out; if (ch != (NS2501_DID & 0xff)) { DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n", ch, adapter->name, dvo->slave_addr); goto out; } ns->quiet = false; DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n"); return true; out: kfree(ns); return false; } static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo) { /* * This is a Laptop display, it doesn't have hotplugging. * Even if not, the detection bit of the 2501 is unreliable as * it only works for some display types. * It is even more unreliable as the PLL must be active for * allowing reading from the chiop. */ return connector_status_connected; } static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo, struct drm_display_mode *mode) { DRM_DEBUG_KMS ("is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n", mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal); /* * Currently, these are all the modes I have data from. * More might exist. Unclear how to find the native resolution * of the panel in here so we could always accept it * by disabling the scaler. 
*/ if ((mode->hdisplay == 640 && mode->vdisplay == 480 && mode->clock == 25175) || (mode->hdisplay == 800 && mode->vdisplay == 600 && mode->clock == 40000) || (mode->hdisplay == 1024 && mode->vdisplay == 768 && mode->clock == 65000)) { return MODE_OK; } else { return MODE_ONE_SIZE; /* Is this a reasonable error? */ } } static void ns2501_mode_set(struct intel_dvo_device *dvo, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { const struct ns2501_configuration *conf; struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); int mode_idx, i; DRM_DEBUG_KMS ("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n", mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal); DRM_DEBUG_KMS("Detailed requested mode settings are:\n" "clock : %d kHz\n" "hdisplay : %d\n" "hblank start : %d\n" "hblank end : %d\n" "hsync start : %d\n" "hsync end : %d\n" "htotal : %d\n" "hskew : %d\n" "vdisplay : %d\n" "vblank start : %d\n" "hblank end : %d\n" "vsync start : %d\n" "vsync end : %d\n" "vtotal : %d\n", adjusted_mode->crtc_clock, adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_hblank_start, adjusted_mode->crtc_hblank_end, adjusted_mode->crtc_hsync_start, adjusted_mode->crtc_hsync_end, adjusted_mode->crtc_htotal, adjusted_mode->crtc_hskew, adjusted_mode->crtc_vdisplay, adjusted_mode->crtc_vblank_start, adjusted_mode->crtc_vblank_end, adjusted_mode->crtc_vsync_start, adjusted_mode->crtc_vsync_end, adjusted_mode->crtc_vtotal); if (mode->hdisplay == 640 && mode->vdisplay == 480) mode_idx = MODE_640x480; else if (mode->hdisplay == 800 && mode->vdisplay == 600) mode_idx = MODE_800x600; else if (mode->hdisplay == 1024 && mode->vdisplay == 768) mode_idx = MODE_1024x768; else return; /* Hopefully doing it every time won't hurt... 
*/ for (i = 0; i < ARRAY_SIZE(regs_init); i++) ns2501_writeb(dvo, regs_init[i].offset, regs_init[i].value); /* Write the mode-agnostic values */ for (i = 0; i < ARRAY_SIZE(mode_agnostic_values); i++) ns2501_writeb(dvo, mode_agnostic_values[i].offset, mode_agnostic_values[i].value); /* Write now the mode-specific configuration */ conf = ns2501_modes + mode_idx; ns->conf = conf; ns2501_writeb(dvo, NS2501_REG8, conf->conf); ns2501_writeb(dvo, NS2501_REG1B, conf->pll_a); ns2501_writeb(dvo, NS2501_REG1C, conf->pll_b & 0xff); ns2501_writeb(dvo, NS2501_REG1D, conf->pll_b >> 8); ns2501_writeb(dvo, NS2501_REGC1, conf->hstart & 0xff); ns2501_writeb(dvo, NS2501_REGC2, conf->hstart >> 8); ns2501_writeb(dvo, NS2501_REGC3, conf->hstop & 0xff); ns2501_writeb(dvo, NS2501_REGC4, conf->hstop >> 8); ns2501_writeb(dvo, NS2501_REGC5, conf->vstart & 0xff); ns2501_writeb(dvo, NS2501_REGC6, conf->vstart >> 8); ns2501_writeb(dvo, NS2501_REGC7, conf->vstop & 0xff); ns2501_writeb(dvo, NS2501_REGC8, conf->vstop >> 8); ns2501_writeb(dvo, NS2501_REG80, conf->vsync & 0xff); ns2501_writeb(dvo, NS2501_REG81, conf->vsync >> 8); ns2501_writeb(dvo, NS2501_REG82, conf->vtotal & 0xff); ns2501_writeb(dvo, NS2501_REG83, conf->vtotal >> 8); ns2501_writeb(dvo, NS2501_REG98, conf->hpos & 0xff); ns2501_writeb(dvo, NS2501_REG99, conf->hpos >> 8); ns2501_writeb(dvo, NS2501_REG8E, conf->vpos & 0xff); ns2501_writeb(dvo, NS2501_REG8F, conf->vpos >> 8); ns2501_writeb(dvo, NS2501_REG9C, conf->voffs & 0xff); ns2501_writeb(dvo, NS2501_REG9D, conf->voffs >> 8); ns2501_writeb(dvo, NS2501_REGB8, conf->hscale & 0xff); ns2501_writeb(dvo, NS2501_REGB9, conf->hscale >> 8); ns2501_writeb(dvo, NS2501_REG10, conf->vscale & 0xff); ns2501_writeb(dvo, NS2501_REG11, conf->vscale >> 8); ns2501_writeb(dvo, NS2501_REGF9, conf->dither); ns2501_writeb(dvo, NS2501_REG41, conf->syncb); ns2501_writeb(dvo, NS2501_REGC0, conf->sync); } /* set the NS2501 power state */ static bool ns2501_get_hw_state(struct intel_dvo_device *dvo) { unsigned 
char ch; if (!ns2501_readb(dvo, NS2501_REG8, &ch)) return false; return ch & NS2501_8_PD; } /* set the NS2501 power state */ static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable) { struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable); if (enable) { ns2501_writeb(dvo, NS2501_REGC0, ns->conf->sync | 0x08); ns2501_writeb(dvo, NS2501_REG41, ns->conf->syncb); ns2501_writeb(dvo, NS2501_REG34, NS2501_34_ENABLE_OUTPUT); msleep(15); ns2501_writeb(dvo, NS2501_REG8, ns->conf->conf | NS2501_8_BPAS); if (!(ns->conf->conf & NS2501_8_BPAS)) ns2501_writeb(dvo, NS2501_REG8, ns->conf->conf); msleep(200); ns2501_writeb(dvo, NS2501_REG34, NS2501_34_ENABLE_OUTPUT | NS2501_34_ENABLE_BACKLIGHT); ns2501_writeb(dvo, NS2501_REGC0, ns->conf->sync); } else { ns2501_writeb(dvo, NS2501_REG34, NS2501_34_ENABLE_OUTPUT); msleep(200); ns2501_writeb(dvo, NS2501_REG8, NS2501_8_VEN | NS2501_8_HEN | NS2501_8_BPAS); msleep(15); ns2501_writeb(dvo, NS2501_REG34, 0x00); } } static void ns2501_destroy(struct intel_dvo_device *dvo) { struct ns2501_priv *ns = dvo->dev_priv; if (ns) { kfree(ns); dvo->dev_priv = NULL; } } struct intel_dvo_dev_ops ns2501_ops = { .init = ns2501_init, .detect = ns2501_detect, .mode_valid = ns2501_mode_valid, .mode_set = ns2501_mode_set, .dpms = ns2501_dpms, .get_hw_state = ns2501_get_hw_state, .destroy = ns2501_destroy, };
gpl-2.0
TeamWin/PoonSense-2.3.3
drivers/usb/host/xhci.c
405
75035
/* * xHCI host controller driver * * Copyright (C) 2008 Intel Corp. * * Author: Sarah Sharp * Some code borrowed from the Linux EHCI driver. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/irq.h> #include <linux/log2.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include "xhci.h" #define DRIVER_AUTHOR "Sarah Sharp" #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */ static int link_quirk; module_param(link_quirk, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB"); /* TODO: copied from ehci-hcd.c - can this be refactored? */ /* * handshake - spin reading hc until handshake completes or fails * @ptr: address of hc register to be read * @mask: bits to look at in result of read * @done: value of those bits when handshake succeeds * @usec: timeout in microseconds * * Returns negative errno, or zero on success * * Success happens when the "mask" bits have the specified value (hardware * handshake done). There are two failure modes: "usec" have passed (major * hardware flakeout), or the register reads as all-ones (hardware removed). 
*/
/* Spin until (*ptr & mask) == done, polling once per microsecond.
 * Returns 0 on success, -ENODEV if the register reads all-ones
 * (controller removed), -ETIMEDOUT after @usec microseconds. */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)	/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	/* Always mask off the interrupt-enable bits; additionally clear
	 * the run/stop bit if the controller is not already halted. */
	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 microframes of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 * XXX: shouldn't we set HC_STATE_HALT here somewhere?
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);
	/* Wait (bounded) for the HCHalted status bit to latch. */
	return handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
}

/*
 * Set the run bit and wait for the host to be running.
 */
int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n", temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	return ret;
}

/*
 * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret;

	/* Refuse to reset a running controller; that is undefined territory. */
	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	/* XXX: Why does EHCI set this here?  Shouldn't other code do this? */
	xhci_to_hcd(xhci)->state = HC_STATE_HALT;

	/* Wait for the controller to clear the self-clearing CMD_RESET bit. */
	ret = handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 250 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}

#if 0
/* Set up MSI-X table for entry 0 (may claim other entries later) */
/* NOTE(review): dead code (#if 0) kept for reference until MSI-X lands. */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	xhci->msix_count = 0;
	/* XXX: did I do this right? ixgbe does kcalloc for more than one */
	xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}
	xhci->msix_entries[0].entry = 0;

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	/*
	 * Pass the xhci pointer value as the request_irq "cookie".
	 * If more irqs are added, this will need to be unique for each one.
	 */
	ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
			"xHCI", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
		goto disable_msix;
	}
	xhci_dbg(xhci, "Finished setting up MSI-X\n");
	return 0;

disable_msix:
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

/* XXX: code duplication; can xhci_setup_msix call this? */
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	if (!xhci->msix_entries)
		return;

	free_irq(xhci->msix_entries[0].vector, xhci);
	pci_disable_msix(pdev);
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
}
#endif

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}

/*
 * Called in interrupt context when there might be work
 * queued on the event ring
 *
 * xhci->lock must be held by caller.
 */
static void xhci_work(struct xhci_hcd *xhci)
{
	u32 temp;
	u64 temp_64;

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp |= STS_EINT;
	xhci_writel(xhci, temp, &xhci->op_regs->status);
	/* FIXME when MSI-X is supported and there are multiple vectors */
	/* Clear the MSI-X event interrupt status */

	/* Acknowledge the interrupt */
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	temp |= 0x3;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
	/* Flush posted writes */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);

	if (xhci->xhc_state & XHCI_STATE_DYING)
		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
				"Shouldn't IRQs be disabled?\n");
	else
		/* FIXME this should be a delayed service routine
		 * that clears the EHB.
		 */
		xhci_handle_event(xhci);

	/* Clear the event handler busy flag (RW1C); the event ring should be empty. */
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
	/* Flush posted writes -- FIXME is this necessary? */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);
}

/*-------------------------------------------------------------------------*/

/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 temp, temp2;
	union xhci_trb *trb;

	spin_lock(&xhci->lock);
	trb = xhci->event_ring->dequeue;
	/* Check if the xHC generated the interrupt, or the irq is shared */
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	/* Both registers all-ones means the controller has gone away. */
	if (temp == 0xffffffff && temp2 == 0xffffffff)
		goto hw_died;

	if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
		spin_unlock(&xhci->lock);
		return IRQ_NONE;	/* not ours (shared irq line) */
	}
	xhci_dbg(xhci, "op reg status = %08x\n", temp);
	xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
	xhci_dbg(xhci, "Event ring dequeue ptr:\n");
	xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
			(unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
			lower_32_bits(trb->link.segment_ptr),
			upper_32_bits(trb->link.segment_ptr),
			(unsigned int) trb->link.intr_target,
			(unsigned int) trb->link.control);

	if (temp & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
		/* Note: hw_died is jumped to from outside this if-body. */
hw_died:
		xhci_to_hcd(xhci)->state = HC_STATE_HALT;
		spin_unlock(&xhci->lock);
		return -ESHUTDOWN;
	}

	xhci_work(xhci);
	spin_unlock(&xhci->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
/* Debug-only timer callback: periodically dumps ring state and queues a
 * test no-op command.  Reschedules itself unless xhci->zombie is set. */
void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	/* Dump every endpoint ring of every active device slot. */
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
		}
	}

	if (xhci->noops_submitted != NUM_TEST_NOOPS)
		if (xhci_setup_one_noop(xhci))
			xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	void (*doorbell)(struct xhci_hcd *) = NULL;

	hcd->uses_new_polling = 1;
	hcd->poll_rh = 0;

	xhci_dbg(xhci, "xhci_run\n");
#if 0	/* FIXME: MSI not setup yet */
	/* Do this at the very last minute */
	ret = xhci_setup_msix(xhci);
	if (!ret)
		return ret;

	return -ENOSYS;
#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Debug builds poll the event ring from a kernel timer instead of
	 * relying solely on interrupts; xhci->zombie tells the poll work
	 * not to re-arm itself once we are shutting down.
	 */
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	/* NOTE(review): 160 presumably gives ~40us moderation assuming the
	 * xHCI IMOD interval is in 250ns units — confirm against the spec.
	 */
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	hcd->state = HC_STATE_RUNNING;
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	if (NUM_TEST_NOOPS > 0)
		doorbell = xhci_setup_one_noop(xhci);
	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	/* Flip run/stop; on failure halt the controller and bail out */
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}

	xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
	if (doorbell)
		(*doorbell)(xhci);
	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run\n");
	return 0;
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Halt and reset under the lock so no new work is queued meanwhile */
	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

#if 0	/* No MSI yet */
	xhci_cleanup_msix(xhci);
#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
		    xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Only quiesce the HC; memory is deliberately not freed on shutdown */
	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

#if 0
	xhci_cleanup_msix(xhci);
#endif

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
		    xhci_readl(xhci, &xhci->op_regs->status));
}

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	/* Compare what the hardware currently believes ep0's max packet size
	 * is against what USB core read from the device descriptor.
	 */
	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2);
	max_packet_size = urb->dev->ep0.desc.wMaxPacketSize;
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size);

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = EP0_FLAG;
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = SLOT_FLAG;
	}
	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!xhci->devs || !xhci->devs[slot_id]) {
		if (!in_interrupt())
			dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
		ret = -EINVAL;
		goto exit;
	}
	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}
	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0)
				return ret;
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		/* Refuse new URBs while streams are being set up or torn
		 * down for this bulk endpoint.
		 */
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		ret = -EINVAL;
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return -ESHUTDOWN;
}

/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.
This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret;
	u32 temp;
	struct xhci_hcd *xhci;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	/* An all-ones status read means the controller is gone (e.g. PCI
	 * device removed); give the URB back immediately.
	 */
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		td = (struct xhci_td *) urb->hcpriv;

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
		kfree(td);
		return ret;
	}
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	xhci_dbg(xhci, "Cancel URB %p\n", urb);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	xhci_dbg(xhci, "Endpoint ring:\n");
	xhci_debug_ring(xhci, ep_ring);
	td = (struct xhci_td *) urb->hcpriv;

	list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		/* Arm the watchdog in case the Stop Endpoint command never
		 * completes (see XHCI_STATE_DYING handling above).
		 */
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);

	/* The slot context and endpoint 0 can never be dropped */
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
			ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/* Record the drop in the pending input control context, and clear
	 * any earlier "add" of the same endpoint — drop wins.
	 */
	ctrl_ctx->drop_flags |= drop_flag;
	new_drop_flags = ctrl_ctx->drop_flags;

	ctrl_ctx->add_flags &= ~drop_flag;
	new_add_flags = ctrl_ctx->add_flags;

	last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= ~LAST_CTX_MASK;
		slot_ctx->dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = slot_ctx->dev_info;

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
				udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= added_ctxs;
	new_add_flags = ctrl_ctx->add_flags;

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = ctrl_ctx->drop_flags;

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= ~LAST_CTX_MASK;
		slot_ctx->dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = slot_ctx->dev_info;

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

/* Reset the pending input context so a later configure endpoint command
 * leaves unmentioned endpoints untouched.
 */
static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
*/ ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); ctrl_ctx->drop_flags = 0; ctrl_ctx->add_flags = 0; slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); slot_ctx->dev_info &= ~LAST_CTX_MASK; /* Endpoint 0 is always valid */ slot_ctx->dev_info |= LAST_CTX(1); for (i = 1; i < 31; ++i) { ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); ep_ctx->ep_info = 0; ep_ctx->ep_info2 = 0; ep_ctx->deq = 0; ep_ctx->tx_info = 0; } } static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, struct usb_device *udev, int *cmd_status) { int ret; switch (*cmd_status) { case COMP_ENOMEM: dev_warn(&udev->dev, "Not enough host controller resources " "for new device state.\n"); ret = -ENOMEM; /* FIXME: can we allocate more resources for the HC? */ break; case COMP_BW_ERR: dev_warn(&udev->dev, "Not enough bandwidth " "for new device state.\n"); ret = -ENOSPC; /* FIXME: can we go back to the old state? */ break; case COMP_TRB_ERR: /* the HCD set up something wrong */ dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, " "add flag = 1, " "and endpoint is not disabled.\n"); ret = -EINVAL; break; case COMP_SUCCESS: dev_dbg(&udev->dev, "Successful Endpoint Configure command\n"); ret = 0; break; default: xhci_err(xhci, "ERROR: unexpected command completion " "code 0x%x.\n", *cmd_status); ret = -EINVAL; break; } return ret; } static int xhci_evaluate_context_result(struct xhci_hcd *xhci, struct usb_device *udev, int *cmd_status) { int ret; struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; switch (*cmd_status) { case COMP_EINVAL: dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate " "context command.\n"); ret = -EINVAL; break; case COMP_EBADSLT: dev_warn(&udev->dev, "WARN: slot not enabled for" "evaluate context command.\n"); case COMP_CTX_STATE: dev_warn(&udev->dev, "WARN: invalid context state for " "evaluate context command.\n"); xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1); ret = -EINVAL; break; case COMP_SUCCESS: dev_dbg(&udev->dev, "Successful 
evaluate context command\n"); ret = 0; break; default: xhci_err(xhci, "ERROR: unexpected command completion " "code 0x%x.\n", *cmd_status); ret = -EINVAL; break; } return ret; } /* Issue a configure endpoint command or evaluate context command * and wait for it to finish. */ static int xhci_configure_endpoint(struct xhci_hcd *xhci, struct usb_device *udev, struct xhci_command *command, bool ctx_change, bool must_succeed) { int ret; int timeleft; unsigned long flags; struct xhci_container_ctx *in_ctx; struct completion *cmd_completion; int *cmd_status; struct xhci_virt_device *virt_dev; spin_lock_irqsave(&xhci->lock, flags); virt_dev = xhci->devs[udev->slot_id]; if (command) { in_ctx = command->in_ctx; cmd_completion = command->completion; cmd_status = &command->status; command->command_trb = xhci->cmd_ring->enqueue; list_add_tail(&command->cmd_list, &virt_dev->cmd_list); } else { in_ctx = virt_dev->in_ctx; cmd_completion = &virt_dev->cmd_completion; cmd_status = &virt_dev->cmd_status; } init_completion(cmd_completion); if (!ctx_change) ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, udev->slot_id, must_succeed); else ret = xhci_queue_evaluate_context(xhci, in_ctx->dma, udev->slot_id); if (ret < 0) { if (command) list_del(&command->cmd_list); spin_unlock_irqrestore(&xhci->lock, flags); xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); return -ENOMEM; } xhci_ring_cmd_db(xhci); spin_unlock_irqrestore(&xhci->lock, flags); /* Wait for the configure endpoint command to complete */ timeleft = wait_for_completion_interruptible_timeout( cmd_completion, USB_CTRL_SET_TIMEOUT); if (timeleft <= 0) { xhci_warn(xhci, "%s while waiting for %s command\n", timeleft == 0 ? "Timeout" : "Signal", ctx_change == 0 ? 
"configure endpoint" : "evaluate context"); /* FIXME cancel the configure endpoint command */ return -ETIME; } if (!ctx_change) return xhci_configure_endpoint_result(xhci, udev, cmd_status); return xhci_evaluate_context_result(xhci, udev, cmd_status); } /* Called after one or more calls to xhci_add_endpoint() or * xhci_drop_endpoint(). If this call fails, the USB core is expected * to call xhci_reset_bandwidth(). * * Since we are in the middle of changing either configuration or * installing a new alt setting, the USB core won't allow URBs to be * enqueued for any endpoint on the old config or interface. Nothing * else should be touching the xhci->devs[slot_id] structure, so we * don't need to take the xhci->lock for manipulating that. */ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) { int i; int ret = 0; struct xhci_hcd *xhci; struct xhci_virt_device *virt_dev; struct xhci_input_control_ctx *ctrl_ctx; struct xhci_slot_ctx *slot_ctx; ret = xhci_check_args(hcd, udev, NULL, 0, __func__); if (ret <= 0) return ret; xhci = hcd_to_xhci(hcd); if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) { xhci_warn(xhci, "xHCI %s called with unaddressed device\n", __func__); return -EINVAL; } xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); virt_dev = xhci->devs[udev->slot_id]; /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); ctrl_ctx->add_flags |= SLOT_FLAG; ctrl_ctx->add_flags &= ~EP0_FLAG; ctrl_ctx->drop_flags &= ~SLOT_FLAG; ctrl_ctx->drop_flags &= ~EP0_FLAG; xhci_dbg(xhci, "New Input Control Context:\n"); slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); xhci_dbg_ctx(xhci, virt_dev->in_ctx, LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); ret = xhci_configure_endpoint(xhci, udev, NULL, false, false); if (ret) { /* Callee should call reset_bandwidth() */ return ret; } xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); xhci_dbg_ctx(xhci, virt_dev->out_ctx, 
LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); xhci_zero_in_ctx(xhci, virt_dev); /* Install new rings and free or cache any old rings */ for (i = 1; i < 31; ++i) { if (!virt_dev->eps[i].new_ring) continue; /* Only cache or free the old ring if it exists. * It may not if this is the first add of an endpoint. */ if (virt_dev->eps[i].ring) { xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); } virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; virt_dev->eps[i].new_ring = NULL; } return ret; } void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) { struct xhci_hcd *xhci; struct xhci_virt_device *virt_dev; int i, ret; ret = xhci_check_args(hcd, udev, NULL, 0, __func__); if (ret <= 0) return; xhci = hcd_to_xhci(hcd); if (!xhci->devs || !xhci->devs[udev->slot_id]) { xhci_warn(xhci, "xHCI %s called with unaddressed device\n", __func__); return; } xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); virt_dev = xhci->devs[udev->slot_id]; /* Free any rings allocated for added endpoints */ for (i = 0; i < 31; ++i) { if (virt_dev->eps[i].new_ring) { xhci_ring_free(xhci, virt_dev->eps[i].new_ring); virt_dev->eps[i].new_ring = NULL; } } xhci_zero_in_ctx(xhci, virt_dev); } static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, struct xhci_container_ctx *in_ctx, struct xhci_container_ctx *out_ctx, u32 add_flags, u32 drop_flags) { struct xhci_input_control_ctx *ctrl_ctx; ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); ctrl_ctx->add_flags = add_flags; ctrl_ctx->drop_flags = drop_flags; xhci_slot_copy(xhci, in_ctx, out_ctx); ctrl_ctx->add_flags |= SLOT_FLAG; xhci_dbg(xhci, "Input Context:\n"); xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); } void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, struct xhci_dequeue_state *deq_state) { struct xhci_container_ctx *in_ctx; struct xhci_ep_ctx *ep_ctx; u32 added_ctxs; dma_addr_t addr; xhci_endpoint_copy(xhci, 
xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ep_index);
	in_ctx = xhci->devs[slot_id]->in_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
			deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit config ep after "
				"reset ep command\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
				deq_state->new_deq_seg,
				deq_state->new_deq_ptr);
		return;
	}
	ep_ctx->deq = addr | deq_state->new_cycle_state;

	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
}

void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
		struct usb_device *udev, unsigned int ep_index)
{
	struct xhci_dequeue_state deq_state;
	struct xhci_virt_ep *ep;

	xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
	xhci_find_new_dequeue_state(xhci, udev->slot_id,
			ep_index, ep->stopped_stream, ep->stopped_td,
			&deq_state);

	/* HW with the reset endpoint quirk will use the saved dequeue state to
	 * issue a configure endpoint command later.
	 */
	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
		xhci_dbg(xhci, "Queueing new dequeue state\n");
		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
				ep_index, ep->stopped_stream, &deq_state);
	} else {
		/* Better hope no one uses the input context between now and the
		 * reset endpoint completion!
		 * XXX: No idea how this hardware will react when stream rings
		 * are enabled.
		 */
		xhci_dbg(xhci, "Setting up input context for "
				"configure endpoint command\n");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
	}
}

/* Deal with stalled endpoints.  The core should have sent the control message
 * to clear the halt condition.  However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 */
void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int ep_index;
	unsigned long flags;
	int ret;
	struct xhci_virt_ep *virt_ep;

	xhci = hcd_to_xhci(hcd);
	udev = (struct usb_device *) ep->hcpriv;
	/* Called with a root hub endpoint (or an endpoint that wasn't added
	 * with xhci_add_endpoint())
	 */
	if (!ep->hcpriv)
		return;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	if (!virt_ep->stopped_td) {
		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
				ep->desc.bEndpointAddress);
		return;
	}
	if (usb_endpoint_xfer_control(&ep->desc)) {
		xhci_dbg(xhci, "Control endpoint stall already handled.\n");
		return;
	}

	xhci_dbg(xhci, "Queueing reset endpoint command\n");
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
	/*
	 * Can't change the ring dequeue pointer until it's transitioned to the
	 * stopped state, which is only upon a successful reset endpoint
	 * command.  Better hope that last command worked!
	 */
	if (!ret) {
		xhci_cleanup_stalled_ring(xhci, udev, ep_index);
		kfree(virt_ep->stopped_td);
		xhci_ring_cmd_db(xhci);
	}
	/* Clear the stopped-TD bookkeeping whether or not queueing worked */
	virt_ep->stopped_td = NULL;
	virt_ep->stopped_trb = NULL;
	virt_ep->stopped_stream = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (ret)
		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}

/* Validate that an endpoint is eligible to have streams enabled: it must
 * advertise stream support, not already have streams, and have no pending
 * URBs.  Returns 0 if OK, -EINVAL otherwise.
 */
static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		unsigned int slot_id)
{
	int ret;
	unsigned int ep_index;
	unsigned int ep_state;

	if (!ep)
		return -EINVAL;
	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, __func__);
	if (ret <= 0)
		return -EINVAL;
	if (ep->ss_ep_comp.bmAttributes == 0) {
		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
				" descriptor for ep 0x%x does not support streams\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
	if (ep_state & EP_HAS_STREAMS ||
			ep_state & EP_GETTING_STREAMS) {
		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
				"already has streams set up.\n",
				ep->desc.bEndpointAddress);
		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
				"dynamic stream context array reallocation.\n");
		return -EINVAL;
	}
	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
				"endpoint 0x%x; URBs are pending.\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}
	return 0;
}

/* Clamp the requested stream count to what the host controller's primary
 * stream array can hold, and compute the stream context array size.
 */
static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
		unsigned int *num_streams, unsigned int *num_stream_ctxs)
{
	unsigned int max_streams;

	/* The stream context array size must be a power of two */
	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
	/*
	 * Find out how many primary stream array entries the host controller
	 * supports.  Later we may use secondary stream arrays (similar to 2nd
	 * level page entries), but that's an optional feature for xHCI host
	 * controllers. xHCs must support at least 4 stream IDs.
*/ max_streams = HCC_MAX_PSA(xhci->hcc_params); if (*num_stream_ctxs > max_streams) { xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", max_streams); *num_stream_ctxs = max_streams; *num_streams = max_streams; } } /* Returns an error code if one of the endpoint already has streams. * This does not change any data structures, it only checks and gathers * information. */ static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, struct usb_device *udev, struct usb_host_endpoint **eps, unsigned int num_eps, unsigned int *num_streams, u32 *changed_ep_bitmask) { unsigned int max_streams; unsigned int endpoint_flag; int i; int ret; for (i = 0; i < num_eps; i++) { ret = xhci_check_streams_endpoint(xhci, udev, eps[i], udev->slot_id); if (ret < 0) return ret; max_streams = USB_SS_MAX_STREAMS( eps[i]->ss_ep_comp.bmAttributes); if (max_streams < (*num_streams - 1)) { xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", eps[i]->desc.bEndpointAddress, max_streams); *num_streams = max_streams+1; } endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc); if (*changed_ep_bitmask & endpoint_flag) return -EINVAL; *changed_ep_bitmask |= endpoint_flag; } return 0; } static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, struct usb_device *udev, struct usb_host_endpoint **eps, unsigned int num_eps) { u32 changed_ep_bitmask = 0; unsigned int slot_id; unsigned int ep_index; unsigned int ep_state; int i; slot_id = udev->slot_id; if (!xhci->devs[slot_id]) return 0; for (i = 0; i < num_eps; i++) { ep_index = xhci_get_endpoint_index(&eps[i]->desc); ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; /* Are streams already being freed for the endpoint? */ if (ep_state & EP_GETTING_NO_STREAMS) { xhci_warn(xhci, "WARN Can't disable streams for " "endpoint 0x%x\n, " "streams are being disabled already.", eps[i]->desc.bEndpointAddress); return 0; } /* Are there actually any streams to free? 
*/ if (!(ep_state & EP_HAS_STREAMS) && !(ep_state & EP_GETTING_STREAMS)) { xhci_warn(xhci, "WARN Can't disable streams for " "endpoint 0x%x\n, " "streams are already disabled!", eps[i]->desc.bEndpointAddress); xhci_warn(xhci, "WARN xhci_free_streams() called " "with non-streams endpoint\n"); return 0; } changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc); } return changed_ep_bitmask; } /* * The USB device drivers use this function (though the HCD interface in USB * core) to prepare a set of bulk endpoints to use streams. Streams are used to * coordinate mass storage command queueing across multiple endpoints (basically * a stream ID == a task ID). * * Setting up streams involves allocating the same size stream context array * for each endpoint and issuing a configure endpoint command for all endpoints. * * Don't allow the call to succeed if one endpoint only supports one stream * (which means it doesn't support streams at all). * * Drivers may get less stream IDs than they asked for, if the host controller * hardware or endpoints claim they can't support the number of requested * stream IDs. */ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint **eps, unsigned int num_eps, unsigned int num_streams, gfp_t mem_flags) { int i, ret; struct xhci_hcd *xhci; struct xhci_virt_device *vdev; struct xhci_command *config_cmd; unsigned int ep_index; unsigned int num_stream_ctxs; unsigned long flags; u32 changed_ep_bitmask = 0; if (!eps) return -EINVAL; /* Add one to the number of streams requested to account for * stream 0 that is reserved for xHCI usage. */ num_streams += 1; xhci = hcd_to_xhci(hcd); xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", num_streams); config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); if (!config_cmd) { xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); return -ENOMEM; } /* Check to make sure all endpoints are not already configured for * streams. 
While we're at it, find the maximum number of streams that * all the endpoints will support and check for duplicate endpoints. */ spin_lock_irqsave(&xhci->lock, flags); ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, num_eps, &num_streams, &changed_ep_bitmask); if (ret < 0) { xhci_free_command(xhci, config_cmd); spin_unlock_irqrestore(&xhci->lock, flags); return ret; } if (num_streams <= 1) { xhci_warn(xhci, "WARN: endpoints can't handle " "more than one stream.\n"); xhci_free_command(xhci, config_cmd); spin_unlock_irqrestore(&xhci->lock, flags); return -EINVAL; } vdev = xhci->devs[udev->slot_id]; /* Mark each endpoint as being in transistion, so * xhci_urb_enqueue() will reject all URBs. */ for (i = 0; i < num_eps; i++) { ep_index = xhci_get_endpoint_index(&eps[i]->desc); vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; } spin_unlock_irqrestore(&xhci->lock, flags); /* Setup internal data structures and allocate HW data structures for * streams (but don't install the HW structures in the input context * until we're sure all memory allocation succeeded). */ xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", num_stream_ctxs, num_streams); for (i = 0; i < num_eps; i++) { ep_index = xhci_get_endpoint_index(&eps[i]->desc); vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, num_stream_ctxs, num_streams, mem_flags); if (!vdev->eps[ep_index].stream_info) goto cleanup; /* Set maxPstreams in endpoint context and update deq ptr to * point to stream context array. FIXME */ } /* Set up the input context for a configure endpoint command. 
*/ for (i = 0; i < num_eps; i++) { struct xhci_ep_ctx *ep_ctx; ep_index = xhci_get_endpoint_index(&eps[i]->desc); ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); xhci_endpoint_copy(xhci, config_cmd->in_ctx, vdev->out_ctx, ep_index); xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, vdev->eps[ep_index].stream_info); } /* Tell the HW to drop its old copy of the endpoint context info * and add the updated copy from the input context. */ xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask); /* Issue and wait for the configure endpoint command */ ret = xhci_configure_endpoint(xhci, udev, config_cmd, false, false); /* xHC rejected the configure endpoint command for some reason, so we * leave the old ring intact and free our internal streams data * structure. */ if (ret < 0) goto cleanup; spin_lock_irqsave(&xhci->lock, flags); for (i = 0; i < num_eps; i++) { ep_index = xhci_get_endpoint_index(&eps[i]->desc); vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", udev->slot_id, ep_index); vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS; } xhci_free_command(xhci, config_cmd); spin_unlock_irqrestore(&xhci->lock, flags); /* Subtract 1 for stream 0, which drivers can't use */ return num_streams - 1; cleanup: /* If it didn't work, free the streams! */ for (i = 0; i < num_eps; i++) { ep_index = xhci_get_endpoint_index(&eps[i]->desc); xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); vdev->eps[ep_index].stream_info = NULL; /* FIXME Unset maxPstreams in endpoint context and * update deq ptr to point to normal string ring. */ vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; xhci_endpoint_zero(xhci, vdev, eps[i]); } xhci_free_command(xhci, config_cmd); return -ENOMEM; } /* Transition the endpoint from using streams to being a "normal" endpoint * without streams. 
* * Modify the endpoint context state, submit a configure endpoint command, * and free all endpoint rings for streams if that completes successfully. */ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint **eps, unsigned int num_eps, gfp_t mem_flags) { int i, ret; struct xhci_hcd *xhci; struct xhci_virt_device *vdev; struct xhci_command *command; unsigned int ep_index; unsigned long flags; u32 changed_ep_bitmask; xhci = hcd_to_xhci(hcd); vdev = xhci->devs[udev->slot_id]; /* Set up a configure endpoint command to remove the streams rings */ spin_lock_irqsave(&xhci->lock, flags); changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, udev, eps, num_eps); if (changed_ep_bitmask == 0) { spin_unlock_irqrestore(&xhci->lock, flags); return -EINVAL; } /* Use the xhci_command structure from the first endpoint. We may have * allocated too many, but the driver may call xhci_free_streams() for * each endpoint it grouped into one call to xhci_alloc_streams(). */ ep_index = xhci_get_endpoint_index(&eps[0]->desc); command = vdev->eps[ep_index].stream_info->free_streams_command; for (i = 0; i < num_eps; i++) { struct xhci_ep_ctx *ep_ctx; ep_index = xhci_get_endpoint_index(&eps[i]->desc); ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= EP_GETTING_NO_STREAMS; xhci_endpoint_copy(xhci, command->in_ctx, vdev->out_ctx, ep_index); xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx, &vdev->eps[ep_index]); } xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask); spin_unlock_irqrestore(&xhci->lock, flags); /* Issue and wait for the configure endpoint command, * which must succeed. */ ret = xhci_configure_endpoint(xhci, udev, command, false, true); /* xHC rejected the configure endpoint command for some reason, so we * leave the streams rings intact. 
*/ if (ret < 0) return ret; spin_lock_irqsave(&xhci->lock, flags); for (i = 0; i < num_eps; i++) { ep_index = xhci_get_endpoint_index(&eps[i]->desc); xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); vdev->eps[ep_index].stream_info = NULL; /* FIXME Unset maxPstreams in endpoint context and * update deq ptr to point to normal string ring. */ vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS; vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; } spin_unlock_irqrestore(&xhci->lock, flags); return 0; } /* * This submits a Reset Device Command, which will set the device state to 0, * set the device address to 0, and disable all the endpoints except the default * control endpoint. The USB core should come back and call * xhci_address_device(), and then re-set up the configuration. If this is * called because of a usb_reset_and_verify_device(), then the old alternate * settings will be re-installed through the normal bandwidth allocation * functions. * * Wait for the Reset Device command to finish. Remove all structures * associated with the endpoints that were disabled. Clear the input device * structure? Cache the rings? Reset the control endpoint 0 max packet size? */ int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev) { int ret, i; unsigned long flags; struct xhci_hcd *xhci; unsigned int slot_id; struct xhci_virt_device *virt_dev; struct xhci_command *reset_device_cmd; int timeleft; int last_freed_endpoint; ret = xhci_check_args(hcd, udev, NULL, 0, __func__); if (ret <= 0) return ret; xhci = hcd_to_xhci(hcd); slot_id = udev->slot_id; virt_dev = xhci->devs[slot_id]; if (!virt_dev) { xhci_dbg(xhci, "%s called with invalid slot ID %u\n", __func__, slot_id); return -EINVAL; } xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); /* Allocate the command structure that holds the struct completion. * Assume we're in process context, since the normal device reset * process has to wait for the device anyway. 
Storage devices are * reset as part of error handling, so use GFP_NOIO instead of * GFP_KERNEL. */ reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); if (!reset_device_cmd) { xhci_dbg(xhci, "Couldn't allocate command structure.\n"); return -ENOMEM; } /* Attempt to submit the Reset Device command to the command ring */ spin_lock_irqsave(&xhci->lock, flags); reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); ret = xhci_queue_reset_device(xhci, slot_id); if (ret) { xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); list_del(&reset_device_cmd->cmd_list); spin_unlock_irqrestore(&xhci->lock, flags); goto command_cleanup; } xhci_ring_cmd_db(xhci); spin_unlock_irqrestore(&xhci->lock, flags); /* Wait for the Reset Device command to finish */ timeleft = wait_for_completion_interruptible_timeout( reset_device_cmd->completion, USB_CTRL_SET_TIMEOUT); if (timeleft <= 0) { xhci_warn(xhci, "%s while waiting for reset device command\n", timeleft == 0 ? "Timeout" : "Signal"); spin_lock_irqsave(&xhci->lock, flags); /* The timeout might have raced with the event ring handler, so * only delete from the list if the item isn't poisoned. */ if (reset_device_cmd->cmd_list.next != LIST_POISON1) list_del(&reset_device_cmd->cmd_list); spin_unlock_irqrestore(&xhci->lock, flags); ret = -ETIME; goto command_cleanup; } /* The Reset Device command can't fail, according to the 0.95/0.96 spec, * unless we tried to reset a slot ID that wasn't enabled, * or the device wasn't in the addressed or configured state. */ ret = reset_device_cmd->status; switch (ret) { case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */ case COMP_CTX_STATE: /* 0.96 completion code for same thing */ xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n", slot_id, xhci_get_slot_state(xhci, virt_dev->out_ctx)); xhci_info(xhci, "Not freeing device rings.\n"); /* Don't treat this as an error. 
May change my mind later. */ ret = 0; goto command_cleanup; case COMP_SUCCESS: xhci_dbg(xhci, "Successful reset device command.\n"); break; default: if (xhci_is_vendor_info_code(xhci, ret)) break; xhci_warn(xhci, "Unknown completion code %u for " "reset device command.\n", ret); ret = -EINVAL; goto command_cleanup; } /* Everything but endpoint 0 is disabled, so free or cache the rings. */ last_freed_endpoint = 1; for (i = 1; i < 31; ++i) { if (!virt_dev->eps[i].ring) continue; xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); last_freed_endpoint = i; } xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); ret = 0; command_cleanup: xhci_free_command(xhci, reset_device_cmd); return ret; } /* * At this point, the struct usb_device is about to go away, the device has * disconnected, and all traffic has been stopped and the endpoints have been * disabled. Free any HC data structures associated with that device. */ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct xhci_virt_device *virt_dev; unsigned long flags; u32 state; int i; if (udev->slot_id == 0) return; virt_dev = xhci->devs[udev->slot_id]; if (!virt_dev) return; /* Stop any wayward timer functions (which may grab the lock) */ for (i = 0; i < 31; ++i) { virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING; del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); } spin_lock_irqsave(&xhci->lock, flags); /* Don't disable the slot if the host controller is dead. 
*/ state = xhci_readl(xhci, &xhci->op_regs->status); if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { xhci_free_virt_device(xhci, udev->slot_id); spin_unlock_irqrestore(&xhci->lock, flags); return; } if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) { spin_unlock_irqrestore(&xhci->lock, flags); xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); return; } xhci_ring_cmd_db(xhci); spin_unlock_irqrestore(&xhci->lock, flags); /* * Event command completion handler will free any data structures * associated with the slot. XXX Can free sleep? */ } /* * Returns 0 if the xHC ran out of device slots, the Enable Slot command * timed out, or allocating memory failed. Returns 1 on success. */ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); unsigned long flags; int timeleft; int ret; spin_lock_irqsave(&xhci->lock, flags); ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); if (ret) { spin_unlock_irqrestore(&xhci->lock, flags); xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); return 0; } xhci_ring_cmd_db(xhci); spin_unlock_irqrestore(&xhci->lock, flags); /* XXX: how much time for xHC slot assignment? */ timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, USB_CTRL_SET_TIMEOUT); if (timeleft <= 0) { xhci_warn(xhci, "%s while waiting for a slot\n", timeleft == 0 ? 
"Timeout" : "Signal"); /* FIXME cancel the enable slot request */ return 0; } if (!xhci->slot_id) { xhci_err(xhci, "Error while assigning device slot ID\n"); return 0; } /* xhci_alloc_virt_device() does not touch rings; no need to lock */ if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) { /* Disable slot, if we can do it without mem alloc */ xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); spin_lock_irqsave(&xhci->lock, flags); if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) xhci_ring_cmd_db(xhci); spin_unlock_irqrestore(&xhci->lock, flags); return 0; } udev->slot_id = xhci->slot_id; /* Is this a LS or FS device under a HS hub? */ /* Hub or peripherial? */ return 1; } /* * Issue an Address Device command (which will issue a SetAddress request to * the device). * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so * we should only issue and wait on one address command at the same time. * * We add one to the device address issued by the hardware because the USB core * uses address 1 for the root hubs (even though they're not really devices). 
*/ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) { unsigned long flags; int timeleft; struct xhci_virt_device *virt_dev; int ret = 0; struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct xhci_slot_ctx *slot_ctx; struct xhci_input_control_ctx *ctrl_ctx; u64 temp_64; if (!udev->slot_id) { xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); return -EINVAL; } virt_dev = xhci->devs[udev->slot_id]; /* If this is a Set Address to an unconfigured device, setup ep 0 */ if (!udev->config) xhci_setup_addressable_virt_dev(xhci, udev); else xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); /* Otherwise, assume the core has the device configured how it wants */ xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); spin_lock_irqsave(&xhci->lock, flags); ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, udev->slot_id); if (ret) { spin_unlock_irqrestore(&xhci->lock, flags); xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); return ret; } xhci_ring_cmd_db(xhci); spin_unlock_irqrestore(&xhci->lock, flags); /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, USB_CTRL_SET_TIMEOUT); /* FIXME: From section 4.3.4: "Software shall be responsible for timing * the SetAddress() "recovery interval" required by USB and aborting the * command on a timeout. */ if (timeleft <= 0) { xhci_warn(xhci, "%s while waiting for a slot\n", timeleft == 0 ? 
"Timeout" : "Signal"); /* FIXME cancel the address device command */ return -ETIME; } switch (virt_dev->cmd_status) { case COMP_CTX_STATE: case COMP_EBADSLT: xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n", udev->slot_id); ret = -EINVAL; break; case COMP_TX_ERR: dev_warn(&udev->dev, "Device not responding to set address.\n"); ret = -EPROTO; break; case COMP_SUCCESS: xhci_dbg(xhci, "Successful Address Device command\n"); break; default: xhci_err(xhci, "ERROR: unexpected command completion " "code 0x%x.\n", virt_dev->cmd_status); xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); ret = -EINVAL; break; } if (ret) { return ret; } temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", udev->slot_id, &xhci->dcbaa->dev_context_ptrs[udev->slot_id], (unsigned long long) xhci->dcbaa->dev_context_ptrs[udev->slot_id]); xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", (unsigned long long)virt_dev->out_ctx->dma); xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); /* * USB core uses address 1 for the roothubs, so we add one to the * address given back to us by the HC. */ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1; /* Zero the input context control for later use */ ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); ctrl_ctx->add_flags = 0; ctrl_ctx->drop_flags = 0; xhci_dbg(xhci, "Device address = %d\n", udev->devnum); /* XXX Meh, not sure if anyone else but choose_address uses this. 
*/ set_bit(udev->devnum, udev->bus->devmap.devicemap); return 0; } /* Once a hub descriptor is fetched for a device, we need to update the xHC's * internal data structures for the device. */ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, struct usb_tt *tt, gfp_t mem_flags) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct xhci_virt_device *vdev; struct xhci_command *config_cmd; struct xhci_input_control_ctx *ctrl_ctx; struct xhci_slot_ctx *slot_ctx; unsigned long flags; unsigned think_time; int ret; /* Ignore root hubs */ if (!hdev->parent) return 0; vdev = xhci->devs[hdev->slot_id]; if (!vdev) { xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); return -EINVAL; } config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); if (!config_cmd) { xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); return -ENOMEM; } spin_lock_irqsave(&xhci->lock, flags); xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); ctrl_ctx->add_flags |= SLOT_FLAG; slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); slot_ctx->dev_info |= DEV_HUB; if (tt->multi) slot_ctx->dev_info |= DEV_MTT; if (xhci->hci_version > 0x95) { xhci_dbg(xhci, "xHCI version %x needs hub " "TT think time and number of ports\n", (unsigned int) xhci->hci_version); slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild); /* Set TT think time - convert from ns to FS bit times. * 0 = 8 FS bit times, 1 = 16 FS bit times, * 2 = 24 FS bit times, 3 = 32 FS bit times. */ think_time = tt->think_time; if (think_time != 0) think_time = (think_time / 666) - 1; slot_ctx->tt_info |= TT_THINK_TIME(think_time); } else { xhci_dbg(xhci, "xHCI version %x doesn't need hub " "TT think time or number of ports\n", (unsigned int) xhci->hci_version); } slot_ctx->dev_state = 0; spin_unlock_irqrestore(&xhci->lock, flags); xhci_dbg(xhci, "Set up %s for hub device.\n", (xhci->hci_version > 0x95) ? 
"configure endpoint" : "evaluate context"); xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id); xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0); /* Issue and wait for the configure endpoint or * evaluate context command. */ if (xhci->hci_version > 0x95) ret = xhci_configure_endpoint(xhci, hdev, config_cmd, false, false); else ret = xhci_configure_endpoint(xhci, hdev, config_cmd, true, false); xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id); xhci_dbg_ctx(xhci, vdev->out_ctx, 0); xhci_free_command(xhci, config_cmd); return ret; } int xhci_get_frame(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); /* EHCI mods by the periodic size. Why? */ return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3; } MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_LICENSE("GPL"); static int __init xhci_hcd_init(void) { #ifdef CONFIG_PCI int retval = 0; retval = xhci_register_pci(); if (retval < 0) { printk(KERN_DEBUG "Problem registering PCI driver."); return retval; } #endif /* * Check the compiler generated sizes of structures that must be laid * out in specific ways for hardware access. 
*/ BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8); BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8); /* xhci_device_control has eight fields, and also * embeds one xhci_slot_ctx and 31 xhci_ep_ctx */ BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8); BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); return 0; } module_init(xhci_hcd_init); static void __exit xhci_hcd_cleanup(void) { #ifdef CONFIG_PCI xhci_unregister_pci(); #endif } module_exit(xhci_hcd_cleanup);
gpl-2.0
WarheadsSE/OX820-2.6-linux
kernel/kgdb.c
405
39461
/* * KGDB stub. * * Maintainer: Jason Wessel <jason.wessel@windriver.com> * * Copyright (C) 2000-2001 VERITAS Software Corporation. * Copyright (C) 2002-2004 Timesys Corporation * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com> * Copyright (C) 2004 Pavel Machek <pavel@suse.cz> * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org> * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd. * Copyright (C) 2005-2008 Wind River Systems, Inc. * Copyright (C) 2007 MontaVista Software, Inc. * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * * Contributors at various stages not listed above: * Jason Wessel ( jason.wessel@windriver.com ) * George Anzinger <george@mvista.com> * Anurekh Saxena (anurekh.saxena@timesys.com) * Lake Stevens Instrument Division (Glenn Engel) * Jim Kingdon, Cygnus Support. * * Original KGDB stub: David Grothe <dave@gcom.com>, * Tigran Aivazian <tigran@sco.com> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. 
*/ #include <linux/pid_namespace.h> #include <linux/clocksource.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/console.h> #include <linux/threads.h> #include <linux/uaccess.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/ptrace.h> #include <linux/reboot.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/sysrq.h> #include <linux/init.h> #include <linux/kgdb.h> #include <linux/pid.h> #include <linux/smp.h> #include <linux/mm.h> #include <asm/cacheflush.h> #include <asm/byteorder.h> #include <asm/atomic.h> #include <asm/system.h> #include <asm/unaligned.h> static int kgdb_break_asap; #define KGDB_MAX_THREAD_QUERY 17 struct kgdb_state { int ex_vector; int signo; int err_code; int cpu; int pass_exception; unsigned long thr_query; unsigned long threadid; long kgdb_usethreadid; struct pt_regs *linux_regs; }; static struct debuggerinfo_struct { void *debuggerinfo; struct task_struct *task; } kgdb_info[NR_CPUS]; /** * kgdb_connected - Is a host GDB connected to us? */ int kgdb_connected; EXPORT_SYMBOL_GPL(kgdb_connected); /* All the KGDB handlers are installed */ static int kgdb_io_module_registered; /* Guard for recursive entry */ static int exception_level; static struct kgdb_io *kgdb_io_ops; static DEFINE_SPINLOCK(kgdb_registration_lock); /* kgdb console driver is loaded */ static int kgdb_con_registered; /* determine if kgdb console output should be used */ static int kgdb_use_con; static int __init opt_kgdb_con(char *str) { kgdb_use_con = 1; return 0; } early_param("kgdbcon", opt_kgdb_con); module_param(kgdb_use_con, int, 0644); /* * Holds information about breakpoints in a kernel. These breakpoints are * added and removed by gdb. */ static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = { [0 ... 
KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED } }; /* * The CPU# of the active CPU, or -1 if none: */ atomic_t kgdb_active = ATOMIC_INIT(-1); /* * We use NR_CPUs not PERCPU, in case kgdb is used to debug early * bootup code (which might not have percpu set up yet): */ static atomic_t passive_cpu_wait[NR_CPUS]; static atomic_t cpu_in_kgdb[NR_CPUS]; atomic_t kgdb_setting_breakpoint; struct task_struct *kgdb_usethread; struct task_struct *kgdb_contthread; int kgdb_single_step; /* Our I/O buffers. */ static char remcom_in_buffer[BUFMAX]; static char remcom_out_buffer[BUFMAX]; /* Storage for the registers, in GDB format. */ static unsigned long gdb_regs[(NUMREGBYTES + sizeof(unsigned long) - 1) / sizeof(unsigned long)]; /* to keep track of the CPU which is doing the single stepping*/ atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1); /* * If you are debugging a problem where roundup (the collection of * all other CPUs) is a problem [this should be extremely rare], * then use the nokgdbroundup option to avoid roundup. 
In that case * the other CPUs might interfere with your debugging context, so * use this with care: */ static int kgdb_do_roundup = 1; static int __init opt_nokgdbroundup(char *str) { kgdb_do_roundup = 0; return 0; } early_param("nokgdbroundup", opt_nokgdbroundup); /* * Finally, some KGDB code :-) */ /* * Weak aliases for breakpoint management, * can be overriden by architectures when needed: */ int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr) { int err; err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE); if (err) return err; return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE); } int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle) { return probe_kernel_write((char *)addr, (char *)bundle, BREAK_INSTR_SIZE); } int __weak kgdb_validate_break_address(unsigned long addr) { char tmp_variable[BREAK_INSTR_SIZE]; int err; /* Validate setting the breakpoint and then removing it. In the * remove fails, the kernel needs to emit a bad message because we * are deep trouble not being able to put things back the way we * found them. */ err = kgdb_arch_set_breakpoint(addr, tmp_variable); if (err) return err; err = kgdb_arch_remove_breakpoint(addr, tmp_variable); if (err) printk(KERN_ERR "KGDB: Critical breakpoint error, kernel " "memory destroyed at: %lx", addr); return err; } unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs) { return instruction_pointer(regs); } int __weak kgdb_arch_init(void) { return 0; } int __weak kgdb_skipexception(int exception, struct pt_regs *regs) { return 0; } void __weak kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code) { return; } /** * kgdb_disable_hw_debug - Disable hardware debugging while we in kgdb. * @regs: Current &struct pt_regs. * * This function will be called if the particular architecture must * disable hardware debugging while it is processing gdb packets or * handling exception. 
*/ void __weak kgdb_disable_hw_debug(struct pt_regs *regs) { } /* * GDB remote protocol parser: */ static int hex(char ch) { if ((ch >= 'a') && (ch <= 'f')) return ch - 'a' + 10; if ((ch >= '0') && (ch <= '9')) return ch - '0'; if ((ch >= 'A') && (ch <= 'F')) return ch - 'A' + 10; return -1; } /* scan for the sequence $<data>#<checksum> */ static void get_packet(char *buffer) { unsigned char checksum; unsigned char xmitcsum; int count; char ch; do { /* * Spin and wait around for the start character, ignore all * other characters: */ while ((ch = (kgdb_io_ops->read_char())) != '$') /* nothing */; kgdb_connected = 1; checksum = 0; xmitcsum = -1; count = 0; /* * now, read until a # or end of buffer is found: */ while (count < (BUFMAX - 1)) { ch = kgdb_io_ops->read_char(); if (ch == '#') break; checksum = checksum + ch; buffer[count] = ch; count = count + 1; } buffer[count] = 0; if (ch == '#') { xmitcsum = hex(kgdb_io_ops->read_char()) << 4; xmitcsum += hex(kgdb_io_ops->read_char()); if (checksum != xmitcsum) /* failed checksum */ kgdb_io_ops->write_char('-'); else /* successful transfer */ kgdb_io_ops->write_char('+'); if (kgdb_io_ops->flush) kgdb_io_ops->flush(); } } while (checksum != xmitcsum); } /* * Send the packet in buffer. * Check for gdb connection if asked for. */ static void put_packet(char *buffer) { unsigned char checksum; int count; char ch; /* * $<packet info>#<checksum>. */ while (1) { kgdb_io_ops->write_char('$'); checksum = 0; count = 0; while ((ch = buffer[count])) { kgdb_io_ops->write_char(ch); checksum += ch; count++; } kgdb_io_ops->write_char('#'); kgdb_io_ops->write_char(hex_asc_hi(checksum)); kgdb_io_ops->write_char(hex_asc_lo(checksum)); if (kgdb_io_ops->flush) kgdb_io_ops->flush(); /* Now see what we get in reply. */ ch = kgdb_io_ops->read_char(); if (ch == 3) ch = kgdb_io_ops->read_char(); /* If we get an ACK, we are done. */ if (ch == '+') return; /* * If we get the start of another packet, this means * that GDB is attempting to reconnect. 
We will NAK * the packet being sent, and stop trying to send this * packet. */ if (ch == '$') { kgdb_io_ops->write_char('-'); if (kgdb_io_ops->flush) kgdb_io_ops->flush(); return; } } } /* * Convert the memory pointed to by mem into hex, placing result in buf. * Return a pointer to the last char put in buf (null). May return an error. */ int kgdb_mem2hex(char *mem, char *buf, int count) { char *tmp; int err; /* * We use the upper half of buf as an intermediate buffer for the * raw memory copy. Hex conversion will work against this one. */ tmp = buf + count; err = probe_kernel_read(tmp, mem, count); if (!err) { while (count > 0) { buf = pack_hex_byte(buf, *tmp); tmp++; count--; } *buf = 0; } return err; } /* * Copy the binary array pointed to by buf into mem. Fix $, #, and * 0x7d escaped with 0x7d. Return a pointer to the character after * the last byte written. */ static int kgdb_ebin2mem(char *buf, char *mem, int count) { int err = 0; char c; while (count-- > 0) { c = *buf++; if (c == 0x7d) c = *buf++ ^ 0x20; err = probe_kernel_write(mem, &c, 1); if (err) break; mem++; } return err; } /* * Convert the hex array pointed to by buf into binary to be placed in mem. * Return a pointer to the character AFTER the last byte written. * May return an error. */ int kgdb_hex2mem(char *buf, char *mem, int count) { char *tmp_raw; char *tmp_hex; /* * We use the upper half of buf as an intermediate buffer for the * raw memory that is converted from hex. */ tmp_raw = buf + count * 2; tmp_hex = tmp_raw - 1; while (tmp_hex >= buf) { tmp_raw--; *tmp_raw = hex(*tmp_hex--); *tmp_raw |= hex(*tmp_hex--) << 4; } return probe_kernel_write(mem, tmp_raw, count); } /* * While we find nice hex chars, build a long_val. * Return number of chars processed. 
*/ int kgdb_hex2long(char **ptr, unsigned long *long_val) { int hex_val; int num = 0; int negate = 0; *long_val = 0; if (**ptr == '-') { negate = 1; (*ptr)++; } while (**ptr) { hex_val = hex(**ptr); if (hex_val < 0) break; *long_val = (*long_val << 4) | hex_val; num++; (*ptr)++; } if (negate) *long_val = -*long_val; return num; } /* Write memory due to an 'M' or 'X' packet. */ static int write_mem_msg(int binary) { char *ptr = &remcom_in_buffer[1]; unsigned long addr; unsigned long length; int err; if (kgdb_hex2long(&ptr, &addr) > 0 && *(ptr++) == ',' && kgdb_hex2long(&ptr, &length) > 0 && *(ptr++) == ':') { if (binary) err = kgdb_ebin2mem(ptr, (char *)addr, length); else err = kgdb_hex2mem(ptr, (char *)addr, length); if (err) return err; if (CACHE_FLUSH_IS_SAFE) flush_icache_range(addr, addr + length); return 0; } return -EINVAL; } static void error_packet(char *pkt, int error) { error = -error; pkt[0] = 'E'; pkt[1] = hex_asc[(error / 10)]; pkt[2] = hex_asc[(error % 10)]; pkt[3] = '\0'; } /* * Thread ID accessors. We represent a flat TID space to GDB, where * the per CPU idle threads (which under Linux all have PID 0) are * remapped to negative TIDs. 
*/ #define BUF_THREAD_ID_SIZE 16 static char *pack_threadid(char *pkt, unsigned char *id) { char *limit; limit = pkt + BUF_THREAD_ID_SIZE; while (pkt < limit) pkt = pack_hex_byte(pkt, *id++); return pkt; } static void int_to_threadref(unsigned char *id, int value) { unsigned char *scan; int i = 4; scan = (unsigned char *)id; while (i--) *scan++ = 0; put_unaligned_be32(value, scan); } static struct task_struct *getthread(struct pt_regs *regs, int tid) { /* * Non-positive TIDs are remapped to the cpu shadow information */ if (tid == 0 || tid == -1) tid = -atomic_read(&kgdb_active) - 2; if (tid < 0) { if (kgdb_info[-tid - 2].task) return kgdb_info[-tid - 2].task; else return idle_task(-tid - 2); } /* * find_task_by_pid_ns() does not take the tasklist lock anymore * but is nicely RCU locked - hence is a pretty resilient * thing to use: */ return find_task_by_pid_ns(tid, &init_pid_ns); } /* * CPU debug state control: */ #ifdef CONFIG_SMP static void kgdb_wait(struct pt_regs *regs) { unsigned long flags; int cpu; local_irq_save(flags); cpu = raw_smp_processor_id(); kgdb_info[cpu].debuggerinfo = regs; kgdb_info[cpu].task = current; /* * Make sure the above info reaches the primary CPU before * our cpu_in_kgdb[] flag setting does: */ smp_wmb(); atomic_set(&cpu_in_kgdb[cpu], 1); /* Wait till primary CPU is done with debugging */ while (atomic_read(&passive_cpu_wait[cpu])) cpu_relax(); kgdb_info[cpu].debuggerinfo = NULL; kgdb_info[cpu].task = NULL; /* fix up hardware debug registers on local cpu */ if (arch_kgdb_ops.correct_hw_break) arch_kgdb_ops.correct_hw_break(); /* Signal the primary CPU that we are done: */ atomic_set(&cpu_in_kgdb[cpu], 0); touch_softlockup_watchdog(); clocksource_touch_watchdog(); local_irq_restore(flags); } #endif /* * Some architectures need cache flushes when we set/clear a * breakpoint: */ static void kgdb_flush_swbreak_addr(unsigned long addr) { if (!CACHE_FLUSH_IS_SAFE) return; if (current->mm && current->mm->mmap_cache) { 
flush_cache_range(current->mm->mmap_cache, addr, addr + BREAK_INSTR_SIZE); } /* Force flush instruction cache if it was outside the mm */ flush_icache_range(addr, addr + BREAK_INSTR_SIZE); } /* * SW breakpoint management: */ static int kgdb_activate_sw_breakpoints(void) { unsigned long addr; int error = 0; int i; for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { if (kgdb_break[i].state != BP_SET) continue; addr = kgdb_break[i].bpt_addr; error = kgdb_arch_set_breakpoint(addr, kgdb_break[i].saved_instr); if (error) return error; kgdb_flush_swbreak_addr(addr); kgdb_break[i].state = BP_ACTIVE; } return 0; } static int kgdb_set_sw_break(unsigned long addr) { int err = kgdb_validate_break_address(addr); int breakno = -1; int i; if (err) return err; for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { if ((kgdb_break[i].state == BP_SET) && (kgdb_break[i].bpt_addr == addr)) return -EEXIST; } for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { if (kgdb_break[i].state == BP_REMOVED && kgdb_break[i].bpt_addr == addr) { breakno = i; break; } } if (breakno == -1) { for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { if (kgdb_break[i].state == BP_UNDEFINED) { breakno = i; break; } } } if (breakno == -1) return -E2BIG; kgdb_break[breakno].state = BP_SET; kgdb_break[breakno].type = BP_BREAKPOINT; kgdb_break[breakno].bpt_addr = addr; return 0; } static int kgdb_deactivate_sw_breakpoints(void) { unsigned long addr; int error = 0; int i; for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { if (kgdb_break[i].state != BP_ACTIVE) continue; addr = kgdb_break[i].bpt_addr; error = kgdb_arch_remove_breakpoint(addr, kgdb_break[i].saved_instr); if (error) return error; kgdb_flush_swbreak_addr(addr); kgdb_break[i].state = BP_SET; } return 0; } static int kgdb_remove_sw_break(unsigned long addr) { int i; for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { if ((kgdb_break[i].state == BP_SET) && (kgdb_break[i].bpt_addr == addr)) { kgdb_break[i].state = BP_REMOVED; return 0; } } return -ENOENT; } int kgdb_isremovedbreak(unsigned long 
addr) { int i; for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { if ((kgdb_break[i].state == BP_REMOVED) && (kgdb_break[i].bpt_addr == addr)) return 1; } return 0; } static int remove_all_break(void) { unsigned long addr; int error; int i; /* Clear memory breakpoints. */ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { if (kgdb_break[i].state != BP_ACTIVE) goto setundefined; addr = kgdb_break[i].bpt_addr; error = kgdb_arch_remove_breakpoint(addr, kgdb_break[i].saved_instr); if (error) printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n", addr); setundefined: kgdb_break[i].state = BP_UNDEFINED; } /* Clear hardware breakpoints. */ if (arch_kgdb_ops.remove_all_hw_break) arch_kgdb_ops.remove_all_hw_break(); return 0; } /* * Remap normal tasks to their real PID, * CPU shadow threads are mapped to -CPU - 2 */ static inline int shadow_pid(int realpid) { if (realpid) return realpid; return -raw_smp_processor_id() - 2; } static char gdbmsgbuf[BUFMAX + 1]; static void kgdb_msg_write(const char *s, int len) { char *bufptr; int wcount; int i; /* 'O'utput */ gdbmsgbuf[0] = 'O'; /* Fill and send buffers... */ while (len > 0) { bufptr = gdbmsgbuf + 1; /* Calculate how many this time */ if ((len << 1) > (BUFMAX - 2)) wcount = (BUFMAX - 2) >> 1; else wcount = len; /* Pack in hex chars */ for (i = 0; i < wcount; i++) bufptr = pack_hex_byte(bufptr, s[i]); *bufptr = '\0'; /* Move up */ s += wcount; len -= wcount; /* Write packet */ put_packet(gdbmsgbuf); } } /* * Return true if there is a valid kgdb I/O module. Also if no * debugger is attached a message can be printed to the console about * waiting for the debugger to attach. * * The print_wait argument is only to be true when called from inside * the core kgdb_handle_exception, because it will wait for the * debugger to attach. 
*/ static int kgdb_io_ready(int print_wait) { if (!kgdb_io_ops) return 0; if (kgdb_connected) return 1; if (atomic_read(&kgdb_setting_breakpoint)) return 1; if (print_wait) printk(KERN_CRIT "KGDB: Waiting for remote debugger\n"); return 1; } /* * All the functions that start with gdb_cmd are the various * operations to implement the handlers for the gdbserial protocol * where KGDB is communicating with an external debugger */ /* Handle the '?' status packets */ static void gdb_cmd_status(struct kgdb_state *ks) { /* * We know that this packet is only sent * during initial connect. So to be safe, * we clear out our breakpoints now in case * GDB is reconnecting. */ remove_all_break(); remcom_out_buffer[0] = 'S'; pack_hex_byte(&remcom_out_buffer[1], ks->signo); } /* Handle the 'g' get registers request */ static void gdb_cmd_getregs(struct kgdb_state *ks) { struct task_struct *thread; void *local_debuggerinfo; int i; thread = kgdb_usethread; if (!thread) { thread = kgdb_info[ks->cpu].task; local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo; } else { local_debuggerinfo = NULL; for_each_online_cpu(i) { /* * Try to find the task on some other * or possibly this node if we do not * find the matching task then we try * to approximate the results. */ if (thread == kgdb_info[i].task) local_debuggerinfo = kgdb_info[i].debuggerinfo; } } /* * All threads that don't have debuggerinfo should be * in __schedule() sleeping, since all other CPUs * are in kgdb_wait, and thus have debuggerinfo. */ if (local_debuggerinfo) { pt_regs_to_gdb_regs(gdb_regs, local_debuggerinfo); } else { /* * Pull stuff saved during switch_to; nothing * else is accessible (or even particularly * relevant). * * This should be enough for a stack trace. 
*/ sleeping_thread_to_gdb_regs(gdb_regs, thread); } kgdb_mem2hex((char *)gdb_regs, remcom_out_buffer, NUMREGBYTES); } /* Handle the 'G' set registers request */ static void gdb_cmd_setregs(struct kgdb_state *ks) { kgdb_hex2mem(&remcom_in_buffer[1], (char *)gdb_regs, NUMREGBYTES); if (kgdb_usethread && kgdb_usethread != current) { error_packet(remcom_out_buffer, -EINVAL); } else { gdb_regs_to_pt_regs(gdb_regs, ks->linux_regs); strcpy(remcom_out_buffer, "OK"); } } /* Handle the 'm' memory read bytes */ static void gdb_cmd_memread(struct kgdb_state *ks) { char *ptr = &remcom_in_buffer[1]; unsigned long length; unsigned long addr; int err; if (kgdb_hex2long(&ptr, &addr) > 0 && *ptr++ == ',' && kgdb_hex2long(&ptr, &length) > 0) { err = kgdb_mem2hex((char *)addr, remcom_out_buffer, length); if (err) error_packet(remcom_out_buffer, err); } else { error_packet(remcom_out_buffer, -EINVAL); } } /* Handle the 'M' memory write bytes */ static void gdb_cmd_memwrite(struct kgdb_state *ks) { int err = write_mem_msg(0); if (err) error_packet(remcom_out_buffer, err); else strcpy(remcom_out_buffer, "OK"); } /* Handle the 'X' memory binary write bytes */ static void gdb_cmd_binwrite(struct kgdb_state *ks) { int err = write_mem_msg(1); if (err) error_packet(remcom_out_buffer, err); else strcpy(remcom_out_buffer, "OK"); } /* Handle the 'D' or 'k', detach or kill packets */ static void gdb_cmd_detachkill(struct kgdb_state *ks) { int error; /* The detach case */ if (remcom_in_buffer[0] == 'D') { error = remove_all_break(); if (error < 0) { error_packet(remcom_out_buffer, error); } else { strcpy(remcom_out_buffer, "OK"); kgdb_connected = 0; } put_packet(remcom_out_buffer); } else { /* * Assume the kill case, with no exit code checking, * trying to force detach the debugger: */ remove_all_break(); kgdb_connected = 0; } } /* Handle the 'R' reboot packets */ static int gdb_cmd_reboot(struct kgdb_state *ks) { /* For now, only honor R0 */ if (strcmp(remcom_in_buffer, "R0") == 0) { 
printk(KERN_CRIT "Executing emergency reboot\n"); strcpy(remcom_out_buffer, "OK"); put_packet(remcom_out_buffer); /* * Execution should not return from * machine_emergency_restart() */ machine_emergency_restart(); kgdb_connected = 0; return 1; } return 0; } /* Handle the 'q' query packets */ static void gdb_cmd_query(struct kgdb_state *ks) { struct task_struct *g; struct task_struct *p; unsigned char thref[8]; char *ptr; int i; int cpu; int finished = 0; switch (remcom_in_buffer[1]) { case 's': case 'f': if (memcmp(remcom_in_buffer + 2, "ThreadInfo", 10)) { error_packet(remcom_out_buffer, -EINVAL); break; } i = 0; remcom_out_buffer[0] = 'm'; ptr = remcom_out_buffer + 1; if (remcom_in_buffer[1] == 'f') { /* Each cpu is a shadow thread */ for_each_online_cpu(cpu) { ks->thr_query = 0; int_to_threadref(thref, -cpu - 2); pack_threadid(ptr, thref); ptr += BUF_THREAD_ID_SIZE; *(ptr++) = ','; i++; } } do_each_thread(g, p) { if (i >= ks->thr_query && !finished) { int_to_threadref(thref, p->pid); pack_threadid(ptr, thref); ptr += BUF_THREAD_ID_SIZE; *(ptr++) = ','; ks->thr_query++; if (ks->thr_query % KGDB_MAX_THREAD_QUERY == 0) finished = 1; } i++; } while_each_thread(g, p); *(--ptr) = '\0'; break; case 'C': /* Current thread id */ strcpy(remcom_out_buffer, "QC"); ks->threadid = shadow_pid(current->pid); int_to_threadref(thref, ks->threadid); pack_threadid(remcom_out_buffer + 2, thref); break; case 'T': if (memcmp(remcom_in_buffer + 1, "ThreadExtraInfo,", 16)) { error_packet(remcom_out_buffer, -EINVAL); break; } ks->threadid = 0; ptr = remcom_in_buffer + 17; kgdb_hex2long(&ptr, &ks->threadid); if (!getthread(ks->linux_regs, ks->threadid)) { error_packet(remcom_out_buffer, -EINVAL); break; } if ((int)ks->threadid > 0) { kgdb_mem2hex(getthread(ks->linux_regs, ks->threadid)->comm, remcom_out_buffer, 16); } else { static char tmpstr[23 + BUF_THREAD_ID_SIZE]; sprintf(tmpstr, "shadowCPU%d", (int)(-ks->threadid - 2)); kgdb_mem2hex(tmpstr, remcom_out_buffer, strlen(tmpstr)); } 
break; } } /* Handle the 'H' task query packets */ static void gdb_cmd_task(struct kgdb_state *ks) { struct task_struct *thread; char *ptr; switch (remcom_in_buffer[1]) { case 'g': ptr = &remcom_in_buffer[2]; kgdb_hex2long(&ptr, &ks->threadid); thread = getthread(ks->linux_regs, ks->threadid); if (!thread && ks->threadid > 0) { error_packet(remcom_out_buffer, -EINVAL); break; } kgdb_usethread = thread; ks->kgdb_usethreadid = ks->threadid; strcpy(remcom_out_buffer, "OK"); break; case 'c': ptr = &remcom_in_buffer[2]; kgdb_hex2long(&ptr, &ks->threadid); if (!ks->threadid) { kgdb_contthread = NULL; } else { thread = getthread(ks->linux_regs, ks->threadid); if (!thread && ks->threadid > 0) { error_packet(remcom_out_buffer, -EINVAL); break; } kgdb_contthread = thread; } strcpy(remcom_out_buffer, "OK"); break; } } /* Handle the 'T' thread query packets */ static void gdb_cmd_thread(struct kgdb_state *ks) { char *ptr = &remcom_in_buffer[1]; struct task_struct *thread; kgdb_hex2long(&ptr, &ks->threadid); thread = getthread(ks->linux_regs, ks->threadid); if (thread) strcpy(remcom_out_buffer, "OK"); else error_packet(remcom_out_buffer, -EINVAL); } /* Handle the 'z' or 'Z' breakpoint remove or set packets */ static void gdb_cmd_break(struct kgdb_state *ks) { /* * Since GDB-5.3, it's been drafted that '0' is a software * breakpoint, '1' is a hardware breakpoint, so let's do that. */ char *bpt_type = &remcom_in_buffer[1]; char *ptr = &remcom_in_buffer[2]; unsigned long addr; unsigned long length; int error = 0; if (arch_kgdb_ops.set_hw_breakpoint && *bpt_type >= '1') { /* Unsupported */ if (*bpt_type > '4') return; } else { if (*bpt_type != '0' && *bpt_type != '1') /* Unsupported. */ return; } /* * Test if this is a hardware breakpoint, and * if we support it: */ if (*bpt_type == '1' && !(arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT)) /* Unsupported. 
*/ return; if (*(ptr++) != ',') { error_packet(remcom_out_buffer, -EINVAL); return; } if (!kgdb_hex2long(&ptr, &addr)) { error_packet(remcom_out_buffer, -EINVAL); return; } if (*(ptr++) != ',' || !kgdb_hex2long(&ptr, &length)) { error_packet(remcom_out_buffer, -EINVAL); return; } if (remcom_in_buffer[0] == 'Z' && *bpt_type == '0') error = kgdb_set_sw_break(addr); else if (remcom_in_buffer[0] == 'z' && *bpt_type == '0') error = kgdb_remove_sw_break(addr); else if (remcom_in_buffer[0] == 'Z') error = arch_kgdb_ops.set_hw_breakpoint(addr, (int)length, *bpt_type - '0'); else if (remcom_in_buffer[0] == 'z') error = arch_kgdb_ops.remove_hw_breakpoint(addr, (int) length, *bpt_type - '0'); if (error == 0) strcpy(remcom_out_buffer, "OK"); else error_packet(remcom_out_buffer, error); } /* Handle the 'C' signal / exception passing packets */ static int gdb_cmd_exception_pass(struct kgdb_state *ks) { /* C09 == pass exception * C15 == detach kgdb, pass exception */ if (remcom_in_buffer[1] == '0' && remcom_in_buffer[2] == '9') { ks->pass_exception = 1; remcom_in_buffer[0] = 'c'; } else if (remcom_in_buffer[1] == '1' && remcom_in_buffer[2] == '5') { ks->pass_exception = 1; remcom_in_buffer[0] = 'D'; remove_all_break(); kgdb_connected = 0; return 1; } else { error_packet(remcom_out_buffer, -EINVAL); return 0; } /* Indicate fall through */ return -1; } /* * This function performs all gdbserial command procesing */ static int gdb_serial_stub(struct kgdb_state *ks) { int error = 0; int tmp; /* Clear the out buffer. 
*/ memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer)); if (kgdb_connected) { unsigned char thref[8]; char *ptr; /* Reply to host that an exception has occurred */ ptr = remcom_out_buffer; *ptr++ = 'T'; ptr = pack_hex_byte(ptr, ks->signo); ptr += strlen(strcpy(ptr, "thread:")); int_to_threadref(thref, shadow_pid(current->pid)); ptr = pack_threadid(ptr, thref); *ptr++ = ';'; put_packet(remcom_out_buffer); } kgdb_usethread = kgdb_info[ks->cpu].task; ks->kgdb_usethreadid = shadow_pid(kgdb_info[ks->cpu].task->pid); ks->pass_exception = 0; while (1) { error = 0; /* Clear the out buffer. */ memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer)); get_packet(remcom_in_buffer); switch (remcom_in_buffer[0]) { case '?': /* gdbserial status */ gdb_cmd_status(ks); break; case 'g': /* return the value of the CPU registers */ gdb_cmd_getregs(ks); break; case 'G': /* set the value of the CPU registers - return OK */ gdb_cmd_setregs(ks); break; case 'm': /* mAA..AA,LLLL Read LLLL bytes at address AA..AA */ gdb_cmd_memread(ks); break; case 'M': /* MAA..AA,LLLL: Write LLLL bytes at address AA..AA */ gdb_cmd_memwrite(ks); break; case 'X': /* XAA..AA,LLLL: Write LLLL bytes at address AA..AA */ gdb_cmd_binwrite(ks); break; /* kill or detach. KGDB should treat this like a * continue. 
*/ case 'D': /* Debugger detach */ case 'k': /* Debugger detach via kill */ gdb_cmd_detachkill(ks); goto default_handle; case 'R': /* Reboot */ if (gdb_cmd_reboot(ks)) goto default_handle; break; case 'q': /* query command */ gdb_cmd_query(ks); break; case 'H': /* task related */ gdb_cmd_task(ks); break; case 'T': /* Query thread status */ gdb_cmd_thread(ks); break; case 'z': /* Break point remove */ case 'Z': /* Break point set */ gdb_cmd_break(ks); break; case 'C': /* Exception passing */ tmp = gdb_cmd_exception_pass(ks); if (tmp > 0) goto default_handle; if (tmp == 0) break; /* Fall through on tmp < 0 */ case 'c': /* Continue packet */ case 's': /* Single step packet */ if (kgdb_contthread && kgdb_contthread != current) { /* Can't switch threads in kgdb */ error_packet(remcom_out_buffer, -EINVAL); break; } kgdb_activate_sw_breakpoints(); /* Fall through to default processing */ default: default_handle: error = kgdb_arch_handle_exception(ks->ex_vector, ks->signo, ks->err_code, remcom_in_buffer, remcom_out_buffer, ks->linux_regs); /* * Leave cmd processing on error, detach, * kill, continue, or single step. */ if (error >= 0 || remcom_in_buffer[0] == 'D' || remcom_in_buffer[0] == 'k') { error = 0; goto kgdb_exit; } } /* reply to the request */ put_packet(remcom_out_buffer); } kgdb_exit: if (ks->pass_exception) error = 1; return error; } static int kgdb_reenter_check(struct kgdb_state *ks) { unsigned long addr; if (atomic_read(&kgdb_active) != raw_smp_processor_id()) return 0; /* Panic on recursive debugger calls: */ exception_level++; addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs); kgdb_deactivate_sw_breakpoints(); /* * If the break point removed ok at the place exception * occurred, try to recover and print a warning to the end * user because the user planted a breakpoint in a place that * KGDB needs in order to function. 
*/ if (kgdb_remove_sw_break(addr) == 0) { exception_level = 0; kgdb_skipexception(ks->ex_vector, ks->linux_regs); kgdb_activate_sw_breakpoints(); printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n", addr); WARN_ON_ONCE(1); return 1; } remove_all_break(); kgdb_skipexception(ks->ex_vector, ks->linux_regs); if (exception_level > 1) { dump_stack(); panic("Recursive entry to debugger"); } printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n"); dump_stack(); panic("Recursive entry to debugger"); return 1; } /* * kgdb_handle_exception() - main entry point from a kernel exception * * Locking hierarchy: * interface locks, if any (begin_session) * kgdb lock (kgdb_active) */ int kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs) { struct kgdb_state kgdb_var; struct kgdb_state *ks = &kgdb_var; unsigned long flags; int error = 0; int i, cpu; ks->cpu = raw_smp_processor_id(); ks->ex_vector = evector; ks->signo = signo; ks->ex_vector = evector; ks->err_code = ecode; ks->kgdb_usethreadid = 0; ks->linux_regs = regs; if (kgdb_reenter_check(ks)) return 0; /* Ouch, double exception ! */ acquirelock: /* * Interrupts will be restored by the 'trap return' code, except when * single stepping. */ local_irq_save(flags); cpu = raw_smp_processor_id(); /* * Acquire the kgdb_active lock: */ while (atomic_cmpxchg(&kgdb_active, -1, cpu) != -1) cpu_relax(); /* * Do not start the debugger connection on this CPU if the last * instance of the exception handler wanted to come into the * debugger on a different CPU via a single step */ if (atomic_read(&kgdb_cpu_doing_single_step) != -1 && atomic_read(&kgdb_cpu_doing_single_step) != cpu) { atomic_set(&kgdb_active, -1); touch_softlockup_watchdog(); clocksource_touch_watchdog(); local_irq_restore(flags); goto acquirelock; } if (!kgdb_io_ready(1)) { error = 1; goto kgdb_restore; /* No I/O connection, so resume the system */ } /* * Don't enter if we have hit a removed breakpoint. 
*/ if (kgdb_skipexception(ks->ex_vector, ks->linux_regs)) goto kgdb_restore; /* Call the I/O driver's pre_exception routine */ if (kgdb_io_ops->pre_exception) kgdb_io_ops->pre_exception(); kgdb_info[ks->cpu].debuggerinfo = ks->linux_regs; kgdb_info[ks->cpu].task = current; kgdb_disable_hw_debug(ks->linux_regs); /* * Get the passive CPU lock which will hold all the non-primary * CPU in a spin state while the debugger is active */ if (!kgdb_single_step) { for (i = 0; i < NR_CPUS; i++) atomic_set(&passive_cpu_wait[i], 1); } /* * spin_lock code is good enough as a barrier so we don't * need one here: */ atomic_set(&cpu_in_kgdb[ks->cpu], 1); #ifdef CONFIG_SMP /* Signal the other CPUs to enter kgdb_wait() */ if ((!kgdb_single_step) && kgdb_do_roundup) kgdb_roundup_cpus(flags); #endif /* * Wait for the other CPUs to be notified and be waiting for us: */ for_each_online_cpu(i) { while (!atomic_read(&cpu_in_kgdb[i])) cpu_relax(); } /* * At this point the primary processor is completely * in the debugger and all secondary CPUs are quiescent */ kgdb_post_primary_code(ks->linux_regs, ks->ex_vector, ks->err_code); kgdb_deactivate_sw_breakpoints(); kgdb_single_step = 0; kgdb_contthread = current; exception_level = 0; /* Talk to debugger with gdbserial protocol */ error = gdb_serial_stub(ks); /* Call the I/O driver's post_exception routine */ if (kgdb_io_ops->post_exception) kgdb_io_ops->post_exception(); kgdb_info[ks->cpu].debuggerinfo = NULL; kgdb_info[ks->cpu].task = NULL; atomic_set(&cpu_in_kgdb[ks->cpu], 0); if (!kgdb_single_step) { for (i = NR_CPUS-1; i >= 0; i--) atomic_set(&passive_cpu_wait[i], 0); /* * Wait till all the CPUs have quit * from the debugger. 
*/ for_each_online_cpu(i) { while (atomic_read(&cpu_in_kgdb[i])) cpu_relax(); } } kgdb_restore: /* Free kgdb_active */ atomic_set(&kgdb_active, -1); touch_softlockup_watchdog(); clocksource_touch_watchdog(); local_irq_restore(flags); return error; } int kgdb_nmicallback(int cpu, void *regs) { #ifdef CONFIG_SMP if (!atomic_read(&cpu_in_kgdb[cpu]) && atomic_read(&kgdb_active) != cpu && atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)])) { kgdb_wait((struct pt_regs *)regs); return 0; } #endif return 1; } static void kgdb_console_write(struct console *co, const char *s, unsigned count) { unsigned long flags; /* If we're debugging, or KGDB has not connected, don't try * and print. */ if (!kgdb_connected || atomic_read(&kgdb_active) != -1) return; local_irq_save(flags); kgdb_msg_write(s, count); local_irq_restore(flags); } static struct console kgdbcons = { .name = "kgdb", .write = kgdb_console_write, .flags = CON_PRINTBUFFER | CON_ENABLED, .index = -1, }; #ifdef CONFIG_MAGIC_SYSRQ static void sysrq_handle_gdb(int key, struct tty_struct *tty) { if (!kgdb_io_ops) { printk(KERN_CRIT "ERROR: No KGDB I/O module available\n"); return; } if (!kgdb_connected) printk(KERN_CRIT "Entering KGDB\n"); kgdb_breakpoint(); } static struct sysrq_key_op sysrq_gdb_op = { .handler = sysrq_handle_gdb, .help_msg = "debug(G)", .action_msg = "DEBUG", }; #endif static void kgdb_register_callbacks(void) { if (!kgdb_io_module_registered) { kgdb_io_module_registered = 1; kgdb_arch_init(); #ifdef CONFIG_MAGIC_SYSRQ register_sysrq_key('g', &sysrq_gdb_op); #endif if (kgdb_use_con && !kgdb_con_registered) { register_console(&kgdbcons); kgdb_con_registered = 1; } } } static void kgdb_unregister_callbacks(void) { /* * When this routine is called KGDB should unregister from the * panic handler and clean up, making sure it is not handling any * break exceptions at the time. 
*/ if (kgdb_io_module_registered) { kgdb_io_module_registered = 0; kgdb_arch_exit(); #ifdef CONFIG_MAGIC_SYSRQ unregister_sysrq_key('g', &sysrq_gdb_op); #endif if (kgdb_con_registered) { unregister_console(&kgdbcons); kgdb_con_registered = 0; } } } static void kgdb_initial_breakpoint(void) { kgdb_break_asap = 0; printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n"); kgdb_breakpoint(); } /** * kgdb_register_io_module - register KGDB IO module * @new_kgdb_io_ops: the io ops vector * * Register it with the KGDB core. */ int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops) { int err; spin_lock(&kgdb_registration_lock); if (kgdb_io_ops) { spin_unlock(&kgdb_registration_lock); printk(KERN_ERR "kgdb: Another I/O driver is already " "registered with KGDB.\n"); return -EBUSY; } if (new_kgdb_io_ops->init) { err = new_kgdb_io_ops->init(); if (err) { spin_unlock(&kgdb_registration_lock); return err; } } kgdb_io_ops = new_kgdb_io_ops; spin_unlock(&kgdb_registration_lock); printk(KERN_INFO "kgdb: Registered I/O driver %s.\n", new_kgdb_io_ops->name); /* Arm KGDB now. */ kgdb_register_callbacks(); if (kgdb_break_asap) kgdb_initial_breakpoint(); return 0; } EXPORT_SYMBOL_GPL(kgdb_register_io_module); /** * kkgdb_unregister_io_module - unregister KGDB IO module * @old_kgdb_io_ops: the io ops vector * * Unregister it with the KGDB core. */ void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops) { BUG_ON(kgdb_connected); /* * KGDB is no longer able to communicate out, so * unregister our callbacks and reset state. */ kgdb_unregister_callbacks(); spin_lock(&kgdb_registration_lock); WARN_ON_ONCE(kgdb_io_ops != old_kgdb_io_ops); kgdb_io_ops = NULL; spin_unlock(&kgdb_registration_lock); printk(KERN_INFO "kgdb: Unregistered I/O driver %s, debugger disabled.\n", old_kgdb_io_ops->name); } EXPORT_SYMBOL_GPL(kgdb_unregister_io_module); /** * kgdb_breakpoint - generate breakpoint exception * * This function will generate a breakpoint exception. 
It is used at the * beginning of a program to sync up with a debugger and can be used * otherwise as a quick means to stop program execution and "break" into * the debugger. */ void kgdb_breakpoint(void) { atomic_set(&kgdb_setting_breakpoint, 1); wmb(); /* Sync point before breakpoint */ arch_kgdb_breakpoint(); wmb(); /* Sync point after breakpoint */ atomic_set(&kgdb_setting_breakpoint, 0); } EXPORT_SYMBOL_GPL(kgdb_breakpoint); static int __init opt_kgdb_wait(char *str) { kgdb_break_asap = 1; if (kgdb_io_module_registered) kgdb_initial_breakpoint(); return 0; } early_param("kgdbwait", opt_kgdb_wait);
gpl-2.0
YoungjaeLee/linux-4.3-cxlbdev
drivers/hwtracing/coresight/coresight-funnel.c
661
7031
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/device.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include <linux/coresight.h> #include <linux/amba/bus.h> #include <linux/clk.h> #include "coresight-priv.h" #define FUNNEL_FUNCTL 0x000 #define FUNNEL_PRICTL 0x004 #define FUNNEL_HOLDTIME_MASK 0xf00 #define FUNNEL_HOLDTIME_SHFT 0x8 #define FUNNEL_HOLDTIME (0x7 << FUNNEL_HOLDTIME_SHFT) /** * struct funnel_drvdata - specifics associated to a funnel component * @base: memory mapped base address for this component. * @dev: the device entity associated to this component. * @atclk: optional clock for the core parts of the funnel. * @csdev: component vitals needed by the framework. * @priority: port selection order. 
*/ struct funnel_drvdata { void __iomem *base; struct device *dev; struct clk *atclk; struct coresight_device *csdev; unsigned long priority; }; static void funnel_enable_hw(struct funnel_drvdata *drvdata, int port) { u32 functl; CS_UNLOCK(drvdata->base); functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL); functl &= ~FUNNEL_HOLDTIME_MASK; functl |= FUNNEL_HOLDTIME; functl |= (1 << port); writel_relaxed(functl, drvdata->base + FUNNEL_FUNCTL); writel_relaxed(drvdata->priority, drvdata->base + FUNNEL_PRICTL); CS_LOCK(drvdata->base); } static int funnel_enable(struct coresight_device *csdev, int inport, int outport) { struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); pm_runtime_get_sync(drvdata->dev); funnel_enable_hw(drvdata, inport); dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport); return 0; } static void funnel_disable_hw(struct funnel_drvdata *drvdata, int inport) { u32 functl; CS_UNLOCK(drvdata->base); functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL); functl &= ~(1 << inport); writel_relaxed(functl, drvdata->base + FUNNEL_FUNCTL); CS_LOCK(drvdata->base); } static void funnel_disable(struct coresight_device *csdev, int inport, int outport) { struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); funnel_disable_hw(drvdata, inport); pm_runtime_put(drvdata->dev); dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport); } static const struct coresight_ops_link funnel_link_ops = { .enable = funnel_enable, .disable = funnel_disable, }; static const struct coresight_ops funnel_cs_ops = { .link_ops = &funnel_link_ops, }; static ssize_t priority_show(struct device *dev, struct device_attribute *attr, char *buf) { struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent); unsigned long val = drvdata->priority; return sprintf(buf, "%#lx\n", val); } static ssize_t priority_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { int ret; unsigned long val; struct funnel_drvdata 
*drvdata = dev_get_drvdata(dev->parent); ret = kstrtoul(buf, 16, &val); if (ret) return ret; drvdata->priority = val; return size; } static DEVICE_ATTR_RW(priority); static u32 get_funnel_ctrl_hw(struct funnel_drvdata *drvdata) { u32 functl; CS_UNLOCK(drvdata->base); functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL); CS_LOCK(drvdata->base); return functl; } static ssize_t funnel_ctrl_show(struct device *dev, struct device_attribute *attr, char *buf) { u32 val; struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent); pm_runtime_get_sync(drvdata->dev); val = get_funnel_ctrl_hw(drvdata); pm_runtime_put(drvdata->dev); return sprintf(buf, "%#x\n", val); } static DEVICE_ATTR_RO(funnel_ctrl); static struct attribute *coresight_funnel_attrs[] = { &dev_attr_funnel_ctrl.attr, &dev_attr_priority.attr, NULL, }; ATTRIBUTE_GROUPS(coresight_funnel); static int funnel_probe(struct amba_device *adev, const struct amba_id *id) { int ret; void __iomem *base; struct device *dev = &adev->dev; struct coresight_platform_data *pdata = NULL; struct funnel_drvdata *drvdata; struct resource *res = &adev->res; struct coresight_desc *desc; struct device_node *np = adev->dev.of_node; if (np) { pdata = of_get_coresight_platform_data(dev, np); if (IS_ERR(pdata)) return PTR_ERR(pdata); adev->dev.platform_data = pdata; } drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) return -ENOMEM; drvdata->dev = &adev->dev; drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */ if (!IS_ERR(drvdata->atclk)) { ret = clk_prepare_enable(drvdata->atclk); if (ret) return ret; } dev_set_drvdata(dev, drvdata); /* Validity for the resource is already checked by the AMBA core */ base = devm_ioremap_resource(dev, res); if (IS_ERR(base)) return PTR_ERR(base); drvdata->base = base; pm_runtime_put(&adev->dev); desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); if (!desc) return -ENOMEM; desc->type = CORESIGHT_DEV_TYPE_LINK; desc->subtype.link_subtype = 
CORESIGHT_DEV_SUBTYPE_LINK_MERG; desc->ops = &funnel_cs_ops; desc->pdata = pdata; desc->dev = dev; desc->groups = coresight_funnel_groups; drvdata->csdev = coresight_register(desc); if (IS_ERR(drvdata->csdev)) return PTR_ERR(drvdata->csdev); dev_info(dev, "FUNNEL initialized\n"); return 0; } static int funnel_remove(struct amba_device *adev) { struct funnel_drvdata *drvdata = amba_get_drvdata(adev); coresight_unregister(drvdata->csdev); return 0; } #ifdef CONFIG_PM static int funnel_runtime_suspend(struct device *dev) { struct funnel_drvdata *drvdata = dev_get_drvdata(dev); if (drvdata && !IS_ERR(drvdata->atclk)) clk_disable_unprepare(drvdata->atclk); return 0; } static int funnel_runtime_resume(struct device *dev) { struct funnel_drvdata *drvdata = dev_get_drvdata(dev); if (drvdata && !IS_ERR(drvdata->atclk)) clk_prepare_enable(drvdata->atclk); return 0; } #endif static const struct dev_pm_ops funnel_dev_pm_ops = { SET_RUNTIME_PM_OPS(funnel_runtime_suspend, funnel_runtime_resume, NULL) }; static struct amba_id funnel_ids[] = { { .id = 0x0003b908, .mask = 0x0003ffff, }, { 0, 0}, }; static struct amba_driver funnel_driver = { .drv = { .name = "coresight-funnel", .owner = THIS_MODULE, .pm = &funnel_dev_pm_ops, }, .probe = funnel_probe, .remove = funnel_remove, .id_table = funnel_ids, }; module_amba_driver(funnel_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("CoreSight Funnel driver");
gpl-2.0
Sublime-Development/kernel_flounder
drivers/net/wireless/ath/ath9k/mac.c
661
25923
/* * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "hw.h" #include "hw-ops.h" #include <linux/export.h> static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah, struct ath9k_tx_queue_info *qi) { ath_dbg(ath9k_hw_common(ah), INTERRUPT, "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", ah->txok_interrupt_mask, ah->txerr_interrupt_mask, ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask, ah->txurn_interrupt_mask); ENABLE_REGWRITE_BUFFER(ah); REG_WRITE(ah, AR_IMR_S0, SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK) | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC)); REG_WRITE(ah, AR_IMR_S1, SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR) | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL)); ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN; ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN); REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg); REGWRITE_BUFFER_FLUSH(ah); } u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q) { return REG_READ(ah, AR_QTXDP(q)); } EXPORT_SYMBOL(ath9k_hw_gettxbuf); void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp) { REG_WRITE(ah, AR_QTXDP(q), txdp); } EXPORT_SYMBOL(ath9k_hw_puttxbuf); void ath9k_hw_txstart(struct ath_hw *ah, u32 q) { ath_dbg(ath9k_hw_common(ah), QUEUE, "Enable TXE on queue: %u\n", q); 
REG_WRITE(ah, AR_Q_TXE, 1 << q); } EXPORT_SYMBOL(ath9k_hw_txstart); u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q) { u32 npend; npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT; if (npend == 0) { if (REG_READ(ah, AR_Q_TXE) & (1 << q)) npend = 1; } return npend; } EXPORT_SYMBOL(ath9k_hw_numtxpending); /** * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level * * @ah: atheros hardware struct * @bIncTrigLevel: whether or not the frame trigger level should be updated * * The frame trigger level specifies the minimum number of bytes, * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO * before the PCU will initiate sending the frame on the air. This can * mean we initiate transmit before a full frame is on the PCU TX FIFO. * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs * first) * * Caution must be taken to ensure to set the frame trigger level based * on the DMA request size. For example if the DMA request size is set to * 128 bytes the trigger level cannot exceed 6 * 64 = 384. This is because * there need to be enough space in the tx FIFO for the requested transfer * size. Hence the tx FIFO will stop with 512 - 128 = 384 bytes. If we set * the threshold to a value beyond 6, then the transmit will hang. * * Current dual stream devices have a PCU TX FIFO size of 8 KB. * Current single stream devices have a PCU TX FIFO size of 4 KB, however, * there is a hardware issue which forces us to use 2 KB instead so the * frame trigger level must not exceed 2 KB for these chipsets. 
*/ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel) { u32 txcfg, curLevel, newLevel; if (ah->tx_trig_level >= ah->config.max_txtrig_level) return false; ath9k_hw_disable_interrupts(ah); txcfg = REG_READ(ah, AR_TXCFG); curLevel = MS(txcfg, AR_FTRIG); newLevel = curLevel; if (bIncTrigLevel) { if (curLevel < ah->config.max_txtrig_level) newLevel++; } else if (curLevel > MIN_TX_FIFO_THRESHOLD) newLevel--; if (newLevel != curLevel) REG_WRITE(ah, AR_TXCFG, (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG)); ath9k_hw_enable_interrupts(ah); ah->tx_trig_level = newLevel; return newLevel != curLevel; } EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel); void ath9k_hw_abort_tx_dma(struct ath_hw *ah) { int maxdelay = 1000; int i, q; if (ah->curchan) { if (IS_CHAN_HALF_RATE(ah->curchan)) maxdelay *= 2; else if (IS_CHAN_QUARTER_RATE(ah->curchan)) maxdelay *= 4; } REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M); REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF); REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF); for (q = 0; q < AR_NUM_QCU; q++) { for (i = 0; i < maxdelay; i++) { if (i) udelay(5); if (!ath9k_hw_numtxpending(ah, q)) break; } } REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF); REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF); REG_WRITE(ah, AR_Q_TXD, 0); } EXPORT_SYMBOL(ath9k_hw_abort_tx_dma); bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q) { #define ATH9K_TX_STOP_DMA_TIMEOUT 1000 /* usec */ #define ATH9K_TIME_QUANTUM 100 /* usec */ int wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM; int wait; REG_WRITE(ah, AR_Q_TXD, 1 << q); for (wait = wait_time; wait != 0; wait--) { if (wait != wait_time) udelay(ATH9K_TIME_QUANTUM); if (ath9k_hw_numtxpending(ah, q) == 0) break; } REG_WRITE(ah, AR_Q_TXD, 0); return wait != 0; #undef ATH9K_TX_STOP_DMA_TIMEOUT #undef 
ATH9K_TIME_QUANTUM } EXPORT_SYMBOL(ath9k_hw_stop_dma_queue); bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q, const struct ath9k_tx_queue_info *qinfo) { u32 cw; struct ath_common *common = ath9k_hw_common(ah); struct ath9k_tx_queue_info *qi; qi = &ah->txq[q]; if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { ath_dbg(common, QUEUE, "Set TXQ properties, inactive queue: %u\n", q); return false; } ath_dbg(common, QUEUE, "Set queue properties for: %u\n", q); qi->tqi_ver = qinfo->tqi_ver; qi->tqi_subtype = qinfo->tqi_subtype; qi->tqi_qflags = qinfo->tqi_qflags; qi->tqi_priority = qinfo->tqi_priority; if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT) qi->tqi_aifs = min(qinfo->tqi_aifs, 255U); else qi->tqi_aifs = INIT_AIFS; if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) { cw = min(qinfo->tqi_cwmin, 1024U); qi->tqi_cwmin = 1; while (qi->tqi_cwmin < cw) qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1; } else qi->tqi_cwmin = qinfo->tqi_cwmin; if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) { cw = min(qinfo->tqi_cwmax, 1024U); qi->tqi_cwmax = 1; while (qi->tqi_cwmax < cw) qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1; } else qi->tqi_cwmax = INIT_CWMAX; if (qinfo->tqi_shretry != 0) qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U); else qi->tqi_shretry = INIT_SH_RETRY; if (qinfo->tqi_lgretry != 0) qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U); else qi->tqi_lgretry = INIT_LG_RETRY; qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod; qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit; qi->tqi_burstTime = qinfo->tqi_burstTime; qi->tqi_readyTime = qinfo->tqi_readyTime; switch (qinfo->tqi_subtype) { case ATH9K_WME_UPSD: if (qi->tqi_type == ATH9K_TX_QUEUE_DATA) qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS; break; default: break; } return true; } EXPORT_SYMBOL(ath9k_hw_set_txq_props); bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q, struct ath9k_tx_queue_info *qinfo) { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_tx_queue_info *qi; qi = &ah->txq[q]; if (qi->tqi_type == 
ATH9K_TX_QUEUE_INACTIVE) { ath_dbg(common, QUEUE, "Get TXQ properties, inactive queue: %u\n", q); return false; } qinfo->tqi_qflags = qi->tqi_qflags; qinfo->tqi_ver = qi->tqi_ver; qinfo->tqi_subtype = qi->tqi_subtype; qinfo->tqi_qflags = qi->tqi_qflags; qinfo->tqi_priority = qi->tqi_priority; qinfo->tqi_aifs = qi->tqi_aifs; qinfo->tqi_cwmin = qi->tqi_cwmin; qinfo->tqi_cwmax = qi->tqi_cwmax; qinfo->tqi_shretry = qi->tqi_shretry; qinfo->tqi_lgretry = qi->tqi_lgretry; qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod; qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit; qinfo->tqi_burstTime = qi->tqi_burstTime; qinfo->tqi_readyTime = qi->tqi_readyTime; return true; } EXPORT_SYMBOL(ath9k_hw_get_txq_props); int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type, const struct ath9k_tx_queue_info *qinfo) { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_tx_queue_info *qi; int q; switch (type) { case ATH9K_TX_QUEUE_BEACON: q = ATH9K_NUM_TX_QUEUES - 1; break; case ATH9K_TX_QUEUE_CAB: q = ATH9K_NUM_TX_QUEUES - 2; break; case ATH9K_TX_QUEUE_PSPOLL: q = 1; break; case ATH9K_TX_QUEUE_UAPSD: q = ATH9K_NUM_TX_QUEUES - 3; break; case ATH9K_TX_QUEUE_DATA: q = qinfo->tqi_subtype; break; default: ath_err(common, "Invalid TX queue type: %u\n", type); return -1; } ath_dbg(common, QUEUE, "Setup TX queue: %u\n", q); qi = &ah->txq[q]; if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) { ath_err(common, "TX queue: %u already active\n", q); return -1; } memset(qi, 0, sizeof(struct ath9k_tx_queue_info)); qi->tqi_type = type; qi->tqi_physCompBuf = qinfo->tqi_physCompBuf; (void) ath9k_hw_set_txq_props(ah, q, qinfo); return q; } EXPORT_SYMBOL(ath9k_hw_setuptxqueue); static void ath9k_hw_clear_queue_interrupts(struct ath_hw *ah, u32 q) { ah->txok_interrupt_mask &= ~(1 << q); ah->txerr_interrupt_mask &= ~(1 << q); ah->txdesc_interrupt_mask &= ~(1 << q); ah->txeol_interrupt_mask &= ~(1 << q); ah->txurn_interrupt_mask &= ~(1 << q); } bool ath9k_hw_releasetxqueue(struct ath_hw *ah, 
u32 q) { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_tx_queue_info *qi; qi = &ah->txq[q]; if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { ath_dbg(common, QUEUE, "Release TXQ, inactive queue: %u\n", q); return false; } ath_dbg(common, QUEUE, "Release TX queue: %u\n", q); qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE; ath9k_hw_clear_queue_interrupts(ah, q); ath9k_hw_set_txq_interrupts(ah, qi); return true; } EXPORT_SYMBOL(ath9k_hw_releasetxqueue); bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q) { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_channel *chan = ah->curchan; struct ath9k_tx_queue_info *qi; u32 cwMin, chanCwMin, value; qi = &ah->txq[q]; if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { ath_dbg(common, QUEUE, "Reset TXQ, inactive queue: %u\n", q); return true; } ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q); if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) { if (chan && IS_CHAN_B(chan)) chanCwMin = INIT_CWMIN_11B; else chanCwMin = INIT_CWMIN; for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1); } else cwMin = qi->tqi_cwmin; ENABLE_REGWRITE_BUFFER(ah); REG_WRITE(ah, AR_DLCL_IFS(q), SM(cwMin, AR_D_LCL_IFS_CWMIN) | SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS)); REG_WRITE(ah, AR_DRETRY_LIMIT(q), SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)); REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ); if (AR_SREV_9340(ah) && !AR_SREV_9340_13_OR_LATER(ah)) REG_WRITE(ah, AR_DMISC(q), AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1); else REG_WRITE(ah, AR_DMISC(q), AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2); if (qi->tqi_cbrPeriod) { REG_WRITE(ah, AR_QCBRCFG(q), SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) | SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH)); REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR | (qi->tqi_cbrOverflowLimit ? 
AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0)); } if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) { REG_WRITE(ah, AR_QRDYTIMECFG(q), SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) | AR_Q_RDYTIMECFG_EN); } REG_WRITE(ah, AR_DCHNTIME(q), SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) | (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0)); if (qi->tqi_burstTime && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY); if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS); REGWRITE_BUFFER_FLUSH(ah); if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN); switch (qi->tqi_type) { case ATH9K_TX_QUEUE_BEACON: ENABLE_REGWRITE_BUFFER(ah); REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_DBA_GATED | AR_Q_MISC_BEACON_USE | AR_Q_MISC_CBR_INCR_DIS1); REG_SET_BIT(ah, AR_DMISC(q), (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL << AR_D_MISC_ARB_LOCKOUT_CNTRL_S) | AR_D_MISC_BEACON_USE | AR_D_MISC_POST_FR_BKOFF_DIS); REGWRITE_BUFFER_FLUSH(ah); /* * cwmin and cwmax should be 0 for beacon queue * but not for IBSS as we would create an imbalance * on beaconing fairness for participating nodes. 
*/ if (AR_SREV_9300_20_OR_LATER(ah) && ah->opmode != NL80211_IFTYPE_ADHOC) { REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN) | SM(0, AR_D_LCL_IFS_CWMAX) | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS)); } break; case ATH9K_TX_QUEUE_CAB: ENABLE_REGWRITE_BUFFER(ah); REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_DBA_GATED | AR_Q_MISC_CBR_INCR_DIS1 | AR_Q_MISC_CBR_INCR_DIS0); value = (qi->tqi_readyTime - (ah->config.sw_beacon_response_time - ah->config.dma_beacon_response_time) - ah->config.additional_swba_backoff) * 1024; REG_WRITE(ah, AR_QRDYTIMECFG(q), value | AR_Q_RDYTIMECFG_EN); REG_SET_BIT(ah, AR_DMISC(q), (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL << AR_D_MISC_ARB_LOCKOUT_CNTRL_S)); REGWRITE_BUFFER_FLUSH(ah); break; case ATH9K_TX_QUEUE_PSPOLL: REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1); break; case ATH9K_TX_QUEUE_UAPSD: REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS); break; default: break; } if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) { REG_SET_BIT(ah, AR_DMISC(q), SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL, AR_D_MISC_ARB_LOCKOUT_CNTRL) | AR_D_MISC_POST_FR_BKOFF_DIS); } if (AR_SREV_9300_20_OR_LATER(ah)) REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN); ath9k_hw_clear_queue_interrupts(ah, q); if (qi->tqi_qflags & TXQ_FLAG_TXINT_ENABLE) { ah->txok_interrupt_mask |= 1 << q; ah->txerr_interrupt_mask |= 1 << q; } if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE) ah->txdesc_interrupt_mask |= 1 << q; if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE) ah->txeol_interrupt_mask |= 1 << q; if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE) ah->txurn_interrupt_mask |= 1 << q; ath9k_hw_set_txq_interrupts(ah, qi); return true; } EXPORT_SYMBOL(ath9k_hw_resettxqueue); int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, struct ath_rx_status *rs) { struct ar5416_desc ads; struct ar5416_desc *adsp = AR5416DESC(ds); u32 phyerr; if ((adsp->ds_rxstatus8 & AR_RxDone) == 0) return -EINPROGRESS; ads.u.rx = adsp->u.rx; rs->rs_status = 0; rs->rs_flags = 0; 
rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen; rs->rs_tstamp = ads.AR_RcvTimestamp; if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) { rs->rs_rssi = ATH9K_RSSI_BAD; rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD; rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD; rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD; rs->rs_rssi_ext0 = ATH9K_RSSI_BAD; rs->rs_rssi_ext1 = ATH9K_RSSI_BAD; rs->rs_rssi_ext2 = ATH9K_RSSI_BAD; } else { rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined); rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt00); rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt01); rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt02); rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt10); rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt11); rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt12); } if (ads.ds_rxstatus8 & AR_RxKeyIdxValid) rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx); else rs->rs_keyix = ATH9K_RXKEYIX_INVALID; rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate); rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0; rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0; rs->rs_moreaggr = (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0; rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna); rs->rs_flags = (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0; rs->rs_flags |= (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0; if (ads.ds_rxstatus8 & AR_PreDelimCRCErr) rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE; if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST; if (ads.ds_rxstatus8 & AR_DecryptBusyErr) rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY; if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) { /* * Treat these errors as mutually exclusive to avoid spurious * extra error reports from the hardware. 
If a CRC error is * reported, then decryption and MIC errors are irrelevant, * the frame is going to be dropped either way */ if (ads.ds_rxstatus8 & AR_PHYErr) { rs->rs_status |= ATH9K_RXERR_PHY; phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode); rs->rs_phyerr = phyerr; } else if (ads.ds_rxstatus8 & AR_CRCErr) rs->rs_status |= ATH9K_RXERR_CRC; else if (ads.ds_rxstatus8 & AR_DecryptCRCErr) rs->rs_status |= ATH9K_RXERR_DECRYPT; else if (ads.ds_rxstatus8 & AR_MichaelErr) rs->rs_status |= ATH9K_RXERR_MIC; } else { if (ads.ds_rxstatus8 & (AR_CRCErr | AR_PHYErr | AR_DecryptCRCErr | AR_MichaelErr)) rs->rs_status |= ATH9K_RXERR_CORRUPT_DESC; /* Only up to MCS16 supported, everything above is invalid */ if (rs->rs_rate >= 0x90) rs->rs_status |= ATH9K_RXERR_CORRUPT_DESC; } if (ads.ds_rxstatus8 & AR_KeyMiss) rs->rs_status |= ATH9K_RXERR_KEYMISS; return 0; } EXPORT_SYMBOL(ath9k_hw_rxprocdesc); /* * This can stop or re-enables RX. * * If bool is set this will kill any frame which is currently being * transferred between the MAC and baseband and also prevent any new * frames from getting started. 
*/ bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set) { u32 reg; if (set) { REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE, 0, AH_WAIT_TIMEOUT)) { REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); reg = REG_READ(ah, AR_OBS_BUS_1); ath_err(ath9k_hw_common(ah), "RX failed to go idle in 10 ms RXSM=0x%x\n", reg); return false; } } else { REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); } return true; } EXPORT_SYMBOL(ath9k_hw_setrxabort); void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp) { REG_WRITE(ah, AR_RXDP, rxdp); } EXPORT_SYMBOL(ath9k_hw_putrxbuf); void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning) { ath9k_enable_mib_counters(ah); ath9k_ani_reset(ah, is_scanning); REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); } EXPORT_SYMBOL(ath9k_hw_startpcureceive); void ath9k_hw_abortpcurecv(struct ath_hw *ah) { REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS); ath9k_hw_disable_mib_counters(ah); } EXPORT_SYMBOL(ath9k_hw_abortpcurecv); bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset) { #define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */ struct ath_common *common = ath9k_hw_common(ah); u32 mac_status, last_mac_status = 0; int i; /* Enable access to the DMA observation bus */ REG_WRITE(ah, AR_MACMISC, ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) | (AR_MACMISC_MISC_OBS_BUS_1 << AR_MACMISC_MISC_OBS_BUS_MSB_S))); REG_WRITE(ah, AR_CR, AR_CR_RXD); /* Wait for rx enable bit to go low */ for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) { if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0) break; if (!AR_SREV_9300_20_OR_LATER(ah)) { mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0; if (mac_status == 0x1c0 && mac_status == last_mac_status) { *reset = true; break; } last_mac_status = mac_status; } udelay(AH_TIME_QUANTUM); } if (i == 0) { ath_err(common, "DMA failed to stop in %d ms AR_CR=0x%08x 
AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n", AH_RX_STOP_DMA_TIMEOUT / 1000, REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW), REG_READ(ah, AR_DMADBG_7)); return false; } else { return true; } #undef AH_RX_STOP_DMA_TIMEOUT } EXPORT_SYMBOL(ath9k_hw_stopdmarecv); int ath9k_hw_beaconq_setup(struct ath_hw *ah) { struct ath9k_tx_queue_info qi; memset(&qi, 0, sizeof(qi)); qi.tqi_aifs = 1; qi.tqi_cwmin = 0; qi.tqi_cwmax = 0; if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE; return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi); } EXPORT_SYMBOL(ath9k_hw_beaconq_setup); bool ath9k_hw_intrpend(struct ath_hw *ah) { u32 host_isr; if (AR_SREV_9100(ah)) return true; host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE); if (((host_isr & AR_INTR_MAC_IRQ) || (host_isr & AR_INTR_ASYNC_MASK_MCI)) && (host_isr != AR_INTR_SPURIOUS)) return true; host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE); if ((host_isr & AR_INTR_SYNC_DEFAULT) && (host_isr != AR_INTR_SPURIOUS)) return true; return false; } EXPORT_SYMBOL(ath9k_hw_intrpend); void ath9k_hw_kill_interrupts(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); ath_dbg(common, INTERRUPT, "disable IER\n"); REG_WRITE(ah, AR_IER, AR_IER_DISABLE); (void) REG_READ(ah, AR_IER); if (!AR_SREV_9100(ah)) { REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0); (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE); REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0); (void) REG_READ(ah, AR_INTR_SYNC_ENABLE); } } EXPORT_SYMBOL(ath9k_hw_kill_interrupts); void ath9k_hw_disable_interrupts(struct ath_hw *ah) { if (!(ah->imask & ATH9K_INT_GLOBAL)) atomic_set(&ah->intr_ref_cnt, -1); else atomic_dec(&ah->intr_ref_cnt); ath9k_hw_kill_interrupts(ah); } EXPORT_SYMBOL(ath9k_hw_disable_interrupts); void ath9k_hw_enable_interrupts(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); u32 sync_default = AR_INTR_SYNC_DEFAULT; u32 async_mask; if (!(ah->imask & ATH9K_INT_GLOBAL)) return; if (!atomic_inc_and_test(&ah->intr_ref_cnt)) { ath_dbg(common, INTERRUPT, 
"Do not enable IER ref count %d\n", atomic_read(&ah->intr_ref_cnt)); return; } if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) sync_default &= ~AR_INTR_SYNC_HOST1_FATAL; async_mask = AR_INTR_MAC_IRQ; if (ah->imask & ATH9K_INT_MCI) async_mask |= AR_INTR_ASYNC_MASK_MCI; ath_dbg(common, INTERRUPT, "enable IER\n"); REG_WRITE(ah, AR_IER, AR_IER_ENABLE); if (!AR_SREV_9100(ah)) { REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask); REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask); REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default); REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default); } ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n", REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER)); } EXPORT_SYMBOL(ath9k_hw_enable_interrupts); void ath9k_hw_set_interrupts(struct ath_hw *ah) { enum ath9k_int ints = ah->imask; u32 mask, mask2; struct ath9k_hw_capabilities *pCap = &ah->caps; struct ath_common *common = ath9k_hw_common(ah); if (!(ints & ATH9K_INT_GLOBAL)) ath9k_hw_disable_interrupts(ah); ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints); mask = ints & ATH9K_INT_COMMON; mask2 = 0; if (ints & ATH9K_INT_TX) { if (ah->config.tx_intr_mitigation) mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM; else { if (ah->txok_interrupt_mask) mask |= AR_IMR_TXOK; if (ah->txdesc_interrupt_mask) mask |= AR_IMR_TXDESC; } if (ah->txerr_interrupt_mask) mask |= AR_IMR_TXERR; if (ah->txeol_interrupt_mask) mask |= AR_IMR_TXEOL; } if (ints & ATH9K_INT_RX) { if (AR_SREV_9300_20_OR_LATER(ah)) { mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP; if (ah->config.rx_intr_mitigation) { mask &= ~AR_IMR_RXOK_LP; mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM; } else { mask |= AR_IMR_RXOK_LP; } } else { if (ah->config.rx_intr_mitigation) mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM; else mask |= AR_IMR_RXOK | AR_IMR_RXDESC; } if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) mask |= AR_IMR_GENTMR; } if (ints & ATH9K_INT_GENTIMER) mask |= AR_IMR_GENTMR; if (ints & (ATH9K_INT_BMISC)) { mask |= AR_IMR_BCNMISC; if (ints & ATH9K_INT_TIM) mask2 |= AR_IMR_S2_TIM; if 
(ints & ATH9K_INT_DTIM) mask2 |= AR_IMR_S2_DTIM; if (ints & ATH9K_INT_DTIMSYNC) mask2 |= AR_IMR_S2_DTIMSYNC; if (ints & ATH9K_INT_CABEND) mask2 |= AR_IMR_S2_CABEND; if (ints & ATH9K_INT_TSFOOR) mask2 |= AR_IMR_S2_TSFOOR; } if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) { mask |= AR_IMR_BCNMISC; if (ints & ATH9K_INT_GTT) mask2 |= AR_IMR_S2_GTT; if (ints & ATH9K_INT_CST) mask2 |= AR_IMR_S2_CST; } ath_dbg(common, INTERRUPT, "new IMR 0x%x\n", mask); REG_WRITE(ah, AR_IMR, mask); ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC | AR_IMR_S2_CABEND | AR_IMR_S2_CABTO | AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST); ah->imrs2_reg |= mask2; REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg); if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { if (ints & ATH9K_INT_TIM_TIMER) REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); else REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); } return; } EXPORT_SYMBOL(ath9k_hw_set_interrupts);
gpl-2.0
Savaged-Zen/android_kernel_liquid_tuna
fs/affs/amigaffs.c
2965
11553
/* * linux/fs/affs/amigaffs.c * * (c) 1996 Hans-Joachim Widmaier - Rewritten * * (C) 1993 Ray Burr - Amiga FFS filesystem. * * Please send bug reports to: hjw@zvw.de */ #include "affs.h" extern struct timezone sys_tz; static char ErrorBuffer[256]; /* * Functions for accessing Amiga-FFS structures. */ /* Insert a header block bh into the directory dir * caller must hold AFFS_DIR->i_hash_lock! */ int affs_insert_hash(struct inode *dir, struct buffer_head *bh) { struct super_block *sb = dir->i_sb; struct buffer_head *dir_bh; u32 ino, hash_ino; int offset; ino = bh->b_blocknr; offset = affs_hash_name(sb, AFFS_TAIL(sb, bh)->name + 1, AFFS_TAIL(sb, bh)->name[0]); pr_debug("AFFS: insert_hash(dir=%u, ino=%d)\n", (u32)dir->i_ino, ino); dir_bh = affs_bread(sb, dir->i_ino); if (!dir_bh) return -EIO; hash_ino = be32_to_cpu(AFFS_HEAD(dir_bh)->table[offset]); while (hash_ino) { affs_brelse(dir_bh); dir_bh = affs_bread(sb, hash_ino); if (!dir_bh) return -EIO; hash_ino = be32_to_cpu(AFFS_TAIL(sb, dir_bh)->hash_chain); } AFFS_TAIL(sb, bh)->parent = cpu_to_be32(dir->i_ino); AFFS_TAIL(sb, bh)->hash_chain = 0; affs_fix_checksum(sb, bh); if (dir->i_ino == dir_bh->b_blocknr) AFFS_HEAD(dir_bh)->table[offset] = cpu_to_be32(ino); else AFFS_TAIL(sb, dir_bh)->hash_chain = cpu_to_be32(ino); affs_adjust_checksum(dir_bh, ino); mark_buffer_dirty_inode(dir_bh, dir); affs_brelse(dir_bh); dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; dir->i_version++; mark_inode_dirty(dir); return 0; } /* Remove a header block from its directory. * caller must hold AFFS_DIR->i_hash_lock! 
*/ int affs_remove_hash(struct inode *dir, struct buffer_head *rem_bh) { struct super_block *sb; struct buffer_head *bh; u32 rem_ino, hash_ino; __be32 ino; int offset, retval; sb = dir->i_sb; rem_ino = rem_bh->b_blocknr; offset = affs_hash_name(sb, AFFS_TAIL(sb, rem_bh)->name+1, AFFS_TAIL(sb, rem_bh)->name[0]); pr_debug("AFFS: remove_hash(dir=%d, ino=%d, hashval=%d)\n", (u32)dir->i_ino, rem_ino, offset); bh = affs_bread(sb, dir->i_ino); if (!bh) return -EIO; retval = -ENOENT; hash_ino = be32_to_cpu(AFFS_HEAD(bh)->table[offset]); while (hash_ino) { if (hash_ino == rem_ino) { ino = AFFS_TAIL(sb, rem_bh)->hash_chain; if (dir->i_ino == bh->b_blocknr) AFFS_HEAD(bh)->table[offset] = ino; else AFFS_TAIL(sb, bh)->hash_chain = ino; affs_adjust_checksum(bh, be32_to_cpu(ino) - hash_ino); mark_buffer_dirty_inode(bh, dir); AFFS_TAIL(sb, rem_bh)->parent = 0; retval = 0; break; } affs_brelse(bh); bh = affs_bread(sb, hash_ino); if (!bh) return -EIO; hash_ino = be32_to_cpu(AFFS_TAIL(sb, bh)->hash_chain); } affs_brelse(bh); dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; dir->i_version++; mark_inode_dirty(dir); return retval; } static void affs_fix_dcache(struct dentry *dentry, u32 entry_ino) { struct inode *inode = dentry->d_inode; void *data = dentry->d_fsdata; struct list_head *head, *next; spin_lock(&inode->i_lock); head = &inode->i_dentry; next = head->next; while (next != head) { dentry = list_entry(next, struct dentry, d_alias); if (entry_ino == (u32)(long)dentry->d_fsdata) { dentry->d_fsdata = data; break; } next = next->next; } spin_unlock(&inode->i_lock); } /* Remove header from link chain */ static int affs_remove_link(struct dentry *dentry) { struct inode *dir, *inode = dentry->d_inode; struct super_block *sb = inode->i_sb; struct buffer_head *bh = NULL, *link_bh = NULL; u32 link_ino, ino; int retval; pr_debug("AFFS: remove_link(key=%ld)\n", inode->i_ino); retval = -EIO; bh = affs_bread(sb, inode->i_ino); if (!bh) goto done; link_ino = (u32)(long)dentry->d_fsdata; if 
(inode->i_ino == link_ino) { /* we can't remove the head of the link, as its blocknr is still used as ino, * so we remove the block of the first link instead. */ link_ino = be32_to_cpu(AFFS_TAIL(sb, bh)->link_chain); link_bh = affs_bread(sb, link_ino); if (!link_bh) goto done; dir = affs_iget(sb, be32_to_cpu(AFFS_TAIL(sb, link_bh)->parent)); if (IS_ERR(dir)) { retval = PTR_ERR(dir); goto done; } affs_lock_dir(dir); affs_fix_dcache(dentry, link_ino); retval = affs_remove_hash(dir, link_bh); if (retval) { affs_unlock_dir(dir); goto done; } mark_buffer_dirty_inode(link_bh, inode); memcpy(AFFS_TAIL(sb, bh)->name, AFFS_TAIL(sb, link_bh)->name, 32); retval = affs_insert_hash(dir, bh); if (retval) { affs_unlock_dir(dir); goto done; } mark_buffer_dirty_inode(bh, inode); affs_unlock_dir(dir); iput(dir); } else { link_bh = affs_bread(sb, link_ino); if (!link_bh) goto done; } while ((ino = be32_to_cpu(AFFS_TAIL(sb, bh)->link_chain)) != 0) { if (ino == link_ino) { __be32 ino2 = AFFS_TAIL(sb, link_bh)->link_chain; AFFS_TAIL(sb, bh)->link_chain = ino2; affs_adjust_checksum(bh, be32_to_cpu(ino2) - link_ino); mark_buffer_dirty_inode(bh, inode); retval = 0; /* Fix the link count, if bh is a normal header block without links */ switch (be32_to_cpu(AFFS_TAIL(sb, bh)->stype)) { case ST_LINKDIR: case ST_LINKFILE: break; default: if (!AFFS_TAIL(sb, bh)->link_chain) inode->i_nlink = 1; } affs_free_block(sb, link_ino); goto done; } affs_brelse(bh); bh = affs_bread(sb, ino); if (!bh) goto done; } retval = -ENOENT; done: affs_brelse(link_bh); affs_brelse(bh); return retval; } static int affs_empty_dir(struct inode *inode) { struct super_block *sb = inode->i_sb; struct buffer_head *bh; int retval, size; retval = -EIO; bh = affs_bread(sb, inode->i_ino); if (!bh) goto done; retval = -ENOTEMPTY; for (size = AFFS_SB(sb)->s_hashsize - 1; size >= 0; size--) if (AFFS_HEAD(bh)->table[size]) goto not_empty; retval = 0; not_empty: affs_brelse(bh); done: return retval; } /* Remove a filesystem object. 
If the object to be removed has
 * links to it, one of the links must be changed to inherit
 * the file or directory.  As above, any inode will do.
 * The buffer will not be freed.  If the header is a link, the
 * block will be marked as free.
 * This function returns a negative error number in case of
 * an error, else 0 if the inode is to be deleted or 1 if not.
 */
int
affs_remove_header(struct dentry *dentry)
{
	struct super_block *sb;
	struct inode *inode, *dir;
	struct buffer_head *bh = NULL;
	int ret;

	dir = dentry->d_parent->d_inode;
	sb = dir->i_sb;

	ret = -ENOENT;
	inode = dentry->d_inode;
	if (!inode)
		goto done;

	pr_debug("AFFS: remove_header(key=%ld)\n", inode->i_ino);

	/* Read the header block recorded when the dentry was instantiated */
	ret = -EIO;
	bh = affs_bread(sb, (u32)(long)dentry->d_fsdata);
	if (!bh)
		goto done;

	/* Lock order: link lock of the victim first, then the parent dir */
	affs_lock_link(inode);
	affs_lock_dir(dir);

	switch (be32_to_cpu(AFFS_TAIL(sb, bh)->stype)) {
	case ST_USERDIR:
		/* if we ever want to support links to dirs
		 * i_hash_lock of the inode must only be
		 * taken after some checks
		 */
		affs_lock_dir(inode);
		ret = affs_empty_dir(inode);
		affs_unlock_dir(inode);
		if (ret)
			goto done_unlock;
		break;
	default:
		break;
	}

	/* Unhash the object from its parent directory */
	ret = affs_remove_hash(dir, bh);
	if (ret)
		goto done_unlock;
	mark_buffer_dirty_inode(bh, inode);

	affs_unlock_dir(dir);

	/* With hard links remaining, promote one link; otherwise delete */
	if (inode->i_nlink > 1)
		ret = affs_remove_link(dentry);
	else
		inode->i_nlink = 0;
	affs_unlock_link(inode);

	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);

done:
	affs_brelse(bh);
	return ret;

done_unlock:
	affs_unlock_dir(dir);
	affs_unlock_link(inode);
	goto done;
}

/* Checksum a block, do various consistency checks and optionally
   return the blocks type number.  DATA points to the block.  If their
   pointers are non-null, *PTYPE and *STYPE are set to the primary and
   secondary block types respectively, *HASHSIZE is set to the size of
   the hashtable (which lets us calculate the block size).
   Returns non-zero if the block is not consistent.
*/ u32 affs_checksum_block(struct super_block *sb, struct buffer_head *bh) { __be32 *ptr = (__be32 *)bh->b_data; u32 sum; int bsize; sum = 0; for (bsize = sb->s_blocksize / sizeof(__be32); bsize > 0; bsize--) sum += be32_to_cpu(*ptr++); return sum; } /* * Calculate the checksum of a disk block and store it * at the indicated position. */ void affs_fix_checksum(struct super_block *sb, struct buffer_head *bh) { int cnt = sb->s_blocksize / sizeof(__be32); __be32 *ptr = (__be32 *)bh->b_data; u32 checksum; __be32 *checksumptr; checksumptr = ptr + 5; *checksumptr = 0; for (checksum = 0; cnt > 0; ptr++, cnt--) checksum += be32_to_cpu(*ptr); *checksumptr = cpu_to_be32(-checksum); } void secs_to_datestamp(time_t secs, struct affs_date *ds) { u32 days; u32 minute; secs -= sys_tz.tz_minuteswest * 60 + ((8 * 365 + 2) * 24 * 60 * 60); if (secs < 0) secs = 0; days = secs / 86400; secs -= days * 86400; minute = secs / 60; secs -= minute * 60; ds->days = cpu_to_be32(days); ds->mins = cpu_to_be32(minute); ds->ticks = cpu_to_be32(secs * 50); } mode_t prot_to_mode(u32 prot) { int mode = 0; if (!(prot & FIBF_NOWRITE)) mode |= S_IWUSR; if (!(prot & FIBF_NOREAD)) mode |= S_IRUSR; if (!(prot & FIBF_NOEXECUTE)) mode |= S_IXUSR; if (prot & FIBF_GRP_WRITE) mode |= S_IWGRP; if (prot & FIBF_GRP_READ) mode |= S_IRGRP; if (prot & FIBF_GRP_EXECUTE) mode |= S_IXGRP; if (prot & FIBF_OTR_WRITE) mode |= S_IWOTH; if (prot & FIBF_OTR_READ) mode |= S_IROTH; if (prot & FIBF_OTR_EXECUTE) mode |= S_IXOTH; return mode; } void mode_to_prot(struct inode *inode) { u32 prot = AFFS_I(inode)->i_protect; mode_t mode = inode->i_mode; if (!(mode & S_IXUSR)) prot |= FIBF_NOEXECUTE; if (!(mode & S_IRUSR)) prot |= FIBF_NOREAD; if (!(mode & S_IWUSR)) prot |= FIBF_NOWRITE; if (mode & S_IXGRP) prot |= FIBF_GRP_EXECUTE; if (mode & S_IRGRP) prot |= FIBF_GRP_READ; if (mode & S_IWGRP) prot |= FIBF_GRP_WRITE; if (mode & S_IXOTH) prot |= FIBF_OTR_EXECUTE; if (mode & S_IROTH) prot |= FIBF_OTR_READ; if (mode & S_IWOTH) prot |= 
FIBF_OTR_WRITE; AFFS_I(inode)->i_protect = prot; } void affs_error(struct super_block *sb, const char *function, const char *fmt, ...) { va_list args; va_start(args,fmt); vsnprintf(ErrorBuffer,sizeof(ErrorBuffer),fmt,args); va_end(args); printk(KERN_CRIT "AFFS error (device %s): %s(): %s\n", sb->s_id, function,ErrorBuffer); if (!(sb->s_flags & MS_RDONLY)) printk(KERN_WARNING "AFFS: Remounting filesystem read-only\n"); sb->s_flags |= MS_RDONLY; } void affs_warning(struct super_block *sb, const char *function, const char *fmt, ...) { va_list args; va_start(args,fmt); vsnprintf(ErrorBuffer,sizeof(ErrorBuffer),fmt,args); va_end(args); printk(KERN_WARNING "AFFS warning (device %s): %s(): %s\n", sb->s_id, function,ErrorBuffer); } /* Check if the name is valid for a affs object. */ int affs_check_name(const unsigned char *name, int len) { int i; if (len > 30) #ifdef AFFS_NO_TRUNCATE return -ENAMETOOLONG; #else len = 30; #endif for (i = 0; i < len; i++) { if (name[i] < ' ' || name[i] == ':' || (name[i] > 0x7e && name[i] < 0xa0)) return -EINVAL; } return 0; } /* This function copies name to bstr, with at most 30 * characters length. The bstr will be prepended by * a length byte. * NOTE: The name will must be already checked by * affs_check_name()! */ int affs_copy_name(unsigned char *bstr, struct dentry *dentry) { int len = min(dentry->d_name.len, 30u); *bstr++ = len; memcpy(bstr, dentry->d_name.name, len); return len; }
gpl-2.0
AndroidDeveloperAlliance/kernel_samsung_smdk4210
drivers/acpi/acpica/exregion.c
3221
15317
/******************************************************************************
 *
 * Module Name: exregion - ACPI default op_region (address space) handlers
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2011, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"

#define _COMPONENT          ACPI_EXECUTER
ACPI_MODULE_NAME("exregion")

/*
 * acpi_ex_system_memory_space_handler - default handler for the
 * SystemMemory operation region address space.
 *
 * function        - ACPI_READ or ACPI_WRITE
 * address         - physical address within the region
 * bit_width       - access width: 8, 16, 32 or 64 bits
 * value           - in/out value pointer
 * handler_context - handler context (unused here)
 * region_context  - struct acpi_mem_space_context with the cached mapping
 *
 * A small window of the region is kept mapped between calls; the window
 * is re-created whenever the requested access falls outside it.
 */
acpi_status
acpi_ex_system_memory_space_handler(u32 function,
				    acpi_physical_address address,
				    u32 bit_width,
				    u64 *value,
				    void *handler_context, void *region_context)
{
	acpi_status status = AE_OK;
	void *logical_addr_ptr = NULL;
	struct acpi_mem_space_context *mem_info = region_context;
	u32 length;
	acpi_size map_length;
	acpi_size page_boundary_map_length;
#ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
	u32 remainder;
#endif

	ACPI_FUNCTION_TRACE(ex_system_memory_space_handler);

	/* Validate and translate the bit width into a byte count */

	switch (bit_width) {
	case 8:
		length = 1;
		break;
	case 16:
		length = 2;
		break;
	case 32:
		length = 4;
		break;
	case 64:
		length = 8;
		break;
	default:
		ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %u",
			    bit_width));
		return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
	}

#ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
	/*
	 * Hardware does not support non-aligned data transfers, we must verify
	 * the request.
	 */
	(void)acpi_ut_short_divide((u64) address, length, NULL, &remainder);
	if (remainder != 0) {
		return_ACPI_STATUS(AE_AML_ALIGNMENT);
	}
#endif

	/*
	 * Does the request fit into the cached memory mapping?
	 * Is 1) Address below the current mapping? OR
	 *    2) Address beyond the current mapping?
	 */
	if ((address < mem_info->mapped_physical_address) ||
	    (((u64) address + length) >
	     ((u64) mem_info->mapped_physical_address +
	      mem_info->mapped_length))) {

		/* Drop the stale mapping, if any, before creating a new one */

		if (mem_info->mapped_length) {
			acpi_os_unmap_memory(mem_info->mapped_logical_address,
					     mem_info->mapped_length);
		}

		/*
		 * Attempt to map from the requested address to the end of the
		 * region, but never more than one page and never across a
		 * page boundary.
		 */
		map_length = (acpi_size)
		    ((mem_info->address + mem_info->length) - address);

		/*
		 * Crossing a page boundary while mapping can trigger warnings
		 * on some systems when the pages have different attributes,
		 * so clamp the mapping at the next page boundary.
		 */
		page_boundary_map_length =
		    ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address;
		if (!page_boundary_map_length) {
			page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;
		}
		if (map_length > page_boundary_map_length) {
			map_length = page_boundary_map_length;
		}

		/* Create a new mapping starting at the address given */

		mem_info->mapped_logical_address =
		    acpi_os_map_memory((acpi_physical_address) address,
				       map_length);
		if (!mem_info->mapped_logical_address) {
			ACPI_ERROR((AE_INFO,
				    "Could not map memory at 0x%8.8X%8.8X, size %u",
				    ACPI_FORMAT_NATIVE_UINT(address),
				    (u32) map_length));
			mem_info->mapped_length = 0;
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		/* Save the physical address and mapping size */

		mem_info->mapped_physical_address = address;
		mem_info->mapped_length = map_length;
	}

	/* Translate the physical address into the cached window */

	logical_addr_ptr = mem_info->mapped_logical_address +
	    ((u64) address - (u64) mem_info->mapped_physical_address);

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n",
			  bit_width, function,
			  ACPI_FORMAT_NATIVE_UINT(address)));

	/*
	 * Perform the memory read or write.
	 *
	 * Note: For machines that do not support non-aligned transfers, the
	 * target address was checked for alignment above. The transfer is not
	 * broken into byte-size chunks because the AML specifically asked for
	 * a width the hardware may require.
	 */
	switch (function) {
	case ACPI_READ:
		*value = 0;
		switch (bit_width) {
		case 8:
			*value = (u64) ACPI_GET8(logical_addr_ptr);
			break;
		case 16:
			*value = (u64) ACPI_GET16(logical_addr_ptr);
			break;
		case 32:
			*value = (u64) ACPI_GET32(logical_addr_ptr);
			break;
		case 64:
			*value = (u64) ACPI_GET64(logical_addr_ptr);
			break;
		default:
			/* bit_width was already validated */
			break;
		}
		break;

	case ACPI_WRITE:
		switch (bit_width) {
		case 8:
			ACPI_SET8(logical_addr_ptr) = (u8) *value;
			break;
		case 16:
			ACPI_SET16(logical_addr_ptr) = (u16) *value;
			break;
		case 32:
			ACPI_SET32(logical_addr_ptr) = (u32) *value;
			break;
		case 64:
			ACPI_SET64(logical_addr_ptr) = (u64) *value;
			break;
		default:
			/* bit_width was already validated */
			break;
		}
		break;

	default:
		status = AE_BAD_PARAMETER;
		break;
	}

	return_ACPI_STATUS(status);
}

/*
 * acpi_ex_system_io_space_handler - default handler for the SystemIO
 * operation region address space. Delegates to the HW port accessors.
 */
acpi_status
acpi_ex_system_io_space_handler(u32 function,
				acpi_physical_address address,
				u32 bit_width,
				u64 *value,
				void *handler_context, void *region_context)
{
	acpi_status status = AE_OK;
	u32 value32;

	ACPI_FUNCTION_TRACE(ex_system_io_space_handler);

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "System-IO (width %u) R/W %u Address=%8.8X%8.8X\n",
			  bit_width, function,
			  ACPI_FORMAT_NATIVE_UINT(address)));

	/* Decode the function parameter */

	switch (function) {
	case ACPI_READ:
		status = acpi_hw_read_port((acpi_io_address) address,
					   &value32, bit_width);
		*value = value32;
		break;

	case ACPI_WRITE:
		status = acpi_hw_write_port((acpi_io_address) address,
					    (u32) *value, bit_width);
		break;

	default:
		status = AE_BAD_PARAMETER;
		break;
	}

	return_ACPI_STATUS(status);
}

/*
 * acpi_ex_pci_config_space_handler - default handler for the PCI_Config
 * operation region address space. The region context carries the PCI id.
 */
acpi_status
acpi_ex_pci_config_space_handler(u32 function,
				 acpi_physical_address address,
				 u32 bit_width,
				 u64 *value,
				 void *handler_context, void *region_context)
{
	acpi_status status = AE_OK;
	struct acpi_pci_id *pci_id;
	u16 pci_register;

	ACPI_FUNCTION_TRACE(ex_pci_config_space_handler);

	/*
	 * The arguments to acpi_os(Read|Write)pci_configuration are:
	 *
	 * pci_segment  is the PCI bus segment range 0-31
	 * pci_bus      is the PCI bus number range 0-255
	 * pci_device   is the PCI device number range 0-31
	 * pci_function is the PCI device function number
	 * pci_register is the Config space register range 0-255 bytes
	 *
	 * Value - input value for write, output address for read
	 */
	pci_id = (struct acpi_pci_id *)region_context;
	pci_register = (u16) (u32) address;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Pci-Config %u (%u) Seg(%04x) Bus(%04x) Dev(%04x) Func(%04x) Reg(%04x)\n",
			  function, bit_width, pci_id->segment, pci_id->bus,
			  pci_id->device, pci_id->function, pci_register));

	switch (function) {
	case ACPI_READ:
		status = acpi_os_read_pci_configuration(pci_id, pci_register,
							value, bit_width);
		break;

	case ACPI_WRITE:
		status = acpi_os_write_pci_configuration(pci_id, pci_register,
							 *value, bit_width);
		break;

	default:
		status = AE_BAD_PARAMETER;
		break;
	}

	return_ACPI_STATUS(status);
}

/*
 * acpi_ex_cmos_space_handler - default handler for the CMOS address
 * space (Op Region). Currently a no-op that reports success.
 */
acpi_status
acpi_ex_cmos_space_handler(u32 function,
			   acpi_physical_address address,
			   u32 bit_width,
			   u64 *value,
			   void *handler_context, void *region_context)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ex_cmos_space_handler);

	return_ACPI_STATUS(status);
}

/*
 * acpi_ex_pci_bar_space_handler - default handler for the PCI bar_target
 * address space (Op Region). Currently a no-op that reports success.
 */
acpi_status
acpi_ex_pci_bar_space_handler(u32 function,
			      acpi_physical_address address,
			      u32 bit_width,
			      u64 *value,
			      void *handler_context, void *region_context)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ex_pci_bar_space_handler);

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_data_table_space_handler
 *
 * PARAMETERS:  Function            - Read or Write operation
 *              Address             - Where in the space to read or write
 *              bit_width           -
Field width in bits (8, 16, or 32)
 *              Value               - Pointer to in or out value
 *              handler_context     - Pointer to Handler's context
 *              region_context      - Pointer to context specific to the
 *                                    accessed region
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Handler for the Data Table address space (Op Region)
 *
 ******************************************************************************/

acpi_status
acpi_ex_data_table_space_handler(u32 function,
				 acpi_physical_address address,
				 u32 bit_width,
				 u64 *value,
				 void *handler_context, void *region_context)
{
	ACPI_FUNCTION_TRACE(ex_data_table_space_handler);

	/*
	 * Perform the memory read or write. The bit_width was already
	 * validated.
	 */
	switch (function) {
	case ACPI_READ:
		ACPI_MEMCPY(ACPI_CAST_PTR(char, value),
			    ACPI_PHYSADDR_TO_PTR(address),
			    ACPI_DIV_8(bit_width));
		break;

	case ACPI_WRITE:
		ACPI_MEMCPY(ACPI_PHYSADDR_TO_PTR(address),
			    ACPI_CAST_PTR(char, value),
			    ACPI_DIV_8(bit_width));
		break;

	default:
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	return_ACPI_STATUS(AE_OK);
}
gpl-2.0
dastin1015/android_kernel_samsung_d710
drivers/acpi/acpica/tbutils.c
3221
23441
/******************************************************************************
 *
 * Module Name: tbutils   - table utilities
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2011, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "actables.h"

#define _COMPONENT          ACPI_TABLES
ACPI_MODULE_NAME("tbutils")

/* Local prototypes */
static void acpi_tb_fix_string(char *string, acpi_size length);

static void
acpi_tb_cleanup_table_header(struct acpi_table_header *out_header,
			     struct acpi_table_header *header);

static acpi_physical_address
acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size);

/*
 * acpi_tb_check_xsdt - validate the XSDT at the given physical address.
 *
 * Returns: AE_OK                     - XSDT is okay
 *          AE_NO_MEMORY              - can't map XSDT
 *          AE_INVALID_TABLE_LENGTH   - invalid table length
 *          AE_NULL_ENTRY             - XSDT has NULL entry
 */
static acpi_status acpi_tb_check_xsdt(acpi_physical_address address)
{
	struct acpi_table_header *table;
	u32 length;
	u64 xsdt_entry_address;
	u8 *table_entry;
	u32 table_count;
	int i;

	/* Map only the header first so the full length can be read */

	table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
	if (!table)
		return AE_NO_MEMORY;

	length = table->length;
	acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
	if (length < sizeof(struct acpi_table_header))
		return AE_INVALID_TABLE_LENGTH;

	/* Re-map the whole table and scan its entries */

	table = acpi_os_map_memory(address, length);
	if (!table)
		return AE_NO_MEMORY;

	/* Calculate the number of tables described in XSDT */

	table_count = (u32) ((table->length -
			      sizeof(struct acpi_table_header)) / sizeof(u64));
	table_entry = ACPI_CAST_PTR(u8, table) +
	    sizeof(struct acpi_table_header);

	for (i = 0; i < table_count; i++) {
		ACPI_MOVE_64_TO_64(&xsdt_entry_address, table_entry);
		if (!xsdt_entry_address) {
			/* XSDT has NULL entry */
			break;
		}
		table_entry += sizeof(u64);
	}
	acpi_os_unmap_memory(table, length);

	if (i < table_count)
		return AE_NULL_ENTRY;
	else
		return AE_OK;
}

/*
 * acpi_tb_initialize_facs - create a permanent mapping for the FACS and
 * save it in a global for accessing the Global Lock and Firmware Waking
 * Vector.
 */
acpi_status acpi_tb_initialize_facs(void)
{
	acpi_status status;

	status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
					 ACPI_CAST_INDIRECT_PTR(struct
								acpi_table_header,
								&acpi_gbl_FACS));
	return status;
}

/*
 * acpi_tb_tables_loaded - TRUE when the minimum required ACPI tables
 * (FADT, FACS, DSDT) are present in the root table list.
 */
u8 acpi_tb_tables_loaded(void)
{
	if (acpi_gbl_root_table_list.current_table_count >= 3) {
		return (TRUE);
	}

	return (FALSE);
}

/*
 * acpi_tb_fix_string - replace every non-printable or non-ascii byte in
 * the string (up to length bytes or the terminating NUL) with '?'.
 */
static void acpi_tb_fix_string(char *string, acpi_size length)
{
	while (length && *string) {
		if (!ACPI_IS_PRINT(*string)) {
			*string = '?';
		}
		string++;
		length--;
	}
}

/*
 * acpi_tb_cleanup_table_header - copy the table header into out_header
 * and sanitize all "string" fields so they consist of printable
 * characters only.
 */
static void
acpi_tb_cleanup_table_header(struct acpi_table_header *out_header,
			     struct acpi_table_header *header)
{

	ACPI_MEMCPY(out_header, header, sizeof(struct acpi_table_header));

	acpi_tb_fix_string(out_header->signature, ACPI_NAME_SIZE);
	acpi_tb_fix_string(out_header->oem_id, ACPI_OEM_ID_SIZE);
	acpi_tb_fix_string(out_header->oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
	acpi_tb_fix_string(out_header->asl_compiler_id, ACPI_NAME_SIZE);
}

/*
 * acpi_tb_print_table_header - print an ACPI table header, with special
 * cases for FACS (signature/length only) and RSDP (no common header).
 */
void
acpi_tb_print_table_header(acpi_physical_address address,
			   struct acpi_table_header *header)
{
	struct acpi_table_header local_header;

	/*
	 * The reason that the Address is cast to a void pointer is so that we
	 * can use %p which will work properly on both 32-bit and 64-bit hosts.
	 */
	if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {

		/* FACS only has signature and length fields */

		ACPI_INFO((AE_INFO, "%4.4s %p %05X",
			   header->signature, ACPI_CAST_PTR(void, address),
			   header->length));
	} else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) {

		/* RSDP has no common fields */

		ACPI_MEMCPY(local_header.oem_id,
			    ACPI_CAST_PTR(struct acpi_table_rsdp,
					  header)->oem_id, ACPI_OEM_ID_SIZE);
		acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);

		ACPI_INFO((AE_INFO, "RSDP %p %05X (v%.2d %6.6s)",
			   ACPI_CAST_PTR(void, address),
			   (ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
			    revision > 0) ? ACPI_CAST_PTR(struct
							  acpi_table_rsdp,
							  header)->length : 20,
			   ACPI_CAST_PTR(struct acpi_table_rsdp,
					 header)->revision,
			   local_header.oem_id));
	} else {

		/* Standard ACPI table with full common header */

		acpi_tb_cleanup_table_header(&local_header, header);

		ACPI_INFO((AE_INFO,
			   "%4.4s %p %05X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
			   local_header.signature, ACPI_CAST_PTR(void, address),
			   local_header.length, local_header.revision,
			   local_header.oem_id, local_header.oem_table_id,
			   local_header.oem_revision,
			   local_header.asl_compiler_id,
			   local_header.asl_compiler_revision));
	}
}

/*
 * acpi_tb_verify_checksum - verify that the table checksums to zero.
 * Optionally (ACPI_CHECKSUM_ABORT) returns an exception on bad checksum.
 */
acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
{
	u8 checksum;

	/* Compute the checksum on the table */

	checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, table), length);

	/* Checksum ok? (should be zero) */

	if (checksum) {
		ACPI_WARNING((AE_INFO,
			      "Incorrect checksum in table [%4.4s] - 0x%2.2X, should be 0x%2.2X",
			      table->signature, table->checksum,
			      (u8) (table->checksum - checksum)));

#if (ACPI_CHECKSUM_ABORT)
		return (AE_BAD_CHECKSUM);
#endif
	}

	return (AE_OK);
}

/*
 * acpi_tb_checksum - calculate the circular (mod 256) checksum of a
 * memory region.
 */
u8 acpi_tb_checksum(u8 *buffer, u32 length)
{
	u8 sum = 0;
	u8 *end = buffer + length;

	while (buffer < end) {
		sum = (u8) (sum + *(buffer++));
	}

	return sum;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_tb_check_dsdt_header
 *
 * PARAMETERS:  None
 *
 * RETURN:      None
 *
 * DESCRIPTION: Quick compare to check validity of the DSDT. This will detect
 *              if the DSDT has been replaced from outside the OS and/or if
 *              the DSDT header has been corrupted.
******************************************************************************/

void acpi_tb_check_dsdt_header(void)
{

	/* Compare original length and checksum to current values */

	if (acpi_gbl_original_dsdt_header.length != acpi_gbl_DSDT->length ||
	    acpi_gbl_original_dsdt_header.checksum != acpi_gbl_DSDT->checksum) {
		ACPI_ERROR((AE_INFO,
			    "The DSDT has been corrupted or replaced - old, new headers below"));
		acpi_tb_print_table_header(0, &acpi_gbl_original_dsdt_header);
		acpi_tb_print_table_header(0, acpi_gbl_DSDT);

		ACPI_ERROR((AE_INFO,
			    "Please send DMI info to linux-acpi@vger.kernel.org\n"
			    "If system does not work as expected, please boot with acpi=copy_dsdt"));

		/* Disable further error messages */

		acpi_gbl_original_dsdt_header.length = acpi_gbl_DSDT->length;
		acpi_gbl_original_dsdt_header.checksum =
		    acpi_gbl_DSDT->checksum;
	}
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_tb_copy_dsdt
 *
 * PARAMETERS:  table_desc          - Installed table to copy
 *
 * RETURN:      None
 *
 * DESCRIPTION: Implements a subsystem option to copy the DSDT to local memory.
 *              Some very bad BIOSs are known to either corrupt the DSDT or
 *              install a new, bad DSDT. This copy works around the problem.
******************************************************************************/

struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
{
	struct acpi_table_header *new_table;
	struct acpi_table_desc *table_desc;

	table_desc = &acpi_gbl_root_table_list.tables[table_index];

	new_table = ACPI_ALLOCATE(table_desc->length);
	if (!new_table) {
		ACPI_ERROR((AE_INFO, "Could not copy DSDT of length 0x%X",
			    table_desc->length));
		return (NULL);
	}

	/* Copy locally, then release the (possibly mapped) original */

	ACPI_MEMCPY(new_table, table_desc->pointer, table_desc->length);
	acpi_tb_delete_table(table_desc);
	table_desc->pointer = new_table;
	table_desc->flags = ACPI_TABLE_ORIGIN_ALLOCATED;

	ACPI_INFO((AE_INFO,
		   "Forced DSDT copy: length 0x%05X copied locally, original unmapped",
		   new_table->length));

	return (new_table);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_tb_install_table
 *
 * PARAMETERS:  Address             - Physical address of DSDT or FACS
 *              Signature           - Table signature, NULL if no need to
 *                                    match
 *              table_index         - Index into root table array
 *
 * RETURN:      None
 *
 * DESCRIPTION: Install an ACPI table into the global data structure. The
 *              table override mechanism is implemented here to allow the host
 *              OS to replace any table before it is installed in the root
 *              table array.
 * ******************************************************************************/

void
acpi_tb_install_table(acpi_physical_address address,
		      char *signature, u32 table_index)
{
	u8 flags;
	acpi_status status;
	struct acpi_table_header *table_to_install;
	struct acpi_table_header *mapped_table;
	struct acpi_table_header *override_table = NULL;

	if (!address) {
		ACPI_ERROR((AE_INFO,
			    "Null physical address for ACPI table [%s]",
			    signature));
		return;
	}

	/* Map just the table header */

	mapped_table =
	    acpi_os_map_memory(address, sizeof(struct acpi_table_header));
	if (!mapped_table) {
		return;
	}

	/* If a particular signature is expected (DSDT/FACS), it must match */

	if (signature && !ACPI_COMPARE_NAME(mapped_table->signature, signature)) {
		ACPI_ERROR((AE_INFO,
			    "Invalid signature 0x%X for ACPI table, expected [%s]",
			    *ACPI_CAST_PTR(u32, mapped_table->signature),
			    signature));
		goto unmap_and_exit;
	}

	/*
	 * ACPI Table Override:
	 *
	 * Before we install the table, let the host OS override it with a new
	 * one if desired. Any table within the RSDT/XSDT can be replaced,
	 * including the DSDT which is pointed to by the FADT.
	 */
	status = acpi_os_table_override(mapped_table, &override_table);
	if (ACPI_SUCCESS(status) && override_table) {
		ACPI_INFO((AE_INFO,
			   "%4.4s @ 0x%p Table override, replaced with:",
			   mapped_table->signature, ACPI_CAST_PTR(void,
								  address)));

		acpi_gbl_root_table_list.tables[table_index].pointer =
		    override_table;
		address = ACPI_PTR_TO_PHYSADDR(override_table);

		table_to_install = override_table;
		flags = ACPI_TABLE_ORIGIN_OVERRIDE;
	} else {
		table_to_install = mapped_table;
		flags = ACPI_TABLE_ORIGIN_MAPPED;
	}

	/* Initialize the table entry */

	acpi_gbl_root_table_list.tables[table_index].address = address;
	acpi_gbl_root_table_list.tables[table_index].length =
	    table_to_install->length;
	acpi_gbl_root_table_list.tables[table_index].flags = flags;

	ACPI_MOVE_32_TO_32(&
			   (acpi_gbl_root_table_list.tables[table_index].
			    signature), table_to_install->signature);

	acpi_tb_print_table_header(address, table_to_install);

	if (table_index == ACPI_TABLE_INDEX_DSDT) {

		/* Global integer width is based upon revision of the DSDT */

		acpi_ut_set_integer_width(table_to_install->revision);
	}

	/* The header mapping is released on every path, including override */

      unmap_and_exit:
	acpi_os_unmap_memory(mapped_table, sizeof(struct acpi_table_header));
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_tb_get_root_table_entry
 *
 * PARAMETERS:  table_entry         - Pointer to the RSDT/XSDT table entry
 *              table_entry_size    - sizeof 32 or 64 (RSDT or XSDT)
 *
 * RETURN:      Physical address extracted from the root table
 *
 * DESCRIPTION: Get one root table entry. Handles 32-bit and 64-bit cases on
 *              both 32-bit and 64-bit platforms
 *
 * NOTE:        acpi_physical_address is 32-bit on 32-bit platforms, 64-bit on
 *              64-bit platforms.
 *
 ******************************************************************************/

static acpi_physical_address
acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
{
	u64 address64;

	/*
	 * Get the table physical address (32-bit for RSDT, 64-bit for XSDT):
	 * Note: Addresses are 32-bit aligned (not 64) in both RSDT and XSDT
	 */
	if (table_entry_size == sizeof(u32)) {
		/*
		 * 32-bit platform, RSDT: Return 32-bit table entry
		 * 64-bit platform, RSDT: Expand 32-bit to 64-bit and return
		 */
		return ((acpi_physical_address)
			(*ACPI_CAST_PTR(u32, table_entry)));
	} else {
		/*
		 * 32-bit platform, XSDT: Truncate 64-bit to 32-bit and return
		 * 64-bit platform, XSDT: Move (unaligned) 64-bit to local,
		 *  return 64-bit
		 */
		ACPI_MOVE_64_TO_64(&address64, table_entry);

#if ACPI_MACHINE_WIDTH == 32
		if (address64 > ACPI_UINT32_MAX) {

			/* Will truncate 64-bit address to 32 bits, issue warning */

			ACPI_WARNING((AE_INFO,
				      "64-bit Physical Address in XSDT is too large (0x%8.8X%8.8X),"
				      " truncating",
				      ACPI_FORMAT_UINT64(address64)));
		}
#endif
		return ((acpi_physical_address) (address64));
	}
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_tb_parse_root_table
 *
 * PARAMETERS:  Rsdp                    - Pointer to the RSDP
 *
 * RETURN:      Status
 *
 * DESCRIPTION: This function is called to parse the Root System Description
 *              Table (RSDT or XSDT)
 *
 * NOTE:        Tables are mapped (not copied) for efficiency. The FACS must
 *              be mapped and cannot be copied because it contains the actual
 *              memory location of the ACPI Global Lock.
 *
 ******************************************************************************/

acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
{
	struct acpi_table_rsdp *rsdp;
	u32 table_entry_size;
	u32 i;
	u32 table_count;
	struct acpi_table_header *table;
	acpi_physical_address address;
	acpi_physical_address uninitialized_var(rsdt_address);
	u32 length;
	u8 *table_entry;
	acpi_status status;

	ACPI_FUNCTION_TRACE(tb_parse_root_table);

	/*
	 * Map the entire RSDP and extract the address of the RSDT or XSDT
	 */
	rsdp = acpi_os_map_memory(rsdp_address, sizeof(struct acpi_table_rsdp));
	if (!rsdp) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	acpi_tb_print_table_header(rsdp_address,
				   ACPI_CAST_PTR(struct acpi_table_header,
						 rsdp));

	/* Differentiate between RSDT and XSDT root tables */

	if (rsdp->revision > 1 && rsdp->xsdt_physical_address
	    && !acpi_rsdt_forced) {
		/*
		 * Root table is an XSDT (64-bit physical addresses). We must use the
		 * XSDT if the revision is > 1 and the XSDT pointer is present, as per
		 * the ACPI specification.
		 */
		address = (acpi_physical_address) rsdp->xsdt_physical_address;
		table_entry_size = sizeof(u64);

		/* Keep the RSDT address as a fallback if the XSDT is unusable */

		rsdt_address = (acpi_physical_address)
		    rsdp->rsdt_physical_address;
	} else {
		/* Root table is an RSDT (32-bit physical addresses) */

		address = (acpi_physical_address) rsdp->rsdt_physical_address;
		table_entry_size = sizeof(u32);
	}

	/*
	 * It is not possible to map more than one entry in some environments,
	 * so unmap the RSDP here before mapping other tables
	 */
	acpi_os_unmap_memory(rsdp, sizeof(struct acpi_table_rsdp));

	if (table_entry_size == sizeof(u64)) {
		if (acpi_tb_check_xsdt(address) == AE_NULL_ENTRY) {

			/* XSDT has NULL entry, RSDT is used */

			address = rsdt_address;
			table_entry_size = sizeof(u32);
			ACPI_WARNING((AE_INFO, "BIOS XSDT has NULL entry, "
				      "using RSDT"));
		}
	}

	/* Map the RSDT/XSDT table header to get the full table length */

	table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
	if (!table) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	acpi_tb_print_table_header(address, table);

	/* Get the length of the full table, verify length and map entire table */

	length = table->length;
	acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));

	if (length < sizeof(struct acpi_table_header)) {
		ACPI_ERROR((AE_INFO, "Invalid length 0x%X in RSDT/XSDT",
			    length));
		return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
	}

	table = acpi_os_map_memory(address, length);
	if (!table) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Validate the root table checksum */

	status = acpi_tb_verify_checksum(table, length);
	if (ACPI_FAILURE(status)) {
		acpi_os_unmap_memory(table, length);
		return_ACPI_STATUS(status);
	}

	/* Calculate the number of tables described in the root table */

	table_count = (u32)((table->length - sizeof(struct acpi_table_header)) /
			    table_entry_size);

	/*
	 * First two entries in the table array are reserved for the DSDT
	 * and FACS, which are not actually present in the RSDT/XSDT - they
	 * come from the FADT
	 */
	table_entry =
	    ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
	acpi_gbl_root_table_list.current_table_count = 2;

	/*
	 * Initialize the root table array from the RSDT/XSDT
	 */
	for (i = 0; i < table_count; i++) {
		if (acpi_gbl_root_table_list.current_table_count >=
		    acpi_gbl_root_table_list.max_table_count) {

			/* There is no more room in the root table array, attempt resize */

			status = acpi_tb_resize_root_table_list();
			if (ACPI_FAILURE(status)) {
				ACPI_WARNING((AE_INFO,
					      "Truncating %u table entries!",
					      (unsigned) (table_count -
					       (acpi_gbl_root_table_list.
						current_table_count - 2))));
				break;
			}
		}

		/* Get the table physical address (32-bit for RSDT, 64-bit for XSDT) */

		acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.
						current_table_count].address =
		    acpi_tb_get_root_table_entry(table_entry, table_entry_size);

		table_entry += table_entry_size;
		acpi_gbl_root_table_list.current_table_count++;
	}

	/*
	 * It is not possible to map more than one entry in some environments,
	 * so unmap the root table here before mapping other tables
	 */
	acpi_os_unmap_memory(table, length);

	/*
	 * Complete the initialization of the root table array by examining
	 * the header of each table
	 */
	for (i = 2; i < acpi_gbl_root_table_list.current_table_count; i++) {
		acpi_tb_install_table(acpi_gbl_root_table_list.tables[i].
				      address, NULL, i);

		/* Special case for FADT - get the DSDT and FACS */

		if (ACPI_COMPARE_NAME
		    (&acpi_gbl_root_table_list.tables[i].signature,
		     ACPI_SIG_FADT)) {
			acpi_tb_parse_fadt(i);
		}
	}

	return_ACPI_STATUS(AE_OK);
}
gpl-2.0
aeroevan/android_kernel_asus_grouper
tools/perf/util/color.c
3989
6714
#include "cache.h"
#include "color.h"

/* -1 = not yet decided; set by config ("color.ui") or tty auto-detection */
int perf_use_color_default = -1;

/*
 * Map a color name (or a numeric string) to its ANSI color index.
 * Returns -1 for "normal", 0..255 for recognized values, -2 on failure.
 */
static int parse_color(const char *name, int len)
{
	static const char * const color_names[] = {
		"normal", "black", "red", "green", "yellow",
		"blue", "magenta", "cyan", "white"
	};
	char *end;
	int i;

	for (i = 0; i < (int)ARRAY_SIZE(color_names); i++) {
		const char *str = color_names[i];
		if (!strncasecmp(name, str, len) && !str[len])
			return i - 1;
	}
	i = strtol(name, &end, 10);
	if (end - name == len && i >= -1 && i <= 255)
		return i;
	return -2;
}

/*
 * Map an attribute name to its ANSI SGR code (bold=1, dim=2, ul=4,
 * blink=5, reverse=7). Returns -1 if the name is not recognized.
 */
static int parse_attr(const char *name, int len)
{
	static const int attr_values[] = { 1, 2, 4, 5, 7 };
	static const char * const attr_names[] = {
		"bold", "dim", "ul", "blink", "reverse"
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
		const char *str = attr_names[i];
		if (!strncasecmp(name, str, len) && !str[len])
			return attr_values[i];
	}
	return -1;
}

void color_parse(const char *value, const char *var, char *dst)
{
	color_parse_mem(value, strlen(value), var, dst);
}

/*
 * Parse a "[fg [bg]] [attr]" color spec into an ANSI escape sequence in dst.
 * Dies via die() on a malformed spec.
 * NOTE(review): dst is assumed large enough for the longest escape sequence,
 * and the "reset" test is len-bounded so any prefix of "reset" matches --
 * confirm both are intended.
 */
void color_parse_mem(const char *value, int value_len, const char *var,
		char *dst)
{
	const char *ptr = value;
	int len = value_len;
	int attr = -1;
	int fg = -2;
	int bg = -2;

	if (!strncasecmp(value, "reset", len)) {
		strcpy(dst, PERF_COLOR_RESET);
		return;
	}

	/* [fg [bg]] [attr] */
	while (len > 0) {
		const char *word = ptr;
		int val, wordlen = 0;

		/* Scan one whitespace-delimited word */
		while (len > 0 && !isspace(word[wordlen])) {
			wordlen++;
			len--;
		}

		ptr = word + wordlen;
		while (len > 0 && isspace(*ptr)) {
			ptr++;
			len--;
		}

		/* First color word is fg, second is bg; anything else is bad */
		val = parse_color(word, wordlen);
		if (val >= -1) {
			if (fg == -2) {
				fg = val;
				continue;
			}
			if (bg == -2) {
				bg = val;
				continue;
			}
			goto bad;
		}
		val = parse_attr(word, wordlen);
		if (val < 0 || attr != -1)
			goto bad;
		attr = val;
	}

	/* Emit ESC [ <attr> ; <fg> ; <bg> m with only the parts present */
	if (attr >= 0 || fg >= 0 || bg >= 0) {
		int sep = 0;

		*dst++ = '\033';
		*dst++ = '[';
		if (attr >= 0) {
			*dst++ = '0' + attr;
			sep++;
		}
		if (fg >= 0) {
			if (sep++)
				*dst++ = ';';
			if (fg < 8) {
				*dst++ = '3';
				*dst++ = '0' + fg;
			} else {
				/* 256-color palette form */
				dst += sprintf(dst, "38;5;%d", fg);
			}
		}
		if (bg >= 0) {
			if (sep++)
				*dst++ = ';';
			if (bg < 8) {
				*dst++ = '4';
				*dst++ = '0' + bg;
			} else {
				dst += sprintf(dst, "48;5;%d", bg);
			}
		}
		*dst++ = 'm';
	}
	*dst = 0;
	return;
bad:
	die("bad color value '%.*s' for variable '%s'", value_len, value, var);
}

/*
 * Resolve a color config value to 0 (off) or 1 (on). "auto" (and plain
 * truth values) enable color only when stdout is a tty or a color-capable
 * pager is in use, and TERM is not "dumb".
 */
int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
{
	if (value) {
		if (!strcasecmp(value, "never"))
			return 0;
		if (!strcasecmp(value, "always"))
			return 1;
		if (!strcasecmp(value, "auto"))
			goto auto_color;
	}

	/* Missing or explicit false to turn off colorization */
	if (!perf_config_bool(var, value))
		return 0;

	/* any normal truth value defaults to 'auto' */
auto_color:
	if (stdout_is_tty < 0)
		stdout_is_tty = isatty(1);
	if (stdout_is_tty || (pager_in_use() && pager_use_color)) {
		char *term = getenv("TERM");
		if (term && strcmp(term, "dumb"))
			return 1;
	}
	return 0;
}

int perf_color_default_config(const char *var, const char *value, void *cb)
{
	if (!strcmp(var, "color.ui")) {
		perf_use_color_default = perf_config_colorbool(var, value, -1);
		return 0;
	}

	return perf_default_config(var, value, cb);
}

/*
 * Format into bf, wrapping the output in color/reset escapes when color is
 * enabled; an optional trail string is appended.
 * NOTE(review): on truncation "size - r" can wrap (snprintf returns would-be
 * lengths) -- verify callers pass buffers large enough.
 */
static int __color_vsnprintf(char *bf, size_t size, const char *color,
			     const char *fmt, va_list args, const char *trail)
{
	int r = 0;

	/*
	 * Auto-detect:
	 */
	if (perf_use_color_default < 0) {
		if (isatty(1) || pager_in_use())
			perf_use_color_default = 1;
		else
			perf_use_color_default = 0;
	}

	if (perf_use_color_default && *color)
		r += snprintf(bf, size, "%s", color);
	r += vsnprintf(bf + r, size - r, fmt, args);
	if (perf_use_color_default && *color)
		r += snprintf(bf + r, size - r, "%s", PERF_COLOR_RESET);
	if (trail)
		r += snprintf(bf + r, size - r, "%s", trail);
	return r;
}

/* Stream counterpart of __color_vsnprintf(); same auto-detect logic */
static int __color_vfprintf(FILE *fp, const char *color, const char *fmt,
		va_list args, const char *trail)
{
	int r = 0;

	/*
	 * Auto-detect:
	 */
	if (perf_use_color_default < 0) {
		if (isatty(1) || pager_in_use())
			perf_use_color_default = 1;
		else
			perf_use_color_default = 0;
	}

	if (perf_use_color_default && *color)
		r += fprintf(fp, "%s", color);
	r += vfprintf(fp, fmt, args);
	if (perf_use_color_default && *color)
		r += fprintf(fp, "%s", PERF_COLOR_RESET);
	if (trail)
		r += fprintf(fp, "%s", trail);
	return r;
}

int color_vsnprintf(char *bf, size_t size, const char *color,
		    const char *fmt, va_list args)
{
	return __color_vsnprintf(bf, size, color, fmt, args, NULL);
}

int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args)
{
	return __color_vfprintf(fp, color, fmt, args, NULL);
}

int color_snprintf(char *bf, size_t size, const char *color,
		   const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = color_vsnprintf(bf, size, color, fmt, args);
	va_end(args);
	return r;
}

int color_fprintf(FILE *fp, const char *color, const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = color_vfprintf(fp, color, fmt, args);
	va_end(args);
	return r;
}

int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = __color_vfprintf(fp, color, fmt, args, "\n");
	va_end(args);
	return r;
}

/*
 * This function splits the buffer by newlines and colors the lines individually.
 *
 * Returns 0 on success.
 */
int color_fwrite_lines(FILE *fp, const char *color,
		size_t count, const char *buf)
{
	if (!*color)
		return fwrite(buf, count, 1, fp) != 1;

	while (count) {
		char *p = memchr(buf, '\n', count);

		/* Color each non-empty segment; p == NULL means last segment */
		if (p != buf && (fputs(color, fp) < 0 ||
				fwrite(buf, p ? (size_t)(p - buf) : count,
					1, fp) != 1 ||
				fputs(PERF_COLOR_RESET, fp) < 0))
			return -1;
		if (!p)
			return 0;
		if (fputc('\n', fp) < 0)
			return -1;
		count -= p + 1 - buf;
		buf = p + 1;
	}
	return 0;
}

const char *get_percent_color(double percent)
{
	const char *color = PERF_COLOR_NORMAL;

	/*
	 * We color high-overhead entries in red, mid-overhead
	 * entries in green - and keep the low overhead places
	 * normal:
	 */
	if (percent >= MIN_RED)
		color = PERF_COLOR_RED;
	else {
		if (percent > MIN_GREEN)
			color = PERF_COLOR_GREEN;
	}
	return color;
}

int percent_color_fprintf(FILE *fp, const char *fmt, double percent)
{
	int r;
	const char *color;

	color = get_percent_color(percent);
	r = color_fprintf(fp, color, fmt, percent);

	return r;
}

int percent_color_snprintf(char *bf, size_t size, const char *fmt,
			   double percent)
{
	const char *color = get_percent_color(percent);
	return color_snprintf(bf, size, color, fmt, percent);
}
gpl-2.0
flyelfrickchu/imx6dl_kernel_imx
arch/arm/mm/copypage-v4wt.c
4245
2455
/*
 *  linux/arch/arm/mm/copypage-v4wt.S
 *
 *  Copyright (C) 1995-1999 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is for CPUs with a writethrough cache and 'flush ID cache' is
 *  the only supported cache operation.
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * ARMv4 optimised copy_user_highpage
 *
 * Since we have writethrough caches, we don't have to worry about
 * dirty data in the cache.  However, we do have to ensure that
 * subsequent reads are up to date.
 *
 * __naked: no compiler prologue/epilogue; the asm manages the stack
 * itself and relies on kto/kfrom arriving in r0/r1 per the AAPCS.
 */
static void __naked
v4wt_copy_user_page(void *kto, const void *kfrom)
{
	asm("\
	stmfd	sp!, {r4, lr}			@ 2\n\
	mov	r2, %2				@ 1\n\
	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
1:	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1\n\
	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
	subs	r2, r2, #1			@ 1\n\
	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
	ldmneia	r1!, {r3, r4, ip, lr}		@ 4\n\
	bne	1b				@ 1\n\
	mcr	p15, 0, r2, c7, c7, 0		@ flush ID cache\n\
	ldmfd	sp!, {r4, pc}			@ 3"
	:
	: "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
}

/* Map both pages, copy 64 bytes per loop iteration, then unmap in reverse */
void v4wt_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to, KM_USER0);
	kfrom = kmap_atomic(from, KM_USER1);
	v4wt_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom, KM_USER1);
	kunmap_atomic(kto, KM_USER0);
}

/*
 * ARMv4 optimised clear_user_page
 *
 * Same story as above.
 */
void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
	asm volatile("\
	mov	r1, %2				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1\n\
	mcr	p15, 0, r2, c7, c7, 0		@ flush ID cache"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 64)
	: "r1", "r2", "r3", "ip", "lr");
	kunmap_atomic(kaddr, KM_USER0);
}

/* Hooks installed for write-through ARMv4 CPUs */
struct cpu_user_fns v4wt_user_fns __initdata = {
	.cpu_clear_user_highpage = v4wt_clear_user_highpage,
	.cpu_copy_user_highpage	= v4wt_copy_user_highpage,
};
gpl-2.0
leehz/android_kernel_samsung_ms013g
net/sunrpc/xprtrdma/svc_rdma_marshal.c
5269
11239
/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <asm/unaligned.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

/*
 * Decodes a read chunk list. The expected format is as follows:
 *    descrim  : xdr_one
 *    position : u32 offset into XDR stream
 *    handle   : u32 RKEY
 *    . . .
 *  end-of-list: xdr_zero
 *
 * Returns a pointer just past the terminator, or NULL if a chunk would
 * extend past vaend (malformed/overlong list).
 */
static u32 *decode_read_list(u32 *va, u32 *vaend)
{
	struct rpcrdma_read_chunk *ch = (struct rpcrdma_read_chunk *)va;

	while (ch->rc_discrim != xdr_zero) {
		/* Bounds-check each entry before walking past it */
		if (((unsigned long)ch + sizeof(struct rpcrdma_read_chunk)) >
		    (unsigned long)vaend) {
			dprintk("svcrdma: vaend=%p, ch=%p\n", vaend, ch);
			return NULL;
		}
		ch++;
	}
	return (u32 *)&ch->rc_position;
}

/*
 * Determine number of chunks and total bytes in chunk list. The chunk
 * list has already been verified to fit within the RPCRDMA header.
 */
void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
			       int *ch_count, int *byte_count)
{
	/* compute the number of bytes represented by read chunks */
	*byte_count = 0;
	*ch_count = 0;
	for (; ch->rc_discrim != 0; ch++) {
		*byte_count = *byte_count + ntohl(ch->rc_target.rs_length);
		*ch_count = *ch_count + 1;
	}
}

/*
 * Decodes a write chunk list. The expected format is as follows:
 *    descrim  : xdr_one
 *    nchunks  : <count>
 *       handle   : u32 RKEY              ---+
 *       length   : u32 <len of segment>     |
 *       offset   : remote va + <count>      |
 *       . . .                            ---+
 *
 * Returns a pointer just past the array, or NULL if the declared chunk
 * count would extend past vaend.
 */
static u32 *decode_write_list(u32 *va, u32 *vaend)
{
	int nchunks;

	struct rpcrdma_write_array *ary =
		(struct rpcrdma_write_array *)va;

	/* Check for not write-array */
	if (ary->wc_discrim == xdr_zero)
		return (u32 *)&ary->wc_nchunks;

	if ((unsigned long)ary + sizeof(struct rpcrdma_write_array) >
	    (unsigned long)vaend) {
		dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
		return NULL;
	}
	nchunks = ntohl(ary->wc_nchunks);
	if (((unsigned long)&ary->wc_array[0] +
	     (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
	    (unsigned long)vaend) {
		dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
			ary, nchunks, vaend);
		return NULL;
	}
	/*
	 * rs_length is the 2nd 4B field in wc_target and taking its
	 * address skips the list terminator
	 */
	return (u32 *)&ary->wc_array[nchunks].wc_target.rs_length;
}

/* Same layout/validation as decode_write_list, but for the reply array */
static u32 *decode_reply_array(u32 *va, u32 *vaend)
{
	int nchunks;
	struct rpcrdma_write_array *ary =
		(struct rpcrdma_write_array *)va;

	/* Check for no reply-array */
	if (ary->wc_discrim == xdr_zero)
		return (u32 *)&ary->wc_nchunks;

	if ((unsigned long)ary + sizeof(struct rpcrdma_write_array) >
	    (unsigned long)vaend) {
		dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
		return NULL;
	}
	nchunks = ntohl(ary->wc_nchunks);
	if (((unsigned long)&ary->wc_array[0] +
	     (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
	    (unsigned long)vaend) {
		dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
			ary, nchunks, vaend);
		return NULL;
	}
	return (u32 *)&ary->wc_array[nchunks];
}

/*
 * Decode (and byte-swap in place) the RPC/RDMA header at the front of
 * rq_arg, advance the head iovec past it, and return the header length
 * (or a negative errno on a short/invalid header).
 */
int svc_rdma_xdr_decode_req(struct rpcrdma_msg **rdma_req,
			    struct svc_rqst *rqstp)
{
	struct rpcrdma_msg *rmsgp = NULL;
	u32 *va;
	u32 *vaend;
	u32 hdr_len;

	rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;

	/* Verify that there's enough bytes for header + something */
	if (rqstp->rq_arg.len <= RPCRDMA_HDRLEN_MIN) {
		dprintk("svcrdma: header too short = %d\n",
			rqstp->rq_arg.len);
		return -EINVAL;
	}

	/* Decode the header */
	rmsgp->rm_xid = ntohl(rmsgp->rm_xid);
	rmsgp->rm_vers = ntohl(rmsgp->rm_vers);
	rmsgp->rm_credit = ntohl(rmsgp->rm_credit);
	rmsgp->rm_type = ntohl(rmsgp->rm_type);

	if (rmsgp->rm_vers != RPCRDMA_VERSION)
		return -ENOSYS;

	/* Pull in the extra for the padded case and bump our pointer */
	if (rmsgp->rm_type == RDMA_MSGP) {
		int hdrlen;
		rmsgp->rm_body.rm_padded.rm_align =
			ntohl(rmsgp->rm_body.rm_padded.rm_align);
		rmsgp->rm_body.rm_padded.rm_thresh =
			ntohl(rmsgp->rm_body.rm_padded.rm_thresh);

		va = &rmsgp->rm_body.rm_padded.rm_pempty[4];
		rqstp->rq_arg.head[0].iov_base = va;
		hdrlen = (u32)((unsigned long)va - (unsigned long)rmsgp);
		rqstp->rq_arg.head[0].iov_len -= hdrlen;
		if (hdrlen > rqstp->rq_arg.len)
			return -EINVAL;
		return hdrlen;
	}

	/* The chunk list may contain either a read chunk list or a write
	 * chunk list and a reply chunk list.
	 */
	va = &rmsgp->rm_body.rm_chunks[0];
	vaend = (u32 *)((unsigned long)rmsgp + rqstp->rq_arg.len);
	va = decode_read_list(va, vaend);
	if (!va)
		return -EINVAL;
	va = decode_write_list(va, vaend);
	if (!va)
		return -EINVAL;
	va = decode_reply_array(va, vaend);
	if (!va)
		return -EINVAL;

	rqstp->rq_arg.head[0].iov_base = va;
	hdr_len = (unsigned long)va - (unsigned long)rmsgp;
	rqstp->rq_arg.head[0].iov_len -= hdr_len;

	*rdma_req = rmsgp;
	return hdr_len;
}

/*
 * Re-skip an already-validated header on a deferred request: the fields
 * were byte-swapped by svc_rdma_xdr_decode_req(), so no bounds checks or
 * swaps are repeated here.
 */
int svc_rdma_xdr_decode_deferred_req(struct svc_rqst *rqstp)
{
	struct rpcrdma_msg *rmsgp = NULL;
	struct rpcrdma_read_chunk *ch;
	struct rpcrdma_write_array *ary;
	u32 *va;
	u32 hdrlen;

	dprintk("svcrdma: processing deferred RDMA header on rqstp=%p\n",
		rqstp);
	rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;

	/* Pull in the extra for the padded case and bump our pointer */
	if (rmsgp->rm_type == RDMA_MSGP) {
		va = &rmsgp->rm_body.rm_padded.rm_pempty[4];
		rqstp->rq_arg.head[0].iov_base = va;
		hdrlen = (u32)((unsigned long)va - (unsigned long)rmsgp);
		rqstp->rq_arg.head[0].iov_len -= hdrlen;
		return hdrlen;
	}

	/*
	 * Skip all chunks to find RPC msg. These were previously processed
	 */
	va = &rmsgp->rm_body.rm_chunks[0];

	/* Skip read-list */
	for (ch = (struct rpcrdma_read_chunk *)va;
	     ch->rc_discrim != xdr_zero; ch++);
	va = (u32 *)&ch->rc_position;

	/* Skip write-list */
	ary = (struct rpcrdma_write_array *)va;
	if (ary->wc_discrim == xdr_zero)
		va = (u32 *)&ary->wc_nchunks;
	else
		/*
		 * rs_length is the 2nd 4B field in wc_target and taking its
		 * address skips the list terminator
		 */
		va = (u32 *)&ary->wc_array[ary->wc_nchunks].wc_target.rs_length;

	/* Skip reply-array */
	ary = (struct rpcrdma_write_array *)va;
	if (ary->wc_discrim == xdr_zero)
		va = (u32 *)&ary->wc_nchunks;
	else
		va = (u32 *)&ary->wc_array[ary->wc_nchunks];

	rqstp->rq_arg.head[0].iov_base = va;
	hdrlen = (unsigned long)va - (unsigned long)rmsgp;
	rqstp->rq_arg.head[0].iov_len -= hdrlen;

	return hdrlen;
}

/* Build an RDMA_ERROR reply at va; returns the encoded length in bytes */
int svc_rdma_xdr_encode_error(struct svcxprt_rdma *xprt,
			      struct rpcrdma_msg *rmsgp,
			      enum rpcrdma_errcode err, u32 *va)
{
	u32 *startp = va;

	*va++ = htonl(rmsgp->rm_xid);
	*va++ = htonl(rmsgp->rm_vers);
	*va++ = htonl(xprt->sc_max_requests);
	*va++ = htonl(RDMA_ERROR);
	*va++ = htonl(err);
	if (err == ERR_VERS) {
		/* supported version range: low, high */
		*va++ = htonl(RPCRDMA_VERSION);
		*va++ = htonl(RPCRDMA_VERSION);
	}

	return (int)((unsigned long)va - (unsigned long)startp);
}

/* Compute the header length of an already-encoded reply (no read-list) */
int svc_rdma_xdr_get_reply_hdr_len(struct rpcrdma_msg *rmsgp)
{
	struct rpcrdma_write_array *wr_ary;

	/* There is no read-list in a reply */

	/* skip write list */
	wr_ary = (struct rpcrdma_write_array *)
		&rmsgp->rm_body.rm_chunks[1];
	if (wr_ary->wc_discrim)
		wr_ary = (struct rpcrdma_write_array *)
			&wr_ary->wc_array[ntohl(wr_ary->wc_nchunks)].
			wc_target.rs_length;
	else
		wr_ary = (struct rpcrdma_write_array *)
			&wr_ary->wc_nchunks;

	/* skip reply array */
	if (wr_ary->wc_discrim)
		wr_ary = (struct rpcrdma_write_array *)
			&wr_ary->wc_array[ntohl(wr_ary->wc_nchunks)];
	else
		wr_ary = (struct rpcrdma_write_array *)
			&wr_ary->wc_nchunks;

	return (unsigned long) wr_ary - (unsigned long) rmsgp;
}

void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *rmsgp, int chunks)
{
	struct rpcrdma_write_array *ary;

	/* no read-list */
	rmsgp->rm_body.rm_chunks[0] = xdr_zero;

	/* write-array discrim */
	ary = (struct rpcrdma_write_array *)
		&rmsgp->rm_body.rm_chunks[1];
	ary->wc_discrim = xdr_one;
	ary->wc_nchunks = htonl(chunks);

	/* write-list terminator */
	ary->wc_array[chunks].wc_target.rs_handle = xdr_zero;

	/* reply-array discriminator */
	ary->wc_array[chunks].wc_target.rs_length = xdr_zero;
}

void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *ary,
				     int chunks)
{
	ary->wc_discrim = xdr_one;
	ary->wc_nchunks = htonl(chunks);
}

void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *ary,
				     int chunk_no,
				     __be32 rs_handle,
				     __be64 rs_offset,
				     u32 write_len)
{
	struct rpcrdma_segment *seg = &ary->wc_array[chunk_no].wc_target;
	seg->rs_handle = rs_handle;
	seg->rs_offset = rs_offset;
	seg->rs_length = htonl(write_len);
}

void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *xprt,
				      struct rpcrdma_msg *rdma_argp,
				      struct rpcrdma_msg *rdma_resp,
				      enum rpcrdma_proc rdma_type)
{
	rdma_resp->rm_xid = htonl(rdma_argp->rm_xid);
	rdma_resp->rm_vers = htonl(rdma_argp->rm_vers);
	rdma_resp->rm_credit = htonl(xprt->sc_max_requests);
	rdma_resp->rm_type = htonl(rdma_type);

	/* Encode <nul> chunks lists */
	rdma_resp->rm_body.rm_chunks[0] = xdr_zero;
	rdma_resp->rm_body.rm_chunks[1] = xdr_zero;
	rdma_resp->rm_body.rm_chunks[2] = xdr_zero;
}
gpl-2.0
Dearms/android_kernel_xiaomi_msm8960
arch/mips/jz4740/timer.c
7573
1470
/*
 *  Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
 *  JZ4740 platform timer support
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under  the terms of the GNU General  Public License as published by the
 *  Free Software Foundation;  either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include "timer.h"

#include <asm/mach-jz4740/base.h>

/* Virtual base of the timer/counter unit registers; mapped in jz4740_timer_init() */
void __iomem *jz4740_timer_base;

/* Clear the watchdog's stop bit (bit 16) so its clock runs */
void jz4740_timer_enable_watchdog(void)
{
	writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_CLEAR);
}
EXPORT_SYMBOL_GPL(jz4740_timer_enable_watchdog);

/* Set the watchdog's stop bit (bit 16) to gate its clock off */
void jz4740_timer_disable_watchdog(void)
{
	writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_SET);
}
EXPORT_SYMBOL_GPL(jz4740_timer_disable_watchdog);

/*
 * Map the timer register window and quiesce the unit: stop the clocks of
 * the timers not used as system timers and mask all timer interrupts.
 */
void __init jz4740_timer_init(void)
{
	jz4740_timer_base = ioremap(JZ4740_TCU_BASE_ADDR, 0x100);

	if (!jz4740_timer_base)
		panic("Failed to ioremap timer registers");

	/* Disable all timer clocks except for those used as system timers */
	writel(0x000100fc, jz4740_timer_base + JZ_REG_TIMER_STOP_SET);

	/* Timer irqs are unmasked by default, mask them */
	writel(0x00ff00ff, jz4740_timer_base + JZ_REG_TIMER_MASK_SET);
}
gpl-2.0
Ca1ne/Enoch-Sense-Kernel-old
arch/mn10300/unit-asb2305/unit-init.c
7573
1535
/* ASB2305 Initialisation * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/param.h> #include <linux/init.h> #include <linux/pci.h> #include <asm/io.h> #include <asm/setup.h> #include <asm/processor.h> #include <asm/intctl-regs.h> #include <asm/serial-regs.h> #include <unit/serial.h> /* * initialise some of the unit hardware before gdbstub is set up */ asmlinkage void __init unit_init(void) { #ifndef CONFIG_GDBSTUB_ON_TTYSx /* set the 16550 interrupt line to level 3 if not being used for GDB */ #ifdef CONFIG_EXT_SERIAL_IRQ_LEVEL set_intr_level(XIRQ0, NUM2GxICR_LEVEL(CONFIG_EXT_SERIAL_IRQ_LEVEL)); #endif #endif /* CONFIG_GDBSTUB_ON_TTYSx */ } /* * initialise the rest of the unit hardware after gdbstub is ready */ void __init unit_setup(void) { #ifdef CONFIG_PCI unit_pci_init(); #endif } /* * initialise the external interrupts used by a unit of this type */ void __init unit_init_IRQ(void) { unsigned int extnum; for (extnum = 0; extnum < NR_XIRQS; extnum++) { switch (GET_XIRQ_TRIGGER(extnum)) { case XIRQ_TRIGGER_HILEVEL: case XIRQ_TRIGGER_LOWLEVEL: mn10300_set_lateack_irq_type(XIRQ2IRQ(extnum)); break; default: break; } } }
gpl-2.0
Al3XKOoL/tmp
drivers/pcmcia/sa1111_lubbock.c
9365
5190
/*
 * linux/drivers/pcmcia/pxa2xx_lubbock.c
 *
 * Author:	George Davis
 * Created:	Jan 10, 2002
 * Copyright:	MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Originally based upon linux/drivers/pcmcia/sa1100_neponset.c
 *
 * Lubbock PCMCIA specific routines.
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/delay.h>

#include <mach/hardware.h>
#include <asm/hardware/sa1111.h>
#include <asm/mach-types.h>
#include <mach/lubbock.h>

#include "sa1111_generic.h"

/*
 * Translate the requested socket power state (Vcc/Vpp) into MAX1602
 * control-pin settings, apply them, and re-sense the voltage pins for
 * the 3.3V case (see HACK ALERT below). Returns 0 on success, -1 on an
 * unsupported voltage combination.
 */
static int
lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
				const socket_state_t *state)
{
	struct sa1111_pcmcia_socket *s = to_skt(skt);
	unsigned int pa_dwr_mask, pa_dwr_set, misc_mask, misc_set;
	int ret = 0;

	pa_dwr_mask = pa_dwr_set = misc_mask = misc_set = 0;

	/* Lubbock uses the Maxim MAX1602, with the following connections:
	 *
	 * Socket 0 (PCMCIA):
	 *	MAX1602	Lubbock		Register
	 *	Pin	Signal
	 *	-----	-------		----------------------
	 *	A0VPP	S0_PWR0		SA-1111 GPIO A<0>
	 *	A1VPP	S0_PWR1		SA-1111 GPIO A<1>
	 *	A0VCC	S0_PWR2		SA-1111 GPIO A<2>
	 *	A1VCC	S0_PWR3		SA-1111 GPIO A<3>
	 *	VX	VCC
	 *	VY	+3.3V
	 *	12IN	+12V
	 *	CODE	+3.3V		Cirrus  Code, CODE = High (VY)
	 *
	 * Socket 1 (CF):
	 *	MAX1602	Lubbock		Register
	 *	Pin	Signal
	 *	-----	-------		----------------------
	 *	A0VPP	GND		VPP is not connected
	 *	A1VPP	GND		VPP is not connected
	 *	A0VCC	S1_PWR0		MISC_WR<14>
	 *	A1VCC	S1_PWR1		MISC_WR<15>
	 *	VX	VCC
	 *	VY	+3.3V
	 *	12IN	GND		VPP is not connected
	 *	CODE	+3.3V		Cirrus  Code, CODE = High (VY)
	 *
	 */
again:
	switch (skt->nr) {
	case 0:
		pa_dwr_mask = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;

		switch (state->Vcc) {
		case 0: /* Hi-Z */
			break;

		case 33: /* VY */
			pa_dwr_set |= GPIO_A3;
			break;

		case 50: /* VX */
			pa_dwr_set |= GPIO_A2;
			break;

		default:
			printk(KERN_ERR "%s(): unrecognized Vcc %u\n",
			       __func__, state->Vcc);
			ret = -1;
		}

		switch (state->Vpp) {
		case 0: /* Hi-Z */
			break;

		case 120: /* 12IN */
			pa_dwr_set |= GPIO_A1;
			break;

		default: /* VCC */
			if (state->Vpp == state->Vcc)
				pa_dwr_set |= GPIO_A0;
			else {
				printk(KERN_ERR "%s(): unrecognized Vpp %u\n",
				       __func__, state->Vpp);
				ret = -1;
				break;
			}
		}
		break;

	case 1:
		misc_mask = (1 << 15) | (1 << 14);

		switch (state->Vcc) {
		case 0: /* Hi-Z */
			break;

		case 33: /* VY */
			misc_set |= 1 << 15;
			break;

		case 50: /* VX */
			misc_set |= 1 << 14;
			break;

		default:
			printk(KERN_ERR "%s(): unrecognized Vcc %u\n",
			       __func__, state->Vcc);
			ret = -1;
			break;
		}

		/* CF socket has no VPP wiring; only Vpp == Vcc or 0 is valid */
		if (state->Vpp != state->Vcc && state->Vpp != 0) {
			printk(KERN_ERR "%s(): CF slot cannot support Vpp %u\n",
			       __func__, state->Vpp);
			ret = -1;
			break;
		}
		break;

	default:
		ret = -1;
	}

	if (ret == 0)
		ret = sa1111_pcmcia_configure_socket(skt, state);

	if (ret == 0) {
		lubbock_set_misc_wr(misc_mask, misc_set);
		sa1111_set_io(s->dev, pa_dwr_mask, pa_dwr_set);
	}

#if 1
	if (ret == 0 && state->Vcc == 33) {
		struct pcmcia_state new_state;

		/*
		 * HACK ALERT:
		 * We can't sense the voltage properly on Lubbock before
		 * actually applying some power to the socket (catch 22).
		 * Resense the socket Voltage Sense pins after applying
		 * socket power.
		 *
		 * Note: It takes about 2.5ms for the MAX1602 VCC output
		 * to rise.
		 */
		mdelay(3);

		sa1111_pcmcia_socket_state(skt, &new_state);

		if (!new_state.vs_3v && !new_state.vs_Xv) {
			/*
			 * Switch to 5V,  Configure socket with 5V voltage
			 */
			lubbock_set_misc_wr(misc_mask, 0);
			sa1111_set_io(s->dev, pa_dwr_mask, 0);

			/*
			 * It takes about 100ms to turn off Vcc.
			 */
			mdelay(100);

			/*
			 * We need to hack around the const qualifier as
			 * well to keep this ugly workaround localized and
			 * not force it to the rest of the code.  Barf bags
			 * available in the seat pocket in front of you!
			 */
			((socket_state_t *)state)->Vcc = 50;
			((socket_state_t *)state)->Vpp = 50;
			goto again;
		}
	}
#endif

	return ret;
}

static struct pcmcia_low_level lubbock_pcmcia_ops = {
	.owner			= THIS_MODULE,
	.configure_socket	= lubbock_pcmcia_configure_socket,
	.first			= 0,
	.nr			= 2,
};

#include "pxa2xx_base.h"

/*
 * Probe-time setup: place both MAX1602 power controllers in standby and
 * register the two Lubbock sockets with the PXA2xx PCMCIA core.
 */
int pcmcia_lubbock_init(struct sa1111_dev *sadev)
{
	int ret = -ENODEV;

	if (machine_is_lubbock()) {
		/*
		 * Set GPIO_A<3:0> to be outputs for the MAX1600,
		 * and switch to standby mode.
		 */
		sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
		sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
		sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);

		/* Set CF Socket 1 power to standby mode. */
		lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);

		pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
		pxa2xx_configure_sockets(&sadev->dev);
		ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
					pxa2xx_drv_pcmcia_add_one);
	}

	return ret;
}

MODULE_LICENSE("GPL");
gpl-2.0
Schischu/xoom-ElementalX
arch/x86/lib/mmx_32.c
13461
8212
/* * MMX 3DNow! library helper functions * * To do: * We can use MMX just for prefetch in IRQ's. This may be a win. * (reported so on K6-III) * We should use a better code neutral filler for the short jump * leal ebx. [ebx] is apparently best for K6-2, but Cyrix ?? * We also want to clobber the filler register so we don't get any * register forwarding stalls on the filler. * * Add *user handling. Checksums are not a win with MMX on any CPU * tested so far for any MMX solution figured. * * 22/09/2000 - Arjan van de Ven * Improved for non-egineering-sample Athlons * */ #include <linux/hardirq.h> #include <linux/string.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/types.h> #include <asm/i387.h> #include <asm/asm.h> void *_mmx_memcpy(void *to, const void *from, size_t len) { void *p; int i; if (unlikely(in_interrupt())) return __memcpy(to, from, len); p = to; i = len >> 6; /* len/64 */ kernel_fpu_begin(); __asm__ __volatile__ ( "1: prefetch (%0)\n" /* This set is 28 bytes */ " prefetch 64(%0)\n" " prefetch 128(%0)\n" " prefetch 192(%0)\n" " prefetch 256(%0)\n" "2: \n" ".section .fixup, \"ax\"\n" "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ " jmp 2b\n" ".previous\n" _ASM_EXTABLE(1b, 3b) : : "r" (from)); for ( ; i > 5; i--) { __asm__ __volatile__ ( "1: prefetch 320(%0)\n" "2: movq (%0), %%mm0\n" " movq 8(%0), %%mm1\n" " movq 16(%0), %%mm2\n" " movq 24(%0), %%mm3\n" " movq %%mm0, (%1)\n" " movq %%mm1, 8(%1)\n" " movq %%mm2, 16(%1)\n" " movq %%mm3, 24(%1)\n" " movq 32(%0), %%mm0\n" " movq 40(%0), %%mm1\n" " movq 48(%0), %%mm2\n" " movq 56(%0), %%mm3\n" " movq %%mm0, 32(%1)\n" " movq %%mm1, 40(%1)\n" " movq %%mm2, 48(%1)\n" " movq %%mm3, 56(%1)\n" ".section .fixup, \"ax\"\n" "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ " jmp 2b\n" ".previous\n" _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory"); from += 64; to += 64; } for ( ; i > 0; i--) { __asm__ __volatile__ ( " movq (%0), %%mm0\n" " movq 8(%0), %%mm1\n" " movq 16(%0), %%mm2\n" " movq 
24(%0), %%mm3\n" " movq %%mm0, (%1)\n" " movq %%mm1, 8(%1)\n" " movq %%mm2, 16(%1)\n" " movq %%mm3, 24(%1)\n" " movq 32(%0), %%mm0\n" " movq 40(%0), %%mm1\n" " movq 48(%0), %%mm2\n" " movq 56(%0), %%mm3\n" " movq %%mm0, 32(%1)\n" " movq %%mm1, 40(%1)\n" " movq %%mm2, 48(%1)\n" " movq %%mm3, 56(%1)\n" : : "r" (from), "r" (to) : "memory"); from += 64; to += 64; } /* * Now do the tail of the block: */ __memcpy(to, from, len & 63); kernel_fpu_end(); return p; } EXPORT_SYMBOL(_mmx_memcpy); #ifdef CONFIG_MK7 /* * The K7 has streaming cache bypass load/store. The Cyrix III, K6 and * other MMX using processors do not. */ static void fast_clear_page(void *page) { int i; kernel_fpu_begin(); __asm__ __volatile__ ( " pxor %%mm0, %%mm0\n" : : ); for (i = 0; i < 4096/64; i++) { __asm__ __volatile__ ( " movntq %%mm0, (%0)\n" " movntq %%mm0, 8(%0)\n" " movntq %%mm0, 16(%0)\n" " movntq %%mm0, 24(%0)\n" " movntq %%mm0, 32(%0)\n" " movntq %%mm0, 40(%0)\n" " movntq %%mm0, 48(%0)\n" " movntq %%mm0, 56(%0)\n" : : "r" (page) : "memory"); page += 64; } /* * Since movntq is weakly-ordered, a "sfence" is needed to become * ordered again: */ __asm__ __volatile__("sfence\n"::); kernel_fpu_end(); } static void fast_copy_page(void *to, void *from) { int i; kernel_fpu_begin(); /* * maybe the prefetch stuff can go before the expensive fnsave... * but that is for later. 
-AV */ __asm__ __volatile__( "1: prefetch (%0)\n" " prefetch 64(%0)\n" " prefetch 128(%0)\n" " prefetch 192(%0)\n" " prefetch 256(%0)\n" "2: \n" ".section .fixup, \"ax\"\n" "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ " jmp 2b\n" ".previous\n" _ASM_EXTABLE(1b, 3b) : : "r" (from)); for (i = 0; i < (4096-320)/64; i++) { __asm__ __volatile__ ( "1: prefetch 320(%0)\n" "2: movq (%0), %%mm0\n" " movntq %%mm0, (%1)\n" " movq 8(%0), %%mm1\n" " movntq %%mm1, 8(%1)\n" " movq 16(%0), %%mm2\n" " movntq %%mm2, 16(%1)\n" " movq 24(%0), %%mm3\n" " movntq %%mm3, 24(%1)\n" " movq 32(%0), %%mm4\n" " movntq %%mm4, 32(%1)\n" " movq 40(%0), %%mm5\n" " movntq %%mm5, 40(%1)\n" " movq 48(%0), %%mm6\n" " movntq %%mm6, 48(%1)\n" " movq 56(%0), %%mm7\n" " movntq %%mm7, 56(%1)\n" ".section .fixup, \"ax\"\n" "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ " jmp 2b\n" ".previous\n" _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory"); from += 64; to += 64; } for (i = (4096-320)/64; i < 4096/64; i++) { __asm__ __volatile__ ( "2: movq (%0), %%mm0\n" " movntq %%mm0, (%1)\n" " movq 8(%0), %%mm1\n" " movntq %%mm1, 8(%1)\n" " movq 16(%0), %%mm2\n" " movntq %%mm2, 16(%1)\n" " movq 24(%0), %%mm3\n" " movntq %%mm3, 24(%1)\n" " movq 32(%0), %%mm4\n" " movntq %%mm4, 32(%1)\n" " movq 40(%0), %%mm5\n" " movntq %%mm5, 40(%1)\n" " movq 48(%0), %%mm6\n" " movntq %%mm6, 48(%1)\n" " movq 56(%0), %%mm7\n" " movntq %%mm7, 56(%1)\n" : : "r" (from), "r" (to) : "memory"); from += 64; to += 64; } /* * Since movntq is weakly-ordered, a "sfence" is needed to become * ordered again: */ __asm__ __volatile__("sfence \n"::); kernel_fpu_end(); } #else /* CONFIG_MK7 */ /* * Generic MMX implementation without K7 specific streaming */ static void fast_clear_page(void *page) { int i; kernel_fpu_begin(); __asm__ __volatile__ ( " pxor %%mm0, %%mm0\n" : : ); for (i = 0; i < 4096/128; i++) { __asm__ __volatile__ ( " movq %%mm0, (%0)\n" " movq %%mm0, 8(%0)\n" " movq %%mm0, 16(%0)\n" " movq %%mm0, 24(%0)\n" " movq %%mm0, 32(%0)\n" 
" movq %%mm0, 40(%0)\n" " movq %%mm0, 48(%0)\n" " movq %%mm0, 56(%0)\n" " movq %%mm0, 64(%0)\n" " movq %%mm0, 72(%0)\n" " movq %%mm0, 80(%0)\n" " movq %%mm0, 88(%0)\n" " movq %%mm0, 96(%0)\n" " movq %%mm0, 104(%0)\n" " movq %%mm0, 112(%0)\n" " movq %%mm0, 120(%0)\n" : : "r" (page) : "memory"); page += 128; } kernel_fpu_end(); } static void fast_copy_page(void *to, void *from) { int i; kernel_fpu_begin(); __asm__ __volatile__ ( "1: prefetch (%0)\n" " prefetch 64(%0)\n" " prefetch 128(%0)\n" " prefetch 192(%0)\n" " prefetch 256(%0)\n" "2: \n" ".section .fixup, \"ax\"\n" "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ " jmp 2b\n" ".previous\n" _ASM_EXTABLE(1b, 3b) : : "r" (from)); for (i = 0; i < 4096/64; i++) { __asm__ __volatile__ ( "1: prefetch 320(%0)\n" "2: movq (%0), %%mm0\n" " movq 8(%0), %%mm1\n" " movq 16(%0), %%mm2\n" " movq 24(%0), %%mm3\n" " movq %%mm0, (%1)\n" " movq %%mm1, 8(%1)\n" " movq %%mm2, 16(%1)\n" " movq %%mm3, 24(%1)\n" " movq 32(%0), %%mm0\n" " movq 40(%0), %%mm1\n" " movq 48(%0), %%mm2\n" " movq 56(%0), %%mm3\n" " movq %%mm0, 32(%1)\n" " movq %%mm1, 40(%1)\n" " movq %%mm2, 48(%1)\n" " movq %%mm3, 56(%1)\n" ".section .fixup, \"ax\"\n" "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ " jmp 2b\n" ".previous\n" _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory"); from += 64; to += 64; } kernel_fpu_end(); } #endif /* !CONFIG_MK7 */ /* * Favour MMX for page clear and copy: */ static void slow_zero_page(void *page) { int d0, d1; __asm__ __volatile__( "cld\n\t" "rep ; stosl" : "=&c" (d0), "=&D" (d1) :"a" (0), "1" (page), "0" (1024) :"memory"); } void mmx_clear_page(void *page) { if (unlikely(in_interrupt())) slow_zero_page(page); else fast_clear_page(page); } EXPORT_SYMBOL(mmx_clear_page); static void slow_copy_page(void *to, void *from) { int d0, d1, d2; __asm__ __volatile__( "cld\n\t" "rep ; movsl" : "=&c" (d0), "=&D" (d1), "=&S" (d2) : "0" (1024), "1" ((long) to), "2" ((long) from) : "memory"); } void mmx_copy_page(void *to, void *from) { if 
(unlikely(in_interrupt())) slow_copy_page(to, from); else fast_copy_page(to, from); } EXPORT_SYMBOL(mmx_copy_page);
gpl-2.0
bilalliberty/android_kernel_htc_liberty-villec2
drivers/scsi/fnic/fnic_attrs.c
14997
1942
/* * Copyright 2008 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/string.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include "fnic.h" static ssize_t fnic_show_state(struct device *dev, struct device_attribute *attr, char *buf) { struct fc_lport *lp = shost_priv(class_to_shost(dev)); struct fnic *fnic = lport_priv(lp); return snprintf(buf, PAGE_SIZE, "%s\n", fnic_state_str[fnic->state]); } static ssize_t fnic_show_drv_version(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION); } static ssize_t fnic_show_link_state(struct device *dev, struct device_attribute *attr, char *buf) { struct fc_lport *lp = shost_priv(class_to_shost(dev)); return snprintf(buf, PAGE_SIZE, "%s\n", (lp->link_up) ? "Link Up" : "Link Down"); } static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL); static DEVICE_ATTR(drv_version, S_IRUGO, fnic_show_drv_version, NULL); static DEVICE_ATTR(link_state, S_IRUGO, fnic_show_link_state, NULL); struct device_attribute *fnic_attrs[] = { &dev_attr_fnic_state, &dev_attr_drv_version, &dev_attr_link_state, NULL, };
gpl-2.0
wooshy1/android-tegra-nv-2.6.39
drivers/gpu/drm/nouveau/nouveau_perf.c
150
5421
/* * Copyright 2010 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs */ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_pm.h" static void legacy_perf_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nvbios *bios = &dev_priv->vbios; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; char *perf, *entry, *bmp = &bios->data[bios->offset]; int headerlen, use_straps; if (bmp[5] < 0x5 || bmp[6] < 0x14) { NV_DEBUG(dev, "BMP version too old for perf\n"); return; } perf = ROMPTR(bios, bmp[0x73]); if (!perf) { NV_DEBUG(dev, "No memclock table pointer found.\n"); return; } switch (perf[0]) { case 0x12: case 0x14: case 0x18: use_straps = 0; headerlen = 1; break; case 0x01: use_straps = perf[1] & 1; headerlen = (use_straps ? 
8 : 2); break; default: NV_WARN(dev, "Unknown memclock table version %x.\n", perf[0]); return; } entry = perf + headerlen; if (use_straps) entry += (nv_rd32(dev, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1; sprintf(pm->perflvl[0].name, "performance_level_0"); pm->perflvl[0].memory = ROM16(entry[0]) * 20; pm->nr_perflvl = 1; } void nouveau_perf_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nvbios *bios = &dev_priv->vbios; struct bit_entry P; u8 version, headerlen, recordlen, entries; u8 *perf, *entry; int vid, i; if (bios->type == NVBIOS_BIT) { if (bit_table(dev, 'P', &P)) return; if (P.version != 1 && P.version != 2) { NV_WARN(dev, "unknown perf for BIT P %d\n", P.version); return; } perf = ROMPTR(bios, P.data[0]); version = perf[0]; headerlen = perf[1]; if (version < 0x40) { recordlen = perf[3] + (perf[4] * perf[5]); entries = perf[2]; } else { recordlen = perf[2] + (perf[3] * perf[4]); entries = perf[5]; } } else { if (bios->data[bios->offset + 6] < 0x25) { legacy_perf_init(dev); return; } perf = ROMPTR(bios, bios->data[bios->offset + 0x94]); if (!perf) { NV_DEBUG(dev, "perf table pointer invalid\n"); return; } version = perf[1]; headerlen = perf[0]; recordlen = perf[3]; entries = perf[2]; } entry = perf + headerlen; for (i = 0; i < entries; i++) { struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl]; if (entry[0] == 0xff) { entry += recordlen; continue; } switch (version) { case 0x12: case 0x13: case 0x15: perflvl->fanspeed = entry[55]; perflvl->voltage = (recordlen > 56) ? 
entry[56] : 0; perflvl->core = ROM32(entry[1]) * 10; perflvl->memory = ROM32(entry[5]) * 20; break; case 0x21: case 0x23: case 0x24: perflvl->fanspeed = entry[4]; perflvl->voltage = entry[5]; perflvl->core = ROM16(entry[6]) * 1000; if (dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) perflvl->memory = ROM16(entry[11]) * 1000; else perflvl->memory = ROM16(entry[11]) * 2000; break; case 0x25: perflvl->fanspeed = entry[4]; perflvl->voltage = entry[5]; perflvl->core = ROM16(entry[6]) * 1000; perflvl->shader = ROM16(entry[10]) * 1000; perflvl->memory = ROM16(entry[12]) * 1000; break; case 0x30: perflvl->memscript = ROM16(entry[2]); case 0x35: perflvl->fanspeed = entry[6]; perflvl->voltage = entry[7]; perflvl->core = ROM16(entry[8]) * 1000; perflvl->shader = ROM16(entry[10]) * 1000; perflvl->memory = ROM16(entry[12]) * 1000; /*XXX: confirm on 0x35 */ perflvl->unk05 = ROM16(entry[16]) * 1000; break; case 0x40: #define subent(n) entry[perf[2] + ((n) * perf[3])] perflvl->fanspeed = 0; /*XXX*/ perflvl->voltage = entry[2]; perflvl->core = (ROM16(subent(0)) & 0xfff) * 1000; perflvl->shader = (ROM16(subent(1)) & 0xfff) * 1000; perflvl->memory = (ROM16(subent(2)) & 0xfff) * 1000; break; } /* make sure vid is valid */ if (pm->voltage.supported && perflvl->voltage) { vid = nouveau_volt_vid_lookup(dev, perflvl->voltage); if (vid < 0) { NV_DEBUG(dev, "drop perflvl %d, bad vid\n", i); entry += recordlen; continue; } } snprintf(perflvl->name, sizeof(perflvl->name), "performance_level_%d", i); perflvl->id = i; pm->nr_perflvl++; entry += recordlen; } } void nouveau_perf_fini(struct drm_device *dev) { }
gpl-2.0
motley-git/Kernel-GT-P73xx-v2
sound/soc/soc-core.c
150
77345
/* * soc-core.c -- ALSA SoC Audio Layer * * Copyright 2005 Wolfson Microelectronics PLC. * Copyright 2005 Openedhand Ltd. * * Author: Liam Girdwood <lrg@slimlogic.co.uk> * with code, comments and ideas from :- * Richard Purdie <richard@openedhand.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * TODO: * o Add hw rules to enforce rates, etc. * o More testing with other codecs/machines. * o Add more codecs and platforms to ensure good API coverage. * o Support TDM on PCM and I2S */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/bitops.h> #include <linux/debugfs.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <sound/ac97_codec.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/initval.h> static DEFINE_MUTEX(pcm_mutex); static DECLARE_WAIT_QUEUE_HEAD(soc_pm_waitq); #ifdef CONFIG_DEBUG_FS static struct dentry *debugfs_root; #endif static DEFINE_MUTEX(client_mutex); static LIST_HEAD(card_list); static LIST_HEAD(dai_list); static LIST_HEAD(platform_list); static LIST_HEAD(codec_list); static int snd_soc_register_card(struct snd_soc_card *card); static int snd_soc_unregister_card(struct snd_soc_card *card); /* * This is a timeout to do a DAPM powerdown after a stream is closed(). * It can be used to eliminate pops between different playback streams, e.g. * between two audio tracks. */ static int pmdown_time = 5000; module_param(pmdown_time, int, 0); MODULE_PARM_DESC(pmdown_time, "DAPM stream powerdown time (msecs)"); /* * This function forces any delayed work to be queued and run. 
*/ static int run_delayed_work(struct delayed_work *dwork) { int ret; /* cancel any work waiting to be queued. */ ret = cancel_delayed_work(dwork); /* if there was any work waiting then we run it now and * wait for it's completion */ if (ret) { schedule_delayed_work(dwork, 0); flush_scheduled_work(); } return ret; } /* codec register dump */ static ssize_t soc_codec_reg_show(struct snd_soc_codec *codec, char *buf) { int ret, i, step = 1, count = 0; if (!codec->reg_cache_size) return 0; if (codec->reg_cache_step) step = codec->reg_cache_step; count += sprintf(buf, "%s registers\n", codec->name); for (i = 0; i < codec->reg_cache_size; i += step) { if (codec->readable_register && !codec->readable_register(i)) continue; count += sprintf(buf + count, "%2x: ", i); if (count >= PAGE_SIZE - 1) break; if (codec->display_register) { count += codec->display_register(codec, buf + count, PAGE_SIZE - count, i); } else { /* If the read fails it's almost certainly due to * the register being volatile and the device being * powered off. 
*/ ret = codec->read(codec, i); if (ret >= 0) count += snprintf(buf + count, PAGE_SIZE - count, "%4x", ret); else count += snprintf(buf + count, PAGE_SIZE - count, "<no data: %d>", ret); } if (count >= PAGE_SIZE - 1) break; count += snprintf(buf + count, PAGE_SIZE - count, "\n"); if (count >= PAGE_SIZE - 1) break; } /* Truncate count; min() would cause a warning */ if (count >= PAGE_SIZE) count = PAGE_SIZE - 1; return count; } static ssize_t codec_reg_show(struct device *dev, struct device_attribute *attr, char *buf) { struct snd_soc_device *devdata = dev_get_drvdata(dev); return soc_codec_reg_show(devdata->card->codec, buf); } static DEVICE_ATTR(codec_reg, 0444, codec_reg_show, NULL); static ssize_t pmdown_time_show(struct device *dev, struct device_attribute *attr, char *buf) { struct snd_soc_device *socdev = dev_get_drvdata(dev); struct snd_soc_card *card = socdev->card; return sprintf(buf, "%ld\n", card->pmdown_time); } static ssize_t pmdown_time_set(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct snd_soc_device *socdev = dev_get_drvdata(dev); struct snd_soc_card *card = socdev->card; strict_strtol(buf, 10, &card->pmdown_time); return count; } static DEVICE_ATTR(pmdown_time, 0644, pmdown_time_show, pmdown_time_set); #ifdef CONFIG_DEBUG_FS static int codec_reg_open_file(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static ssize_t codec_reg_read_file(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { ssize_t ret; struct snd_soc_codec *codec = file->private_data; char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; ret = soc_codec_reg_show(codec, buf); if (ret >= 0) ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); kfree(buf); return ret; } static ssize_t codec_reg_write_file(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { char buf[32]; int buf_size; char *start = buf; unsigned long reg, value; 
int step = 1; struct snd_soc_codec *codec = file->private_data; buf_size = min(count, (sizeof(buf)-1)); if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT; buf[buf_size] = 0; if (codec->reg_cache_step) step = codec->reg_cache_step; while (*start == ' ') start++; reg = simple_strtoul(start, &start, 16); if ((reg >= codec->reg_cache_size) || (reg % step)) return -EINVAL; while (*start == ' ') start++; if (strict_strtoul(start, 16, &value)) return -EINVAL; codec->write(codec, reg, value); return buf_size; } static const struct file_operations codec_reg_fops = { .open = codec_reg_open_file, .read = codec_reg_read_file, .write = codec_reg_write_file, }; static void soc_init_codec_debugfs(struct snd_soc_codec *codec) { char codec_root[128]; if (codec->dev) snprintf(codec_root, sizeof(codec_root), "%s.%s", codec->name, dev_name(codec->dev)); else snprintf(codec_root, sizeof(codec_root), "%s", codec->name); codec->debugfs_codec_root = debugfs_create_dir(codec_root, debugfs_root); if (!codec->debugfs_codec_root) { printk(KERN_WARNING "ASoC: Failed to create codec debugfs directory\n"); return; } codec->debugfs_reg = debugfs_create_file("codec_reg", 0644, codec->debugfs_codec_root, codec, &codec_reg_fops); if (!codec->debugfs_reg) printk(KERN_WARNING "ASoC: Failed to create codec register debugfs file\n"); codec->debugfs_pop_time = debugfs_create_u32("dapm_pop_time", 0644, codec->debugfs_codec_root, &codec->pop_time); if (!codec->debugfs_pop_time) printk(KERN_WARNING "Failed to create pop time debugfs file\n"); codec->debugfs_dapm = debugfs_create_dir("dapm", codec->debugfs_codec_root); if (!codec->debugfs_dapm) printk(KERN_WARNING "Failed to create DAPM debugfs directory\n"); snd_soc_dapm_debugfs_init(codec); } static void soc_cleanup_codec_debugfs(struct snd_soc_codec *codec) { debugfs_remove_recursive(codec->debugfs_codec_root); } #else static inline void soc_init_codec_debugfs(struct snd_soc_codec *codec) { } static inline void soc_cleanup_codec_debugfs(struct 
snd_soc_codec *codec) { } #endif #ifdef CONFIG_SND_SOC_AC97_BUS /* unregister ac97 codec */ static int soc_ac97_dev_unregister(struct snd_soc_codec *codec) { if (codec->ac97->dev.bus) device_unregister(&codec->ac97->dev); return 0; } /* stop no dev release warning */ static void soc_ac97_device_release(struct device *dev){} /* register ac97 codec to bus */ static int soc_ac97_dev_register(struct snd_soc_codec *codec) { int err; codec->ac97->dev.bus = &ac97_bus_type; codec->ac97->dev.parent = codec->card->dev; codec->ac97->dev.release = soc_ac97_device_release; dev_set_name(&codec->ac97->dev, "%d-%d:%s", codec->card->number, 0, codec->name); err = device_register(&codec->ac97->dev); if (err < 0) { snd_printk(KERN_ERR "Can't register ac97 bus\n"); codec->ac97->dev.bus = NULL; return err; } return 0; } #endif static int soc_pcm_apply_symmetry(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_device *socdev = rtd->socdev; struct snd_soc_card *card = socdev->card; struct snd_soc_dai_link *machine = rtd->dai; struct snd_soc_dai *cpu_dai = machine->cpu_dai; struct snd_soc_dai *codec_dai = machine->codec_dai; int ret; if (codec_dai->symmetric_rates || cpu_dai->symmetric_rates || machine->symmetric_rates) { dev_dbg(card->dev, "Symmetry forces %dHz rate\n", machine->rate); ret = snd_pcm_hw_constraint_minmax(substream->runtime, SNDRV_PCM_HW_PARAM_RATE, machine->rate, machine->rate); if (ret < 0) { dev_err(card->dev, "Unable to apply rate symmetry constraint: %d\n", ret); return ret; } } return 0; } /* * Called by ALSA when a PCM substream is opened, the runtime->hw record is * then initialized and any private data can be allocated. This also calls * startup for the cpu DAI, platform, machine and codec DAI. 
*/ static int soc_pcm_open(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_device *socdev = rtd->socdev; struct snd_soc_card *card = socdev->card; struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_dai_link *machine = rtd->dai; struct snd_soc_platform *platform = card->platform; struct snd_soc_dai *cpu_dai = machine->cpu_dai; struct snd_soc_dai *codec_dai = machine->codec_dai; int ret = 0; mutex_lock(&pcm_mutex); /* startup the audio subsystem */ if (cpu_dai->ops->startup) { ret = cpu_dai->ops->startup(substream, cpu_dai); if (ret < 0) { printk(KERN_ERR "asoc: can't open interface %s\n", cpu_dai->name); goto out; } } if (platform->pcm_ops->open) { ret = platform->pcm_ops->open(substream); if (ret < 0) { printk(KERN_ERR "asoc: can't open platform %s\n", platform->name); goto platform_err; } } if (codec_dai->ops->startup) { ret = codec_dai->ops->startup(substream, codec_dai); if (ret < 0) { printk(KERN_ERR "asoc: can't open codec %s\n", codec_dai->name); goto codec_dai_err; } } if (machine->ops && machine->ops->startup) { ret = machine->ops->startup(substream); if (ret < 0) { printk(KERN_ERR "asoc: %s startup failed\n", machine->name); goto machine_err; } } /* Check that the codec and cpu DAI's are compatible */ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { runtime->hw.rate_min = max(codec_dai->playback.rate_min, cpu_dai->playback.rate_min); runtime->hw.rate_max = min(codec_dai->playback.rate_max, cpu_dai->playback.rate_max); runtime->hw.channels_min = max(codec_dai->playback.channels_min, cpu_dai->playback.channels_min); runtime->hw.channels_max = min(codec_dai->playback.channels_max, cpu_dai->playback.channels_max); runtime->hw.formats = codec_dai->playback.formats & cpu_dai->playback.formats; runtime->hw.rates = codec_dai->playback.rates & cpu_dai->playback.rates; if (codec_dai->playback.rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS)) runtime->hw.rates |= 
cpu_dai->playback.rates; if (cpu_dai->playback.rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS)) runtime->hw.rates |= codec_dai->playback.rates; } else { runtime->hw.rate_min = max(codec_dai->capture.rate_min, cpu_dai->capture.rate_min); runtime->hw.rate_max = min(codec_dai->capture.rate_max, cpu_dai->capture.rate_max); runtime->hw.channels_min = max(codec_dai->capture.channels_min, cpu_dai->capture.channels_min); runtime->hw.channels_max = min(codec_dai->capture.channels_max, cpu_dai->capture.channels_max); runtime->hw.formats = codec_dai->capture.formats & cpu_dai->capture.formats; runtime->hw.rates = codec_dai->capture.rates & cpu_dai->capture.rates; if (codec_dai->capture.rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS)) runtime->hw.rates |= cpu_dai->capture.rates; if (cpu_dai->capture.rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS)) runtime->hw.rates |= codec_dai->capture.rates; } snd_pcm_limit_hw_rates(runtime); if (!runtime->hw.rates) { printk(KERN_ERR "asoc: %s <-> %s No matching rates\n", codec_dai->name, cpu_dai->name); goto config_err; } if (!runtime->hw.formats) { printk(KERN_ERR "asoc: %s <-> %s No matching formats\n", codec_dai->name, cpu_dai->name); goto config_err; } if (!runtime->hw.channels_min || !runtime->hw.channels_max) { printk(KERN_ERR "asoc: %s <-> %s No matching channels\n", codec_dai->name, cpu_dai->name); goto config_err; } /* Symmetry only applies if we've already got an active stream. 
*/ if (cpu_dai->active || codec_dai->active) { ret = soc_pcm_apply_symmetry(substream); if (ret != 0) goto config_err; } pr_debug("asoc: %s <-> %s info:\n", codec_dai->name, cpu_dai->name); pr_debug("asoc: rate mask 0x%x\n", runtime->hw.rates); pr_debug("asoc: min ch %d max ch %d\n", runtime->hw.channels_min, runtime->hw.channels_max); pr_debug("asoc: min rate %d max rate %d\n", runtime->hw.rate_min, runtime->hw.rate_max); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { cpu_dai->playback.active++; codec_dai->playback.active++; } else { cpu_dai->capture.active++; codec_dai->capture.active++; } cpu_dai->active++; codec_dai->active++; card->codec->active++; mutex_unlock(&pcm_mutex); return 0; config_err: if (machine->ops && machine->ops->shutdown) machine->ops->shutdown(substream); machine_err: if (codec_dai->ops->shutdown) codec_dai->ops->shutdown(substream, codec_dai); codec_dai_err: if (platform->pcm_ops->close) platform->pcm_ops->close(substream); platform_err: if (cpu_dai->ops->shutdown) cpu_dai->ops->shutdown(substream, cpu_dai); out: mutex_unlock(&pcm_mutex); return ret; } /* * Power down the audio subsystem pmdown_time msecs after close is called. * This is to ensure there are no pops or clicks in between any music tracks * due to DAPM power cycling. */ static void close_delayed_work(struct work_struct *work) { struct snd_soc_card *card = container_of(work, struct snd_soc_card, delayed_work.work); struct snd_soc_codec *codec = card->codec; struct snd_soc_dai *codec_dai; int i; mutex_lock(&pcm_mutex); for (i = 0; i < codec->num_dai; i++) { codec_dai = &codec->dai[i]; pr_debug("pop wq checking: %s status: %s waiting: %s\n", codec_dai->playback.stream_name, codec_dai->playback.active ? "active" : "inactive", codec_dai->pop_wait ? 
"yes" : "no"); /* are we waiting on this codec DAI stream */ if (codec_dai->pop_wait == 1) { codec_dai->pop_wait = 0; snd_soc_dapm_stream_event(codec, codec_dai->playback.stream_name, SND_SOC_DAPM_STREAM_STOP); } } mutex_unlock(&pcm_mutex); } /* * Called by ALSA when a PCM substream is closed. Private data can be * freed here. The cpu DAI, codec DAI, machine and platform are also * shutdown. */ static int soc_codec_close(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_device *socdev = rtd->socdev; struct snd_soc_card *card = socdev->card; struct snd_soc_dai_link *machine = rtd->dai; struct snd_soc_platform *platform = card->platform; struct snd_soc_dai *cpu_dai = machine->cpu_dai; struct snd_soc_dai *codec_dai = machine->codec_dai; struct snd_soc_codec *codec = card->codec; mutex_lock(&pcm_mutex); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { cpu_dai->playback.active--; codec_dai->playback.active--; } else { cpu_dai->capture.active--; codec_dai->capture.active--; } cpu_dai->active--; codec_dai->active--; codec->active--; /* Muting the DAC suppresses artifacts caused during digital * shutdown, for example from stopping clocks. 
*/ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) snd_soc_dai_digital_mute(codec_dai, 1); if (cpu_dai->ops->shutdown) cpu_dai->ops->shutdown(substream, cpu_dai); if (codec_dai->ops->shutdown) codec_dai->ops->shutdown(substream, codec_dai); if (machine->ops && machine->ops->shutdown) machine->ops->shutdown(substream); if (platform->pcm_ops->close) platform->pcm_ops->close(substream); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { /* start delayed pop wq here for playback streams */ codec_dai->pop_wait = 1; schedule_delayed_work(&card->delayed_work, msecs_to_jiffies(card->pmdown_time)); } else { /* capture streams can be powered down now */ snd_soc_dapm_stream_event(codec, codec_dai->capture.stream_name, SND_SOC_DAPM_STREAM_STOP); } mutex_unlock(&pcm_mutex); return 0; } /* * Called by ALSA when the PCM substream is prepared, can set format, sample * rate, etc. This function is non atomic and can be called multiple times, * it can refer to the runtime info. */ static int soc_pcm_prepare(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_device *socdev = rtd->socdev; struct snd_soc_card *card = socdev->card; struct snd_soc_dai_link *machine = rtd->dai; struct snd_soc_platform *platform = card->platform; struct snd_soc_dai *cpu_dai = machine->cpu_dai; struct snd_soc_dai *codec_dai = machine->codec_dai; struct snd_soc_codec *codec = card->codec; int ret = 0; mutex_lock(&pcm_mutex); if (machine->ops && machine->ops->prepare) { ret = machine->ops->prepare(substream); if (ret < 0) { printk(KERN_ERR "asoc: machine prepare error\n"); goto out; } } if (platform->pcm_ops->prepare) { ret = platform->pcm_ops->prepare(substream); if (ret < 0) { printk(KERN_ERR "asoc: platform prepare error\n"); goto out; } } if (codec_dai->ops->prepare) { ret = codec_dai->ops->prepare(substream, codec_dai); if (ret < 0) { printk(KERN_ERR "asoc: codec DAI prepare error\n"); goto out; } } if (cpu_dai->ops->prepare) { ret = 
		    cpu_dai->ops->prepare(substream, cpu_dai);
		if (ret < 0) {
			printk(KERN_ERR "asoc: cpu DAI prepare error\n");
			goto out;
		}
	}

	/* cancel any delayed stream shutdown that is pending */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    codec_dai->pop_wait) {
		codec_dai->pop_wait = 0;
		cancel_delayed_work(&card->delayed_work);
	}

	/* power up the DAPM path for the direction being prepared */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		snd_soc_dapm_stream_event(codec,
					  codec_dai->playback.stream_name,
					  SND_SOC_DAPM_STREAM_START);
	else
		snd_soc_dapm_stream_event(codec,
					  codec_dai->capture.stream_name,
					  SND_SOC_DAPM_STREAM_START);

	snd_soc_dai_digital_mute(codec_dai, 0);

out:
	mutex_unlock(&pcm_mutex);
	return ret;
}

/*
 * Called by ALSA when the hardware params are set by application. This
 * function can also be called multiple times and can allocate buffers
 * (using snd_pcm_lib_* ). It's non-atomic.
 */
static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_device *socdev = rtd->socdev;
	struct snd_soc_dai_link *machine = rtd->dai;
	struct snd_soc_card *card = socdev->card;
	struct snd_soc_platform *platform = card->platform;
	struct snd_soc_dai *cpu_dai = machine->cpu_dai;
	struct snd_soc_dai *codec_dai = machine->codec_dai;
	int ret = 0;

	mutex_lock(&pcm_mutex);

	if (machine->ops && machine->ops->hw_params) {
		ret = machine->ops->hw_params(substream, params);
		if (ret < 0) {
			printk(KERN_ERR "asoc: machine hw_params failed\n");
			goto out;
		}
	}

	if (codec_dai->ops->hw_params) {
		ret = codec_dai->ops->hw_params(substream, params, codec_dai);
		if (ret < 0) {
			printk(KERN_ERR "asoc: can't set codec %s hw params\n",
				codec_dai->name);
			goto codec_err;
		}
	}

	if (cpu_dai->ops->hw_params) {
		ret = cpu_dai->ops->hw_params(substream, params, cpu_dai);
		if (ret < 0) {
			printk(KERN_ERR "asoc: interface %s hw params failed\n",
				cpu_dai->name);
			goto interface_err;
		}
	}

	if (platform->pcm_ops->hw_params) {
		ret = platform->pcm_ops->hw_params(substream,
						   params);
		if (ret < 0) {
			printk(KERN_ERR "asoc: platform %s hw params failed\n",
				platform->name);
			goto platform_err;
		}
	}

	machine->rate = params_rate(params);

out:
	mutex_unlock(&pcm_mutex);
	return ret;

	/* error unwind: hw_free in reverse order of successful hw_params */
platform_err:
	if (cpu_dai->ops->hw_free)
		cpu_dai->ops->hw_free(substream, cpu_dai);

interface_err:
	if (codec_dai->ops->hw_free)
		codec_dai->ops->hw_free(substream, codec_dai);

codec_err:
	if (machine->ops && machine->ops->hw_free)
		machine->ops->hw_free(substream);

	mutex_unlock(&pcm_mutex);
	return ret;
}

/*
 * Free's resources allocated by hw_params, can be called multiple times
 */
static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_device *socdev = rtd->socdev;
	struct snd_soc_dai_link *machine = rtd->dai;
	struct snd_soc_card *card = socdev->card;
	struct snd_soc_platform *platform = card->platform;
	struct snd_soc_dai *cpu_dai = machine->cpu_dai;
	struct snd_soc_dai *codec_dai = machine->codec_dai;
	struct snd_soc_codec *codec = card->codec;

	mutex_lock(&pcm_mutex);

	/* apply codec digital mute */
	if (!codec->active)
		snd_soc_dai_digital_mute(codec_dai, 1);

	/* free any machine hw params */
	if (machine->ops && machine->ops->hw_free)
		machine->ops->hw_free(substream);

	/* free any DMA resources */
	if (platform->pcm_ops->hw_free)
		platform->pcm_ops->hw_free(substream);

	/* now free hw params for the DAI's */
	if (codec_dai->ops->hw_free)
		codec_dai->ops->hw_free(substream, codec_dai);

	if (cpu_dai->ops->hw_free)
		cpu_dai->ops->hw_free(substream, cpu_dai);

	mutex_unlock(&pcm_mutex);
	return 0;
}

/* fan a trigger command out to codec DAI, platform and cpu DAI */
static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_device *socdev = rtd->socdev;
	struct snd_soc_card *card = socdev->card;
	struct snd_soc_dai_link *machine = rtd->dai;
	struct snd_soc_platform *platform = card->platform;
	struct snd_soc_dai *cpu_dai = machine->cpu_dai;
	struct snd_soc_dai *codec_dai = machine->codec_dai;
	int ret;

	/* first failing component aborts the trigger */
	if (codec_dai->ops->trigger) {
		ret = codec_dai->ops->trigger(substream, cmd, codec_dai);
		if (ret < 0)
			return ret;
	}

	if (platform->pcm_ops->trigger) {
		ret = platform->pcm_ops->trigger(substream, cmd);
		if (ret < 0)
			return ret;
	}

	if (cpu_dai->ops->trigger) {
		ret = cpu_dai->ops->trigger(substream, cmd, cpu_dai);
		if (ret < 0)
			return ret;
	}
	return 0;
}

/*
 * soc level wrapper for pointer callback
 * If cpu_dai, codec_dai, platform driver has the delay callback, than
 * the runtime->delay will be updated accordingly.
 */
static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_device *socdev = rtd->socdev;
	struct snd_soc_card *card = socdev->card;
	struct snd_soc_platform *platform = card->platform;
	struct snd_soc_dai_link *machine = rtd->dai;
	struct snd_soc_dai *cpu_dai = machine->cpu_dai;
	struct snd_soc_dai *codec_dai = machine->codec_dai;
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t offset = 0;
	snd_pcm_sframes_t delay = 0;

	if (platform->pcm_ops->pointer)
		offset = platform->pcm_ops->pointer(substream);

	/* accumulate the delay reported by each component in the path */
	if (cpu_dai->ops->delay)
		delay += cpu_dai->ops->delay(substream, cpu_dai);

	if (codec_dai->ops->delay)
		delay += codec_dai->ops->delay(substream, codec_dai);

	if (platform->delay)
		delay += platform->delay(substream, codec_dai);

	runtime->delay = delay;

	return offset;
}

/* ASoC PCM operations */
static struct snd_pcm_ops soc_pcm_ops = {
	.open		= soc_pcm_open,
	.close		= soc_codec_close,
	.hw_params	= soc_pcm_hw_params,
	.hw_free	= soc_pcm_hw_free,
	.prepare	= soc_pcm_prepare,
	.trigger	= soc_pcm_trigger,
	.pointer	= soc_pcm_pointer,
};

#ifdef CONFIG_PM
/* powers down audio subsystem for suspend */
static int soc_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct snd_soc_device *socdev = platform_get_drvdata(pdev);
	struct snd_soc_card *card = socdev->card;
	struct snd_soc_platform *platform = card->platform;
	struct
	    snd_soc_codec_device *codec_dev = socdev->codec_dev;
	struct snd_soc_codec *codec = card->codec;
	int i;

	/* If the initialization of this soc device failed, there is no codec
	 * associated with it. Just bail out in this case.
	 */
	if (!codec)
		return 0;

	/* Due to the resume being scheduled into a workqueue we could
	 * suspend before that's finished - wait for it to complete.
	 */
	snd_power_lock(codec->card);
	snd_power_wait(codec->card, SNDRV_CTL_POWER_D0);
	snd_power_unlock(codec->card);

	/* we're going to block userspace touching us until resume completes */
	snd_power_change_state(codec->card, SNDRV_CTL_POWER_D3hot);

	/* mute any active DAC's */
	for (i = 0; i < card->num_links; i++) {
		struct snd_soc_dai *dai = card->dai_link[i].codec_dai;

		if (card->dai_link[i].ignore_suspend)
			continue;

		if (dai->ops->digital_mute && dai->playback.active)
			dai->ops->digital_mute(dai, 1);
	}

	/* suspend all pcms */
	for (i = 0; i < card->num_links; i++) {
		if (card->dai_link[i].ignore_suspend)
			continue;

		snd_pcm_suspend_all(card->dai_link[i].pcm);
	}

	if (card->suspend_pre)
		card->suspend_pre(pdev, PMSG_SUSPEND);

	for (i = 0; i < card->num_links; i++) {
		struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai;

		if (card->dai_link[i].ignore_suspend)
			continue;

		/* AC97 cpu DAIs are suspended later, after the codec */
		if (cpu_dai->suspend && !cpu_dai->ac97_control)
			cpu_dai->suspend(cpu_dai);
		if (platform->suspend)
			platform->suspend(&card->dai_link[i]);
	}

	/* close any waiting streams and save state */
	run_delayed_work(&card->delayed_work);
	codec->suspend_bias_level = codec->bias_level;

	/* NOTE(review): this loop indexes dai_link[] with a codec DAI
	 * index; assumes num_dai == num_links - confirm */
	for (i = 0; i < codec->num_dai; i++) {
		char *stream = codec->dai[i].playback.stream_name;

		if (card->dai_link[i].ignore_suspend)
			continue;

		if (stream != NULL)
			snd_soc_dapm_stream_event(codec, stream,
				SND_SOC_DAPM_STREAM_SUSPEND);

		stream = codec->dai[i].capture.stream_name;
		if (stream != NULL)
			snd_soc_dapm_stream_event(codec, stream,
				SND_SOC_DAPM_STREAM_SUSPEND);
	}

	/* If there are paths active then the CODEC will be held with
	 * bias _ON and should not be suspended.
	 */
	if (codec_dev->suspend) {
		switch (codec->bias_level) {
		case SND_SOC_BIAS_STANDBY:
		case SND_SOC_BIAS_OFF:
			codec_dev->suspend(pdev, PMSG_SUSPEND);
			break;
		default:
			dev_dbg(socdev->dev, "CODEC is on over suspend\n");
			break;
		}
	}

	/* AC97 cpu DAIs are suspended last, after the codec */
	for (i = 0; i < card->num_links; i++) {
		struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai;

		if (card->dai_link[i].ignore_suspend)
			continue;

		if (cpu_dai->suspend && cpu_dai->ac97_control)
			cpu_dai->suspend(cpu_dai);
	}

	if (card->suspend_post)
		card->suspend_post(pdev, PMSG_SUSPEND);

	return 0;
}

/* deferred resume work, so resume can complete before we finished
 * setting our codec back up, which can be very slow on I2C
 */
static void soc_resume_deferred(struct work_struct *work)
{
	struct snd_soc_card *card = container_of(work,
						 struct snd_soc_card,
						 deferred_resume_work);
	struct snd_soc_device *socdev = card->socdev;
	struct snd_soc_platform *platform = card->platform;
	struct snd_soc_codec_device *codec_dev = socdev->codec_dev;
	struct snd_soc_codec *codec = card->codec;
	struct platform_device *pdev = to_platform_device(socdev->dev);
	int i;

	/* our power state is still SNDRV_CTL_POWER_D3hot from suspend time,
	 * so userspace apps are blocked from touching us
	 */

	dev_dbg(socdev->dev, "starting resume work\n");

	/* Bring us up into D2 so that DAPM starts enabling things */
	snd_power_change_state(codec->card, SNDRV_CTL_POWER_D2);

	if (card->resume_pre)
		card->resume_pre(pdev);

	/* AC97 cpu DAIs resume first so the bus is usable for the codec */
	for (i = 0; i < card->num_links; i++) {
		struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai;

		if (card->dai_link[i].ignore_suspend)
			continue;

		if (cpu_dai->resume && cpu_dai->ac97_control)
			cpu_dai->resume(cpu_dai);
	}

	/* If the CODEC was idle over suspend then it will have been
	 * left with bias OFF or STANDBY and suspended so we must now
	 * resume.  Otherwise the suspend was suppressed.
	 */
	if (codec_dev->resume) {
		switch (codec->bias_level) {
		case SND_SOC_BIAS_STANDBY:
		case SND_SOC_BIAS_OFF:
			codec_dev->resume(pdev);
			break;
		default:
			dev_dbg(socdev->dev, "CODEC was on over suspend\n");
			break;
		}
	}

	/* NOTE(review): dai_link[] indexed with codec DAI index here;
	 * assumes num_dai == num_links - confirm */
	for (i = 0; i < codec->num_dai; i++) {
		char *stream = codec->dai[i].playback.stream_name;

		if (card->dai_link[i].ignore_suspend)
			continue;

		if (stream != NULL)
			snd_soc_dapm_stream_event(codec, stream,
				SND_SOC_DAPM_STREAM_RESUME);

		stream = codec->dai[i].capture.stream_name;
		if (stream != NULL)
			snd_soc_dapm_stream_event(codec, stream,
				SND_SOC_DAPM_STREAM_RESUME);
	}

	/* unmute any active DACs */
	for (i = 0; i < card->num_links; i++) {
		struct snd_soc_dai *dai = card->dai_link[i].codec_dai;

		if (card->dai_link[i].ignore_suspend)
			continue;

		if (dai->ops->digital_mute && dai->playback.active)
			dai->ops->digital_mute(dai, 0);
	}

	for (i = 0; i < card->num_links; i++) {
		struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai;

		if (card->dai_link[i].ignore_suspend)
			continue;

		if (cpu_dai->resume && !cpu_dai->ac97_control)
			cpu_dai->resume(cpu_dai);
		if (platform->resume)
			platform->resume(&card->dai_link[i]);
	}

	if (card->resume_post)
		card->resume_post(pdev);

	dev_dbg(socdev->dev, "resume work completed\n");

	/* userspace can access us now we are back as we were before */
	snd_power_change_state(codec->card, SNDRV_CTL_POWER_D0);
}

/* powers up audio subsystem after a suspend */
static int soc_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct snd_soc_device *socdev = platform_get_drvdata(pdev);
	struct snd_soc_card *card = socdev->card;
	struct snd_soc_dai *cpu_dai = card->dai_link[0].cpu_dai;

	/* If the initialization of this soc device failed, there is no codec
	 * associated with it. Just bail out in this case.
	 */
	if (!card->codec)
		return 0;

	/* AC97 devices might have other drivers hanging off them so
	 * need to resume immediately.
	 * Other drivers don't have that
	 * problem and may take a substantial amount of time to resume
	 * due to I/O costs and anti-pop so handle them out of line.
	 */
	if (cpu_dai->ac97_control) {
		dev_dbg(socdev->dev, "Resuming AC97 immediately\n");
		soc_resume_deferred(&card->deferred_resume_work);
	} else {
		dev_dbg(socdev->dev, "Scheduling resume work\n");
		if (!schedule_work(&card->deferred_resume_work))
			dev_err(socdev->dev, "resume work item may be lost\n");
	}

	return 0;
}
#else
#define soc_suspend	NULL
#define soc_resume	NULL
#endif

/* fallback DAI ops for codec DAIs that register without any */
static struct snd_soc_dai_ops null_dai_ops = {
};

/* bring a card up once every component it needs has registered */
static void snd_soc_instantiate_card(struct snd_soc_card *card)
{
	struct platform_device *pdev = container_of(card->dev,
						    struct platform_device,
						    dev);
	struct snd_soc_codec_device *codec_dev = card->socdev->codec_dev;
	struct snd_soc_codec *codec;
	struct snd_soc_platform *platform;
	struct snd_soc_dai *dai;
	int i, found, ret, ac97;

	if (card->instantiated)
		return;

	found = 0;
	list_for_each_entry(platform, &platform_list, list)
		if (card->platform == platform) {
			found = 1;
			break;
		}
	if (!found) {
		dev_dbg(card->dev, "Platform %s not registered\n",
			card->platform->name);
		return;
	}

	ac97 = 0;
	for (i = 0; i < card->num_links; i++) {
		found = 0;
		list_for_each_entry(dai, &dai_list, list)
			if (card->dai_link[i].cpu_dai == dai) {
				found = 1;
				break;
			}
		if (!found) {
			dev_dbg(card->dev, "DAI %s not registered\n",
				card->dai_link[i].cpu_dai->name);
			return;
		}

		if (card->dai_link[i].cpu_dai->ac97_control)
			ac97 = 1;
	}

	for (i = 0; i < card->num_links; i++) {
		if (!card->dai_link[i].codec_dai->ops)
			card->dai_link[i].codec_dai->ops = &null_dai_ops;
	}

	/* If we have AC97 in the system then don't wait for the
	 * codec.  This will need revisiting if we have to handle
	 * systems with mixed AC97 and non-AC97 parts.  Only check for
	 * DAIs currently; we can't do this per link since some AC97
	 * codecs have non-AC97 DAIs.
	 */
	if (!ac97)
		for (i = 0; i < card->num_links; i++) {
			found = 0;
			list_for_each_entry(dai, &dai_list, list)
				if (card->dai_link[i].codec_dai == dai) {
					found = 1;
					break;
				}
			if (!found) {
				dev_dbg(card->dev, "DAI %s not registered\n",
					card->dai_link[i].codec_dai->name);
				return;
			}
		}

	/* Note that we do not currently check for codec components */

	dev_dbg(card->dev, "All components present, instantiating\n");

	/* Found everything, bring it up */
	card->pmdown_time = pmdown_time;

	if (card->probe) {
		ret = card->probe(pdev);
		if (ret < 0)
			return;
	}

	for (i = 0; i < card->num_links; i++) {
		struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai;
		if (cpu_dai->probe) {
			ret = cpu_dai->probe(pdev, cpu_dai);
			if (ret < 0)
				goto cpu_dai_err;
		}
	}

	if (codec_dev->probe) {
		ret = codec_dev->probe(pdev);
		if (ret < 0)
			goto cpu_dai_err;
	}
	codec = card->codec;

	if (platform->probe) {
		ret = platform->probe(pdev);
		if (ret < 0)
			goto platform_err;
	}

	/* DAPM stream work */
	INIT_DELAYED_WORK(&card->delayed_work, close_delayed_work);
#ifdef CONFIG_PM
	/* deferred resume work */
	INIT_WORK(&card->deferred_resume_work, soc_resume_deferred);
#endif

	for (i = 0; i < card->num_links; i++) {
		if (card->dai_link[i].init) {
			ret = card->dai_link[i].init(codec);
			if (ret < 0) {
				/* a failed link init is logged but not fatal */
				printk(KERN_ERR "asoc: failed to init %s\n",
					card->dai_link[i].stream_name);
				continue;
			}
		}
		if (card->dai_link[i].codec_dai->ac97_control)
			ac97 = 1;
	}

	snprintf(codec->card->shortname, sizeof(codec->card->shortname),
		 "%s", card->name);
	snprintf(codec->card->longname, sizeof(codec->card->longname),
		 "%s (%s)", card->name, codec->name);

	/* Make sure all DAPM widgets are instantiated */
	snd_soc_dapm_new_widgets(codec);

	ret = snd_card_register(codec->card);
	if (ret < 0) {
		printk(KERN_ERR "asoc: failed to register soundcard for %s\n",
				codec->name);
		goto card_err;
	}

	mutex_lock(&codec->mutex);
#ifdef CONFIG_SND_SOC_AC97_BUS
	/* Only instantiate AC97 if not already done by the adaptor
	 * for the generic AC97 subsystem.
	 */
	if (ac97 && strcmp(codec->name, "AC97") != 0) {
		ret = soc_ac97_dev_register(codec);
		if (ret < 0) {
			printk(KERN_ERR "asoc: AC97 device register failed\n");
			snd_card_free(codec->card);
			mutex_unlock(&codec->mutex);
			goto card_err;
		}
	}
#endif

	/* sysfs/debugfs failures are logged but do not abort bring-up */
	ret = snd_soc_dapm_sys_add(card->socdev->dev);
	if (ret < 0)
		printk(KERN_WARNING "asoc: failed to add dapm sysfs entries\n");

	ret = device_create_file(card->socdev->dev, &dev_attr_pmdown_time);
	if (ret < 0)
		printk(KERN_WARNING "asoc: failed to add pmdown_time sysfs\n");

	ret = device_create_file(card->socdev->dev, &dev_attr_codec_reg);
	if (ret < 0)
		printk(KERN_WARNING "asoc: failed to add codec sysfs files\n");

	soc_init_codec_debugfs(codec);
	mutex_unlock(&codec->mutex);

	card->instantiated = 1;

	return;

	/* error unwind: remove in reverse order of successful probes */
card_err:
	if (platform->remove)
		platform->remove(pdev);

platform_err:
	if (codec_dev->remove)
		codec_dev->remove(pdev);

cpu_dai_err:
	for (i--; i >= 0; i--) {
		struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai;
		if (cpu_dai->remove)
			cpu_dai->remove(pdev, cpu_dai);
	}

	if (card->remove)
		card->remove(pdev);
}

/*
 * Attempt to initialise any uninitialised cards.  Must be called with
 * client_mutex.
 */
static void snd_soc_instantiate_cards(void)
{
	struct snd_soc_card *card;
	list_for_each_entry(card, &card_list, list)
		snd_soc_instantiate_card(card);
}

/* probes a new socdev */
static int soc_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct snd_soc_device *socdev = platform_get_drvdata(pdev);
	struct snd_soc_card *card = socdev->card;

	/* Bodge while we push things out of socdev */
	card->socdev = socdev;

	/* Bodge while we unpick instantiation */
	card->dev = &pdev->dev;
	ret = snd_soc_register_card(card);
	if (ret != 0) {
		dev_err(&pdev->dev, "Failed to register card\n");
		return ret;
	}

	return 0;
}

/* removes a socdev */
static int soc_remove(struct platform_device *pdev)
{
	int i;
	struct snd_soc_device *socdev = platform_get_drvdata(pdev);
	struct snd_soc_card *card = socdev->card;
	struct snd_soc_platform *platform = card->platform;
	struct snd_soc_codec_device *codec_dev = socdev->codec_dev;

	if (card->instantiated) {
		/* flush any pending pop-avoidance work before teardown */
		run_delayed_work(&card->delayed_work);

		if (platform->remove)
			platform->remove(pdev);

		if (codec_dev->remove)
			codec_dev->remove(pdev);

		for (i = 0; i < card->num_links; i++) {
			struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai;
			if (cpu_dai->remove)
				cpu_dai->remove(pdev, cpu_dai);
		}

		if (card->remove)
			card->remove(pdev);
	}

	snd_soc_unregister_card(card);

	return 0;
}

static int soc_poweroff(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct snd_soc_device *socdev = platform_get_drvdata(pdev);
	struct snd_soc_card *card = socdev->card;

	if (!card->instantiated)
		return 0;

	/* Flush out pmdown_time work - we actually do want to run it
	 * now, we're shutting down so no imminent restart.
	 */
	run_delayed_work(&card->delayed_work);

	snd_soc_dapm_shutdown(socdev);

	return 0;
}

static const struct dev_pm_ops soc_pm_ops = {
	.suspend	= soc_suspend,
	.resume		= soc_resume,
	.poweroff	= soc_poweroff,
};

/* ASoC platform driver */
static struct platform_driver soc_driver = {
	.driver		= {
		.name		= "soc-audio",
		.owner		= THIS_MODULE,
		.pm		= &soc_pm_ops,
	},
	.probe		= soc_probe,
	.remove		= soc_remove,
};

/* create a new pcm */
static int soc_new_pcm(struct snd_soc_device *socdev,
	struct snd_soc_dai_link *dai_link, int num)
{
	struct snd_soc_card *card = socdev->card;
	struct snd_soc_codec *codec = card->codec;
	struct snd_soc_platform *platform = card->platform;
	struct snd_soc_dai *codec_dai = dai_link->codec_dai;
	struct snd_soc_dai *cpu_dai = dai_link->cpu_dai;
	struct snd_soc_pcm_runtime *rtd;
	struct snd_pcm *pcm;
	char new_name[64];
	int ret = 0, playback = 0, capture = 0;

	rtd = kzalloc(sizeof(struct snd_soc_pcm_runtime), GFP_KERNEL);
	if (rtd == NULL)
		return -ENOMEM;

	rtd->dai = dai_link;
	rtd->socdev = socdev;
	codec_dai->codec = card->codec;

	/* check client and interface hw capabilities */
	snprintf(new_name, sizeof(new_name), "%s %s-%d",
		 dai_link->stream_name, codec_dai->name, num);

	if (codec_dai->playback.channels_min)
		playback = 1;
	if (codec_dai->capture.channels_min)
		capture = 1;

	ret = snd_pcm_new(codec->card, new_name,
		codec->pcm_devs++, playback, capture, &pcm);
	if (ret < 0) {
		printk(KERN_ERR "asoc: can't create pcm for codec %s\n",
			codec->name);
		kfree(rtd);
		return ret;
	}

	dai_link->pcm = pcm;
	pcm->private_data = rtd;
	/* NOTE(review): soc_pcm_ops is shared mutable state; the last
	 * call here wins if platform ops ever differ - confirm the
	 * single-platform assumption */
	soc_pcm_ops.mmap = platform->pcm_ops->mmap;
	soc_pcm_ops.ioctl = platform->pcm_ops->ioctl;
	soc_pcm_ops.copy = platform->pcm_ops->copy;
	soc_pcm_ops.silence = platform->pcm_ops->silence;
	soc_pcm_ops.ack = platform->pcm_ops->ack;
	soc_pcm_ops.page = platform->pcm_ops->page;

	if (playback)
		snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &soc_pcm_ops);

	if (capture)
		snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &soc_pcm_ops);

	ret = platform->pcm_new(codec->card,
				codec_dai, pcm);
	if (ret < 0) {
		printk(KERN_ERR "asoc: platform pcm constructor failed\n");
		kfree(rtd);
		return ret;
	}

	pcm->private_free = platform->pcm_free;
	printk(KERN_INFO "asoc: %s <-> %s mapping ok\n", codec_dai->name,
		cpu_dai->name);
	return ret;
}

/**
 * snd_soc_codec_volatile_register: Report if a register is volatile.
 *
 * @codec: CODEC to query.
 * @reg: Register to query.
 *
 * Boolean function indicating if a CODEC register is volatile.
 */
int snd_soc_codec_volatile_register(struct snd_soc_codec *codec, int reg)
{
	/* codecs without a volatile_register hook treat all registers
	 * as non-volatile */
	if (codec->volatile_register)
		return codec->volatile_register(reg);
	else
		return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_codec_volatile_register);

/**
 * snd_soc_new_ac97_codec - initialise AC97 device
 * @codec: audio codec
 * @ops: AC97 bus operations
 * @num: AC97 codec number
 *
 * Initialises AC97 codec resources for use by ad-hoc devices only.
 */
int snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
	struct snd_ac97_bus_ops *ops, int num)
{
	mutex_lock(&codec->mutex);

	codec->ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL);
	if (codec->ac97 == NULL) {
		mutex_unlock(&codec->mutex);
		return -ENOMEM;
	}

	codec->ac97->bus = kzalloc(sizeof(struct snd_ac97_bus), GFP_KERNEL);
	if (codec->ac97->bus == NULL) {
		/* unwind the first allocation on failure */
		kfree(codec->ac97);
		codec->ac97 = NULL;
		mutex_unlock(&codec->mutex);
		return -ENOMEM;
	}

	codec->ac97->bus->ops = ops;
	codec->ac97->num = num;
	codec->dev = &codec->ac97->dev;
	mutex_unlock(&codec->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_new_ac97_codec);

/**
 * snd_soc_free_ac97_codec - free AC97 codec device
 * @codec: audio codec
 *
 * Frees AC97 codec device resources.
 */
void snd_soc_free_ac97_codec(struct snd_soc_codec *codec)
{
	mutex_lock(&codec->mutex);
	kfree(codec->ac97->bus);
	kfree(codec->ac97);
	codec->ac97 = NULL;
	mutex_unlock(&codec->mutex);
}
EXPORT_SYMBOL_GPL(snd_soc_free_ac97_codec);

/**
 * snd_soc_update_bits - update codec register bits
 * @codec: audio codec
 * @reg: codec register
 * @mask: register mask
 * @value: new value
 *
 * Writes new register value.  Only the bits covered by @mask are
 * replaced; the register is only written if the value changes.
 *
 * Returns 1 for change else 0.
 */
int snd_soc_update_bits(struct snd_soc_codec *codec, unsigned short reg,
				unsigned int mask, unsigned int value)
{
	int change;
	unsigned int old, new;

	old = snd_soc_read(codec, reg);
	new = (old & ~mask) | value;
	change = old != new;
	if (change)
		snd_soc_write(codec, reg, new);

	return change;
}
EXPORT_SYMBOL_GPL(snd_soc_update_bits);

/**
 * snd_soc_update_bits_locked - update codec register bits
 * @codec: audio codec
 * @reg: codec register
 * @mask: register mask
 * @value: new value
 *
 * Writes new register value, and takes the codec mutex.
 *
 * Returns 1 for change else 0.
 */
int snd_soc_update_bits_locked(struct snd_soc_codec *codec,
			       unsigned short reg, unsigned int mask,
			       unsigned int value)
{
	int change;

	mutex_lock(&codec->mutex);
	change = snd_soc_update_bits(codec, reg, mask, value);
	mutex_unlock(&codec->mutex);

	return change;
}
EXPORT_SYMBOL_GPL(snd_soc_update_bits_locked);

/**
 * snd_soc_test_bits - test register for change
 * @codec: audio codec
 * @reg: codec register
 * @mask: register mask
 * @value: new value
 *
 * Tests a register with a new value and checks if the new value is
 * different from the old value.
 *
 * Returns 1 for change else 0.
 */
int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned short reg,
				unsigned int mask, unsigned int value)
{
	int change;
	unsigned int old, new;

	/* read-only check: no register write is performed */
	old = snd_soc_read(codec, reg);
	new = (old & ~mask) | value;
	change = old != new;

	return change;
}
EXPORT_SYMBOL_GPL(snd_soc_test_bits);

/**
 * snd_soc_new_pcms - create new sound card and pcms
 * @socdev: the SoC audio device
 * @idx: ALSA card index
 * @xid: card identification
 *
 * Create a new sound card based upon the codec and interface pcms.
 *
 * Returns 0 for success, else error.
 */
int snd_soc_new_pcms(struct snd_soc_device *socdev, int idx, const char *xid)
{
	struct snd_soc_card *card = socdev->card;
	struct snd_soc_codec *codec = card->codec;
	int ret, i;

	mutex_lock(&codec->mutex);

	/* register a sound card */
	ret = snd_card_create(idx, xid, codec->owner, 0, &codec->card);
	if (ret < 0) {
		printk(KERN_ERR "asoc: can't create sound card for codec %s\n",
			codec->name);
		mutex_unlock(&codec->mutex);
		return ret;
	}

	codec->socdev = socdev;
	codec->card->dev = socdev->dev;
	codec->card->private_data = codec;
	/* NOTE(review): strncpy may leave driver[] without a NUL
	 * terminator if codec->name fills the buffer - confirm */
	strncpy(codec->card->driver, codec->name, sizeof(codec->card->driver));

	/* create the pcms */
	for (i = 0; i < card->num_links; i++) {
		ret = soc_new_pcm(socdev, &card->dai_link[i], i);
		if (ret < 0) {
			printk(KERN_ERR "asoc: can't create pcm %s\n",
				card->dai_link[i].stream_name);
			mutex_unlock(&codec->mutex);
			return ret;
		}
		/* Check for codec->ac97 to handle the ac97.c fun */
		if (card->dai_link[i].codec_dai->ac97_control && codec->ac97) {
			snd_ac97_dev_add_pdata(codec->ac97,
				card->dai_link[i].cpu_dai->ac97_pdata);
		}
	}

	mutex_unlock(&codec->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_new_pcms);

/**
 * snd_soc_free_pcms - free sound card and pcms
 * @socdev: the SoC audio device
 *
 * Frees sound card and pcms associated with the socdev.
 * Also unregister the codec if it is an AC97 device.
 */
void snd_soc_free_pcms(struct snd_soc_device *socdev)
{
	struct snd_soc_codec *codec = socdev->card->codec;
#ifdef CONFIG_SND_SOC_AC97_BUS
	struct snd_soc_dai *codec_dai;
	int i;
#endif

	mutex_lock(&codec->mutex);
	soc_cleanup_codec_debugfs(codec);
#ifdef CONFIG_SND_SOC_AC97_BUS
	/* unregister at most once, on the first AC97-controlled DAI */
	for (i = 0; i < codec->num_dai; i++) {
		codec_dai = &codec->dai[i];
		if (codec_dai->ac97_control && codec->ac97 &&
		    strcmp(codec->name, "AC97") != 0) {
			soc_ac97_dev_unregister(codec);
			goto free_card;
		}
	}
free_card:
#endif

	if (codec->card)
		snd_card_free(codec->card);
	device_remove_file(socdev->dev, &dev_attr_codec_reg);
	mutex_unlock(&codec->mutex);
}
EXPORT_SYMBOL_GPL(snd_soc_free_pcms);

/**
 * snd_soc_set_runtime_hwparams - set the runtime hardware parameters
 * @substream: the pcm substream
 * @hw: the hardware parameters
 *
 * Sets the substream runtime hardware parameters.
 */
int snd_soc_set_runtime_hwparams(struct snd_pcm_substream *substream,
	const struct snd_pcm_hardware *hw)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	runtime->hw.info = hw->info;
	runtime->hw.formats = hw->formats;
	runtime->hw.period_bytes_min = hw->period_bytes_min;
	runtime->hw.period_bytes_max = hw->period_bytes_max;
	runtime->hw.periods_min = hw->periods_min;
	runtime->hw.periods_max = hw->periods_max;
	runtime->hw.buffer_bytes_max = hw->buffer_bytes_max;
	runtime->hw.fifo_size = hw->fifo_size;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_set_runtime_hwparams);

/**
 * snd_soc_cnew - create new control
 * @_template: control template
 * @data: control private data
 * @long_name: control long name
 *
 * Create a new mixer control from a template control.
 *
 * Returns 0 for success, else error.
 */
struct snd_kcontrol *snd_soc_cnew(const struct snd_kcontrol_new *_template,
	void *data, char *long_name)
{
	struct snd_kcontrol_new template;

	/* work on a copy so the caller's template is not modified */
	memcpy(&template, _template, sizeof(template));
	if (long_name)
		template.name = long_name;
	template.index = 0;

	return snd_ctl_new1(&template, data);
}
EXPORT_SYMBOL_GPL(snd_soc_cnew);

/**
 * snd_soc_add_controls - add an array of controls to a codec.
 * Convenience function to add a list of controls. Many codecs were
 * duplicating this code.
 *
 * @codec: codec to add controls to
 * @controls: array of controls to add
 * @num_controls: number of elements in the array
 *
 * Return 0 for success, else error.
 */
int snd_soc_add_controls(struct snd_soc_codec *codec,
	const struct snd_kcontrol_new *controls, int num_controls)
{
	struct snd_card *card = codec->card;
	int err, i;

	for (i = 0; i < num_controls; i++) {
		const struct snd_kcontrol_new *control = &controls[i];
		err = snd_ctl_add(card, snd_soc_cnew(control, codec, NULL));
		if (err < 0) {
			dev_err(codec->dev, "%s: Failed to add %s\n",
				codec->name, control->name);
			return err;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_add_controls);

/**
 * snd_soc_info_enum_double - enumerated double mixer info callback
 * @kcontrol: mixer control
 * @uinfo: control element information
 *
 * Callback to provide information about a double enumerated
 * mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_info_enum_double(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_info *uinfo)
{
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	/* one channel when left and right share a shift, else two */
	uinfo->count = e->shift_l == e->shift_r ?
		       1 : 2;
	uinfo->value.enumerated.items = e->max;

	/* clamp the queried item into the valid range */
	if (uinfo->value.enumerated.item > e->max - 1)
		uinfo->value.enumerated.item = e->max - 1;
	strcpy(uinfo->value.enumerated.name,
		e->texts[uinfo->value.enumerated.item]);
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_enum_double);

/**
 * snd_soc_get_enum_double - enumerated double mixer get callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to get the value of a double enumerated mixer.
 *
 * Returns 0 for success.
 */
int snd_soc_get_enum_double(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	unsigned int val, bitmask;

	/* smallest power of two wide enough to hold e->max values */
	for (bitmask = 1; bitmask < e->max; bitmask <<= 1)
		;
	val = snd_soc_read(codec, e->reg);
	ucontrol->value.enumerated.item[0]
		= (val >> e->shift_l) & (bitmask - 1);
	if (e->shift_l != e->shift_r)
		ucontrol->value.enumerated.item[1] =
			(val >> e->shift_r) & (bitmask - 1);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_enum_double);

/**
 * snd_soc_put_enum_double - enumerated double mixer put callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to set the value of a double enumerated mixer.
 *
 * Returns 0 for success.
 */
int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	unsigned int val;
	unsigned int mask, bitmask;

	/* smallest power of two wide enough to hold e->max values */
	for (bitmask = 1; bitmask < e->max; bitmask <<= 1)
		;
	if (ucontrol->value.enumerated.item[0] > e->max - 1)
		return -EINVAL;
	val = ucontrol->value.enumerated.item[0] << e->shift_l;
	mask = (bitmask - 1) << e->shift_l;
	if (e->shift_l != e->shift_r) {
		if (ucontrol->value.enumerated.item[1] > e->max - 1)
			return -EINVAL;
		val |= ucontrol->value.enumerated.item[1] << e->shift_r;
		mask |= (bitmask - 1) << e->shift_r;
	}

	return snd_soc_update_bits_locked(codec, e->reg, mask, val);
}
EXPORT_SYMBOL_GPL(snd_soc_put_enum_double);

/**
 * snd_soc_get_value_enum_double - semi enumerated double mixer get callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to get the value of a double semi enumerated mixer.
 *
 * Semi enumerated mixer: the enumerated items are referred as values. Can be
 * used for handling bitfield coded enumeration for example.
 *
 * Returns 0 for success.
 */
int snd_soc_get_value_enum_double(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	unsigned int reg_val, val, mux;

	reg_val = snd_soc_read(codec, e->reg);
	val = (reg_val >> e->shift_l) & e->mask;
	/* map the raw register value back to its enum index */
	for (mux = 0; mux < e->max; mux++) {
		if (val == e->values[mux])
			break;
	}
	ucontrol->value.enumerated.item[0] = mux;
	if (e->shift_l != e->shift_r) {
		val = (reg_val >> e->shift_r) & e->mask;
		for (mux = 0; mux < e->max; mux++) {
			if (val == e->values[mux])
				break;
		}
		ucontrol->value.enumerated.item[1] = mux;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_value_enum_double);

/**
 * snd_soc_put_value_enum_double - semi enumerated double mixer put callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to set the value of a double semi enumerated mixer.
 *
 * Semi enumerated mixer: the enumerated items are referred as values. Can be
 * used for handling bitfield coded enumeration for example.
 *
 * Returns 0 for success.
*/ int snd_soc_put_value_enum_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; unsigned int val; unsigned int mask; if (ucontrol->value.enumerated.item[0] > e->max - 1) return -EINVAL; val = e->values[ucontrol->value.enumerated.item[0]] << e->shift_l; mask = e->mask << e->shift_l; if (e->shift_l != e->shift_r) { if (ucontrol->value.enumerated.item[1] > e->max - 1) return -EINVAL; val |= e->values[ucontrol->value.enumerated.item[1]] << e->shift_r; mask |= e->mask << e->shift_r; } return snd_soc_update_bits_locked(codec, e->reg, mask, val); } EXPORT_SYMBOL_GPL(snd_soc_put_value_enum_double); /** * snd_soc_info_enum_ext - external enumerated single mixer info callback * @kcontrol: mixer control * @uinfo: control element information * * Callback to provide information about an external enumerated * single mixer. * * Returns 0 for success. */ int snd_soc_info_enum_ext(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = e->max; if (uinfo->value.enumerated.item > e->max - 1) uinfo->value.enumerated.item = e->max - 1; strcpy(uinfo->value.enumerated.name, e->texts[uinfo->value.enumerated.item]); return 0; } EXPORT_SYMBOL_GPL(snd_soc_info_enum_ext); /** * snd_soc_info_volsw_ext - external single mixer info callback * @kcontrol: mixer control * @uinfo: control element information * * Callback to provide information about a single external mixer control. * * Returns 0 for success. 
*/ int snd_soc_info_volsw_ext(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int max = kcontrol->private_value; if (max == 1 && !strstr(kcontrol->id.name, " Volume")) uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; else uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = max; return 0; } EXPORT_SYMBOL_GPL(snd_soc_info_volsw_ext); /** * snd_soc_info_volsw - single mixer info callback * @kcontrol: mixer control * @uinfo: control element information * * Callback to provide information about a single mixer control. * * Returns 0 for success. */ int snd_soc_info_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; int platform_max; unsigned int shift = mc->shift; unsigned int rshift = mc->rshift; if (!mc->platform_max) mc->platform_max = mc->max; platform_max = mc->platform_max; if (platform_max == 1 && !strstr(kcontrol->id.name, " Volume")) uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; else uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = shift == rshift ? 1 : 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = platform_max; return 0; } EXPORT_SYMBOL_GPL(snd_soc_info_volsw); /** * snd_soc_get_volsw - single mixer get callback * @kcontrol: mixer control * @ucontrol: control element information * * Callback to get the value of a single mixer control. * * Returns 0 for success. 
*/ int snd_soc_get_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); unsigned int reg = mc->reg; unsigned int shift = mc->shift; unsigned int rshift = mc->rshift; int max = mc->max; unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; ucontrol->value.integer.value[0] = (snd_soc_read(codec, reg) >> shift) & mask; if (shift != rshift) ucontrol->value.integer.value[1] = (snd_soc_read(codec, reg) >> rshift) & mask; if (invert) { ucontrol->value.integer.value[0] = max - ucontrol->value.integer.value[0]; if (shift != rshift) ucontrol->value.integer.value[1] = max - ucontrol->value.integer.value[1]; } return 0; } EXPORT_SYMBOL_GPL(snd_soc_get_volsw); /** * snd_soc_put_volsw - single mixer put callback * @kcontrol: mixer control * @ucontrol: control element information * * Callback to set the value of a single mixer control. * * Returns 0 for success. 
*/ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); unsigned int reg = mc->reg; unsigned int shift = mc->shift; unsigned int rshift = mc->rshift; int max = mc->max; unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; unsigned int val, val2, val_mask; val = (ucontrol->value.integer.value[0] & mask); if (invert) val = max - val; val_mask = mask << shift; val = val << shift; if (shift != rshift) { val2 = (ucontrol->value.integer.value[1] & mask); if (invert) val2 = max - val2; val_mask |= mask << rshift; val |= val2 << rshift; } return snd_soc_update_bits_locked(codec, reg, val_mask, val); } EXPORT_SYMBOL_GPL(snd_soc_put_volsw); /** * snd_soc_info_volsw_2r - double mixer info callback * @kcontrol: mixer control * @uinfo: control element information * * Callback to provide information about a double mixer control that * spans 2 codec registers. * * Returns 0 for success. */ int snd_soc_info_volsw_2r(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; int platform_max; if (!mc->platform_max) mc->platform_max = mc->max; platform_max = mc->platform_max; if (platform_max == 1 && !strstr(kcontrol->id.name, " Volume")) uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; else uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = platform_max; return 0; } EXPORT_SYMBOL_GPL(snd_soc_info_volsw_2r); /** * snd_soc_get_volsw_2r - double mixer get callback * @kcontrol: mixer control * @ucontrol: control element information * * Callback to get the value of a double mixer control that spans 2 registers. * * Returns 0 for success. 
*/ int snd_soc_get_volsw_2r(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); unsigned int reg = mc->reg; unsigned int reg2 = mc->rreg; unsigned int shift = mc->shift; int max = mc->max; unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; ucontrol->value.integer.value[0] = (snd_soc_read(codec, reg) >> shift) & mask; ucontrol->value.integer.value[1] = (snd_soc_read(codec, reg2) >> shift) & mask; if (invert) { ucontrol->value.integer.value[0] = max - ucontrol->value.integer.value[0]; ucontrol->value.integer.value[1] = max - ucontrol->value.integer.value[1]; } return 0; } EXPORT_SYMBOL_GPL(snd_soc_get_volsw_2r); /** * snd_soc_put_volsw_2r - double mixer set callback * @kcontrol: mixer control * @ucontrol: control element information * * Callback to set the value of a double mixer control that spans 2 registers. * * Returns 0 for success. 
*/ int snd_soc_put_volsw_2r(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); unsigned int reg = mc->reg; unsigned int reg2 = mc->rreg; unsigned int shift = mc->shift; int max = mc->max; unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; int err; unsigned int val, val2, val_mask; val_mask = mask << shift; val = (ucontrol->value.integer.value[0] & mask); val2 = (ucontrol->value.integer.value[1] & mask); if (invert) { val = max - val; val2 = max - val2; } val = val << shift; val2 = val2 << shift; err = snd_soc_update_bits_locked(codec, reg, val_mask, val); if (err < 0) return err; err = snd_soc_update_bits_locked(codec, reg2, val_mask, val2); return err; } EXPORT_SYMBOL_GPL(snd_soc_put_volsw_2r); /** * snd_soc_info_volsw_s8 - signed mixer info callback * @kcontrol: mixer control * @uinfo: control element information * * Callback to provide information about a signed mixer control. * * Returns 0 for success. */ int snd_soc_info_volsw_s8(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; int platform_max; int min = mc->min; if (!mc->platform_max) mc->platform_max = mc->max; platform_max = mc->platform_max; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = platform_max - min; return 0; } EXPORT_SYMBOL_GPL(snd_soc_info_volsw_s8); /** * snd_soc_get_volsw_s8 - signed mixer get callback * @kcontrol: mixer control * @ucontrol: control element information * * Callback to get the value of a signed mixer control. * * Returns 0 for success. 
*/ int snd_soc_get_volsw_s8(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); unsigned int reg = mc->reg; int min = mc->min; int val = snd_soc_read(codec, reg); ucontrol->value.integer.value[0] = ((signed char)(val & 0xff))-min; ucontrol->value.integer.value[1] = ((signed char)((val >> 8) & 0xff))-min; return 0; } EXPORT_SYMBOL_GPL(snd_soc_get_volsw_s8); /** * snd_soc_put_volsw_sgn - signed mixer put callback * @kcontrol: mixer control * @ucontrol: control element information * * Callback to set the value of a signed mixer control. * * Returns 0 for success. */ int snd_soc_put_volsw_s8(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); unsigned int reg = mc->reg; int min = mc->min; unsigned int val; val = (ucontrol->value.integer.value[0]+min) & 0xff; val |= ((ucontrol->value.integer.value[1]+min) & 0xff) << 8; return snd_soc_update_bits_locked(codec, reg, 0xffff, val); } EXPORT_SYMBOL_GPL(snd_soc_put_volsw_s8); /** * snd_soc_limit_volume - Set new limit to an existing volume control. * * @codec: where to look for the control * @name: Name of the control * @max: new maximum limit * * Return 0 for success, else error. 
*/ int snd_soc_limit_volume(struct snd_soc_codec *codec, const char *name, int max) { struct snd_card *card = codec->card; struct snd_kcontrol *kctl; struct soc_mixer_control *mc; int found = 0; int ret = -EINVAL; /* Sanity check for name and max */ if (unlikely(!name || max <= 0)) return -EINVAL; list_for_each_entry(kctl, &card->controls, list) { if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name))) { found = 1; break; } } if (found) { mc = (struct soc_mixer_control *)kctl->private_value; if (max <= mc->max) { mc->platform_max = max; ret = 0; } } return ret; } EXPORT_SYMBOL_GPL(snd_soc_limit_volume); /** * snd_soc_info_volsw_2r_sx - double with tlv and variable data size * mixer info callback * @kcontrol: mixer control * @uinfo: control element information * * Returns 0 for success. */ int snd_soc_info_volsw_2r_sx(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; int max = mc->max; int min = mc->min; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = max-min; return 0; } EXPORT_SYMBOL_GPL(snd_soc_info_volsw_2r_sx); /** * snd_soc_get_volsw_2r_sx - double with tlv and variable data size * mixer get callback * @kcontrol: mixer control * @uinfo: control element information * * Returns 0 for success. 
*/ int snd_soc_get_volsw_2r_sx(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); unsigned int mask = (1<<mc->shift)-1; int min = mc->min; int val = snd_soc_read(codec, mc->reg) & mask; int valr = snd_soc_read(codec, mc->rreg) & mask; ucontrol->value.integer.value[0] = ((val & 0xff)-min) & mask; ucontrol->value.integer.value[1] = ((valr & 0xff)-min) & mask; return 0; } EXPORT_SYMBOL_GPL(snd_soc_get_volsw_2r_sx); /** * snd_soc_put_volsw_2r_sx - double with tlv and variable data size * mixer put callback * @kcontrol: mixer control * @uinfo: control element information * * Returns 0 for success. */ int snd_soc_put_volsw_2r_sx(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); unsigned int mask = (1<<mc->shift)-1; int min = mc->min; int ret; unsigned int val, valr, oval, ovalr; val = ((ucontrol->value.integer.value[0]+min) & 0xff); val &= mask; valr = ((ucontrol->value.integer.value[1]+min) & 0xff); valr &= mask; oval = snd_soc_read(codec, mc->reg) & mask; ovalr = snd_soc_read(codec, mc->rreg) & mask; ret = 0; if (oval != val) { ret = snd_soc_write(codec, mc->reg, val); if (ret < 0) return ret; } if (ovalr != valr) { ret = snd_soc_write(codec, mc->rreg, valr); if (ret < 0) return ret; } return 0; } EXPORT_SYMBOL_GPL(snd_soc_put_volsw_2r_sx); /** * snd_soc_dai_set_sysclk - configure DAI system or master clock. * @dai: DAI * @clk_id: DAI specific clock ID * @freq: new clock frequency in Hz * @dir: new clock direction - input/output. * * Configures the DAI master (MCLK) or system (SYSCLK) clocking. 
*/ int snd_soc_dai_set_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { if (dai->ops && dai->ops->set_sysclk) return dai->ops->set_sysclk(dai, clk_id, freq, dir); else return -EINVAL; } EXPORT_SYMBOL_GPL(snd_soc_dai_set_sysclk); /** * snd_soc_dai_set_clkdiv - configure DAI clock dividers. * @dai: DAI * @div_id: DAI specific clock divider ID * @div: new clock divisor. * * Configures the clock dividers. This is used to derive the best DAI bit and * frame clocks from the system or master clock. It's best to set the DAI bit * and frame clocks as low as possible to save system power. */ int snd_soc_dai_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div) { if (dai->ops && dai->ops->set_clkdiv) return dai->ops->set_clkdiv(dai, div_id, div); else return -EINVAL; } EXPORT_SYMBOL_GPL(snd_soc_dai_set_clkdiv); /** * snd_soc_dai_set_pll - configure DAI PLL. * @dai: DAI * @pll_id: DAI specific PLL ID * @source: DAI specific source for the PLL * @freq_in: PLL input clock frequency in Hz * @freq_out: requested PLL output clock frequency in Hz * * Configures and enables PLL to generate output clock based on input clock. */ int snd_soc_dai_set_pll(struct snd_soc_dai *dai, int pll_id, int source, unsigned int freq_in, unsigned int freq_out) { if (dai->ops && dai->ops->set_pll) return dai->ops->set_pll(dai, pll_id, source, freq_in, freq_out); else return -EINVAL; } EXPORT_SYMBOL_GPL(snd_soc_dai_set_pll); /** * snd_soc_dai_set_fmt - configure DAI hardware audio format. * @dai: DAI * @fmt: SND_SOC_DAIFMT_ format value. * * Configures the DAI hardware format and clocking. */ int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { if (dai->ops && dai->ops->set_fmt) return dai->ops->set_fmt(dai, fmt); else return -EINVAL; } EXPORT_SYMBOL_GPL(snd_soc_dai_set_fmt); /** * snd_soc_dai_set_tdm_slot - configure DAI TDM. * @dai: DAI * @tx_mask: bitmask representing active TX slots. * @rx_mask: bitmask representing active RX slots. 
* @slots: Number of slots in use. * @slot_width: Width in bits for each slot. * * Configures a DAI for TDM operation. Both mask and slots are codec and DAI * specific. */ int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) { if (dai->ops && dai->ops->set_tdm_slot) return dai->ops->set_tdm_slot(dai, tx_mask, rx_mask, slots, slot_width); else return -EINVAL; } EXPORT_SYMBOL_GPL(snd_soc_dai_set_tdm_slot); /** * snd_soc_dai_set_channel_map - configure DAI audio channel map * @dai: DAI * @tx_num: how many TX channels * @tx_slot: pointer to an array which imply the TX slot number channel * 0~num-1 uses * @rx_num: how many RX channels * @rx_slot: pointer to an array which imply the RX slot number channel * 0~num-1 uses * * configure the relationship between channel number and TDM slot number. */ int snd_soc_dai_set_channel_map(struct snd_soc_dai *dai, unsigned int tx_num, unsigned int *tx_slot, unsigned int rx_num, unsigned int *rx_slot) { if (dai->ops && dai->ops->set_channel_map) return dai->ops->set_channel_map(dai, tx_num, tx_slot, rx_num, rx_slot); else return -EINVAL; } EXPORT_SYMBOL_GPL(snd_soc_dai_set_channel_map); /** * snd_soc_dai_set_tristate - configure DAI system or master clock. * @dai: DAI * @tristate: tristate enable * * Tristates the DAI so that others can use it. */ int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate) { if (dai->ops && dai->ops->set_tristate) return dai->ops->set_tristate(dai, tristate); else return -EINVAL; } EXPORT_SYMBOL_GPL(snd_soc_dai_set_tristate); /** * snd_soc_dai_digital_mute - configure DAI system or master clock. * @dai: DAI * @mute: mute enable * * Mutes the DAI DAC. 
*/ int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute) { if (dai->ops && dai->ops->digital_mute) return dai->ops->digital_mute(dai, mute); else return -EINVAL; } EXPORT_SYMBOL_GPL(snd_soc_dai_digital_mute); /** * snd_soc_register_card - Register a card with the ASoC core * * @card: Card to register * * Note that currently this is an internal only function: it will be * exposed to machine drivers after further backporting of ASoC v2 * registration APIs. */ static int snd_soc_register_card(struct snd_soc_card *card) { if (!card->name || !card->dev) return -EINVAL; INIT_LIST_HEAD(&card->list); card->instantiated = 0; mutex_lock(&client_mutex); list_add(&card->list, &card_list); snd_soc_instantiate_cards(); mutex_unlock(&client_mutex); dev_dbg(card->dev, "Registered card '%s'\n", card->name); return 0; } /** * snd_soc_unregister_card - Unregister a card with the ASoC core * * @card: Card to unregister * * Note that currently this is an internal only function: it will be * exposed to machine drivers after further backporting of ASoC v2 * registration APIs. 
*/ static int snd_soc_unregister_card(struct snd_soc_card *card) { mutex_lock(&client_mutex); list_del(&card->list); mutex_unlock(&client_mutex); dev_dbg(card->dev, "Unregistered card '%s'\n", card->name); return 0; } /** * snd_soc_register_dai - Register a DAI with the ASoC core * * @dai: DAI to register */ int snd_soc_register_dai(struct snd_soc_dai *dai) { if (!dai->name) return -EINVAL; /* The device should become mandatory over time */ if (!dai->dev) printk(KERN_WARNING "No device for DAI %s\n", dai->name); if (!dai->ops) dai->ops = &null_dai_ops; INIT_LIST_HEAD(&dai->list); mutex_lock(&client_mutex); list_add(&dai->list, &dai_list); snd_soc_instantiate_cards(); mutex_unlock(&client_mutex); pr_debug("Registered DAI '%s'\n", dai->name); return 0; } EXPORT_SYMBOL_GPL(snd_soc_register_dai); /** * snd_soc_unregister_dai - Unregister a DAI from the ASoC core * * @dai: DAI to unregister */ void snd_soc_unregister_dai(struct snd_soc_dai *dai) { mutex_lock(&client_mutex); list_del(&dai->list); mutex_unlock(&client_mutex); pr_debug("Unregistered DAI '%s'\n", dai->name); } EXPORT_SYMBOL_GPL(snd_soc_unregister_dai); /** * snd_soc_register_dais - Register multiple DAIs with the ASoC core * * @dai: Array of DAIs to register * @count: Number of DAIs */ int snd_soc_register_dais(struct snd_soc_dai *dai, size_t count) { int i, ret; for (i = 0; i < count; i++) { ret = snd_soc_register_dai(&dai[i]); if (ret != 0) goto err; } return 0; err: for (i--; i >= 0; i--) snd_soc_unregister_dai(&dai[i]); return ret; } EXPORT_SYMBOL_GPL(snd_soc_register_dais); /** * snd_soc_unregister_dais - Unregister multiple DAIs from the ASoC core * * @dai: Array of DAIs to unregister * @count: Number of DAIs */ void snd_soc_unregister_dais(struct snd_soc_dai *dai, size_t count) { int i; for (i = 0; i < count; i++) snd_soc_unregister_dai(&dai[i]); } EXPORT_SYMBOL_GPL(snd_soc_unregister_dais); /** * snd_soc_register_platform - Register a platform with the ASoC core * * @platform: platform to register 
*/ int snd_soc_register_platform(struct snd_soc_platform *platform) { if (!platform->name) return -EINVAL; INIT_LIST_HEAD(&platform->list); mutex_lock(&client_mutex); list_add(&platform->list, &platform_list); snd_soc_instantiate_cards(); mutex_unlock(&client_mutex); pr_debug("Registered platform '%s'\n", platform->name); return 0; } EXPORT_SYMBOL_GPL(snd_soc_register_platform); /** * snd_soc_unregister_platform - Unregister a platform from the ASoC core * * @platform: platform to unregister */ void snd_soc_unregister_platform(struct snd_soc_platform *platform) { mutex_lock(&client_mutex); list_del(&platform->list); mutex_unlock(&client_mutex); pr_debug("Unregistered platform '%s'\n", platform->name); } EXPORT_SYMBOL_GPL(snd_soc_unregister_platform); static u64 codec_format_map[] = { SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE, SNDRV_PCM_FMTBIT_U16_LE | SNDRV_PCM_FMTBIT_U16_BE, SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_BE, SNDRV_PCM_FMTBIT_U24_LE | SNDRV_PCM_FMTBIT_U24_BE, SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE, SNDRV_PCM_FMTBIT_U32_LE | SNDRV_PCM_FMTBIT_U32_BE, SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_U24_3BE, SNDRV_PCM_FMTBIT_U24_3LE | SNDRV_PCM_FMTBIT_U24_3BE, SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S20_3BE, SNDRV_PCM_FMTBIT_U20_3LE | SNDRV_PCM_FMTBIT_U20_3BE, SNDRV_PCM_FMTBIT_S18_3LE | SNDRV_PCM_FMTBIT_S18_3BE, SNDRV_PCM_FMTBIT_U18_3LE | SNDRV_PCM_FMTBIT_U18_3BE, SNDRV_PCM_FMTBIT_FLOAT_LE | SNDRV_PCM_FMTBIT_FLOAT_BE, SNDRV_PCM_FMTBIT_FLOAT64_LE | SNDRV_PCM_FMTBIT_FLOAT64_BE, SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE | SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE, }; /* Fix up the DAI formats for endianness: codecs don't actually see * the endianness of the data but we're using the CPU format * definitions which do need to include endianness so we ensure that * codec DAIs always have both big and little endian variants set. 
*/ static void fixup_codec_formats(struct snd_soc_pcm_stream *stream) { int i; for (i = 0; i < ARRAY_SIZE(codec_format_map); i++) if (stream->formats & codec_format_map[i]) stream->formats |= codec_format_map[i]; } /** * snd_soc_register_codec - Register a codec with the ASoC core * * @codec: codec to register */ int snd_soc_register_codec(struct snd_soc_codec *codec) { int i; if (!codec->name) return -EINVAL; /* The device should become mandatory over time */ if (!codec->dev) printk(KERN_WARNING "No device for codec %s\n", codec->name); INIT_LIST_HEAD(&codec->list); for (i = 0; i < codec->num_dai; i++) { fixup_codec_formats(&codec->dai[i].playback); fixup_codec_formats(&codec->dai[i].capture); } mutex_lock(&client_mutex); list_add(&codec->list, &codec_list); snd_soc_instantiate_cards(); mutex_unlock(&client_mutex); pr_debug("Registered codec '%s'\n", codec->name); return 0; } EXPORT_SYMBOL_GPL(snd_soc_register_codec); /** * snd_soc_unregister_codec - Unregister a codec from the ASoC core * * @codec: codec to unregister */ void snd_soc_unregister_codec(struct snd_soc_codec *codec) { mutex_lock(&client_mutex); list_del(&codec->list); mutex_unlock(&client_mutex); pr_debug("Unregistered codec '%s'\n", codec->name); } EXPORT_SYMBOL_GPL(snd_soc_unregister_codec); static int __init snd_soc_init(void) { #ifdef CONFIG_DEBUG_FS debugfs_root = debugfs_create_dir("asoc", NULL); if (IS_ERR(debugfs_root) || !debugfs_root) { printk(KERN_WARNING "ASoC: Failed to create debugfs directory\n"); debugfs_root = NULL; } #endif return platform_driver_register(&soc_driver); } static void __exit snd_soc_exit(void) { #ifdef CONFIG_DEBUG_FS debugfs_remove_recursive(debugfs_root); #endif platform_driver_unregister(&soc_driver); } module_init(snd_soc_init); module_exit(snd_soc_exit); /* Module information */ MODULE_AUTHOR("Liam Girdwood, lrg@slimlogic.co.uk"); MODULE_DESCRIPTION("ALSA SoC Core"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:soc-audio");
gpl-2.0
MilysTW/linux-sunxi-cb2
drivers/net/wireless/rtl8189es/hal/rtl8188e/usb/rtl8188eu_led.c
150
4268
/****************************************************************************** * * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * ******************************************************************************/ #include <drv_conf.h> #include <osdep_service.h> #include <drv_types.h> #include <rtl8188e_hal.h> //================================================================================ // LED object. //================================================================================ //================================================================================ // Prototype of protected function. //================================================================================ //================================================================================ // LED_819xUsb routines. //================================================================================ // // Description: // Turn on LED according to LedPin specified. // void SwLedOn( _adapter *padapter, PLED_871x pLed ) { u8 LedCfg; HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter); if( (padapter->bSurpriseRemoved == _TRUE) || ( padapter->bDriverStopped == _TRUE)) { return; } LedCfg = rtw_read8(padapter, REG_LEDCFG2); switch(pLed->LedPin) { case LED_PIN_LED0: rtw_write8(padapter, REG_LEDCFG2, (LedCfg&0xf0)|BIT5|BIT6); // SW control led0 on. 
break; case LED_PIN_LED1: rtw_write8(padapter, REG_LEDCFG2, (LedCfg&0x0f)|BIT5); // SW control led1 on. break; default: break; } pLed->bLedOn = _TRUE; } // // Description: // Turn off LED according to LedPin specified. // void SwLedOff( _adapter *padapter, PLED_871x pLed ) { u8 LedCfg; HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter); if((padapter->bSurpriseRemoved == _TRUE) || ( padapter->bDriverStopped == _TRUE)) { goto exit; } LedCfg = rtw_read8(padapter, REG_LEDCFG2);//0x4E switch(pLed->LedPin) { case LED_PIN_LED0: if(pHalData->bLedOpenDrain == _TRUE) // Open-drain arrangement for controlling the LED) { LedCfg &= 0x90; // Set to software control. rtw_write8(padapter, REG_LEDCFG2, (LedCfg|BIT3)); LedCfg = rtw_read8(padapter, REG_MAC_PINMUX_CFG); LedCfg &= 0xFE; rtw_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg); } else { rtw_write8(padapter, REG_LEDCFG2, (LedCfg|BIT3|BIT5|BIT6)); } break; case LED_PIN_LED1: LedCfg &= 0x0f; // Set to software control. rtw_write8(padapter, REG_LEDCFG2, (LedCfg|BIT3)); break; default: break; } exit: pLed->bLedOn = _FALSE; } //================================================================================ // Interface to manipulate LED objects. //================================================================================ //================================================================================ // Default LED behavior. //================================================================================ // // Description: // Initialize all LED_871x objects. // void rtl8188eu_InitSwLeds( _adapter *padapter ) { struct led_priv *pledpriv = &(padapter->ledpriv); pledpriv->LedControlHandler = LedControl871x; InitLed871x(padapter, &(pledpriv->SwLed0), LED_PIN_LED0); InitLed871x(padapter,&(pledpriv->SwLed1), LED_PIN_LED1); } // // Description: // DeInitialize all LED_819xUsb objects. 
// void rtl8188eu_DeInitSwLeds( _adapter *padapter ) { struct led_priv *ledpriv = &(padapter->ledpriv); DeInitLed871x( &(ledpriv->SwLed0) ); DeInitLed871x( &(ledpriv->SwLed1) ); }
gpl-2.0
janztec/empc-arpi-linux
drivers/staging/line6/midibuf.c
406
5854
/* * Line6 Linux USB driver - 0.9.1beta * * Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. * */ #include <linux/slab.h> #include "midibuf.h" static int midibuf_message_length(unsigned char code) { int message_length; if (code < 0x80) message_length = -1; else if (code < 0xf0) { static const int length[] = { 3, 3, 3, 3, 2, 2, 3 }; message_length = length[(code >> 4) - 8]; } else { /* Note that according to the MIDI specification 0xf2 is the "Song Position Pointer", but this is used by Line6 to send sysex messages to the host. */ static const int length[] = { -1, 2, -1, 2, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1 }; message_length = length[code & 0x0f]; } return message_length; } static int midibuf_is_empty(struct midi_buffer *this) { return (this->pos_read == this->pos_write) && !this->full; } static int midibuf_is_full(struct midi_buffer *this) { return this->full; } void line6_midibuf_reset(struct midi_buffer *this) { this->pos_read = this->pos_write = this->full = 0; this->command_prev = -1; } int line6_midibuf_init(struct midi_buffer *this, int size, int split) { this->buf = kmalloc(size, GFP_KERNEL); if (this->buf == NULL) return -ENOMEM; this->size = size; this->split = split; line6_midibuf_reset(this); return 0; } void line6_midibuf_status(struct midi_buffer *this) { pr_debug("midibuf size=%d split=%d pos_read=%d pos_write=%d full=%d command_prev=%02x\n", this->size, this->split, this->pos_read, this->pos_write, this->full, this->command_prev); } int line6_midibuf_bytes_free(struct midi_buffer *this) { return midibuf_is_full(this) ? 0 : (this->pos_read - this->pos_write + this->size - 1) % this->size + 1; } int line6_midibuf_bytes_used(struct midi_buffer *this) { return midibuf_is_empty(this) ? 
0 : (this->pos_write - this->pos_read + this->size - 1) % this->size + 1; } int line6_midibuf_write(struct midi_buffer *this, unsigned char *data, int length) { int bytes_free; int length1, length2; int skip_active_sense = 0; if (midibuf_is_full(this) || (length <= 0)) return 0; /* skip trailing active sense */ if (data[length - 1] == 0xfe) { --length; skip_active_sense = 1; } bytes_free = line6_midibuf_bytes_free(this); if (length > bytes_free) length = bytes_free; if (length > 0) { length1 = this->size - this->pos_write; if (length < length1) { /* no buffer wraparound */ memcpy(this->buf + this->pos_write, data, length); this->pos_write += length; } else { /* buffer wraparound */ length2 = length - length1; memcpy(this->buf + this->pos_write, data, length1); memcpy(this->buf, data + length1, length2); this->pos_write = length2; } if (this->pos_write == this->pos_read) this->full = 1; } return length + skip_active_sense; } int line6_midibuf_read(struct midi_buffer *this, unsigned char *data, int length) { int bytes_used; int length1, length2; int command; int midi_length; int repeat = 0; int i; /* we need to be able to store at least a 3 byte MIDI message */ if (length < 3) return -EINVAL; if (midibuf_is_empty(this)) return 0; bytes_used = line6_midibuf_bytes_used(this); if (length > bytes_used) length = bytes_used; length1 = this->size - this->pos_read; /* check MIDI command length */ command = this->buf[this->pos_read]; if (command & 0x80) { midi_length = midibuf_message_length(command); this->command_prev = command; } else { if (this->command_prev > 0) { int midi_length_prev = midibuf_message_length(this->command_prev); if (midi_length_prev > 0) { midi_length = midi_length_prev - 1; repeat = 1; } else midi_length = -1; } else midi_length = -1; } if (midi_length < 0) { /* search for end of message */ if (length < length1) { /* no buffer wraparound */ for (i = 1; i < length; ++i) if (this->buf[this->pos_read + i] & 0x80) break; midi_length = i; } else { /* buffer 
wraparound */ length2 = length - length1; for (i = 1; i < length1; ++i) if (this->buf[this->pos_read + i] & 0x80) break; if (i < length1) midi_length = i; else { for (i = 0; i < length2; ++i) if (this->buf[i] & 0x80) break; midi_length = length1 + i; } } if (midi_length == length) midi_length = -1; /* end of message not found */ } if (midi_length < 0) { if (!this->split) return 0; /* command is not yet complete */ } else { if (length < midi_length) return 0; /* command is not yet complete */ length = midi_length; } if (length < length1) { /* no buffer wraparound */ memcpy(data + repeat, this->buf + this->pos_read, length); this->pos_read += length; } else { /* buffer wraparound */ length2 = length - length1; memcpy(data + repeat, this->buf + this->pos_read, length1); memcpy(data + repeat + length1, this->buf, length2); this->pos_read = length2; } if (repeat) data[0] = this->command_prev; this->full = 0; return length + repeat; } int line6_midibuf_ignore(struct midi_buffer *this, int length) { int bytes_used = line6_midibuf_bytes_used(this); if (length > bytes_used) length = bytes_used; this->pos_read = (this->pos_read + length) % this->size; this->full = 0; return length; } int line6_midibuf_skip_message(struct midi_buffer *this, unsigned short mask) { int cmd = this->command_prev; if ((cmd >= 0x80) && (cmd < 0xf0)) if ((mask & (1 << (cmd & 0x0f))) == 0) return 1; return 0; } void line6_midibuf_destroy(struct midi_buffer *this) { kfree(this->buf); this->buf = NULL; }
gpl-2.0