repo_name
string
path
string
copies
string
size
string
content
string
license
string
mtb3000gt/Deathly_Kernel_D2
drivers/video/omap2/omapfb/omapfb-ioctl.c
4784
19411
/* * linux/drivers/video/omap2/omapfb-ioctl.c * * Copyright (C) 2008 Nokia Corporation * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> * * Some code and ideas taken from drivers/video/omap/ driver * by Imre Deak. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/fb.h> #include <linux/device.h> #include <linux/uaccess.h> #include <linux/platform_device.h> #include <linux/mm.h> #include <linux/omapfb.h> #include <linux/vmalloc.h> #include <linux/export.h> #include <video/omapdss.h> #include <plat/vrfb.h> #include <plat/vram.h> #include "omapfb.h" static u8 get_mem_idx(struct omapfb_info *ofbi) { if (ofbi->id == ofbi->region->id) return 0; return OMAPFB_MEM_IDX_ENABLED | ofbi->region->id; } static struct omapfb2_mem_region *get_mem_region(struct omapfb_info *ofbi, u8 mem_idx) { struct omapfb2_device *fbdev = ofbi->fbdev; if (mem_idx & OMAPFB_MEM_IDX_ENABLED) mem_idx &= OMAPFB_MEM_IDX_MASK; else mem_idx = ofbi->id; if (mem_idx >= fbdev->num_fbs) return NULL; return &fbdev->regions[mem_idx]; } static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi) { struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_device *fbdev = ofbi->fbdev; struct omap_overlay *ovl; struct omap_overlay_info old_info; struct omapfb2_mem_region *old_rg, *new_rg; int r = 0; DBG("omapfb_setup_plane\n"); if (ofbi->num_overlays != 1) { r = -EINVAL; goto out; } /* XXX uses only the first overlay */ ovl = ofbi->overlays[0]; old_rg = 
ofbi->region; new_rg = get_mem_region(ofbi, pi->mem_idx); if (!new_rg) { r = -EINVAL; goto out; } /* Take the locks in a specific order to keep lockdep happy */ if (old_rg->id < new_rg->id) { omapfb_get_mem_region(old_rg); omapfb_get_mem_region(new_rg); } else if (new_rg->id < old_rg->id) { omapfb_get_mem_region(new_rg); omapfb_get_mem_region(old_rg); } else omapfb_get_mem_region(old_rg); if (pi->enabled && !new_rg->size) { /* * This plane's memory was freed, can't enable it * until it's reallocated. */ r = -EINVAL; goto put_mem; } ovl->get_overlay_info(ovl, &old_info); if (old_rg != new_rg) { ofbi->region = new_rg; set_fb_fix(fbi); } if (!pi->enabled) { r = ovl->disable(ovl); if (r) goto undo; } if (pi->enabled) { r = omapfb_setup_overlay(fbi, ovl, pi->pos_x, pi->pos_y, pi->out_width, pi->out_height); if (r) goto undo; } else { struct omap_overlay_info info; ovl->get_overlay_info(ovl, &info); info.pos_x = pi->pos_x; info.pos_y = pi->pos_y; info.out_width = pi->out_width; info.out_height = pi->out_height; r = ovl->set_overlay_info(ovl, &info); if (r) goto undo; } if (ovl->manager) ovl->manager->apply(ovl->manager); if (pi->enabled) { r = ovl->enable(ovl); if (r) goto undo; } /* Release the locks in a specific order to keep lockdep happy */ if (old_rg->id > new_rg->id) { omapfb_put_mem_region(old_rg); omapfb_put_mem_region(new_rg); } else if (new_rg->id > old_rg->id) { omapfb_put_mem_region(new_rg); omapfb_put_mem_region(old_rg); } else omapfb_put_mem_region(old_rg); return 0; undo: if (old_rg != new_rg) { ofbi->region = old_rg; set_fb_fix(fbi); } ovl->set_overlay_info(ovl, &old_info); put_mem: /* Release the locks in a specific order to keep lockdep happy */ if (old_rg->id > new_rg->id) { omapfb_put_mem_region(old_rg); omapfb_put_mem_region(new_rg); } else if (new_rg->id > old_rg->id) { omapfb_put_mem_region(new_rg); omapfb_put_mem_region(old_rg); } else omapfb_put_mem_region(old_rg); out: dev_err(fbdev->dev, "setup_plane failed\n"); return r; } static int 
omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi) { struct omapfb_info *ofbi = FB2OFB(fbi); if (ofbi->num_overlays != 1) { memset(pi, 0, sizeof(*pi)); } else { struct omap_overlay *ovl; struct omap_overlay_info ovli; ovl = ofbi->overlays[0]; ovl->get_overlay_info(ovl, &ovli); pi->pos_x = ovli.pos_x; pi->pos_y = ovli.pos_y; pi->enabled = ovl->is_enabled(ovl); pi->channel_out = 0; /* xxx */ pi->mirror = 0; pi->mem_idx = get_mem_idx(ofbi); pi->out_width = ovli.out_width; pi->out_height = ovli.out_height; } return 0; } static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi) { struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_device *fbdev = ofbi->fbdev; struct omapfb2_mem_region *rg; int r = 0, i; size_t size; if (mi->type != OMAPFB_MEMTYPE_SDRAM) return -EINVAL; size = PAGE_ALIGN(mi->size); rg = ofbi->region; down_write_nested(&rg->lock, rg->id); atomic_inc(&rg->lock_count); if (atomic_read(&rg->map_count)) { r = -EBUSY; goto out; } for (i = 0; i < fbdev->num_fbs; i++) { struct omapfb_info *ofbi2 = FB2OFB(fbdev->fbs[i]); int j; if (ofbi2->region != rg) continue; for (j = 0; j < ofbi2->num_overlays; j++) { struct omap_overlay *ovl; ovl = ofbi2->overlays[j]; if (ovl->is_enabled(ovl)) { r = -EBUSY; goto out; } } } if (rg->size != size || rg->type != mi->type) { r = omapfb_realloc_fbmem(fbi, size, mi->type); if (r) { dev_err(fbdev->dev, "realloc fbmem failed\n"); goto out; } } out: atomic_dec(&rg->lock_count); up_write(&rg->lock); return r; } static int omapfb_query_mem(struct fb_info *fbi, struct omapfb_mem_info *mi) { struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_mem_region *rg; rg = omapfb_get_mem_region(ofbi->region); memset(mi, 0, sizeof(*mi)); mi->size = rg->size; mi->type = rg->type; omapfb_put_mem_region(rg); return 0; } static int omapfb_update_window_nolock(struct fb_info *fbi, u32 x, u32 y, u32 w, u32 h) { struct omap_dss_device *display = fb2display(fbi); u16 dw, dh; if (!display) return 0; if (w == 0 || h 
== 0) return 0; display->driver->get_resolution(display, &dw, &dh); if (x + w > dw || y + h > dh) return -EINVAL; return display->driver->update(display, x, y, w, h); } /* This function is exported for SGX driver use */ int omapfb_update_window(struct fb_info *fbi, u32 x, u32 y, u32 w, u32 h) { struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_device *fbdev = ofbi->fbdev; int r; if (!lock_fb_info(fbi)) return -ENODEV; omapfb_lock(fbdev); r = omapfb_update_window_nolock(fbi, x, y, w, h); omapfb_unlock(fbdev); unlock_fb_info(fbi); return r; } EXPORT_SYMBOL(omapfb_update_window); int omapfb_set_update_mode(struct fb_info *fbi, enum omapfb_update_mode mode) { struct omap_dss_device *display = fb2display(fbi); struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_device *fbdev = ofbi->fbdev; struct omapfb_display_data *d; int r; if (!display) return -EINVAL; if (mode != OMAPFB_AUTO_UPDATE && mode != OMAPFB_MANUAL_UPDATE) return -EINVAL; omapfb_lock(fbdev); d = get_display_data(fbdev, display); if (d->update_mode == mode) { omapfb_unlock(fbdev); return 0; } r = 0; if (display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { if (mode == OMAPFB_AUTO_UPDATE) omapfb_start_auto_update(fbdev, display); else /* MANUAL_UPDATE */ omapfb_stop_auto_update(fbdev, display); d->update_mode = mode; } else { /* AUTO_UPDATE */ if (mode == OMAPFB_MANUAL_UPDATE) r = -EINVAL; } omapfb_unlock(fbdev); return r; } int omapfb_get_update_mode(struct fb_info *fbi, enum omapfb_update_mode *mode) { struct omap_dss_device *display = fb2display(fbi); struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_device *fbdev = ofbi->fbdev; struct omapfb_display_data *d; if (!display) return -EINVAL; omapfb_lock(fbdev); d = get_display_data(fbdev, display); *mode = d->update_mode; omapfb_unlock(fbdev); return 0; } /* XXX this color key handling is a hack... 
*/ static struct omapfb_color_key omapfb_color_keys[2]; static int _omapfb_set_color_key(struct omap_overlay_manager *mgr, struct omapfb_color_key *ck) { struct omap_overlay_manager_info info; enum omap_dss_trans_key_type kt; int r; mgr->get_manager_info(mgr, &info); if (ck->key_type == OMAPFB_COLOR_KEY_DISABLED) { info.trans_enabled = false; omapfb_color_keys[mgr->id] = *ck; r = mgr->set_manager_info(mgr, &info); if (r) return r; r = mgr->apply(mgr); return r; } switch (ck->key_type) { case OMAPFB_COLOR_KEY_GFX_DST: kt = OMAP_DSS_COLOR_KEY_GFX_DST; break; case OMAPFB_COLOR_KEY_VID_SRC: kt = OMAP_DSS_COLOR_KEY_VID_SRC; break; default: return -EINVAL; } info.default_color = ck->background; info.trans_key = ck->trans_key; info.trans_key_type = kt; info.trans_enabled = true; omapfb_color_keys[mgr->id] = *ck; r = mgr->set_manager_info(mgr, &info); if (r) return r; r = mgr->apply(mgr); return r; } static int omapfb_set_color_key(struct fb_info *fbi, struct omapfb_color_key *ck) { struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_device *fbdev = ofbi->fbdev; int r; int i; struct omap_overlay_manager *mgr = NULL; omapfb_lock(fbdev); for (i = 0; i < ofbi->num_overlays; i++) { if (ofbi->overlays[i]->manager) { mgr = ofbi->overlays[i]->manager; break; } } if (!mgr) { r = -EINVAL; goto err; } r = _omapfb_set_color_key(mgr, ck); err: omapfb_unlock(fbdev); return r; } static int omapfb_get_color_key(struct fb_info *fbi, struct omapfb_color_key *ck) { struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_device *fbdev = ofbi->fbdev; struct omap_overlay_manager *mgr = NULL; int r = 0; int i; omapfb_lock(fbdev); for (i = 0; i < ofbi->num_overlays; i++) { if (ofbi->overlays[i]->manager) { mgr = ofbi->overlays[i]->manager; break; } } if (!mgr) { r = -EINVAL; goto err; } *ck = omapfb_color_keys[mgr->id]; err: omapfb_unlock(fbdev); return r; } static int omapfb_memory_read(struct fb_info *fbi, struct omapfb_memory_read *mr) { struct omap_dss_device *display = fb2display(fbi); 
void *buf; int r; if (!display || !display->driver->memory_read) return -ENOENT; if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size)) return -EFAULT; if (mr->w * mr->h * 3 > mr->buffer_size) return -EINVAL; buf = vmalloc(mr->buffer_size); if (!buf) { DBG("vmalloc failed\n"); return -ENOMEM; } r = display->driver->memory_read(display, buf, mr->buffer_size, mr->x, mr->y, mr->w, mr->h); if (r > 0) { if (copy_to_user(mr->buffer, buf, mr->buffer_size)) r = -EFAULT; } vfree(buf); return r; } static int omapfb_get_ovl_colormode(struct omapfb2_device *fbdev, struct omapfb_ovl_colormode *mode) { int ovl_idx = mode->overlay_idx; int mode_idx = mode->mode_idx; struct omap_overlay *ovl; enum omap_color_mode supported_modes; struct fb_var_screeninfo var; int i; if (ovl_idx >= fbdev->num_overlays) return -ENODEV; ovl = fbdev->overlays[ovl_idx]; supported_modes = ovl->supported_modes; mode_idx = mode->mode_idx; for (i = 0; i < sizeof(supported_modes) * 8; i++) { if (!(supported_modes & (1 << i))) continue; /* * It's possible that the FB doesn't support a mode * that is supported by the overlay, so call the * following here. 
*/ if (dss_mode_to_fb_mode(1 << i, &var) < 0) continue; mode_idx--; if (mode_idx < 0) break; } if (i == sizeof(supported_modes) * 8) return -ENOENT; mode->bits_per_pixel = var.bits_per_pixel; mode->nonstd = var.nonstd; mode->red = var.red; mode->green = var.green; mode->blue = var.blue; mode->transp = var.transp; return 0; } static int omapfb_wait_for_go(struct fb_info *fbi) { struct omapfb_info *ofbi = FB2OFB(fbi); int r = 0; int i; for (i = 0; i < ofbi->num_overlays; ++i) { struct omap_overlay *ovl = ofbi->overlays[i]; r = ovl->wait_for_go(ovl); if (r) break; } return r; } int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg) { struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_device *fbdev = ofbi->fbdev; struct omap_dss_device *display = fb2display(fbi); union { struct omapfb_update_window_old uwnd_o; struct omapfb_update_window uwnd; struct omapfb_plane_info plane_info; struct omapfb_caps caps; struct omapfb_mem_info mem_info; struct omapfb_color_key color_key; struct omapfb_ovl_colormode ovl_colormode; enum omapfb_update_mode update_mode; int test_num; struct omapfb_memory_read memory_read; struct omapfb_vram_info vram_info; struct omapfb_tearsync_info tearsync_info; struct omapfb_display_info display_info; u32 crt; } p; int r = 0; switch (cmd) { case OMAPFB_SYNC_GFX: DBG("ioctl SYNC_GFX\n"); if (!display || !display->driver->sync) { /* DSS1 never returns an error here, so we neither */ /*r = -EINVAL;*/ break; } r = display->driver->sync(display); break; case OMAPFB_UPDATE_WINDOW_OLD: DBG("ioctl UPDATE_WINDOW_OLD\n"); if (!display || !display->driver->update) { r = -EINVAL; break; } if (copy_from_user(&p.uwnd_o, (void __user *)arg, sizeof(p.uwnd_o))) { r = -EFAULT; break; } r = omapfb_update_window_nolock(fbi, p.uwnd_o.x, p.uwnd_o.y, p.uwnd_o.width, p.uwnd_o.height); break; case OMAPFB_UPDATE_WINDOW: DBG("ioctl UPDATE_WINDOW\n"); if (!display || !display->driver->update) { r = -EINVAL; break; } if (copy_from_user(&p.uwnd, (void __user 
*)arg, sizeof(p.uwnd))) { r = -EFAULT; break; } r = omapfb_update_window_nolock(fbi, p.uwnd.x, p.uwnd.y, p.uwnd.width, p.uwnd.height); break; case OMAPFB_SETUP_PLANE: DBG("ioctl SETUP_PLANE\n"); if (copy_from_user(&p.plane_info, (void __user *)arg, sizeof(p.plane_info))) r = -EFAULT; else r = omapfb_setup_plane(fbi, &p.plane_info); break; case OMAPFB_QUERY_PLANE: DBG("ioctl QUERY_PLANE\n"); r = omapfb_query_plane(fbi, &p.plane_info); if (r < 0) break; if (copy_to_user((void __user *)arg, &p.plane_info, sizeof(p.plane_info))) r = -EFAULT; break; case OMAPFB_SETUP_MEM: DBG("ioctl SETUP_MEM\n"); if (copy_from_user(&p.mem_info, (void __user *)arg, sizeof(p.mem_info))) r = -EFAULT; else r = omapfb_setup_mem(fbi, &p.mem_info); break; case OMAPFB_QUERY_MEM: DBG("ioctl QUERY_MEM\n"); r = omapfb_query_mem(fbi, &p.mem_info); if (r < 0) break; if (copy_to_user((void __user *)arg, &p.mem_info, sizeof(p.mem_info))) r = -EFAULT; break; case OMAPFB_GET_CAPS: DBG("ioctl GET_CAPS\n"); if (!display) { r = -EINVAL; break; } memset(&p.caps, 0, sizeof(p.caps)); if (display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) p.caps.ctrl |= OMAPFB_CAPS_MANUAL_UPDATE; if (display->caps & OMAP_DSS_DISPLAY_CAP_TEAR_ELIM) p.caps.ctrl |= OMAPFB_CAPS_TEARSYNC; if (copy_to_user((void __user *)arg, &p.caps, sizeof(p.caps))) r = -EFAULT; break; case OMAPFB_GET_OVERLAY_COLORMODE: DBG("ioctl GET_OVERLAY_COLORMODE\n"); if (copy_from_user(&p.ovl_colormode, (void __user *)arg, sizeof(p.ovl_colormode))) { r = -EFAULT; break; } r = omapfb_get_ovl_colormode(fbdev, &p.ovl_colormode); if (r < 0) break; if (copy_to_user((void __user *)arg, &p.ovl_colormode, sizeof(p.ovl_colormode))) r = -EFAULT; break; case OMAPFB_SET_UPDATE_MODE: DBG("ioctl SET_UPDATE_MODE\n"); if (get_user(p.update_mode, (int __user *)arg)) r = -EFAULT; else r = omapfb_set_update_mode(fbi, p.update_mode); break; case OMAPFB_GET_UPDATE_MODE: DBG("ioctl GET_UPDATE_MODE\n"); r = omapfb_get_update_mode(fbi, &p.update_mode); if (r) break; if 
(put_user(p.update_mode, (enum omapfb_update_mode __user *)arg)) r = -EFAULT; break; case OMAPFB_SET_COLOR_KEY: DBG("ioctl SET_COLOR_KEY\n"); if (copy_from_user(&p.color_key, (void __user *)arg, sizeof(p.color_key))) r = -EFAULT; else r = omapfb_set_color_key(fbi, &p.color_key); break; case OMAPFB_GET_COLOR_KEY: DBG("ioctl GET_COLOR_KEY\n"); r = omapfb_get_color_key(fbi, &p.color_key); if (r) break; if (copy_to_user((void __user *)arg, &p.color_key, sizeof(p.color_key))) r = -EFAULT; break; case FBIO_WAITFORVSYNC: if (get_user(p.crt, (__u32 __user *)arg)) { r = -EFAULT; break; } if (p.crt != 0) { r = -ENODEV; break; } /* FALLTHROUGH */ case OMAPFB_WAITFORVSYNC: DBG("ioctl WAITFORVSYNC\n"); if (!display) { r = -EINVAL; break; } r = display->manager->wait_for_vsync(display->manager); break; case OMAPFB_WAITFORGO: DBG("ioctl WAITFORGO\n"); if (!display) { r = -EINVAL; break; } r = omapfb_wait_for_go(fbi); break; /* LCD and CTRL tests do the same thing for backward * compatibility */ case OMAPFB_LCD_TEST: DBG("ioctl LCD_TEST\n"); if (get_user(p.test_num, (int __user *)arg)) { r = -EFAULT; break; } if (!display || !display->driver->run_test) { r = -EINVAL; break; } r = display->driver->run_test(display, p.test_num); break; case OMAPFB_CTRL_TEST: DBG("ioctl CTRL_TEST\n"); if (get_user(p.test_num, (int __user *)arg)) { r = -EFAULT; break; } if (!display || !display->driver->run_test) { r = -EINVAL; break; } r = display->driver->run_test(display, p.test_num); break; case OMAPFB_MEMORY_READ: DBG("ioctl MEMORY_READ\n"); if (copy_from_user(&p.memory_read, (void __user *)arg, sizeof(p.memory_read))) { r = -EFAULT; break; } r = omapfb_memory_read(fbi, &p.memory_read); break; case OMAPFB_GET_VRAM_INFO: { unsigned long vram, free, largest; DBG("ioctl GET_VRAM_INFO\n"); omap_vram_get_info(&vram, &free, &largest); p.vram_info.total = vram; p.vram_info.free = free; p.vram_info.largest_free_block = largest; if (copy_to_user((void __user *)arg, &p.vram_info, sizeof(p.vram_info))) r = 
-EFAULT; break; } case OMAPFB_SET_TEARSYNC: { DBG("ioctl SET_TEARSYNC\n"); if (copy_from_user(&p.tearsync_info, (void __user *)arg, sizeof(p.tearsync_info))) { r = -EFAULT; break; } if (!display || !display->driver->enable_te) { r = -ENODEV; break; } r = display->driver->enable_te(display, !!p.tearsync_info.enabled); break; } case OMAPFB_GET_DISPLAY_INFO: { u16 xres, yres; DBG("ioctl GET_DISPLAY_INFO\n"); if (display == NULL) { r = -ENODEV; break; } display->driver->get_resolution(display, &xres, &yres); p.display_info.xres = xres; p.display_info.yres = yres; if (display->driver->get_dimensions) { u32 w, h; display->driver->get_dimensions(display, &w, &h); p.display_info.width = w; p.display_info.height = h; } else { p.display_info.width = 0; p.display_info.height = 0; } if (copy_to_user((void __user *)arg, &p.display_info, sizeof(p.display_info))) r = -EFAULT; break; } default: dev_err(fbdev->dev, "Unknown ioctl 0x%x\n", cmd); r = -EINVAL; } if (r < 0) DBG("ioctl failed: %d\n", r); return r; }
gpl-2.0
TimesysGit/advantech-linux
arch/frv/kernel/irq-mb93091.c
7344
3687
/* irq-mb93091.c: MB93091 FPGA interrupt handling * * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/bitops.h> #include <asm/io.h> #include <asm/delay.h> #include <asm/irq.h> #include <asm/irc-regs.h> #define __reg16(ADDR) (*(volatile unsigned short *)(ADDR)) #define __get_IMR() ({ __reg16(0xffc00004); }) #define __set_IMR(M) do { __reg16(0xffc00004) = (M); wmb(); } while(0) #define __get_IFR() ({ __reg16(0xffc0000c); }) #define __clr_IFR(M) do { __reg16(0xffc0000c) = ~(M); wmb(); } while(0) /* * on-motherboard FPGA PIC operations */ static void frv_fpga_mask(struct irq_data *d) { uint16_t imr = __get_IMR(); imr |= 1 << (d->irq - IRQ_BASE_FPGA); __set_IMR(imr); } static void frv_fpga_ack(struct irq_data *d) { __clr_IFR(1 << (d->irq - IRQ_BASE_FPGA)); } static void frv_fpga_mask_ack(struct irq_data *d) { uint16_t imr = __get_IMR(); imr |= 1 << (d->irq - IRQ_BASE_FPGA); __set_IMR(imr); __clr_IFR(1 << (d->irq - IRQ_BASE_FPGA)); } static void frv_fpga_unmask(struct irq_data *d) { uint16_t imr = __get_IMR(); imr &= ~(1 << (d->irq - IRQ_BASE_FPGA)); __set_IMR(imr); } static struct irq_chip frv_fpga_pic = { .name = "mb93091", .irq_ack = frv_fpga_ack, .irq_mask = frv_fpga_mask, .irq_mask_ack = frv_fpga_mask_ack, .irq_unmask = frv_fpga_unmask, }; /* * FPGA PIC interrupt handler */ static irqreturn_t fpga_interrupt(int irq, void *_mask) { uint16_t imr, mask = (unsigned long) _mask; imr = __get_IMR(); mask = mask & ~imr & __get_IFR(); /* poll all the 
triggered IRQs */ while (mask) { int irq; asm("scan %1,gr0,%0" : "=r"(irq) : "r"(mask)); irq = 31 - irq; mask &= ~(1 << irq); generic_handle_irq(IRQ_BASE_FPGA + irq); } return IRQ_HANDLED; } /* * define an interrupt action for each FPGA PIC output * - use dev_id to indicate the FPGA PIC input to output mappings */ static struct irqaction fpga_irq[4] = { [0] = { .handler = fpga_interrupt, .flags = IRQF_DISABLED | IRQF_SHARED, .name = "fpga.0", .dev_id = (void *) 0x0028UL, }, [1] = { .handler = fpga_interrupt, .flags = IRQF_DISABLED | IRQF_SHARED, .name = "fpga.1", .dev_id = (void *) 0x0050UL, }, [2] = { .handler = fpga_interrupt, .flags = IRQF_DISABLED | IRQF_SHARED, .name = "fpga.2", .dev_id = (void *) 0x1c00UL, }, [3] = { .handler = fpga_interrupt, .flags = IRQF_DISABLED | IRQF_SHARED, .name = "fpga.3", .dev_id = (void *) 0x6386UL, } }; /* * initialise the motherboard FPGA's PIC */ void __init fpga_init(void) { int irq; /* all PIC inputs are all set to be low-level driven, apart from the * NMI button (15) which is fixed at falling-edge */ __set_IMR(0x7ffe); __clr_IFR(0x0000); for (irq = IRQ_BASE_FPGA + 1; irq <= IRQ_BASE_FPGA + 14; irq++) irq_set_chip_and_handler(irq, &frv_fpga_pic, handle_level_irq); irq_set_chip_and_handler(IRQ_FPGA_NMI, &frv_fpga_pic, handle_edge_irq); /* the FPGA drives the first four external IRQ inputs on the CPU PIC */ setup_irq(IRQ_CPU_EXTERNAL0, &fpga_irq[0]); setup_irq(IRQ_CPU_EXTERNAL1, &fpga_irq[1]); setup_irq(IRQ_CPU_EXTERNAL2, &fpga_irq[2]); setup_irq(IRQ_CPU_EXTERNAL3, &fpga_irq[3]); }
gpl-2.0
romeokoi/kernel-olympus-3.1
drivers/power/pmu_battery.c
12720
5424
/* * Battery class driver for Apple PMU * * Copyright © 2006 David Woodhouse <dwmw2@infradead.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/power_supply.h> #include <linux/adb.h> #include <linux/pmu.h> #include <linux/slab.h> static struct pmu_battery_dev { struct power_supply bat; struct pmu_battery_info *pbi; char name[16]; int propval; } *pbats[PMU_MAX_BATTERIES]; #define to_pmu_battery_dev(x) container_of(x, struct pmu_battery_dev, bat) /********************************************************************* * Power *********************************************************************/ static int pmu_get_ac_prop(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { switch (psp) { case POWER_SUPPLY_PROP_ONLINE: val->intval = (!!(pmu_power_flags & PMU_PWR_AC_PRESENT)) || (pmu_battery_count == 0); break; default: return -EINVAL; } return 0; } static enum power_supply_property pmu_ac_props[] = { POWER_SUPPLY_PROP_ONLINE, }; static struct power_supply pmu_ac = { .name = "pmu-ac", .type = POWER_SUPPLY_TYPE_MAINS, .properties = pmu_ac_props, .num_properties = ARRAY_SIZE(pmu_ac_props), .get_property = pmu_get_ac_prop, }; /********************************************************************* * Battery properties *********************************************************************/ static char *pmu_batt_types[] = { "Smart", "Comet", "Hooper", "Unknown" }; static char *pmu_bat_get_model_name(struct pmu_battery_info *pbi) { switch (pbi->flags & PMU_BATT_TYPE_MASK) { case PMU_BATT_TYPE_SMART: return pmu_batt_types[0]; case PMU_BATT_TYPE_COMET: return pmu_batt_types[1]; case PMU_BATT_TYPE_HOOPER: return pmu_batt_types[2]; default: break; } return pmu_batt_types[3]; } static int 
pmu_bat_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct pmu_battery_dev *pbat = to_pmu_battery_dev(psy); struct pmu_battery_info *pbi = pbat->pbi; switch (psp) { case POWER_SUPPLY_PROP_STATUS: if (pbi->flags & PMU_BATT_CHARGING) val->intval = POWER_SUPPLY_STATUS_CHARGING; else if (pmu_power_flags & PMU_PWR_AC_PRESENT) val->intval = POWER_SUPPLY_STATUS_FULL; else val->intval = POWER_SUPPLY_STATUS_DISCHARGING; break; case POWER_SUPPLY_PROP_PRESENT: val->intval = !!(pbi->flags & PMU_BATT_PRESENT); break; case POWER_SUPPLY_PROP_MODEL_NAME: val->strval = pmu_bat_get_model_name(pbi); break; case POWER_SUPPLY_PROP_ENERGY_AVG: val->intval = pbi->charge * 1000; /* mWh -> µWh */ break; case POWER_SUPPLY_PROP_ENERGY_FULL: val->intval = pbi->max_charge * 1000; /* mWh -> µWh */ break; case POWER_SUPPLY_PROP_CURRENT_AVG: val->intval = pbi->amperage * 1000; /* mA -> µA */ break; case POWER_SUPPLY_PROP_VOLTAGE_AVG: val->intval = pbi->voltage * 1000; /* mV -> µV */ break; case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: val->intval = pbi->time_remaining; break; default: return -EINVAL; } return 0; } static enum power_supply_property pmu_bat_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_ENERGY_AVG, POWER_SUPPLY_PROP_ENERGY_FULL, POWER_SUPPLY_PROP_CURRENT_AVG, POWER_SUPPLY_PROP_VOLTAGE_AVG, POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, }; /********************************************************************* * Initialisation *********************************************************************/ static struct platform_device *bat_pdev; static int __init pmu_bat_init(void) { int ret; int i; bat_pdev = platform_device_register_simple("pmu-battery", 0, NULL, 0); if (IS_ERR(bat_pdev)) { ret = PTR_ERR(bat_pdev); goto pdev_register_failed; } ret = power_supply_register(&bat_pdev->dev, &pmu_ac); if (ret) goto ac_register_failed; for (i = 0; i < pmu_battery_count; i++) { struct 
pmu_battery_dev *pbat = kzalloc(sizeof(*pbat), GFP_KERNEL); if (!pbat) break; sprintf(pbat->name, "PMU_battery_%d", i); pbat->bat.name = pbat->name; pbat->bat.properties = pmu_bat_props; pbat->bat.num_properties = ARRAY_SIZE(pmu_bat_props); pbat->bat.get_property = pmu_bat_get_property; pbat->pbi = &pmu_batteries[i]; ret = power_supply_register(&bat_pdev->dev, &pbat->bat); if (ret) { kfree(pbat); goto battery_register_failed; } pbats[i] = pbat; } goto success; battery_register_failed: while (i--) { if (!pbats[i]) continue; power_supply_unregister(&pbats[i]->bat); kfree(pbats[i]); } power_supply_unregister(&pmu_ac); ac_register_failed: platform_device_unregister(bat_pdev); pdev_register_failed: success: return ret; } static void __exit pmu_bat_exit(void) { int i; for (i = 0; i < PMU_MAX_BATTERIES; i++) { if (!pbats[i]) continue; power_supply_unregister(&pbats[i]->bat); kfree(pbats[i]); } power_supply_unregister(&pmu_ac); platform_device_unregister(bat_pdev); } module_init(pmu_bat_init); module_exit(pmu_bat_exit); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("PMU battery driver");
gpl-2.0
androidarmv6/android_kernel_samsung_bcm21553-common
net/rxrpc/ar-local.c
12720
7694
/* AF_RXRPC local endpoint management * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/net.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/sock.h> #include <net/af_rxrpc.h> #include "ar-internal.h" static LIST_HEAD(rxrpc_locals); DEFINE_RWLOCK(rxrpc_local_lock); static DECLARE_RWSEM(rxrpc_local_sem); static DECLARE_WAIT_QUEUE_HEAD(rxrpc_local_wq); static void rxrpc_destroy_local(struct work_struct *work); /* * allocate a new local */ static struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx) { struct rxrpc_local *local; local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL); if (local) { INIT_WORK(&local->destroyer, &rxrpc_destroy_local); INIT_WORK(&local->acceptor, &rxrpc_accept_incoming_calls); INIT_WORK(&local->rejecter, &rxrpc_reject_packets); INIT_LIST_HEAD(&local->services); INIT_LIST_HEAD(&local->link); init_rwsem(&local->defrag_sem); skb_queue_head_init(&local->accept_queue); skb_queue_head_init(&local->reject_queue); spin_lock_init(&local->lock); rwlock_init(&local->services_lock); atomic_set(&local->usage, 1); local->debug_id = atomic_inc_return(&rxrpc_debug_id); memcpy(&local->srx, srx, sizeof(*srx)); } _leave(" = %p", local); return local; } /* * create the local socket * - must be called with rxrpc_local_sem writelocked */ static int rxrpc_create_local(struct rxrpc_local *local) { struct sock *sock; int ret, opt; _enter("%p{%d}", local, local->srx.transport_type); /* create a socket to represent the local endpoint */ ret = sock_create_kern(PF_INET, local->srx.transport_type, IPPROTO_UDP, &local->socket); if (ret < 0) { _leave(" = %d [socket]", ret); return ret; } /* if a 
local address was supplied then bind it */
	/* NOTE(review): this is the tail of rxrpc_create_local(); its opening
	 * lines are outside this chunk, so the code below is kept verbatim. */
	if (local->srx.transport_len > sizeof(sa_family_t)) {
		_debug("bind");
		ret = kernel_bind(local->socket,
				  (struct sockaddr *) &local->srx.transport,
				  local->srx.transport_len);
		if (ret < 0) {
			_debug("bind failed");
			goto error;
		}
	}

	/* we want to receive ICMP errors */
	opt = 1;
	ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
				(char *) &opt, sizeof(opt));
	if (ret < 0) {
		_debug("setsockopt failed");
		goto error;
	}

	/* we want to set the don't fragment bit */
	opt = IP_PMTUDISC_DO;
	ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
				(char *) &opt, sizeof(opt));
	if (ret < 0) {
		_debug("setsockopt failed");
		goto error;
	}

	/* publish the endpoint on the global list under the write lock */
	write_lock_bh(&rxrpc_local_lock);
	list_add(&local->link, &rxrpc_locals);
	write_unlock_bh(&rxrpc_local_lock);

	/* set the socket up: hook our handlers into the UDP socket so that
	 * incoming data and ICMP-reported errors are routed to rxrpc */
	sock = local->socket->sk;
	sock->sk_user_data	= local;
	sock->sk_data_ready	= rxrpc_data_ready;
	sock->sk_error_report	= rxrpc_UDP_error_report;
	_leave(" = 0");
	return 0;

error:
	/* tear the socket back down; caller frees the rxrpc_local itself */
	kernel_sock_shutdown(local->socket, SHUT_RDWR);
	local->socket->sk->sk_user_data = NULL;
	sock_release(local->socket);
	local->socket = NULL;

	_leave(" = %d", ret);
	return ret;
}

/*
 * Look up a local endpoint matching the given UDP address, taking a
 * reference on it; if none exists, allocate and create a new one.
 * Returns the endpoint or an ERR_PTR on failure.
 */
struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	int ret;

	_enter("{%d,%u,%pI4+%hu}",
	       srx->transport_type,
	       srx->transport.family,
	       &srx->transport.sin.sin_addr,
	       ntohs(srx->transport.sin.sin_port));

	/* the semaphore serialises lookup-or-create against concurrent
	 * creators and against rxrpc_destroy_local() */
	down_write(&rxrpc_local_sem);

	/* see if we have a suitable local local endpoint already */
	read_lock_bh(&rxrpc_local_lock);

	list_for_each_entry(local, &rxrpc_locals, link) {
		_debug("CMP {%d,%u,%pI4+%hu}",
		       local->srx.transport_type,
		       local->srx.transport.family,
		       &local->srx.transport.sin.sin_addr,
		       ntohs(local->srx.transport.sin.sin_port));

		if (local->srx.transport_type != srx->transport_type ||
		    local->srx.transport.family != srx->transport.family)
			continue;

		switch (srx->transport.family) {
		case AF_INET:
			if (local->srx.transport.sin.sin_port !=
			    srx->transport.sin.sin_port)
				continue;
			if (memcmp(&local->srx.transport.sin.sin_addr,
				   &srx->transport.sin.sin_addr,
				   sizeof(struct in_addr)) != 0)
				continue;
			goto found_local;

		default:
			/* only AF_INET entries should ever be on the list */
			BUG();
		}
	}

	read_unlock_bh(&rxrpc_local_lock);

	/* we didn't find one, so we need to create one */
	local = rxrpc_alloc_local(srx);
	if (!local) {
		up_write(&rxrpc_local_sem);
		return ERR_PTR(-ENOMEM);
	}

	ret = rxrpc_create_local(local);
	if (ret < 0) {
		up_write(&rxrpc_local_sem);
		kfree(local);
		_leave(" = %d", ret);
		return ERR_PTR(ret);
	}

	up_write(&rxrpc_local_sem);

	_net("LOCAL new %d {%d,%u,%pI4+%hu}",
	     local->debug_id,
	     local->srx.transport_type,
	     local->srx.transport.family,
	     &local->srx.transport.sin.sin_addr,
	     ntohs(local->srx.transport.sin.sin_port));

	_leave(" = %p [new]", local);
	return local;

found_local:
	/* take the ref while still holding the read lock so the endpoint
	 * can't be destroyed between the match and the get */
	rxrpc_get_local(local);
	read_unlock_bh(&rxrpc_local_lock);
	up_write(&rxrpc_local_sem);

	_net("LOCAL old %d {%d,%u,%pI4+%hu}",
	     local->debug_id,
	     local->srx.transport_type,
	     local->srx.transport.family,
	     &local->srx.transport.sin.sin_addr,
	     ntohs(local->srx.transport.sin.sin_port));

	_leave(" = %p [reuse]", local);
	return local;
}

/*
 * release a reference on a local endpoint; when the last reference is
 * dropped, destruction is deferred to the endpoint's work item
 */
void rxrpc_put_local(struct rxrpc_local *local)
{
	_enter("%p{u=%d}", local, atomic_read(&local->usage));

	ASSERTCMP(atomic_read(&local->usage), >, 0);

	/* to prevent a race, the decrement and the dequeue must be effectively
	 * atomic */
	write_lock_bh(&rxrpc_local_lock);
	if (unlikely(atomic_dec_and_test(&local->usage))) {
		_debug("destroy local");
		rxrpc_queue_work(&local->destroyer);
	}
	write_unlock_bh(&rxrpc_local_lock);
	_leave("");
}

/*
 * destroy a local endpoint (work-queue callback scheduled from
 * rxrpc_put_local() when the usage count hit zero)
 */
static void rxrpc_destroy_local(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, destroyer);

	_enter("%p{%d}", local, atomic_read(&local->usage));

	down_write(&rxrpc_local_sem);

	write_lock_bh(&rxrpc_local_lock);
	if (atomic_read(&local->usage) > 0) {
		/* somebody re-took a reference between the queueing of this
		 * work item and it running - leave the endpoint alone */
		write_unlock_bh(&rxrpc_local_lock);
		up_read(&rxrpc_local_sem);
		_leave(" [resurrected]");
		return;
	}

	list_del(&local->link);
	local->socket->sk->sk_user_data = NULL;
	write_unlock_bh(&rxrpc_local_lock);

	downgrade_write(&rxrpc_local_sem);

	ASSERT(list_empty(&local->services));
	ASSERT(!work_pending(&local->acceptor));
	ASSERT(!work_pending(&local->rejecter));

	/* finish cleaning up the local descriptor */
	rxrpc_purge_queue(&local->accept_queue);
	rxrpc_purge_queue(&local->reject_queue);
	kernel_sock_shutdown(local->socket, SHUT_RDWR);
	sock_release(local->socket);

	up_read(&rxrpc_local_sem);

	_net("DESTROY LOCAL %d", local->debug_id);
	kfree(local);

	/* wake anyone waiting in rxrpc_destroy_all_locals() once the list
	 * has drained */
	if (list_empty(&rxrpc_locals))
		wake_up_all(&rxrpc_local_wq);

	_leave("");
}

/*
 * module-exit helper: wait for every local endpoint to be destroyed
 * (each is torn down by its own work item; we just sleep until the
 * global list is empty)
 */
void __exit rxrpc_destroy_all_locals(void)
{
	DECLARE_WAITQUEUE(myself,current);

	_enter("");

	/* we simply have to wait for them to go away */
	if (!list_empty(&rxrpc_locals)) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&rxrpc_local_wq, &myself);

		while (!list_empty(&rxrpc_locals)) {
			schedule();
			set_current_state(TASK_UNINTERRUPTIBLE);
		}

		remove_wait_queue(&rxrpc_local_wq, &myself);
		set_current_state(TASK_RUNNING);
	}

	_leave("");
}
gpl-2.0
nikitines/zte-kernel-roamer2
lib/xz/xz_crc32.c
13232
1261
/*
 * CRC32 using the polynomial from IEEE-802.3
 *
 * Authors: Lasse Collin <lasse.collin@tukaani.org>
 *          Igor Pavlov <http://7-zip.org/>
 *
 * This file has been put into the public domain.
 * You can do whatever you want with this file.
 */

/*
 * This is not the fastest implementation, but it is pretty compact.
 * The fastest versions of xz_crc32() on modern CPUs without hardware
 * accelerated CRC instruction are 3-5 times as fast as this version,
 * but they are bigger and use more memory for the lookup table.
 */

#include "xz_private.h"

/*
 * STATIC_RW_DATA is used in the pre-boot environment on some architectures.
 * See <linux/decompress/mm.h> for details.
 */
#ifndef STATIC_RW_DATA
#	define STATIC_RW_DATA static
#endif

STATIC_RW_DATA uint32_t xz_crc32_table[256];

/*
 * Fill xz_crc32_table with the 256 per-byte remainders for the reflected
 * IEEE-802.3 polynomial. Must be called once before xz_crc32() is used.
 */
XZ_EXTERN void xz_crc32_init(void)
{
	const uint32_t poly = 0xEDB88320;

	uint32_t b;
	uint32_t bit;
	uint32_t rem;

	for (b = 0; b < 256; ++b) {
		rem = b;

		/* shift out each of the eight bits, folding in the
		 * polynomial whenever the dropped bit was set */
		for (bit = 0; bit < 8; ++bit) {
			if (rem & 1)
				rem = (rem >> 1) ^ poly;
			else
				rem >>= 1;
		}

		xz_crc32_table[b] = rem;
	}
}

/*
 * Update the CRC32 value of "crc" with "size" bytes from "buf" and
 * return the new value. Pass 0 as the initial "crc" for a fresh stream.
 */
XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc)
{
	size_t pos;

	/* the algorithm works on the bitwise complement of the CRC */
	crc = ~crc;

	for (pos = 0; pos < size; ++pos)
		crc = xz_crc32_table[buf[pos] ^ (crc & 0xFF)] ^ (crc >> 8);

	return ~crc;
}
gpl-2.0
Pafcholini/Nadia_Kernel_Hammerhead
drivers/parport/probe.c
13744
7589
/*
 * Parallel port device probing code
 *
 * Authors:    Carsten Gross, carsten@sol.wohnheim.uni-ulm.de
 *             Philip Blundell <philb@gnu.org>
 */

#include <linux/module.h>
#include <linux/parport.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/uaccess.h>

/* Mapping from IEEE-1284 CLASS tokens to human-readable descriptions;
 * the index of a matching row becomes the PARPORT_CLASS_* value. */
static const struct {
	const char *token;
	const char *descr;
} classes[] = {
	{ "",            "Legacy device" },
	{ "PRINTER",     "Printer" },
	{ "MODEM",       "Modem" },
	{ "NET",         "Network device" },
	{ "HDC",         "Hard disk" },
	{ "PCMCIA",      "PCMCIA" },
	{ "MEDIA",       "Multimedia device" },
	{ "FDC",         "Floppy disk" },
	{ "PORTS",       "Ports" },
	{ "SCANNER",     "Scanner" },
	{ "DIGICAM",     "Digital camera" },
	{ "",            "Unknown device" },
	{ "",            "Unspecified" },
	{ "SCSIADAPTER", "SCSI adapter" },
	{ NULL,          NULL }
};

/* Log a one-line summary of the probed device to the kernel log.
 * device == -1 refers to the port itself (probe_info[0]). */
static void pretty_print(struct parport *port, int device)
{
	struct parport_device_info *info = &port->probe_info[device + 1];

	printk(KERN_INFO "%s", port->name);
	if (device >= 0)
		printk (" (addr %d)", device);
	printk (": %s", classes[info->class].descr);
	if (info->class)
		printk(", %s %s", info->mfr, info->model);
	printk("\n");
}

/*
 * Parse an IEEE-1284 Device ID string of the form
 * "KEY:value;KEY:value;..." and fill in the port's probe_info entry.
 * The input string is not modified; a private copy is tokenised.
 */
static void parse_data(struct parport *port, int device, char *str)
{
	char *txt = kmalloc(strlen(str)+1, GFP_KERNEL);
	char *p = txt, *q;
	int guessed_class = PARPORT_CLASS_UNSPEC;
	struct parport_device_info *info = &port->probe_info[device + 1];

	if (!txt) {
		printk(KERN_WARNING "%s probe: memory squeeze\n", port->name);
		return;
	}
	strcpy(txt, str);
	while (p) {
		char *sep;
		q = strchr(p, ';');
		if (q) *q = 0;
		sep = strchr(p, ':');
		if (sep) {
			char *u;
			/* key is everything before ':', value after */
			*(sep++) = 0;
			/* Get rid of trailing blanks */
			u = sep + strlen (sep) - 1;
			while (u >= p && *u == ' ')
				*u-- = '\0';
			/* keys are compared case-insensitively by
			 * upper-casing the key in place */
			u = p;
			while (*u) {
				*u = toupper(*u);
				u++;
			}
			if (!strcmp(p, "MFG") || !strcmp(p, "MANUFACTURER")) {
				kfree(info->mfr);
				info->mfr = kstrdup(sep, GFP_KERNEL);
			} else if (!strcmp(p, "MDL") || !strcmp(p, "MODEL")) {
				kfree(info->model);
				info->model = kstrdup(sep, GFP_KERNEL);
			} else if (!strcmp(p, "CLS") || !strcmp(p, "CLASS")) {
				int i;

				kfree(info->class_name);
				info->class_name = kstrdup(sep, GFP_KERNEL);
				for (u = sep; *u; u++)
					*u = toupper(*u);
				for (i = 0; classes[i].token; i++) {
					if (!strcmp(classes[i].token, sep)) {
						info->class = i;
						goto rock_on;
					}
				}
				printk(KERN_WARNING "%s probe: warning, class '%s' not understood.\n", port->name, sep);
				info->class = PARPORT_CLASS_OTHER;
			} else if (!strcmp(p, "CMD") ||
				   !strcmp(p, "COMMAND SET")) {
				kfree(info->cmdset);
				info->cmdset = kstrdup(sep, GFP_KERNEL);
				/* if it speaks printer language, it's
				   probably a printer */
				if (strstr(sep, "PJL") || strstr(sep, "PCL"))
					guessed_class = PARPORT_CLASS_PRINTER;
			} else if (!strcmp(p, "DES") || !strcmp(p, "DESCRIPTION")) {
				kfree(info->description);
				info->description = kstrdup(sep, GFP_KERNEL);
			}
		}
	rock_on:
		if (q)
			p = q + 1;
		else
			p = NULL;
	}

	/* If the device didn't tell us its class, maybe we have managed to
	   guess one from the things it did say. */
	if (info->class == PARPORT_CLASS_UNSPEC)
		info->class = guessed_class;

	pretty_print (port, device);

	kfree(txt);
}

/* Read up to count-1 bytes of device id. Terminate buffer with
 * '\0'. Buffer begins with two Device ID length bytes as given by
 * device. */
static ssize_t parport_read_device_id (struct parport *port, char *buffer,
				       size_t count)
{
	unsigned char length[2];
	unsigned lelen, belen;
	size_t idlens[4];
	unsigned numidlens;
	unsigned current_idlen;
	ssize_t retval;
	size_t len;

	/* First two bytes are MSB,LSB of inclusive length. */
	retval = parport_read (port, length, 2);

	if (retval < 0)
		return retval;
	if (retval != 2)
		return -EIO;

	if (count < 2)
		return 0;
	memcpy(buffer, length, 2);
	len = 2;

	/* Some devices wrongly send LE length, and some send it two
	 * bytes short. Construct a sorted array of lengths to try. */
	belen = (length[0] << 8) + length[1];
	lelen = (length[1] << 8) + length[0];
	idlens[0] = min(belen, lelen);
	idlens[1] = idlens[0]+2;
	if (belen != lelen) {
		int off = 2;
		/* Don't try lengths of 0x100 and 0x200 as 1 and 2 */
		if (idlens[0] <= 2)
			off = 0;
		idlens[off] = max(belen, lelen);
		idlens[off+1] = idlens[off]+2;
		numidlens = off+2;
	}
	else {
		/* Some devices don't truly implement Device ID, but
		 * just return constant nibble forever. This catches
		 * also those cases. */
		if (idlens[0] == 0 || idlens[0] > 0xFFF) {
			printk (KERN_DEBUG "%s: reported broken Device ID"
				" length of %#zX bytes\n",
				port->name, idlens[0]);
			return -EIO;
		}
		numidlens = 2;
	}

	/* Try to respect the given ID length despite all the bugs in
	 * the ID length. Read according to shortest possible ID
	 * first. */
	for (current_idlen = 0; current_idlen < numidlens; ++current_idlen) {
		size_t idlen = idlens[current_idlen];
		if (idlen+1 >= count)
			break;

		retval = parport_read (port, buffer+len, idlen-len);

		if (retval < 0)
			return retval;
		len += retval;

		if (port->physport->ieee1284.phase != IEEE1284_PH_HBUSY_DAVAIL) {
			if (belen != len) {
				printk (KERN_DEBUG "%s: Device ID was %zd bytes"
					" while device told it would be %d"
					" bytes\n",
					port->name, len, belen);
			}
			goto done;
		}

		/* This might end reading the Device ID too
		 * soon. Hopefully the needed fields were already in
		 * the first 256 bytes or so that we must have read so
		 * far. */
		if (buffer[len-1] == ';') {
 			printk (KERN_DEBUG "%s: Device ID reading stopped"
				" before device told data not available. "
				"Current idlen %u of %u, len bytes %02X %02X\n",
				port->name, current_idlen, numidlens,
				length[0], length[1]);
			goto done;
		}
	}
	if (current_idlen < numidlens) {
		/* Buffer not large enough, read to end of buffer. */
		size_t idlen, len2;
		if (len+1 < count) {
			retval = parport_read (port, buffer+len, count-len-1);
			if (retval < 0)
				return retval;
			len += retval;
		}
		/* Read the whole ID since some devices would not
		 * otherwise give back the Device ID from beginning
		 * next time when asked. */
		idlen = idlens[current_idlen];
		len2 = len;
		while(len2 < idlen && retval > 0) {
			char tmp[4];
			retval = parport_read (port, tmp,
					       min(sizeof tmp, idlen-len2));
			if (retval < 0)
				return retval;
			len2 += retval;
		}
	}
	/* In addition, there are broken devices out there that don't
	   even finish off with a semi-colon. We do not need to care
	   about those at this time. */
 done:
	buffer[len] = '\0';
	return len;
}

/* Get Std 1284 Device ID. Claims the port, negotiates Device-ID mode,
 * reads the ID, parses it into probe_info, then restores compatibility
 * mode and releases the port. Returns bytes read or a negative errno. */
ssize_t parport_device_id (int devnum, char *buffer, size_t count)
{
	ssize_t retval = -ENXIO;
	struct pardevice *dev = parport_open (devnum, "Device ID probe");
	if (!dev)
		return -ENXIO;

	parport_claim_or_block (dev);

	/* Negotiate to compatibility mode, and then to device ID
	 * mode. (This so that we start form beginning of device ID if
	 * already in device ID mode.) */
	parport_negotiate (dev->port, IEEE1284_MODE_COMPAT);
	retval = parport_negotiate (dev->port,
				    IEEE1284_MODE_NIBBLE | IEEE1284_DEVICEID);

	if (!retval) {
		retval = parport_read_device_id (dev->port, buffer, count);
		parport_negotiate (dev->port, IEEE1284_MODE_COMPAT);
		if (retval > 2)
			parse_data (dev->port, dev->daisy, buffer+2);
	}

	parport_release (dev);
	parport_close (dev);
	return retval;
}
gpl-2.0
jpoimboe/linux
drivers/misc/eeprom/at24.c
177
19593
/*
 * at24.c - handle most I2C EEPROMs
 *
 * Copyright (C) 2005-2007 David Brownell
 * Copyright (C) 2008 Wolfram Sang, Pengutronix
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <linux/mod_devicetable.h>
#include <linux/log2.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/of.h>
#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/platform_data/at24.h>

/*
 * I2C EEPROMs from most vendors are inexpensive and mostly interchangeable.
 * Differences between different vendor product lines (like Atmel AT24C or
 * MicroChip 24LC, etc) won't much matter for typical read/write access.
 * There are also I2C RAM chips, likewise interchangeable. One example
 * would be the PCF8570, which acts like a 24c02 EEPROM (256 bytes).
 *
 * However, misconfiguration can lose data. "Set 16-bit memory address"
 * to a part with 8-bit addressing will overwrite data. Writing with too
 * big a page size also loses data. And it's not safe to assume that the
 * conventional addresses 0x50..0x57 only hold eeproms; a PCF8563 RTC
 * uses 0x51, for just one example.
 *
 * Accordingly, explicit board-specific configuration data should be used
 * in almost all cases. (One partial exception is an SMBus used to access
 * "SPD" data for DRAM sticks. Those only use 24c02 EEPROMs.)
 *
 * So this driver uses "new style" I2C driver binding, expecting to be
 * told what devices exist. That may be in arch/X/mach-Y/board-Z.c or
 * similar kernel-resident tables; or, configuration data coming from
 * a bootloader.
 *
 * Other than binding model, current differences from "eeprom" driver are
 * that this one handles write access and isn't restricted to 24c02 devices.
 * It also handles larger devices (32 kbit and up) with two-byte addresses,
 * which won't work on pure SMBus systems.
 */

/* Per-device driver state, allocated in at24_probe(). */
struct at24_data {
	struct at24_platform_data chip;
	struct memory_accessor macc;
	int use_smbus;		/* SMBus read fallback (I2C_SMBUS_* or 0) */
	int use_smbus_write;	/* SMBus write fallback (I2C_SMBUS_* or 0) */

	/*
	 * Lock protects against activities from other Linux tasks,
	 * but not from changes by other I2C masters.
	 */
	struct mutex lock;
	struct bin_attribute bin;

	u8 *writebuf;		/* scratch: address byte(s) + page data */
	unsigned write_max;	/* largest single write, capped to a page */
	unsigned num_addresses;	/* I2C addresses this chip occupies */

	/*
	 * Some chips tie up multiple I2C addresses; dummy devices reserve
	 * them for us, and we'll use them with SMBus calls.
	 */
	struct i2c_client *client[];
};

/*
 * This parameter is to help this driver avoid blocking other drivers out
 * of I2C for potentially troublesome amounts of time. With a 100 kHz I2C
 * clock, one 256 byte read takes about 1/43 second which is excessive;
 * but the 1/170 second it takes at 400 kHz may be quite reasonable; and
 * at 1 MHz (Fm+) a 1/430 second delay could easily be invisible.
 *
 * This value is forced to be a power of two so that writes align on pages.
 */
static unsigned io_limit = 128;
module_param(io_limit, uint, 0);
MODULE_PARM_DESC(io_limit, "Maximum bytes per I/O (default 128)");

/*
 * Specs often allow 5 msec for a page write, sometimes 20 msec;
 * it's important to recover from write timeouts.
 */
static unsigned write_timeout = 25;
module_param(write_timeout, uint, 0);
MODULE_PARM_DESC(write_timeout, "Time (in ms) to try writes (default 25)");

/* Bit-field layout of the per-chip magic value in the device tables. */
#define AT24_SIZE_BYTELEN 5
#define AT24_SIZE_FLAGS 8

#define AT24_BITMASK(x) (BIT(x) - 1)

/* create non-zero magic value for given eeprom parameters */
#define AT24_DEVICE_MAGIC(_len, _flags) 		\
	((1 << AT24_SIZE_FLAGS | (_flags)) 		\
	    << AT24_SIZE_BYTELEN | ilog2(_len))

static const struct i2c_device_id at24_ids[] = {
	/* needs 8 addresses as A0-A2 are ignored */
	{ "24c00", AT24_DEVICE_MAGIC(128 / 8, AT24_FLAG_TAKE8ADDR) },
	/* old variants can't be handled with this generic entry! */
	{ "24c01", AT24_DEVICE_MAGIC(1024 / 8, 0) },
	{ "24c02", AT24_DEVICE_MAGIC(2048 / 8, 0) },
	/* spd is a 24c02 in memory DIMMs */
	{ "spd", AT24_DEVICE_MAGIC(2048 / 8,
		AT24_FLAG_READONLY | AT24_FLAG_IRUGO) },
	{ "24c04", AT24_DEVICE_MAGIC(4096 / 8, 0) },
	/* 24rf08 quirk is handled at i2c-core */
	{ "24c08", AT24_DEVICE_MAGIC(8192 / 8, 0) },
	{ "24c16", AT24_DEVICE_MAGIC(16384 / 8, 0) },
	{ "24c32", AT24_DEVICE_MAGIC(32768 / 8, AT24_FLAG_ADDR16) },
	{ "24c64", AT24_DEVICE_MAGIC(65536 / 8, AT24_FLAG_ADDR16) },
	{ "24c128", AT24_DEVICE_MAGIC(131072 / 8, AT24_FLAG_ADDR16) },
	{ "24c256", AT24_DEVICE_MAGIC(262144 / 8, AT24_FLAG_ADDR16) },
	{ "24c512", AT24_DEVICE_MAGIC(524288 / 8, AT24_FLAG_ADDR16) },
	{ "24c1024", AT24_DEVICE_MAGIC(1048576 / 8, AT24_FLAG_ADDR16) },
	{ "at24", 0 },
	{ /* END OF LIST */ }
};
MODULE_DEVICE_TABLE(i2c, at24_ids);

static const struct acpi_device_id at24_acpi_ids[] = {
	{ "INT3499", AT24_DEVICE_MAGIC(8192 / 8, 0) },
	{ }
};
MODULE_DEVICE_TABLE(acpi, at24_acpi_ids);

/*-------------------------------------------------------------------------*/

/*
 * This routine supports chips which consume multiple I2C addresses. It
 * computes the addressing information to be used for a given r/w request.
 * Assumes that sanity checks for offset happened at sysfs-layer.
 */
static struct i2c_client *at24_translate_offset(struct at24_data *at24,
		unsigned *offset)
{
	unsigned i;

	if (at24->chip.flags & AT24_FLAG_ADDR16) {
		i = *offset >> 16;
		*offset &= 0xffff;
	} else {
		i = *offset >> 8;
		*offset &= 0xff;
	}

	return at24->client[i];
}

/*
 * Read up to one io_limit-sized chunk starting at offset, retrying until
 * write_timeout expires (a preceding page write makes the chip NAK reads).
 * Returns bytes read or -ETIMEDOUT.
 */
static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
		unsigned offset, size_t count)
{
	struct i2c_msg msg[2];
	u8 msgbuf[2];
	struct i2c_client *client;
	unsigned long timeout, read_time;
	int status, i;

	memset(msg, 0, sizeof(msg));

	/*
	 * REVISIT some multi-address chips don't rollover page reads to
	 * the next slave address, so we may need to truncate the count.
	 * Those chips might need another quirk flag.
	 *
	 * If the real hardware used four adjacent 24c02 chips and that
	 * were misconfigured as one 24c08, that would be a similar effect:
	 * one "eeprom" file not four, but larger reads would fail when
	 * they crossed certain pages.
	 */

	/*
	 * Slave address and byte offset derive from the offset. Always
	 * set the byte address; on a multi-master board, another master
	 * may have changed the chip's "current" address pointer.
	 */
	client = at24_translate_offset(at24, &offset);

	if (count > io_limit)
		count = io_limit;

	if (at24->use_smbus) {
		/* Smaller eeproms can work given some SMBus extension calls */
		if (count > I2C_SMBUS_BLOCK_MAX)
			count = I2C_SMBUS_BLOCK_MAX;
	} else {
		/*
		 * When we have a better choice than SMBus calls, use a
		 * combined I2C message. Write address; then read up to
		 * io_limit data bytes. Note that read page rollover helps us
		 * here (unlike writes). msgbuf is u8 and will cast to our
		 * needs.
		 */
		i = 0;
		if (at24->chip.flags & AT24_FLAG_ADDR16)
			msgbuf[i++] = offset >> 8;
		msgbuf[i++] = offset;

		msg[0].addr = client->addr;
		msg[0].buf = msgbuf;
		msg[0].len = i;

		msg[1].addr = client->addr;
		msg[1].flags = I2C_M_RD;
		msg[1].buf = buf;
		msg[1].len = count;
	}

	/*
	 * Reads fail if the previous write didn't complete yet. We may
	 * loop a few times until this one succeeds, waiting at least
	 * long enough for one entire page write to work.
	 */
	timeout = jiffies + msecs_to_jiffies(write_timeout);
	do {
		read_time = jiffies;
		if (at24->use_smbus) {
			status = i2c_smbus_read_i2c_block_data_or_emulated(client, offset,
									   count, buf);
		} else {
			status = i2c_transfer(client->adapter, msg, 2);
			if (status == 2)
				status = count;
		}
		dev_dbg(&client->dev, "read %zu@%d --> %d (%ld)\n",
				count, offset, status, jiffies);

		if (status == count)
			return count;

		/* REVISIT: at HZ=100, this is sloooow */
		msleep(1);
	} while (time_before(read_time, timeout));

	return -ETIMEDOUT;
}

/*
 * Read "count" bytes at "off", looping over at24_eeprom_read() chunks.
 * Holds the driver mutex for the whole transfer; returns bytes read or
 * the first chunk's error.
 */
static ssize_t at24_read(struct at24_data *at24,
		char *buf, loff_t off, size_t count)
{
	ssize_t retval = 0;

	if (unlikely(!count))
		return count;

	/*
	 * Read data from chip, protecting against concurrent updates
	 * from this host, but not from other I2C masters.
	 */
	mutex_lock(&at24->lock);

	while (count) {
		ssize_t	status;

		status = at24_eeprom_read(at24, buf, off, count);
		if (status <= 0) {
			if (retval == 0)
				retval = status;
			break;
		}
		buf += status;
		off += status;
		count -= status;
		retval += status;
	}

	mutex_unlock(&at24->lock);

	return retval;
}

/* sysfs binary-attribute read hook; recovers at24_data from the device. */
static ssize_t at24_bin_read(struct file *filp, struct kobject *kobj,
		struct bin_attribute *attr,
		char *buf, loff_t off, size_t count)
{
	struct at24_data *at24;

	at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
	return at24_read(at24, buf, off, count);
}


/*
 * Note that if the hardware write-protect pin is pulled high, the whole
 * chip is normally write protected. But there are plenty of product
 * variants here, including OTP fuses and partial chip protect.
 *
 * We only use page mode writes; the alternative is sloooow. This routine
 * writes at most one page.
 */
static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf,
		unsigned offset, size_t count)
{
	struct i2c_client *client;
	struct i2c_msg msg;
	ssize_t status = 0;
	unsigned long timeout, write_time;
	unsigned next_page;

	/* Get corresponding I2C address and adjust offset */
	client = at24_translate_offset(at24, &offset);

	/* write_max is at most a page */
	if (count > at24->write_max)
		count = at24->write_max;

	/* Never roll over backwards, to the start of this page */
	next_page = roundup(offset + 1, at24->chip.page_size);
	if (offset + count > next_page)
		count = next_page - offset;

	/* If we'll use I2C calls for I/O, set up the message */
	if (!at24->use_smbus) {
		int i = 0;

		msg.addr = client->addr;
		msg.flags = 0;

		/* msg.buf is u8 and casts will mask the values */
		msg.buf = at24->writebuf;
		if (at24->chip.flags & AT24_FLAG_ADDR16)
			msg.buf[i++] = offset >> 8;

		msg.buf[i++] = offset;
		memcpy(&msg.buf[i], buf, count);
		msg.len = i + count;
	}

	/*
	 * Writes fail if the previous one didn't complete yet. We may
	 * loop a few times until this one succeeds, waiting at least
	 * long enough for one entire page write to work.
	 */
	timeout = jiffies + msecs_to_jiffies(write_timeout);
	do {
		write_time = jiffies;
		if (at24->use_smbus_write) {
			switch (at24->use_smbus_write) {
			case I2C_SMBUS_I2C_BLOCK_DATA:
				status = i2c_smbus_write_i2c_block_data(client,
						offset, count, buf);
				break;
			case I2C_SMBUS_BYTE_DATA:
				status = i2c_smbus_write_byte_data(client,
						offset, buf[0]);
				break;
			}

			if (status == 0)
				status = count;
		} else {
			status = i2c_transfer(client->adapter, &msg, 1);
			if (status == 1)
				status = count;
		}
		dev_dbg(&client->dev, "write %zu@%d --> %zd (%ld)\n",
				count, offset, status, jiffies);

		if (status == count)
			return count;

		/* REVISIT: at HZ=100, this is sloooow */
		msleep(1);
	} while (time_before(write_time, timeout));

	return -ETIMEDOUT;
}

/*
 * Write "count" bytes at "off", looping over page-sized
 * at24_eeprom_write() chunks under the driver mutex.
 */
static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off,
			  size_t count)
{
	ssize_t retval = 0;

	if (unlikely(!count))
		return count;

	/*
	 * Write data to chip, protecting against concurrent updates
	 * from this host, but not from other I2C masters.
	 */
	mutex_lock(&at24->lock);

	while (count) {
		ssize_t	status;

		status = at24_eeprom_write(at24, buf, off, count);
		if (status <= 0) {
			if (retval == 0)
				retval = status;
			break;
		}
		buf += status;
		off += status;
		count -= status;
		retval += status;
	}

	mutex_unlock(&at24->lock);

	return retval;
}

/* sysfs binary-attribute write hook; recovers at24_data from the device. */
static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
		struct bin_attribute *attr,
		char *buf, loff_t off, size_t count)
{
	struct at24_data *at24;

	at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
	return at24_write(at24, buf, off, count);
}

/*-------------------------------------------------------------------------*/

/*
 * This lets other kernel code access the eeprom data. For example, it
 * might hold a board's Ethernet address, or board-specific calibration
 * data generated on the manufacturing floor.
 */

static ssize_t at24_macc_read(struct memory_accessor *macc, char *buf,
			 off_t offset, size_t count)
{
	struct at24_data *at24 = container_of(macc, struct at24_data, macc);

	return at24_read(at24, buf, offset, count);
}

static ssize_t at24_macc_write(struct memory_accessor *macc, const char *buf,
			  off_t offset, size_t count)
{
	struct at24_data *at24 = container_of(macc, struct at24_data, macc);

	return at24_write(at24, buf, offset, count);
}

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_OF
/* Overlay device-tree properties ("read-only", "pagesize") onto chip. */
static void at24_get_ofdata(struct i2c_client *client,
		struct at24_platform_data *chip)
{
	const __be32 *val;
	struct device_node *node = client->dev.of_node;

	if (node) {
		if (of_get_property(node, "read-only", NULL))
			chip->flags |= AT24_FLAG_READONLY;
		val = of_get_property(node, "pagesize", NULL);
		if (val)
			chip->page_size = be32_to_cpup(val);
	}
}
#else
static void at24_get_ofdata(struct i2c_client *client,
		struct at24_platform_data *chip)
{ }
#endif /* CONFIG_OF */

/*
 * Bind the driver: derive chip parameters from platform data, the I2C
 * device table, or ACPI; pick SMBus fallbacks; allocate state, reserve
 * extra I2C addresses with dummy clients, and export the sysfs file.
 */
static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct at24_platform_data chip;
	kernel_ulong_t magic = 0;
	bool writable;
	int use_smbus = 0;
	int use_smbus_write = 0;
	struct at24_data *at24;
	int err;
	unsigned i, num_addresses;

	if (client->dev.platform_data) {
		chip = *(struct at24_platform_data *)client->dev.platform_data;
	} else {
		if (id) {
			magic = id->driver_data;
		} else {
			const struct acpi_device_id *aid;

			aid = acpi_match_device(at24_acpi_ids, &client->dev);
			if (aid)
				magic = aid->driver_data;
		}
		if (!magic)
			return -ENODEV;

		chip.byte_len = BIT(magic & AT24_BITMASK(AT24_SIZE_BYTELEN));
		magic >>= AT24_SIZE_BYTELEN;
		chip.flags = magic & AT24_BITMASK(AT24_SIZE_FLAGS);
		/*
		 * This is slow, but we can't know all eeproms, so we better
		 * play safe. Specifying custom eeprom-types via platform_data
		 * is recommended anyhow.
		 */
		chip.page_size = 1;

		/* update chipdata if OF is present */
		at24_get_ofdata(client, &chip);

		chip.setup = NULL;
		chip.context = NULL;
	}

	if (!is_power_of_2(chip.byte_len))
		dev_warn(&client->dev,
			"byte_len looks suspicious (no power of 2)!\n");
	if (!chip.page_size) {
		dev_err(&client->dev, "page_size must not be 0!\n");
		return -EINVAL;
	}
	if (!is_power_of_2(chip.page_size))
		dev_warn(&client->dev,
			"page_size looks suspicious (no power of 2)!\n");

	/* Use I2C operations unless we're stuck with SMBus extensions. */
	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		if (chip.flags & AT24_FLAG_ADDR16)
			return -EPFNOSUPPORT;

		if (i2c_check_functionality(client->adapter,
				I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
			use_smbus = I2C_SMBUS_I2C_BLOCK_DATA;
		} else if (i2c_check_functionality(client->adapter,
				I2C_FUNC_SMBUS_READ_WORD_DATA)) {
			use_smbus = I2C_SMBUS_WORD_DATA;
		} else if (i2c_check_functionality(client->adapter,
				I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
			use_smbus = I2C_SMBUS_BYTE_DATA;
		} else {
			return -EPFNOSUPPORT;
		}
	}

	/* Same decision for writes: fall back to SMBus when raw I2C is
	 * unavailable; byte-at-a-time writes force page_size down to 1. */
	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		if (i2c_check_functionality(client->adapter,
				I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)) {
			use_smbus_write = I2C_SMBUS_I2C_BLOCK_DATA;
		} else if (i2c_check_functionality(client->adapter,
				I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) {
			use_smbus_write = I2C_SMBUS_BYTE_DATA;
			chip.page_size = 1;
		}
	}

	if (chip.flags & AT24_FLAG_TAKE8ADDR)
		num_addresses = 8;
	else
		num_addresses =	DIV_ROUND_UP(chip.byte_len,
			(chip.flags & AT24_FLAG_ADDR16) ? 65536 : 256);

	at24 = devm_kzalloc(&client->dev, sizeof(struct at24_data) +
		num_addresses * sizeof(struct i2c_client *), GFP_KERNEL);
	if (!at24)
		return -ENOMEM;

	mutex_init(&at24->lock);
	at24->use_smbus = use_smbus;
	at24->use_smbus_write = use_smbus_write;
	at24->chip = chip;
	at24->num_addresses = num_addresses;

	/*
	 * Export the EEPROM bytes through sysfs, since that's convenient.
	 * By default, only root should see the data (maybe passwords etc)
	 */
	sysfs_bin_attr_init(&at24->bin);
	at24->bin.attr.name = "eeprom";
	at24->bin.attr.mode = chip.flags & AT24_FLAG_IRUGO ? S_IRUGO : S_IRUSR;
	at24->bin.read = at24_bin_read;
	at24->bin.size = chip.byte_len;

	at24->macc.read = at24_macc_read;

	writable = !(chip.flags & AT24_FLAG_READONLY);
	if (writable) {
		if (!use_smbus || use_smbus_write) {

			unsigned write_max = chip.page_size;

			at24->macc.write = at24_macc_write;

			at24->bin.write = at24_bin_write;
			at24->bin.attr.mode |= S_IWUSR;

			if (write_max > io_limit)
				write_max = io_limit;
			if (use_smbus && write_max > I2C_SMBUS_BLOCK_MAX)
				write_max = I2C_SMBUS_BLOCK_MAX;
			at24->write_max = write_max;

			/* buffer (data + address at the beginning) */
			at24->writebuf = devm_kzalloc(&client->dev,
				write_max + 2, GFP_KERNEL);
			if (!at24->writebuf)
				return -ENOMEM;
		} else {
			dev_warn(&client->dev,
				"cannot write due to controller restrictions.");
		}
	}

	at24->client[0] = client;

	/* use dummy devices for multiple-address chips */
	for (i = 1; i < num_addresses; i++) {
		at24->client[i] = i2c_new_dummy(client->adapter,
					client->addr + i);
		if (!at24->client[i]) {
			dev_err(&client->dev, "address 0x%02x unavailable\n",
					client->addr + i);
			err = -EADDRINUSE;
			goto err_clients;
		}
	}

	err = sysfs_create_bin_file(&client->dev.kobj, &at24->bin);
	if (err)
		goto err_clients;

	i2c_set_clientdata(client, at24);

	dev_info(&client->dev, "%zu byte %s EEPROM, %s, %u bytes/write\n",
		at24->bin.size, client->name,
		writable ? "writable" : "read-only", at24->write_max);
	if (use_smbus == I2C_SMBUS_WORD_DATA ||
	    use_smbus == I2C_SMBUS_BYTE_DATA) {
		dev_notice(&client->dev, "Falling back to %s reads, "
			   "performance will suffer\n", use_smbus ==
			   I2C_SMBUS_WORD_DATA ? "word" : "byte");
	}

	/* export data to kernel code */
	if (chip.setup)
		chip.setup(&at24->macc, chip.context);

	return 0;

err_clients:
	/* unwind any dummy clients created above (at24 itself is devm) */
	for (i = 1; i < num_addresses; i++)
		if (at24->client[i])
			i2c_unregister_device(at24->client[i]);

	return err;
}

/* Unbind: remove the sysfs file and release the dummy address clients. */
static int at24_remove(struct i2c_client *client)
{
	struct at24_data *at24;
	int i;

	at24 = i2c_get_clientdata(client);
	sysfs_remove_bin_file(&client->dev.kobj, &at24->bin);

	for (i = 1; i < at24->num_addresses; i++)
		i2c_unregister_device(at24->client[i]);

	return 0;
}

/*-------------------------------------------------------------------------*/

static struct i2c_driver at24_driver = {
	.driver = {
		.name = "at24",
		.acpi_match_table = ACPI_PTR(at24_acpi_ids),
	},
	.probe = at24_probe,
	.remove = at24_remove,
	.id_table = at24_ids,
};

static int __init at24_init(void)
{
	/* io_limit == 0 would make every transfer loop forever on a
	 * zero-byte chunk; reject it and round down to a power of two
	 * so writes stay page-aligned */
	if (!io_limit) {
		pr_err("at24: io_limit must not be 0!\n");
		return -EINVAL;
	}

	io_limit = rounddown_pow_of_two(io_limit);
	return i2c_add_driver(&at24_driver);
}
module_init(at24_init);

static void __exit at24_exit(void)
{
	i2c_del_driver(&at24_driver);
}
module_exit(at24_exit);

MODULE_DESCRIPTION("Driver for most I2C EEPROMs");
MODULE_AUTHOR("David Brownell and Wolfram Sang");
MODULE_LICENSE("GPL");
gpl-2.0
tepelmann/linux-perf-cumulate
drivers/net/ethernet/dec/ewrk3.c
177
52306
/* ewrk3.c: A DIGITAL EtherWORKS 3 ethernet driver for Linux. Written 1994 by David C. Davies. Copyright 1994 Digital Equipment Corporation. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. This driver is written for the Digital Equipment Corporation series of EtherWORKS ethernet cards: DE203 Turbo (BNC) DE204 Turbo (TP) DE205 Turbo (TP BNC) The driver has been tested on a relatively busy network using the DE205 card and benchmarked with 'ttcp': it transferred 16M of data at 975kB/s (7.8Mb/s) to a DECstation 5000/200. The author may be reached at davies@maniac.ultranet.com. ========================================================================= This driver has been written substantially from scratch, although its inheritance of style and stack interface from 'depca.c' and in turn from Donald Becker's 'lance.c' should be obvious. The DE203/4/5 boards all use a new proprietary chip in place of the LANCE chip used in prior cards (DEPCA, DE100, DE200/1/2, DE210, DE422). Use the depca.c driver in the standard distribution for the LANCE based cards from DIGITAL; this driver will not work with them. The DE203/4/5 cards have 2 main modes: shared memory and I/O only. I/O only makes all the card accesses through I/O transactions and no high (shared) memory is used. This mode provides a >48% performance penalty and is deprecated in this driver, although allowed to provide initial setup when hardstrapped. The shared memory mode comes in 3 flavours: 2kB, 32kB and 64kB. There is no point in using any mode other than the 2kB mode - their performances are virtually identical, although the driver has been tested in the 2kB and 32kB modes. I would suggest you uncomment the line: FORCE_2K_MODE; to allow the driver to configure the card as a 2kB card at your current base address, thus leaving more room to clutter your system box with other memory hungry boards. 
As many ISA and EISA cards can be supported under this driver as you wish, limited primarily by the available IRQ lines, rather than by the available I/O addresses (24 ISA, 16 EISA). I have checked different configurations of multiple depca cards and ewrk3 cards and have not found a problem yet (provided you have at least depca.c v0.38) ... The board IRQ setting must be at an unused IRQ which is auto-probed using Donald Becker's autoprobe routines. All these cards are at {5,10,11,15}. No 16MB memory limitation should exist with this driver as DMA is not used and the common memory area is in low memory on the network card (my current system has 20MB and I've not had problems yet). The ability to load this driver as a loadable module has been included and used extensively during the driver development (to save those long reboot sequences). To utilise this ability, you have to do 8 things: 0) have a copy of the loadable modules code installed on your system. 1) copy ewrk3.c from the /linux/drivers/net directory to your favourite temporary directory. 2) edit the source code near line 1898 to reflect the I/O address and IRQ you're using. 3) compile ewrk3.c, but include -DMODULE in the command line to ensure that the correct bits are compiled (see end of source code). 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a kernel with the ewrk3 configuration turned off and reboot. 5) insmod ewrk3.o [Alan Cox: Changed this so you can insmod ewrk3.o irq=x io=y] [Adam Kropelin: now accepts irq=x1,x2 io=y1,y2 for multiple cards] 6) run the net startup bits for your new eth?? interface manually (usually /etc/rc.inet[12] at boot time). 7) enjoy! Note that autoprobing is not allowed in loadable modules - the system is already up and running and you're messing with interrupts. To unload a module, turn off the associated interface 'ifconfig eth?? down' then 'rmmod ewrk3'. 
Promiscuous mode has been turned off in this driver, but all the multicast address bits have been turned on. This improved the send performance on a busy network by about 13%. Ioctl's have now been provided (primarily because I wanted to grab some packet size statistics). They are patterned after 'plipconfig.c' from a suggestion by Alan Cox. Using these ioctls, you can enable promiscuous mode, add/delete multicast addresses, change the hardware address, get packet size distribution statistics and muck around with the control and status register. I'll add others if and when the need arises. TO DO: ------ Revision History ---------------- Version Date Description 0.1 26-aug-94 Initial writing. ALPHA code release. 0.11 31-aug-94 Fixed: 2k mode memory base calc., LeMAC version calc., IRQ vector assignments during autoprobe. 0.12 31-aug-94 Tested working on LeMAC2 (DE20[345]-AC) card. Fixed up MCA hash table algorithm. 0.20 4-sep-94 Added IOCTL functionality. 0.21 14-sep-94 Added I/O mode. 0.21axp 15-sep-94 Special version for ALPHA AXP Linux V1.0. 0.22 16-sep-94 Added more IOCTLs & tidied up. 0.23 21-sep-94 Added transmit cut through. 0.24 31-oct-94 Added uid checks in some ioctls. 0.30 1-nov-94 BETA code release. 0.31 5-dec-94 Added check/allocate region code. 0.32 16-jan-95 Broadcast packet fix. 0.33 10-Feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>. 0.40 27-Dec-95 Rationalise MODULE and autoprobe code. Rewrite for portability & updated. ALPHA support from <jestabro@amt.tay1.dec.com> Added verify_area() calls in ewrk3_ioctl() from suggestion by <heiko@colossus.escape.de>. Add new multicasting code. 0.41 20-Jan-96 Fix IRQ set up problem reported by <kenneth@bbs.sas.ntu.ac.sg>. 
0.42 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi> 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c 0.44 08-Nov-01 use library crc32 functions <Matt_Domsch@dell.com> 0.45 19-Jul-02 fix unaligned access on alpha <martin@bruli.net> 0.46 10-Oct-02 Multiple NIC support when module <akropel1@rochester.rr.com> 0.47 18-Oct-02 ethtool support <akropel1@rochester.rr.com> 0.48 18-Oct-02 cli/sti removal for 2.5 <vda@port.imtp.ilyichevsk.odessa.ua> ioctl locking, signature search cleanup <akropel1@rochester.rr.com> ========================================================================= */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/crc32.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/time.h> #include <linux/types.h> #include <linux/unistd.h> #include <linux/ctype.h> #include <linux/bitops.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/uaccess.h> #include "ewrk3.h" #define DRV_NAME "ewrk3" #define DRV_VERSION "0.48" static char version[] __initdata = DRV_NAME ":v" DRV_VERSION " 2002/10/18 davies@maniac.ultranet.com\n"; #ifdef EWRK3_DEBUG static int ewrk3_debug = EWRK3_DEBUG; #else static int ewrk3_debug = 1; #endif #define EWRK3_NDA 0xffe0 /* No Device Address */ #define PROBE_LENGTH 32 #define ETH_PROM_SIG 0xAA5500FFUL #ifndef EWRK3_SIGNATURE #define EWRK3_SIGNATURE {"DE203","DE204","DE205",""} #define EWRK3_STRLEN 8 #endif #ifndef EWRK3_RAM_BASE_ADDRESSES #define EWRK3_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0x00000} #endif /* ** Sets up the I/O area for the autoprobe. 
*/ #define EWRK3_IO_BASE 0x100 /* Start address for probe search */ #define EWRK3_IOP_INC 0x20 /* I/O address increment */ #define EWRK3_TOTAL_SIZE 0x20 /* required I/O address length */ #ifndef MAX_NUM_EWRK3S #define MAX_NUM_EWRK3S 21 #endif #ifndef EWRK3_EISA_IO_PORTS #define EWRK3_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */ #endif #ifndef MAX_EISA_SLOTS #define MAX_EISA_SLOTS 16 #define EISA_SLOT_INC 0x1000 #endif #define QUEUE_PKT_TIMEOUT (1*HZ) /* Jiffies */ /* ** EtherWORKS 3 shared memory window sizes */ #define IO_ONLY 0x00 #define SHMEM_2K 0x800 #define SHMEM_32K 0x8000 #define SHMEM_64K 0x10000 /* ** EtherWORKS 3 IRQ ENABLE/DISABLE */ #define ENABLE_IRQs { \ icr |= lp->irq_mask;\ outb(icr, EWRK3_ICR); /* Enable the IRQs */\ } #define DISABLE_IRQs { \ icr = inb(EWRK3_ICR);\ icr &= ~lp->irq_mask;\ outb(icr, EWRK3_ICR); /* Disable the IRQs */\ } /* ** EtherWORKS 3 START/STOP */ #define START_EWRK3 { \ csr = inb(EWRK3_CSR);\ csr &= ~(CSR_TXD|CSR_RXD);\ outb(csr, EWRK3_CSR); /* Enable the TX and/or RX */\ } #define STOP_EWRK3 { \ csr = (CSR_TXD|CSR_RXD);\ outb(csr, EWRK3_CSR); /* Disable the TX and/or RX */\ } /* ** The EtherWORKS 3 private structure */ #define EWRK3_PKT_STAT_SZ 16 #define EWRK3_PKT_BIN_SZ 128 /* Should be >=100 unless you increase EWRK3_PKT_STAT_SZ */ struct ewrk3_stats { u32 bins[EWRK3_PKT_STAT_SZ]; u32 unicast; u32 multicast; u32 broadcast; u32 excessive_collisions; u32 tx_underruns; u32 excessive_underruns; }; struct ewrk3_private { char adapter_name[80]; /* Name exported to /proc/ioports */ u_long shmem_base; /* Shared memory start address */ void __iomem *shmem; u_long shmem_length; /* Shared memory window length */ struct ewrk3_stats pktStats; /* Private stats counters */ u_char irq_mask; /* Adapter IRQ mask bits */ u_char mPage; /* Maximum 2kB Page number */ u_char lemac; /* Chip rev. 
level */ u_char hard_strapped; /* Don't allow a full open */ u_char txc; /* Transmit cut through */ void __iomem *mctbl; /* Pointer to the multicast table */ u_char led_mask; /* Used to reserve LED access for ethtool */ spinlock_t hw_lock; }; /* ** Force the EtherWORKS 3 card to be in 2kB MODE */ #define FORCE_2K_MODE { \ shmem_length = SHMEM_2K;\ outb(((mem_start - 0x80000) >> 11), EWRK3_MBR);\ } /* ** Public Functions */ static int ewrk3_open(struct net_device *dev); static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev); static irqreturn_t ewrk3_interrupt(int irq, void *dev_id); static int ewrk3_close(struct net_device *dev); static void set_multicast_list(struct net_device *dev); static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static const struct ethtool_ops ethtool_ops_203; static const struct ethtool_ops ethtool_ops; /* ** Private functions */ static int ewrk3_hw_init(struct net_device *dev, u_long iobase); static void ewrk3_init(struct net_device *dev); static int ewrk3_rx(struct net_device *dev); static int ewrk3_tx(struct net_device *dev); static void ewrk3_timeout(struct net_device *dev); static void EthwrkSignature(char *name, char *eeprom_image); static int DevicePresent(u_long iobase); static void SetMulticastFilter(struct net_device *dev); static int EISA_signature(char *name, s32 eisa_id); static int Read_EEPROM(u_long iobase, u_char eaddr); static int Write_EEPROM(short data, u_long iobase, u_char eaddr); static u_char get_hw_addr(struct net_device *dev, u_char * eeprom_image, char chipType); static int ewrk3_probe1(struct net_device *dev, u_long iobase, int irq); static int isa_probe(struct net_device *dev, u_long iobase); static int eisa_probe(struct net_device *dev, u_long iobase); static u_char irq[MAX_NUM_EWRK3S+1] = {5, 0, 10, 3, 11, 9, 15, 12}; static char name[EWRK3_STRLEN + 1]; static int num_ewrks3s; /* ** Miscellaneous defines... 
*/
/* Pulse the EEPROM-init command and wait for the chip to settle. */
#define INIT_EWRK3 {\
	outb(EEPROM_INIT, EWRK3_IOPR);\
	mdelay(1);\
}

#ifndef MODULE
/*
** Boot-time probe entry point: allocate the net_device and run the
** ISA/EISA probe chain.  Returns the device or an ERR_PTR on failure.
*/
struct net_device * __init ewrk3_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct ewrk3_private));
	int err;

	if (!dev)
		return ERR_PTR(-ENOMEM);

	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
	}

	err = ewrk3_probe1(dev, dev->base_addr, dev->irq);
	if (err)
		goto out;
	return dev;
out:
	free_netdev(dev);
	return ERR_PTR(err);
}
#endif

/*
** Probe for the adapter at 'iobase' (ISA first, then EISA) and register
** the net device.  On register failure the I/O region claimed by the
** successful probe is released again.
*/
static int __init ewrk3_probe1(struct net_device *dev, u_long iobase, int irq)
{
	int err;

	dev->base_addr = iobase;
	dev->irq = irq;

	/* Address PROM pattern */
	err = isa_probe(dev, iobase);
	if (err != 0)
		err = eisa_probe(dev, iobase);

	if (err)
		return err;

	err = register_netdev(dev);
	if (err)
		release_region(dev->base_addr, EWRK3_TOTAL_SIZE);

	return err;
}

static const struct net_device_ops ewrk3_netdev_ops = {
	.ndo_open		= ewrk3_open,
	.ndo_start_xmit		= ewrk3_queue_pkt,
	.ndo_stop		= ewrk3_close,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_do_ioctl		= ewrk3_ioctl,
	.ndo_tx_timeout		= ewrk3_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/*
** Initialise the adapter hardware: verify the EEPROM checksum and board
** signature, read the station address, size the shared-memory window from
** the MBR, set up the private data, and (unless hard strapped) autoprobe
** the IRQ line by triggering a TNE interrupt.
** Returns 0 on success or a negative errno.
*/
static int __init ewrk3_hw_init(struct net_device *dev, u_long iobase)
{
	struct ewrk3_private *lp;
	int i, status = 0;
	u_long mem_start, shmem_length;
	u_char cr, cmr, icr, nicsr, lemac, hard_strapped = 0;
	u_char eeprom_image[EEPROM_MAX], chksum, eisa_cr = 0;

	/*
	** Stop the EWRK3. Enable the DBR ROM. Disable interrupts and remote boot.
	** This also disables the EISA_ENABLE bit in the EISA Control Register.
	*/
	if (iobase > 0x400)
		eisa_cr = inb(EISA_CR);
	INIT_EWRK3;

	nicsr = inb(EWRK3_CSR);

	icr = inb(EWRK3_ICR);
	icr &= 0x70;
	outb(icr, EWRK3_ICR);	/* Disable all the IRQs */

	/* After INIT the CSR must read back as TXD|RXD (both halted). */
	if (nicsr != (CSR_TXD | CSR_RXD))
		return -ENXIO;

	/* Check that the EEPROM is alive and well and not living on Pluto... */
	for (chksum = 0, i = 0; i < EEPROM_MAX; i += 2) {
		union {
			short val;
			char c[2];
		} tmp;

		tmp.val = (short) Read_EEPROM(iobase, (i >> 1));
		eeprom_image[i] = tmp.c[0];
		eeprom_image[i + 1] = tmp.c[1];
		chksum += eeprom_image[i] + eeprom_image[i + 1];
	}

	/* The byte-sum of a valid EEPROM image is zero. */
	if (chksum != 0) {	/* Bad EEPROM Data! */
		printk("%s: Device has a bad on-board EEPROM.\n", dev->name);
		return -ENXIO;
	}

	EthwrkSignature(name, eeprom_image);
	if (*name == '\0')
		return -ENXIO;

	dev->base_addr = iobase;

	if (iobase > 0x400) {
		outb(eisa_cr, EISA_CR);	/* Rewrite the EISA CR */
	}
	lemac = eeprom_image[EEPROM_CHIPVER];
	cmr = inb(EWRK3_CMR);

	if (((lemac == LeMAC) && ((cmr & CMR_NO_EEPROM) != CMR_NO_EEPROM)) ||
	    ((lemac == LeMAC2) && !(cmr & CMR_HS))) {
		printk("%s: %s at %#4lx", dev->name, name, iobase);
		hard_strapped = 1;
	} else if ((iobase & 0x0fff) == EWRK3_EISA_IO_PORTS) {
		/* EISA slot address */
		printk("%s: %s at %#4lx (EISA slot %ld)", dev->name, name, iobase, ((iobase >> 12) & 0x0f));
	} else {	/* ISA port address */
		printk("%s: %s at %#4lx", dev->name, name, iobase);
	}

	printk(", h/w address ");
	if (lemac != LeMAC2)
		DevicePresent(iobase);	/* need after EWRK3_INIT */
	status = get_hw_addr(dev, eeprom_image, lemac);
	printk("%pM\n", dev->dev_addr);

	if (status) {
		printk(" which has an EEPROM CRC error.\n");
		return -ENXIO;
	}

	if (lemac == LeMAC2) {	/* Special LeMAC2 CMR things */
		cmr &= ~(CMR_RA | CMR_WB | CMR_LINK | CMR_POLARITY | CMR_0WS);
		if (eeprom_image[EEPROM_MISC0] & READ_AHEAD)
			cmr |= CMR_RA;
		if (eeprom_image[EEPROM_MISC0] & WRITE_BEHIND)
			cmr |= CMR_WB;
		if (eeprom_image[EEPROM_NETMAN0] & NETMAN_POL)
			cmr |= CMR_POLARITY;
		if (eeprom_image[EEPROM_NETMAN0] & NETMAN_LINK)
			cmr |= CMR_LINK;
		if (eeprom_image[EEPROM_MISC0] & _0WS_ENA)
			cmr |= CMR_0WS;
	}
	if (eeprom_image[EEPROM_SETUP] & SETUP_DRAM)
		cmr |= CMR_DRAM;
	outb(cmr, EWRK3_CMR);

	cr = inb(EWRK3_CR);	/* Set up the Control Register */
	cr |= eeprom_image[EEPROM_SETUP] & SETUP_APD;
	if (cr & SETUP_APD)
		cr |= eeprom_image[EEPROM_SETUP] & SETUP_PS;
	cr |= eeprom_image[EEPROM_MISC0] & FAST_BUS;
	cr |= eeprom_image[EEPROM_MISC0] & ENA_16;
	outb(cr, EWRK3_CR);

	/*
	** Determine the base address and window length for the EWRK3
	** RAM from the memory base register.
	*/
	mem_start = inb(EWRK3_MBR);
	shmem_length = 0;
	if (mem_start != 0) {
		/* The MBR value encodes both window base and size (64K/32K/2K). */
		if ((mem_start >= 0x0a) && (mem_start <= 0x0f)) {
			mem_start *= SHMEM_64K;
			shmem_length = SHMEM_64K;
		} else if ((mem_start >= 0x14) && (mem_start <= 0x1f)) {
			mem_start *= SHMEM_32K;
			shmem_length = SHMEM_32K;
		} else if ((mem_start >= 0x40) && (mem_start <= 0xff)) {
			mem_start = mem_start * SHMEM_2K + 0x80000;
			shmem_length = SHMEM_2K;
		} else {
			return -ENXIO;
		}
	}
	/*
	** See the top of this source code for comments about
	** uncommenting this line.
	*/
/*      FORCE_2K_MODE; */

	if (hard_strapped) {
		printk(" is hard strapped.\n");
	} else if (mem_start) {
		printk(" has a %dk RAM window", (int) (shmem_length >> 10));
		printk(" at 0x%.5lx", mem_start);
	} else {
		printk(" is in I/O only mode");
	}

	lp = netdev_priv(dev);
	lp->shmem_base = mem_start;
	lp->shmem = ioremap(mem_start, shmem_length);
	if (!lp->shmem)
		return -ENOMEM;
	lp->shmem_length = shmem_length;
	lp->lemac = lemac;
	lp->hard_strapped = hard_strapped;
	lp->led_mask = CR_LED;
	spin_lock_init(&lp->hw_lock);

	lp->mPage = 64;
	if (cmr & CMR_DRAM)
		lp->mPage <<= 1;	/* 2 DRAMS on module */

	sprintf(lp->adapter_name, "%s (%s)", name, dev->name);

	lp->irq_mask = ICR_TNEM | ICR_TXDM | ICR_RNEM | ICR_RXDM;

	if (!hard_strapped) {
		/*
		** Enable EWRK3 board interrupts for autoprobing
		*/
		icr |= ICR_IE;	/* Enable interrupts */
		outb(icr, EWRK3_ICR);

		/* The DMA channel may be passed in on this parameter. */
		dev->dma = 0;

		/* To auto-IRQ we enable the initialization-done and DMA err,
		   interrupts. For now we will always get a DMA error. */
		if (dev->irq < 2) {
#ifndef MODULE
			u_char irqnum;
			unsigned long irq_mask;

			irq_mask = probe_irq_on();

			/*
			** Trigger a TNE interrupt.
			*/
			icr |= ICR_TNEM;
			outb(1, EWRK3_TDQ);	/* Write to the TX done queue */
			outb(icr, EWRK3_ICR);	/* Unmask the TXD interrupt */

			/* Expected IRQ per the board's IRQ_SEL strap bits. */
			irqnum = irq[((icr & IRQ_SEL) >> 4)];

			mdelay(20);
			dev->irq = probe_irq_off(irq_mask);
			if ((dev->irq) && (irqnum == dev->irq)) {
				printk(" and uses IRQ%d.\n", dev->irq);
			} else {
				if (!dev->irq) {
					printk(" and failed to detect IRQ line.\n");
				} else if ((irqnum == 1) && (lemac == LeMAC2)) {
					printk(" and an illegal IRQ line detected.\n");
				} else {
					printk(", but incorrect IRQ line detected.\n");
				}
				iounmap(lp->shmem);
				return -ENXIO;
			}

			DISABLE_IRQs;	/* Mask all interrupts */

#endif				/* MODULE */
		} else {
			printk(" and requires IRQ%d.\n", dev->irq);
		}
	}

	if (ewrk3_debug > 1) {
		printk(version);
	}
	/* The EWRK3-specific entries in the device structure. */
	dev->netdev_ops = &ewrk3_netdev_ops;
	if (lp->adapter_name[4] == '3')
		SET_ETHTOOL_OPS(dev, &ethtool_ops_203);
	else
		SET_ETHTOOL_OPS(dev, &ethtool_ops);
	dev->watchdog_timeo = QUEUE_PKT_TIMEOUT;

	dev->mem_start = 0;

	return 0;
}

/*
** Open the device: request the IRQ (unless hard strapped, which is
** rejected), reinitialise the adapter, start the queue and unmask the
** board interrupts.
*/
static int ewrk3_open(struct net_device *dev)
{
	struct ewrk3_private *lp = netdev_priv(dev);
	u_long iobase = dev->base_addr;
	int status = 0;
	u_char icr, csr;

	/*
	** Stop the TX and RX...
	*/
	STOP_EWRK3;

	if (!lp->hard_strapped) {
		if (request_irq(dev->irq, (void *) ewrk3_interrupt, 0, "ewrk3", dev)) {
			printk("ewrk3_open(): Requested IRQ%d is busy\n", dev->irq);
			status = -EAGAIN;
		} else {

			/*
			** Re-initialize the EWRK3...
			*/
			ewrk3_init(dev);

			if (ewrk3_debug > 1) {
				printk("%s: ewrk3 open with irq %d\n", dev->name, dev->irq);
				printk(" physical address: %pM\n", dev->dev_addr);
				if (lp->shmem_length == 0) {
					printk(" no shared memory, I/O only mode\n");
				} else {
					printk(" start of shared memory: 0x%08lx\n", lp->shmem_base);
					printk(" window length: 0x%04lx\n", lp->shmem_length);
				}
				printk(" # of DRAMS: %d\n", ((inb(EWRK3_CMR) & 0x02) ? 2 : 1));
				printk(" csr: 0x%02x\n", inb(EWRK3_CSR));
				printk(" cr: 0x%02x\n", inb(EWRK3_CR));
				printk(" icr: 0x%02x\n", inb(EWRK3_ICR));
				printk(" cmr: 0x%02x\n", inb(EWRK3_CMR));
				printk(" fmqc: 0x%02x\n", inb(EWRK3_FMQC));
			}
			netif_start_queue(dev);
			/*
			** Unmask EWRK3 board interrupts
			*/
			icr = inb(EWRK3_ICR);
			ENABLE_IRQs;

		}
	} else {
		printk(KERN_ERR "%s: ewrk3 available for hard strapped set up only.\n", dev->name);
		printk(KERN_ERR " Run the 'ewrk3setup' utility or remove the hard straps.\n");
		return -EINVAL;
	}

	return status;
}

/*
** Initialize the EtherWORKS 3 operating conditions
*/
static void ewrk3_init(struct net_device *dev)
{
	struct ewrk3_private *lp = netdev_priv(dev);
	u_char csr, page;
	u_long iobase = dev->base_addr;
	int i;

	/*
	** Enable any multicasts
	*/
	set_multicast_list(dev);

	/*
	** Set hardware MAC address. Address is initialized from the EEPROM
	** during startup but may have since been changed by the user.
	*/
	for (i=0; i<ETH_ALEN; i++)
		outb(dev->dev_addr[i], EWRK3_PAR0 + i);

	/*
	** Clean out any remaining entries in all the queues here
	*/
	while (inb(EWRK3_TQ));
	while (inb(EWRK3_TDQ));
	while (inb(EWRK3_RQ));
	while (inb(EWRK3_FMQ));

	/*
	** Write a clean free memory queue
	*/
	for (page = 1; page < lp->mPage; page++) {	/* Write the free page numbers */
		outb(page, EWRK3_FMQ);	/* to the Free Memory Queue */
	}

	START_EWRK3;		/* Enable the TX and/or RX */
}

/*
 * Transmit timeout
 */

static void ewrk3_timeout(struct net_device *dev)
{
	struct ewrk3_private *lp = netdev_priv(dev);
	u_char icr, csr;
	u_long iobase = dev->base_addr;

	if (!lp->hard_strapped) {
		printk(KERN_WARNING"%s: transmit timed/locked out, status %04x, resetting.\n",
		       dev->name, inb(EWRK3_CSR));

		/*
		** Mask all board interrupts
		*/
		DISABLE_IRQs;

		/*
		** Stop the TX and RX...
		*/
		STOP_EWRK3;

		ewrk3_init(dev);

		/*
		** Unmask EWRK3 board interrupts
		*/
		ENABLE_IRQs;

		dev->trans_start = jiffies; /* prevent tx timeout */
		netif_wake_queue(dev);
	}
}

/*
** Writes a socket buffer to the free page queue
*/
static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev)
{
	struct ewrk3_private *lp = netdev_priv(dev);
	u_long iobase = dev->base_addr;
	void __iomem *buf = NULL;
	u_char icr;
	u_char page;

	spin_lock_irq (&lp->hw_lock);
	DISABLE_IRQs;

	/* if no resources available, exit, request packet be queued */
	if (inb (EWRK3_FMQC) == 0) {
		printk (KERN_WARNING "%s: ewrk3_queue_pkt(): No free resources...\n",
			dev->name);
		printk (KERN_WARNING "%s: ewrk3_queue_pkt(): CSR: %02x ICR: %02x FMQC: %02x\n",
			dev->name, inb (EWRK3_CSR), inb (EWRK3_ICR),
			inb (EWRK3_FMQC));
		goto err_out;
	}

	/*
	** Get a free page from the FMQ
	*/
	if ((page = inb (EWRK3_FMQ)) >= lp->mPage) {
		printk ("ewrk3_queue_pkt(): Invalid free memory page (%d).\n",
			(u_char) page);
		goto err_out;
	}

	/*
	** Set up shared memory window and pointer into the window
	*/
	if (lp->shmem_length == IO_ONLY) {
		outb (page, EWRK3_IOPR);
	} else if (lp->shmem_length == SHMEM_2K) {
		buf = lp->shmem;
		outb (page, EWRK3_MPR);
	} else if (lp->shmem_length == SHMEM_32K) {
		buf = (((short) page << 11) & 0x7800) + lp->shmem;
		outb ((page >> 4), EWRK3_MPR);
	} else if (lp->shmem_length == SHMEM_64K) {
		buf = (((short) page << 11) & 0xf800) + lp->shmem;
		outb ((page >> 5), EWRK3_MPR);
	} else {
		printk (KERN_ERR "%s: Oops - your private data area is hosed!\n",
			dev->name);
		BUG ();
	}

	/*
	** Set up the buffer control structures and copy the data from
	** the socket buffer to the shared memory .
	*/
	if (lp->shmem_length == IO_ONLY) {
		int i;
		u_char *p = skb->data;
		outb ((char) (TCR_QMODE | TCR_PAD | TCR_IFC), EWRK3_DATA);
		outb ((char) (skb->len & 0xff), EWRK3_DATA);
		outb ((char) ((skb->len >> 8) & 0xff), EWRK3_DATA);
		outb ((char) 0x04, EWRK3_DATA);
		for (i = 0; i < skb->len; i++) {
			outb (*p++, EWRK3_DATA);
		}
		outb (page, EWRK3_TQ);	/* Start sending pkt */
	} else {
		writeb ((char) (TCR_QMODE | TCR_PAD | TCR_IFC), buf);	/* ctrl byte */
		buf += 1;
		writeb ((char) (skb->len & 0xff), buf);	/* length (16 bit xfer) */
		buf += 1;
		if (lp->txc) {
			/* Transmit cut-through: start the TX after PRELOAD
			   bytes, then stream the remainder behind it. */
			writeb(((skb->len >> 8) & 0xff) | XCT, buf);
			buf += 1;
			writeb (0x04, buf);	/* index byte */
			buf += 1;
			writeb (0x00, (buf + skb->len));	/* Write the XCT flag */
			memcpy_toio (buf, skb->data, PRELOAD);	/* Write PRELOAD bytes */
			outb (page, EWRK3_TQ);	/* Start sending pkt */
			memcpy_toio (buf + PRELOAD, skb->data + PRELOAD,
				     skb->len - PRELOAD);
			writeb (0xff, (buf + skb->len));	/* Write the XCT flag */
		} else {
			writeb ((skb->len >> 8) & 0xff, buf);
			buf += 1;
			writeb (0x04, buf);	/* index byte */
			buf += 1;
			memcpy_toio (buf, skb->data, skb->len);	/* Write data bytes */
			outb (page, EWRK3_TQ);	/* Start sending pkt */
		}
	}

	ENABLE_IRQs;
	spin_unlock_irq (&lp->hw_lock);

	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb (skb);

	/* Check for free resources: stop Tx queue if there are none */
	if (inb (EWRK3_FMQC) == 0)
		netif_stop_queue (dev);

	return NETDEV_TX_OK;

err_out:
	ENABLE_IRQs;
	spin_unlock_irq (&lp->hw_lock);
	return NETDEV_TX_BUSY;
}

/*
** The EWRK3 interrupt handler.
*/
/*
** Interrupt handler: under hw_lock, mask board IRQs, light the LED,
** dispatch Rx/Tx completion work, then rebalance the TXD/RXD interrupt
** masks depending on whether free memory pages remain.
*/
static irqreturn_t ewrk3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ewrk3_private *lp;
	u_long iobase;
	u_char icr, cr, csr;

	lp = netdev_priv(dev);
	iobase = dev->base_addr;

	/* get the interrupt information */
	csr = inb(EWRK3_CSR);

	/*
	** Mask the EWRK3 board interrupts and turn on the LED
	*/
	spin_lock(&lp->hw_lock);
	DISABLE_IRQs;

	cr = inb(EWRK3_CR);
	cr |= lp->led_mask;
	outb(cr, EWRK3_CR);

	if (csr & CSR_RNE)	/* Rx interrupt (packet[s] arrived) */
		ewrk3_rx(dev);

	if (csr & CSR_TNE)	/* Tx interrupt (packet sent) */
		ewrk3_tx(dev);

	/*
	** Now deal with the TX/RX disable flags. These are set when there
	** are no more resources. If resources free up then enable these
	** interrupts, otherwise mask them - failure to do this will result
	** in the system hanging in an interrupt loop.
	*/
	if (inb(EWRK3_FMQC)) {	/* any resources available? */
		lp->irq_mask |= ICR_TXDM | ICR_RXDM;	/* enable the interrupt source */
		csr &= ~(CSR_TXD | CSR_RXD);	/* ensure restart of a stalled TX or RX */
		outb(csr, EWRK3_CSR);
		netif_wake_queue(dev);
	} else {
		lp->irq_mask &= ~(ICR_TXDM | ICR_RXDM);	/* disable the interrupt source */
	}

	/* Unmask the EWRK3 board interrupts and turn off the LED */
	cr &= ~(lp->led_mask);
	outb(cr, EWRK3_CR);

	ENABLE_IRQs;
	spin_unlock(&lp->hw_lock);
	return IRQ_HANDLED;
}

/*
** Drain the receive queue: for each queued page, map it into view
** (I/O or shared-memory mode), validate status, copy the frame into a
** fresh skb, update the private size/type statistics, and hand the
** page back to the free memory queue.
** Called with lp->hw_lock held.
*/
static int ewrk3_rx(struct net_device *dev)
{
	struct ewrk3_private *lp = netdev_priv(dev);
	u_long iobase = dev->base_addr;
	int i, status = 0;
	u_char page;
	void __iomem *buf = NULL;

	while (inb(EWRK3_RQC) && !status) {	/* Whilst there's incoming data */
		if ((page = inb(EWRK3_RQ)) < lp->mPage) {	/* Get next entry's buffer page */
			/*
			** Set up shared memory window and pointer into the window
			*/
			if (lp->shmem_length == IO_ONLY) {
				outb(page, EWRK3_IOPR);
			} else if (lp->shmem_length == SHMEM_2K) {
				buf = lp->shmem;
				outb(page, EWRK3_MPR);
			} else if (lp->shmem_length == SHMEM_32K) {
				buf = (((short) page << 11) & 0x7800) + lp->shmem;
				outb((page >> 4), EWRK3_MPR);
			} else if (lp->shmem_length == SHMEM_64K) {
				buf = (((short) page << 11) & 0xf800) + lp->shmem;
				outb((page >> 5), EWRK3_MPR);
			} else {
				status = -1;
				printk("%s: Oops - your private data area is hosed!\n", dev->name);
			}

			if (!status) {
				char rx_status;
				int pkt_len;

				if (lp->shmem_length == IO_ONLY) {
					rx_status = inb(EWRK3_DATA);
					pkt_len = inb(EWRK3_DATA);
					pkt_len |= ((u_short) inb(EWRK3_DATA) << 8);
				} else {
					rx_status = readb(buf);
					buf += 1;
					pkt_len = readw(buf);
					buf += 3;
				}

				if (!(rx_status & R_ROK)) {	/* There was an error. */
					dev->stats.rx_errors++;	/* Update the error stats. */
					if (rx_status & R_DBE)
						dev->stats.rx_frame_errors++;
					if (rx_status & R_CRC)
						dev->stats.rx_crc_errors++;
					if (rx_status & R_PLL)
						dev->stats.rx_fifo_errors++;
				} else {
					struct sk_buff *skb;
					skb = netdev_alloc_skb(dev, pkt_len + 2);

					if (skb != NULL) {
						unsigned char *p;
						skb_reserve(skb, 2);	/* Align to 16 bytes */
						p = skb_put(skb, pkt_len);

						if (lp->shmem_length == IO_ONLY) {
							*p = inb(EWRK3_DATA);	/* dummy read */
							for (i = 0; i < pkt_len; i++) {
								*p++ = inb(EWRK3_DATA);
							}
						} else {
							memcpy_fromio(p, buf, pkt_len);
						}

						/* Bucket the packet length into the size histogram. */
						for (i = 1; i < EWRK3_PKT_STAT_SZ - 1; i++) {
							if (pkt_len < i * EWRK3_PKT_BIN_SZ) {
								lp->pktStats.bins[i]++;
								i = EWRK3_PKT_STAT_SZ;
							}
						}
						p = skb->data;	/* Look at the dest addr */
						if (is_multicast_ether_addr(p)) {
							if (is_broadcast_ether_addr(p)) {
								lp->pktStats.broadcast++;
							} else {
								lp->pktStats.multicast++;
							}
						} else if (ether_addr_equal(p, dev->dev_addr)) {
							lp->pktStats.unicast++;
						}
						lp->pktStats.bins[0]++;	/* Duplicates stats.rx_packets */
						if (lp->pktStats.bins[0] == 0) {	/* Reset counters */
							memset(&lp->pktStats, 0, sizeof(lp->pktStats));
						}

						/*
						** Notify the upper protocol layers that there is another
						** packet to handle
						*/
						skb->protocol = eth_type_trans(skb, dev);
						netif_rx(skb);

						/*
						** Update stats
						*/
						dev->stats.rx_packets++;
						dev->stats.rx_bytes += pkt_len;
					} else {
						printk("%s: Insufficient memory; nuking packet.\n", dev->name);
						dev->stats.rx_dropped++;	/* Really, deferred. */
						break;
					}
				}
			}
			/*
			** Return the received buffer to the free memory queue
			*/
			outb(page, EWRK3_FMQ);
		} else {
			printk("ewrk3_rx(): Illegal page number, page %d\n", page);
			printk("ewrk3_rx(): CSR: %02x ICR: %02x FMQC: %02x\n", inb(EWRK3_CSR), inb(EWRK3_ICR), inb(EWRK3_FMQC));
		}
	}
	return status;
}

/*
** Buffer sent - check for TX buffer errors.
** Called with lp->hw_lock held
*/
static int ewrk3_tx(struct net_device *dev)
{
	struct ewrk3_private *lp = netdev_priv(dev);
	u_long iobase = dev->base_addr;
	u_char tx_status;

	while ((tx_status = inb(EWRK3_TDQ)) > 0) {	/* Whilst there's old buffers */
		if (tx_status & T_VSTS) {	/* The status is valid */
			if (tx_status & T_TXE) {
				dev->stats.tx_errors++;
				if (tx_status & T_NCL)
					dev->stats.tx_carrier_errors++;
				if (tx_status & T_LCL)
					dev->stats.tx_window_errors++;
				if (tx_status & T_CTU) {
					if ((tx_status & T_COLL) ^ T_XUR) {
						lp->pktStats.tx_underruns++;
					} else {
						lp->pktStats.excessive_underruns++;
					}
				} else if (tx_status & T_COLL) {
					if ((tx_status & T_COLL) ^ T_XCOLL) {
						dev->stats.collisions++;
					} else {
						lp->pktStats.excessive_collisions++;
					}
				}
			} else {
				dev->stats.tx_packets++;
			}
		}
	}

	return 0;
}

/*
** Stop the device: halt TX/RX, drain the hardware queues and, unless
** hard strapped (no IRQ was requested at open), free the IRQ.
*/
static int ewrk3_close(struct net_device *dev)
{
	struct ewrk3_private *lp = netdev_priv(dev);
	u_long iobase = dev->base_addr;
	u_char icr, csr;

	netif_stop_queue(dev);

	if (ewrk3_debug > 1) {
		printk("%s: Shutting down ethercard, status was %2.2x.\n",
		       dev->name, inb(EWRK3_CSR));
	}
	/*
	** We stop the EWRK3 here... mask interrupts and stop TX & RX
	*/
	DISABLE_IRQs;

	STOP_EWRK3;

	/*
	** Clean out the TX and RX queues here (note that one entry
	** may get added to either the TXD or RX queues if the TX or RX
	** just starts processing a packet before the STOP_EWRK3 command
	** is received. This will be flushed in the ewrk3_open() call).
	*/
	while (inb(EWRK3_TQ));
	while (inb(EWRK3_TDQ));
	while (inb(EWRK3_RQ));

	if (!lp->hard_strapped) {
		free_irq(dev->irq, dev);
	}
	return 0;
}

/*
** Set or clear the multicast filter for this adapter.
*/
/*
** ndo_set_rx_mode hook: select promiscuous mode (CSR_PME) or rebuild the
** multicast hash filter (CSR_MCE) and point lp->mctbl at the on-card hash
** table when a shared-memory window is available.
*/
static void set_multicast_list(struct net_device *dev)
{
	struct ewrk3_private *lp = netdev_priv(dev);
	u_long iobase = dev->base_addr;
	u_char csr;

	csr = inb(EWRK3_CSR);

	if (lp->shmem_length == IO_ONLY) {
		lp->mctbl = NULL;
	} else {
		lp->mctbl = lp->shmem + PAGE0_HTE;
	}

	csr &= ~(CSR_PME | CSR_MCE);
	if (dev->flags & IFF_PROMISC) {	/* set promiscuous mode */
		csr |= CSR_PME;
		outb(csr, EWRK3_CSR);
	} else {
		SetMulticastFilter(dev);
		csr |= CSR_MCE;
		outb(csr, EWRK3_CSR);
	}
}

/*
** Calculate the hash code and update the logical address filter
** from a list of ethernet multicast addresses.
** Little endian crc one liner from Matt Thomas, DEC.
**
** Note that when clearing the table, the broadcast bit must remain asserted
** to receive broadcast messages.
*/
static void SetMulticastFilter(struct net_device *dev)
{
	struct ewrk3_private *lp = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u_long iobase = dev->base_addr;
	int i;
	char bit, byte;
	short __iomem *p = lp->mctbl;
	u16 hashcode;
	u32 crc;

	spin_lock_irq(&lp->hw_lock);

	/* Position the data pointer at the hash table (port mode needs an
	   explicit page/address select; shmem mode selects page 0). */
	if (lp->shmem_length == IO_ONLY) {
		outb(0, EWRK3_IOPR);
		outw(PAGE0_HTE, EWRK3_PIR1);
	} else {
		outb(0, EWRK3_MPR);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicasts: saturate the whole table. */
		for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) {
			if (lp->shmem_length == IO_ONLY) {
				outb(0xff, EWRK3_DATA);
			} else {	/* memset didn't work here */
				writew(0xffff, p);
				p++;
				i++;
			}
		}
	} else {
		/* Clear table except for broadcast bit */
		if (lp->shmem_length == IO_ONLY) {
			for (i = 0; i < (HASH_TABLE_LEN >> 4) - 1; i++) {
				outb(0x00, EWRK3_DATA);
			}
			outb(0x80, EWRK3_DATA);
			i++;	/* insert the broadcast bit */
			for (; i < (HASH_TABLE_LEN >> 3); i++) {
				outb(0x00, EWRK3_DATA);
			}
		} else {
			memset_io(lp->mctbl, 0, HASH_TABLE_LEN >> 3);
			writeb(0x80, lp->mctbl + (HASH_TABLE_LEN >> 4) - 1);
		}

		/* Update table */
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			hashcode = crc & ((1 << 9) - 1);	/* hashcode is 9 LSb of CRC */

			byte = hashcode >> 3;	/* bit[3-8] -> byte in filter */
			bit = 1 << (hashcode & 0x07);	/* bit[0-2] -> bit in byte */

			if (lp->shmem_length == IO_ONLY) {
				u_char tmp;

				/* Read-modify-write one filter byte through the
				   data port; the address must be reloaded
				   before the write-back. */
				outw(PAGE0_HTE + byte, EWRK3_PIR1);
				tmp = inb(EWRK3_DATA);
				tmp |= bit;
				outw(PAGE0_HTE + byte, EWRK3_PIR1);
				outb(tmp, EWRK3_DATA);
			} else {
				writeb(readb(lp->mctbl + byte) | bit, lp->mctbl + byte);
			}
		}
	}

	spin_unlock_irq(&lp->hw_lock);
}

/*
** ISA bus I/O device probe
*/
static int __init isa_probe(struct net_device *dev, u_long ioaddr)
{
	int i = num_ewrks3s, maxSlots;
	int ret = -ENODEV;

	u_long iobase;

	if (ioaddr >= 0x400)
		goto out;	/* ISA addresses are below 0x400 */

	if (ioaddr == 0) {	/* Autoprobing */
		iobase = EWRK3_IO_BASE;	/* Get the first slot address */
		maxSlots = 24;
	} else {		/* Probe a specific location */
		iobase = ioaddr;
		maxSlots = i + 1;
	}

	for (; (i < maxSlots) && (dev != NULL);
	     iobase += EWRK3_IOP_INC, i++) {
		if (request_region(iobase, EWRK3_TOTAL_SIZE, DRV_NAME)) {
			if (DevicePresent(iobase) == 0) {
				int irq = dev->irq;
				ret = ewrk3_hw_init(dev, iobase);
				if (!ret)
					break;	/* success: keep the I/O region */
				dev->irq = irq;
			}
			release_region(iobase, EWRK3_TOTAL_SIZE);
		}
	}
out:
	return ret;
}

/*
** EISA bus I/O device probe. Probe from slot 1 since slot 0 is usually
** the motherboard.
*/ static int __init eisa_probe(struct net_device *dev, u_long ioaddr) { int i, maxSlots; u_long iobase; int ret = -ENODEV; if (ioaddr < 0x1000) goto out; iobase = ioaddr; i = (ioaddr >> 12); maxSlots = i + 1; for (i = 1; (i < maxSlots) && (dev != NULL); i++, iobase += EISA_SLOT_INC) { if (EISA_signature(name, EISA_ID) == 0) { if (request_region(iobase, EWRK3_TOTAL_SIZE, DRV_NAME) && DevicePresent(iobase) == 0) { int irq = dev->irq; ret = ewrk3_hw_init(dev, iobase); if (!ret) break; dev->irq = irq; } release_region(iobase, EWRK3_TOTAL_SIZE); } } out: return ret; } /* ** Read the EWRK3 EEPROM using this routine */ static int Read_EEPROM(u_long iobase, u_char eaddr) { int i; outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */ outb(EEPROM_RD, EWRK3_IOPR); /* issue read command */ for (i = 0; i < 5000; i++) inb(EWRK3_CSR); /* wait 1msec */ return inw(EWRK3_EPROM1); /* 16 bits data return */ } /* ** Write the EWRK3 EEPROM using this routine */ static int Write_EEPROM(short data, u_long iobase, u_char eaddr) { int i; outb(EEPROM_WR_EN, EWRK3_IOPR); /* issue write enable command */ for (i = 0; i < 5000; i++) inb(EWRK3_CSR); /* wait 1msec */ outw(data, EWRK3_EPROM1); /* write data to register */ outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */ outb(EEPROM_WR, EWRK3_IOPR); /* issue write command */ for (i = 0; i < 75000; i++) inb(EWRK3_CSR); /* wait 15msec */ outb(EEPROM_WR_DIS, EWRK3_IOPR); /* issue write disable command */ for (i = 0; i < 5000; i++) inb(EWRK3_CSR); /* wait 1msec */ return 0; } /* ** Look for a particular board name in the on-board EEPROM. 
*/ static void __init EthwrkSignature(char *name, char *eeprom_image) { int i; char *signatures[] = EWRK3_SIGNATURE; for (i=0; *signatures[i] != '\0'; i++) if( !strncmp(eeprom_image+EEPROM_PNAME7, signatures[i], strlen(signatures[i])) ) break; if (*signatures[i] != '\0') { memcpy(name, eeprom_image+EEPROM_PNAME7, EWRK3_STRLEN); name[EWRK3_STRLEN] = '\0'; } else name[0] = '\0'; } /* ** Look for a special sequence in the Ethernet station address PROM that ** is common across all EWRK3 products. ** ** Search the Ethernet address ROM for the signature. Since the ROM address ** counter can start at an arbitrary point, the search must include the entire ** probe sequence length plus the (length_of_the_signature - 1). ** Stop the search IMMEDIATELY after the signature is found so that the ** PROM address counter is correctly positioned at the start of the ** ethernet address for later read out. */ static int __init DevicePresent(u_long iobase) { union { struct { u32 a; u32 b; } llsig; char Sig[sizeof(u32) << 1]; } dev; short sigLength; char data; int i, j, status = 0; dev.llsig.a = ETH_PROM_SIG; dev.llsig.b = ETH_PROM_SIG; sigLength = sizeof(u32) << 1; for (i = 0, j = 0; j < sigLength && i < PROBE_LENGTH + sigLength - 1; i++) { data = inb(EWRK3_APROM); if (dev.Sig[j] == data) { /* track signature */ j++; } else { /* lost signature; begin search again */ if (data == dev.Sig[0]) { j = 1; } else { j = 0; } } } if (j != sigLength) { status = -ENODEV; /* search failed */ } return status; } static u_char __init get_hw_addr(struct net_device *dev, u_char * eeprom_image, char chipType) { int i, j, k; u_short chksum; u_char crc, lfsr, sd, status = 0; u_long iobase = dev->base_addr; u16 tmp; if (chipType == LeMAC2) { for (crc = 0x6a, j = 0; j < ETH_ALEN; j++) { sd = dev->dev_addr[j] = eeprom_image[EEPROM_PADDR0 + j]; outb(dev->dev_addr[j], EWRK3_PAR0 + j); for (k = 0; k < 8; k++, sd >>= 1) { lfsr = ((((crc & 0x02) >> 1) ^ (crc & 0x01)) ^ (sd & 0x01)) << 7; crc = (crc >> 1) + lfsr; 
} } if (crc != eeprom_image[EEPROM_PA_CRC]) status = -1; } else { for (i = 0, k = 0; i < ETH_ALEN;) { k <<= 1; if (k > 0xffff) k -= 0xffff; k += (u_char) (tmp = inb(EWRK3_APROM)); dev->dev_addr[i] = (u_char) tmp; outb(dev->dev_addr[i], EWRK3_PAR0 + i); i++; k += (u_short) ((tmp = inb(EWRK3_APROM)) << 8); dev->dev_addr[i] = (u_char) tmp; outb(dev->dev_addr[i], EWRK3_PAR0 + i); i++; if (k > 0xffff) k -= 0xffff; } if (k == 0xffff) k = 0; chksum = inb(EWRK3_APROM); chksum |= (inb(EWRK3_APROM) << 8); if (k != chksum) status = -1; } return status; } /* ** Look for a particular board name in the EISA configuration space */ static int __init EISA_signature(char *name, s32 eisa_id) { u_long i; char *signatures[] = EWRK3_SIGNATURE; char ManCode[EWRK3_STRLEN]; union { s32 ID; char Id[4]; } Eisa; int status = 0; *name = '\0'; for (i = 0; i < 4; i++) { Eisa.Id[i] = inb(eisa_id + i); } ManCode[0] = (((Eisa.Id[0] >> 2) & 0x1f) + 0x40); ManCode[1] = (((Eisa.Id[1] & 0xe0) >> 5) + ((Eisa.Id[0] & 0x03) << 3) + 0x40); ManCode[2] = (((Eisa.Id[2] >> 4) & 0x0f) + 0x30); ManCode[3] = ((Eisa.Id[2] & 0x0f) + 0x30); ManCode[4] = (((Eisa.Id[3] >> 4) & 0x0f) + 0x30); ManCode[5] = '\0'; for (i = 0; (*signatures[i] != '\0') && (*name == '\0'); i++) { if (strstr(ManCode, signatures[i]) != NULL) { strcpy(name, ManCode); status = 1; } } return status; /* return the device name string */ } static void ewrk3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { int fwrev = Read_EEPROM(dev->base_addr, EEPROM_REVLVL); strcpy(info->driver, DRV_NAME); strcpy(info->version, DRV_VERSION); sprintf(info->fw_version, "%d", fwrev); strcpy(info->bus_info, "N/A"); info->eedump_len = EEPROM_MAX; } static int ewrk3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { struct ewrk3_private *lp = netdev_priv(dev); unsigned long iobase = dev->base_addr; u8 cr = inb(EWRK3_CR); switch (lp->adapter_name[4]) { case '3': /* DE203 */ ecmd->supported = SUPPORTED_BNC; ecmd->port = PORT_BNC; break; 
case '4': /* DE204 */ ecmd->supported = SUPPORTED_TP; ecmd->port = PORT_TP; break; case '5': /* DE205 */ ecmd->supported = SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_AUI; ecmd->autoneg = !(cr & CR_APD); /* ** Port is only valid if autoneg is disabled ** and even then we don't know if AUI is jumpered. */ if (!ecmd->autoneg) ecmd->port = (cr & CR_PSEL) ? PORT_BNC : PORT_TP; break; } ecmd->supported |= SUPPORTED_10baseT_Half; ethtool_cmd_speed_set(ecmd, SPEED_10); ecmd->duplex = DUPLEX_HALF; return 0; } static int ewrk3_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { struct ewrk3_private *lp = netdev_priv(dev); unsigned long iobase = dev->base_addr; unsigned long flags; u8 cr; /* DE205 is the only card with anything to set */ if (lp->adapter_name[4] != '5') return -EOPNOTSUPP; /* Sanity-check parameters */ if (ecmd->speed != SPEED_10) return -EINVAL; if (ecmd->port != PORT_TP && ecmd->port != PORT_BNC) return -EINVAL; /* AUI is not software-selectable */ if (ecmd->transceiver != XCVR_INTERNAL) return -EINVAL; if (ecmd->duplex != DUPLEX_HALF) return -EINVAL; if (ecmd->phy_address != 0) return -EINVAL; spin_lock_irqsave(&lp->hw_lock, flags); cr = inb(EWRK3_CR); /* If Autoneg is set, change to Auto Port mode */ /* Otherwise, disable Auto Port and set port explicitly */ if (ecmd->autoneg) { cr &= ~CR_APD; } else { cr |= CR_APD; if (ecmd->port == PORT_TP) cr &= ~CR_PSEL; /* Force TP */ else cr |= CR_PSEL; /* Force BNC */ } /* Commit the changes */ outb(cr, EWRK3_CR); spin_unlock_irqrestore(&lp->hw_lock, flags); return 0; } static u32 ewrk3_get_link(struct net_device *dev) { unsigned long iobase = dev->base_addr; u8 cmr = inb(EWRK3_CMR); /* DE203 has BNC only and link status does not apply */ /* On DE204 this is always valid since TP is the only port. */ /* On DE205 this reflects TP status even if BNC or AUI is selected. 
*/ return !(cmr & CMR_LINK); } static int ewrk3_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) { struct ewrk3_private *lp = netdev_priv(dev); unsigned long iobase = dev->base_addr; u8 cr; spin_lock_irq(&lp->hw_lock); switch (state) { case ETHTOOL_ID_ACTIVE: /* Prevent ISR from twiddling the LED */ lp->led_mask = 0; spin_unlock_irq(&lp->hw_lock); return 2; /* cycle on/off twice per second */ case ETHTOOL_ID_ON: cr = inb(EWRK3_CR); outb(cr | CR_LED, EWRK3_CR); break; case ETHTOOL_ID_OFF: cr = inb(EWRK3_CR); outb(cr & ~CR_LED, EWRK3_CR); break; case ETHTOOL_ID_INACTIVE: lp->led_mask = CR_LED; cr = inb(EWRK3_CR); outb(cr & ~CR_LED, EWRK3_CR); } spin_unlock_irq(&lp->hw_lock); return 0; } static const struct ethtool_ops ethtool_ops_203 = { .get_drvinfo = ewrk3_get_drvinfo, .get_settings = ewrk3_get_settings, .set_settings = ewrk3_set_settings, .set_phys_id = ewrk3_set_phys_id, }; static const struct ethtool_ops ethtool_ops = { .get_drvinfo = ewrk3_get_drvinfo, .get_settings = ewrk3_get_settings, .set_settings = ewrk3_set_settings, .get_link = ewrk3_get_link, .set_phys_id = ewrk3_set_phys_id, }; /* ** Perform IOCTL call functions here. Some are privileged operations and the ** effective uid is checked in those cases. 
*/ static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct ewrk3_private *lp = netdev_priv(dev); struct ewrk3_ioctl *ioc = (struct ewrk3_ioctl *) &rq->ifr_ifru; u_long iobase = dev->base_addr; int i, j, status = 0; u_char csr; unsigned long flags; union ewrk3_addr { u_char addr[HASH_TABLE_LEN * ETH_ALEN]; u_short val[(HASH_TABLE_LEN * ETH_ALEN) >> 1]; }; union ewrk3_addr *tmp; /* All we handle are private IOCTLs */ if (cmd != EWRK3IOCTL) return -EOPNOTSUPP; tmp = kmalloc(sizeof(union ewrk3_addr), GFP_KERNEL); if(tmp==NULL) return -ENOMEM; switch (ioc->cmd) { case EWRK3_GET_HWADDR: /* Get the hardware address */ for (i = 0; i < ETH_ALEN; i++) { tmp->addr[i] = dev->dev_addr[i]; } ioc->len = ETH_ALEN; if (copy_to_user(ioc->data, tmp->addr, ioc->len)) status = -EFAULT; break; case EWRK3_SET_HWADDR: /* Set the hardware address */ if (capable(CAP_NET_ADMIN)) { spin_lock_irqsave(&lp->hw_lock, flags); csr = inb(EWRK3_CSR); csr |= (CSR_TXD | CSR_RXD); outb(csr, EWRK3_CSR); /* Disable the TX and RX */ spin_unlock_irqrestore(&lp->hw_lock, flags); if (copy_from_user(tmp->addr, ioc->data, ETH_ALEN)) { status = -EFAULT; break; } spin_lock_irqsave(&lp->hw_lock, flags); for (i = 0; i < ETH_ALEN; i++) { dev->dev_addr[i] = tmp->addr[i]; outb(tmp->addr[i], EWRK3_PAR0 + i); } csr = inb(EWRK3_CSR); csr &= ~(CSR_TXD | CSR_RXD); /* Enable the TX and RX */ outb(csr, EWRK3_CSR); spin_unlock_irqrestore(&lp->hw_lock, flags); } else { status = -EPERM; } break; case EWRK3_SET_PROM: /* Set Promiscuous Mode */ if (capable(CAP_NET_ADMIN)) { spin_lock_irqsave(&lp->hw_lock, flags); csr = inb(EWRK3_CSR); csr |= CSR_PME; csr &= ~CSR_MCE; outb(csr, EWRK3_CSR); spin_unlock_irqrestore(&lp->hw_lock, flags); } else { status = -EPERM; } break; case EWRK3_CLR_PROM: /* Clear Promiscuous Mode */ if (capable(CAP_NET_ADMIN)) { spin_lock_irqsave(&lp->hw_lock, flags); csr = inb(EWRK3_CSR); csr &= ~CSR_PME; outb(csr, EWRK3_CSR); spin_unlock_irqrestore(&lp->hw_lock, flags); } else { 
status = -EPERM; } break; case EWRK3_GET_MCA: /* Get the multicast address table */ spin_lock_irqsave(&lp->hw_lock, flags); if (lp->shmem_length == IO_ONLY) { outb(0, EWRK3_IOPR); outw(PAGE0_HTE, EWRK3_PIR1); for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) { tmp->addr[i] = inb(EWRK3_DATA); } } else { outb(0, EWRK3_MPR); memcpy_fromio(tmp->addr, lp->shmem + PAGE0_HTE, (HASH_TABLE_LEN >> 3)); } spin_unlock_irqrestore(&lp->hw_lock, flags); ioc->len = (HASH_TABLE_LEN >> 3); if (copy_to_user(ioc->data, tmp->addr, ioc->len)) status = -EFAULT; break; case EWRK3_SET_MCA: /* Set a multicast address */ if (capable(CAP_NET_ADMIN)) { if (ioc->len > HASH_TABLE_LEN) { status = -EINVAL; break; } if (copy_from_user(tmp->addr, ioc->data, ETH_ALEN * ioc->len)) { status = -EFAULT; break; } set_multicast_list(dev); } else { status = -EPERM; } break; case EWRK3_CLR_MCA: /* Clear all multicast addresses */ if (capable(CAP_NET_ADMIN)) { set_multicast_list(dev); } else { status = -EPERM; } break; case EWRK3_MCA_EN: /* Enable multicast addressing */ if (capable(CAP_NET_ADMIN)) { spin_lock_irqsave(&lp->hw_lock, flags); csr = inb(EWRK3_CSR); csr |= CSR_MCE; csr &= ~CSR_PME; outb(csr, EWRK3_CSR); spin_unlock_irqrestore(&lp->hw_lock, flags); } else { status = -EPERM; } break; case EWRK3_GET_STATS: { /* Get the driver statistics */ struct ewrk3_stats *tmp_stats = kmalloc(sizeof(lp->pktStats), GFP_KERNEL); if (!tmp_stats) { status = -ENOMEM; break; } spin_lock_irqsave(&lp->hw_lock, flags); memcpy(tmp_stats, &lp->pktStats, sizeof(lp->pktStats)); spin_unlock_irqrestore(&lp->hw_lock, flags); ioc->len = sizeof(lp->pktStats); if (copy_to_user(ioc->data, tmp_stats, sizeof(lp->pktStats))) status = -EFAULT; kfree(tmp_stats); break; } case EWRK3_CLR_STATS: /* Zero out the driver statistics */ if (capable(CAP_NET_ADMIN)) { spin_lock_irqsave(&lp->hw_lock, flags); memset(&lp->pktStats, 0, sizeof(lp->pktStats)); spin_unlock_irqrestore(&lp->hw_lock,flags); } else { status = -EPERM; } break; case EWRK3_GET_CSR: /* 
Get the CSR Register contents */ tmp->addr[0] = inb(EWRK3_CSR); ioc->len = 1; if (copy_to_user(ioc->data, tmp->addr, ioc->len)) status = -EFAULT; break; case EWRK3_SET_CSR: /* Set the CSR Register contents */ if (capable(CAP_NET_ADMIN)) { if (copy_from_user(tmp->addr, ioc->data, 1)) { status = -EFAULT; break; } outb(tmp->addr[0], EWRK3_CSR); } else { status = -EPERM; } break; case EWRK3_GET_EEPROM: /* Get the EEPROM contents */ if (capable(CAP_NET_ADMIN)) { for (i = 0; i < (EEPROM_MAX >> 1); i++) { tmp->val[i] = (short) Read_EEPROM(iobase, i); } i = EEPROM_MAX; tmp->addr[i++] = inb(EWRK3_CMR); /* Config/Management Reg. */ for (j = 0; j < ETH_ALEN; j++) { tmp->addr[i++] = inb(EWRK3_PAR0 + j); } ioc->len = EEPROM_MAX + 1 + ETH_ALEN; if (copy_to_user(ioc->data, tmp->addr, ioc->len)) status = -EFAULT; } else { status = -EPERM; } break; case EWRK3_SET_EEPROM: /* Set the EEPROM contents */ if (capable(CAP_NET_ADMIN)) { if (copy_from_user(tmp->addr, ioc->data, EEPROM_MAX)) { status = -EFAULT; break; } for (i = 0; i < (EEPROM_MAX >> 1); i++) { Write_EEPROM(tmp->val[i], iobase, i); } } else { status = -EPERM; } break; case EWRK3_GET_CMR: /* Get the CMR Register contents */ tmp->addr[0] = inb(EWRK3_CMR); ioc->len = 1; if (copy_to_user(ioc->data, tmp->addr, ioc->len)) status = -EFAULT; break; case EWRK3_SET_TX_CUT_THRU: /* Set TX cut through mode */ if (capable(CAP_NET_ADMIN)) { lp->txc = 1; } else { status = -EPERM; } break; case EWRK3_CLR_TX_CUT_THRU: /* Clear TX cut through mode */ if (capable(CAP_NET_ADMIN)) { lp->txc = 0; } else { status = -EPERM; } break; default: status = -EOPNOTSUPP; } kfree(tmp); return status; } #ifdef MODULE static struct net_device *ewrk3_devs[MAX_NUM_EWRK3S]; static int ndevs; static int io[MAX_NUM_EWRK3S+1] = { 0x300, 0, }; /* '21' below should really be 'MAX_NUM_EWRK3S' */ module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); MODULE_PARM_DESC(io, "EtherWORKS 3 I/O base address(es)"); MODULE_PARM_DESC(irq, "EtherWORKS 3 
IRQ number(s)"); static __exit void ewrk3_exit_module(void) { int i; for( i=0; i<ndevs; i++ ) { struct net_device *dev = ewrk3_devs[i]; struct ewrk3_private *lp = netdev_priv(dev); ewrk3_devs[i] = NULL; unregister_netdev(dev); release_region(dev->base_addr, EWRK3_TOTAL_SIZE); iounmap(lp->shmem); free_netdev(dev); } } static __init int ewrk3_init_module(void) { int i=0; while( io[i] && irq[i] ) { struct net_device *dev = alloc_etherdev(sizeof(struct ewrk3_private)); if (!dev) break; if (ewrk3_probe1(dev, io[i], irq[i]) != 0) { free_netdev(dev); break; } ewrk3_devs[ndevs++] = dev; i++; } return ndevs ? 0 : -EIO; } /* Hack for breakage in new module stuff */ module_exit(ewrk3_exit_module); module_init(ewrk3_init_module); #endif /* MODULE */ MODULE_LICENSE("GPL");
gpl-2.0
alexpotter1/DeltaKernel_msm8974_hammerhead
arch/arm/mach-msm/msm_dsps.c
1969
15821
/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * msm_dsps - control DSPS clocks, gpios and vregs. * */ #include <linux/types.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> #include <linux/cdev.h> #include <linux/fs.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/gpio.h> #include <linux/string.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/msm_dsps.h> #include <mach/irqs.h> #include <mach/msm_iomap.h> #include <mach/msm_smsm.h> #include <mach/msm_dsps.h> #include <mach/subsystem_restart.h> #include <mach/ramdump.h> #include "timer.h" #define DRV_NAME "msm_dsps" #define DRV_VERSION "4.03" #define PPSS_TIMER0_32KHZ_REG 0x1004 #define PPSS_TIMER0_20MHZ_REG 0x0804 /** * Driver Context * * @dev_class - device class. * @dev_num - device major & minor number. * @dev - the device. * @cdev - character device for user interface. * @pdata - platform data. * @pil - handle to DSPS Firmware loader. * @dspsfw_ramdump_dev - handle to ramdump device for DSPS * @dspsfw_ramdump_segments - Ramdump segment information for DSPS * @smem_ramdump_dev - handle to ramdump device for smem * @smem_ramdump_segments - Ramdump segment information for smem * @is_on - DSPS is on. * @ref_count - open/close reference count. * @ppss_base - ppss registers virtual base address. 
 */
struct dsps_drv {

	struct class *dev_class;	/* sysfs class for the char device */
	dev_t dev_num;			/* allocated char-dev major/minor */
	struct device *dev;
	struct cdev *cdev;		/* char device exposing the ioctl API */

	struct msm_dsps_platform_data *pdata;

	void *pil;			/* handle returned by subsystem_get("dsps") */

	int is_on;			/* clocks/gpios/regulators currently enabled */
	int ref_count;			/* number of open file handles */

	void __iomem *ppss_base;	/* mapped PPSS register window */
};

/**
 * Driver context (singleton; allocated in probe, freed in remove).
 */
static struct dsps_drv *drv;

/**
 * Load DSPS Firmware via the PIL subsystem loader.
 *
 * Return: 0 on success, -ENODEV if the loader fails.
 */
static int dsps_load(void)
{
	pr_debug("%s.\n", __func__);

	drv->pil = subsystem_get("dsps");
	if (IS_ERR(drv->pil)) {
		pr_err("%s: fail to load DSPS firmware.\n", __func__);
		return -ENODEV;
	}
	/* NOTE(review): fixed settle delay after firmware load — presumably
	 * required by the DSPS boot sequence; confirm the 20 ms figure. */
	msleep(20);
	return 0;
}

/**
 * Unload DSPS Firmware (releases the PIL handle taken in dsps_load()).
 */
static void dsps_unload(void)
{
	pr_debug("%s.\n", __func__);

	subsystem_put(drv->pil);
}

/**
 * Suspend DSPS CPU by writing 1 to the PPSS pause register.
 *
 * Only call if dsps_pwr_ctl_en is false.
 * If dsps_pwr_ctl_en is true, then DSPS will control its own power state.
 */
static void dsps_suspend(void)
{
	pr_debug("%s.\n", __func__);

	writel_relaxed(1, drv->ppss_base + drv->pdata->ppss_pause_reg);
	mb(); /* Make sure write committed before ioctl returns. */
}

/**
 * Resume DSPS CPU by clearing the PPSS pause register.
 *
 * Only call if dsps_pwr_ctl_en is false.
 * If dsps_pwr_ctl_en is true, then DSPS will control its own power state.
 */
static void dsps_resume(void)
{
	pr_debug("%s.\n", __func__);

	writel_relaxed(0, drv->ppss_base + drv->pdata->ppss_pause_reg);
	mb(); /* Make sure write committed before ioctl returns. */
}

/**
 * Read DSPS slow timer.
 *
 * Return: current tick count of the MSM slow clock.
 */
static u32 dsps_read_slow_timer(void)
{
	u32 val;

	/* Read the timer value from the MSM sclk. The MSM slow clock & DSPS
	 * timers are in sync, so these are the same value */
	val = msm_timer_get_sclk_ticks();

	pr_debug("%s.count=%d.\n", __func__, val);

	return val;
}

/**
 * Read DSPS fast timer (20 MHz counter in the PPSS register space).
 */
static u32 dsps_read_fast_timer(void)
{
	u32 val;

	val = readl_relaxed(drv->ppss_base + PPSS_TIMER0_20MHZ_REG);
	rmb(); /* order reads from the user output buffer */

	pr_debug("%s.count=%d.\n", __func__, val);

	return val;
}

/**
 * Power on request.
 *
 * Set clocks to ON.
 * Set sensors chip-select GPIO to non-reset (on) value.
* */ static int dsps_power_on_handler(void) { int ret = 0; int i, ci, gi, ri; pr_debug("%s.\n", __func__); if (drv->is_on) { pr_debug("%s: already ON.\n", __func__); return 0; } for (ci = 0; ci < drv->pdata->clks_num; ci++) { const char *name = drv->pdata->clks[ci].name; u32 rate = drv->pdata->clks[ci].rate; struct clk *clock = drv->pdata->clks[ci].clock; if (clock == NULL) continue; if (rate > 0) { ret = clk_set_rate(clock, rate); pr_debug("%s: clk %s set rate %d.", __func__, name, rate); if (ret) { pr_err("%s: clk %s set rate %d. err=%d.", __func__, name, rate, ret); goto clk_err; } } ret = clk_prepare_enable(clock); if (ret) { pr_err("%s: enable clk %s err %d.", __func__, name, ret); goto clk_err; } } for (gi = 0; gi < drv->pdata->gpios_num; gi++) { const char *name = drv->pdata->gpios[gi].name; int num = drv->pdata->gpios[gi].num; int val = drv->pdata->gpios[gi].on_val; int is_owner = drv->pdata->gpios[gi].is_owner; if (!is_owner) continue; ret = gpio_direction_output(num, val); if (ret) { pr_err("%s: set GPIO %s num %d to %d err %d.", __func__, name, num, val, ret); goto gpio_err; } } for (ri = 0; ri < drv->pdata->regs_num; ri++) { const char *name = drv->pdata->regs[ri].name; struct regulator *reg = drv->pdata->regs[ri].reg; int volt = drv->pdata->regs[ri].volt; if (reg == NULL) continue; pr_debug("%s: set regulator %s.", __func__, name); ret = regulator_set_voltage(reg, volt, volt); if (ret) { pr_err("%s: set regulator %s voltage %d err = %d.\n", __func__, name, volt, ret); goto reg_err; } ret = regulator_enable(reg); if (ret) { pr_err("%s: enable regulator %s err = %d.\n", __func__, name, ret); goto reg_err; } } drv->is_on = true; return 0; /* * If failling to set ANY clock/gpio/regulator to ON then we set * them back to OFF to avoid consuming power for unused * clocks/gpios/regulators. 
*/ reg_err: for (i = 0; i < ri; i++) { struct regulator *reg = drv->pdata->regs[ri].reg; if (reg == NULL) continue; regulator_disable(reg); } gpio_err: for (i = 0; i < gi; i++) { int num = drv->pdata->gpios[i].num; int val = drv->pdata->gpios[i].off_val; int is_owner = drv->pdata->gpios[i].is_owner; if (!is_owner) continue; ret = gpio_direction_output(num, val); } clk_err: for (i = 0; i < ci; i++) { struct clk *clock = drv->pdata->clks[i].clock; if (clock == NULL) continue; clk_disable_unprepare(clock); } return -ENODEV; } /** * Power off request. * * Set clocks to OFF. * Set sensors chip-select GPIO to reset (off) value. * */ static int dsps_power_off_handler(void) { int ret; int i; pr_debug("%s.\n", __func__); if (!drv->is_on) { pr_debug("%s: already OFF.\n", __func__); return 0; } for (i = 0; i < drv->pdata->clks_num; i++) if (drv->pdata->clks[i].clock) { const char *name = drv->pdata->clks[i].name; pr_debug("%s: set clk %s off.", __func__, name); clk_disable_unprepare(drv->pdata->clks[i].clock); } for (i = 0; i < drv->pdata->regs_num; i++) if (drv->pdata->regs[i].reg) { const char *name = drv->pdata->regs[i].name; pr_debug("%s: set regulator %s off.", __func__, name); regulator_disable(drv->pdata->regs[i].reg); } /* Clocks on/off has reference count but GPIOs don't. */ drv->is_on = false; for (i = 0; i < drv->pdata->gpios_num; i++) { const char *name = drv->pdata->gpios[i].name; int num = drv->pdata->gpios[i].num; int val = drv->pdata->gpios[i].off_val; pr_debug("%s: set gpio %s off.", __func__, name); ret = gpio_direction_output(num, val); if (ret) { pr_err("%s: set GPIO %s err %d.", __func__, name, ret); return ret; } } return 0; } /** * IO Control - handle commands from client. 
 *
 */
static long dsps_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	u32 val = 0;

	pr_debug("%s.\n", __func__);

	switch (cmd) {
	case DSPS_IOCTL_ON:
		/* Host-driven power control is a no-op when the DSPS
		 * manages its own power (dsps_pwr_ctl_en set). */
		if (!drv->pdata->dsps_pwr_ctl_en) {
			ret = dsps_power_on_handler();
			dsps_resume();
		}
		break;
	case DSPS_IOCTL_OFF:
		if (!drv->pdata->dsps_pwr_ctl_en) {
			dsps_suspend();
			ret = dsps_power_off_handler();
		}
		break;
	case DSPS_IOCTL_READ_SLOW_TIMER:
		val = dsps_read_slow_timer();
		ret = put_user(val, (u32 __user *) arg);
		break;
	case DSPS_IOCTL_READ_FAST_TIMER:
		val = dsps_read_fast_timer();
		ret = put_user(val, (u32 __user *) arg);
		break;
	case DSPS_IOCTL_RESET:
		pr_err("%s: User-initiated DSPS reset.\nResetting DSPS\n",
		       __func__);
		subsystem_restart("dsps");
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/**
 * allocate resources (clocks, GPIOs, regulators, register mapping).
 * On any failure everything acquired so far is released via the
 * goto-unwind chain below.
 * @pdev - pointer to platform device.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int dsps_alloc_resources(struct platform_device *pdev)
{
	int ret = -ENODEV;
	struct resource *ppss_res;
	int i;

	pr_debug("%s.\n", __func__);

	if ((drv->pdata->signature != DSPS_SIGNATURE)) {
		pr_err("%s: invalid signature for pdata.", __func__);
		return -EINVAL;
	}

	ppss_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"ppss_reg");
	if (!ppss_res) {
		pr_err("%s: failed to get ppss_reg resource.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < drv->pdata->clks_num; i++) {
		const char *name = drv->pdata->clks[i].name;
		struct clk *clock;
		drv->pdata->clks[i].clock = NULL;
		pr_debug("%s: get clk %s.", __func__, name);
		clock = clk_get(drv->dev, name);
		if (IS_ERR(clock)) {
			pr_err("%s: can't get clk %s.", __func__, name);
			goto clk_err;
		}
		drv->pdata->clks[i].clock = clock;
	}

	for (i = 0; i < drv->pdata->gpios_num; i++) {
		const char *name = drv->pdata->gpios[i].name;
		int num = drv->pdata->gpios[i].num;
		/* is_owner marks which GPIOs we actually hold, so the
		 * unwind paths only free what was requested here. */
		drv->pdata->gpios[i].is_owner = false;
		pr_debug("%s: get gpio %s.", __func__, name);
		ret = gpio_request(num, name);
		if (ret) {
			pr_err("%s: request GPIO %s err %d.",
			       __func__, name, ret);
			goto gpio_err;
		}
		drv->pdata->gpios[i].is_owner = true;
	}

	for (i = 0; i < drv->pdata->regs_num; i++) {
		const char *name = drv->pdata->regs[i].name;
		drv->pdata->regs[i].reg = NULL;
		pr_debug("%s: get regulator %s.", __func__, name);
		drv->pdata->regs[i].reg = regulator_get(drv->dev, name);
		if (IS_ERR(drv->pdata->regs[i].reg)) {
			pr_err("%s: get regulator %s failed.",
			       __func__, name);
			goto reg_err;
		}
	}

	/* NOTE(review): ioremap() result is not checked for NULL; a
	 * failure here would make dsps_suspend()/dsps_resume() write
	 * through a bad pointer — confirm/fix upstream. */
	drv->ppss_base = ioremap(ppss_res->start,
				 resource_size(ppss_res));

	if (drv->pdata->init)
		drv->pdata->init(drv->pdata);

	return 0;

reg_err:
	for (i = 0; i < drv->pdata->regs_num; i++) {
		if (drv->pdata->regs[i].reg) {
			regulator_put(drv->pdata->regs[i].reg);
			drv->pdata->regs[i].reg = NULL;
		}
	}
gpio_err:
	for (i = 0; i < drv->pdata->gpios_num; i++)
		if (drv->pdata->gpios[i].is_owner) {
			gpio_free(drv->pdata->gpios[i].num);
			drv->pdata->gpios[i].is_owner = false;
		}
clk_err:
	for (i = 0; i < drv->pdata->clks_num; i++)
		if (drv->pdata->clks[i].clock) {
			clk_put(drv->pdata->clks[i].clock);
			drv->pdata->clks[i].clock = NULL;
		}
	return ret;
}

/**
 * Open File.
 *
 * First opener powers the subsystem on, loads the firmware and
 * un-pauses the DSPS CPU; subsequent opens only bump ref_count.
 */
static int dsps_open(struct inode *ip, struct file *fp)
{
	int ret = 0;

	pr_debug("%s.\n", __func__);

	if (drv->ref_count == 0) {

		/* clocks must be ON before loading.*/
		ret = dsps_power_on_handler();
		if (ret)
			return ret;

		ret = dsps_load();

		if (ret) {
			dsps_power_off_handler();
			return ret;
		}

		if (!drv->pdata->dsps_pwr_ctl_en)
			dsps_resume();
	}
	drv->ref_count++;

	return ret;
}

/**
 * free resources.
 *
 * Releases everything taken in dsps_alloc_resources(), in the same
 * per-category order (clocks, GPIOs, regulators, register mapping).
 */
static void dsps_free_resources(void)
{
	int i;

	pr_debug("%s.\n", __func__);

	for (i = 0; i < drv->pdata->clks_num; i++)
		if (drv->pdata->clks[i].clock) {
			clk_put(drv->pdata->clks[i].clock);
			drv->pdata->clks[i].clock = NULL;
		}

	for (i = 0; i < drv->pdata->gpios_num; i++)
		if (drv->pdata->gpios[i].is_owner) {
			gpio_free(drv->pdata->gpios[i].num);
			drv->pdata->gpios[i].is_owner = false;
		}

	for (i = 0; i < drv->pdata->regs_num; i++) {
		if (drv->pdata->regs[i].reg) {
			regulator_put(drv->pdata->regs[i].reg);
			drv->pdata->regs[i].reg = NULL;
		}
	}

	iounmap(drv->ppss_base);
}

/**
 * Close File.
* * The client shall close and re-open the file for re-loading the DSPS * firmware. * The file system will close the file if the user space app has crashed. * * If the DSPS is running, then we must reset DSPS CPU & HW before * setting the clocks off. * The DSPS reset should be done as part of the subsystem_put(). * The DSPS reset should be used for error recovery if the DSPS firmware * has crashed and re-loading the firmware is required. */ static int dsps_release(struct inode *inode, struct file *file) { pr_debug("%s.\n", __func__); drv->ref_count--; if (drv->ref_count == 0) { if (!drv->pdata->dsps_pwr_ctl_en) { dsps_suspend(); dsps_unload(); dsps_power_off_handler(); } } return 0; } const struct file_operations dsps_fops = { .owner = THIS_MODULE, .open = dsps_open, .release = dsps_release, .unlocked_ioctl = dsps_ioctl, }; /** * platform driver * */ static int __devinit dsps_probe(struct platform_device *pdev) { int ret; pr_debug("%s.\n", __func__); if (pdev->dev.platform_data == NULL) { pr_err("%s: platform data is NULL.\n", __func__); return -ENODEV; } drv = kzalloc(sizeof(*drv), GFP_KERNEL); if (drv == NULL) { pr_err("%s: kzalloc fail.\n", __func__); goto alloc_err; } drv->pdata = pdev->dev.platform_data; drv->dev_class = class_create(THIS_MODULE, DRV_NAME); if (drv->dev_class == NULL) { pr_err("%s: class_create fail.\n", __func__); goto res_err; } ret = alloc_chrdev_region(&drv->dev_num, 0, 1, DRV_NAME); if (ret) { pr_err("%s: alloc_chrdev_region fail.\n", __func__); goto alloc_chrdev_region_err; } drv->dev = device_create(drv->dev_class, NULL, drv->dev_num, drv, DRV_NAME); if (IS_ERR(drv->dev)) { pr_err("%s: device_create fail.\n", __func__); goto device_create_err; } drv->cdev = cdev_alloc(); if (drv->cdev == NULL) { pr_err("%s: cdev_alloc fail.\n", __func__); goto cdev_alloc_err; } cdev_init(drv->cdev, &dsps_fops); drv->cdev->owner = THIS_MODULE; ret = cdev_add(drv->cdev, drv->dev_num, 1); if (ret) { pr_err("%s: cdev_add fail.\n", __func__); goto 
cdev_add_err; } ret = dsps_alloc_resources(pdev); if (ret) { pr_err("%s: failed to allocate dsps resources.\n", __func__); goto cdev_add_err; } return 0; cdev_add_err: kfree(drv->cdev); cdev_alloc_err: device_destroy(drv->dev_class, drv->dev_num); device_create_err: unregister_chrdev_region(drv->dev_num, 1); alloc_chrdev_region_err: class_destroy(drv->dev_class); res_err: kfree(drv); drv = NULL; alloc_err: return -ENODEV; } static int __devexit dsps_remove(struct platform_device *pdev) { pr_debug("%s.\n", __func__); dsps_power_off_handler(); dsps_free_resources(); cdev_del(drv->cdev); kfree(drv->cdev); drv->cdev = NULL; device_destroy(drv->dev_class, drv->dev_num); unregister_chrdev_region(drv->dev_num, 1); class_destroy(drv->dev_class); kfree(drv); drv = NULL; return 0; } static struct platform_driver dsps_driver = { .probe = dsps_probe, .remove = __exit_p(dsps_remove), .driver = { .name = "msm_dsps", }, }; /** * Module Init. */ static int __init dsps_init(void) { int ret; pr_info("%s driver version %s.\n", DRV_NAME, DRV_VERSION); ret = platform_driver_register(&dsps_driver); if (ret) pr_err("dsps_init.err=%d.\n", ret); return ret; } /** * Module Exit. */ static void __exit dsps_exit(void) { pr_debug("%s.\n", __func__); platform_driver_unregister(&dsps_driver); } module_init(dsps_init); module_exit(dsps_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Dedicated Sensors Processor Subsystem (DSPS) driver"); MODULE_AUTHOR("Amir Samuelov <amirs@codeaurora.org>");
gpl-2.0
apasricha/KVMTrace-kernel-mod
drivers/gpu/drm/ttm/ttm_bo_manager.c
2225
4384
/************************************************************************** * * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ /* * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> */ #include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> #include <drm/drm_mm.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/module.h> /** * Currently we use a spinlock for the lock, but a mutex *may* be * more appropriate to reduce scheduling latency if the range manager * ends up with very fragmented allocation patterns. 
 */

/* Range manager private state: one drm_mm allocator guarded by a spinlock. */
struct ttm_range_manager {
	struct drm_mm mm;
	spinlock_t lock;
};

/*
 * Find a free range for @mem inside [fpfn, lpfn).  drm_mm_pre_get()
 * pre-allocates node storage outside the lock; the search + atomic get
 * are retried because another thread may grab the found hole between
 * our unlock and the next iteration's atomic get failing.
 * Returning 0 with mem->mm_node left NULL signals "no space" to the
 * caller (TTM convention — NOTE(review): confirmed by callers outside
 * this file).
 */
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *bo,
			       struct ttm_placement *placement,
			       struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;
	struct drm_mm_node *node = NULL;
	unsigned long lpfn;
	int ret;

	lpfn = placement->lpfn;
	if (!lpfn)
		lpfn = man->size;	/* 0 means "no upper bound" */
	do {
		ret = drm_mm_pre_get(mm);
		if (unlikely(ret))
			return ret;

		spin_lock(&rman->lock);
		node = drm_mm_search_free_in_range(mm,
					mem->num_pages, mem->page_alignment,
					placement->fpfn, lpfn, 1);
		if (unlikely(node == NULL)) {
			spin_unlock(&rman->lock);
			return 0;	/* no hole big enough */
		}
		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
						     mem->page_alignment,
						     placement->fpfn,
						     lpfn);
		spin_unlock(&rman->lock);
	} while (node == NULL);		/* lost the race — search again */

	mem->mm_node = node;
	mem->start = node->start;
	return 0;
}

/* Return @mem's range to the allocator (no-op if none was assigned). */
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	if (mem->mm_node) {
		spin_lock(&rman->lock);
		drm_mm_put_block(mem->mm_node);
		spin_unlock(&rman->lock);
		mem->mm_node = NULL;
	}
}

/* Allocate and initialize the range manager for a memory type of
 * @p_size pages; stored in man->priv. */
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
			   unsigned long p_size)
{
	struct ttm_range_manager *rman;
	int ret;

	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;

	ret = drm_mm_init(&rman->mm, 0, p_size);
	if (ret) {
		kfree(rman);
		return ret;
	}

	spin_lock_init(&rman->lock);
	man->priv = rman;
	return 0;
}

/* Tear down the manager; refuses (-EBUSY) while allocations remain. */
static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;

	spin_lock(&rman->lock);
	if (drm_mm_clean(mm)) {
		drm_mm_takedown(mm);
		spin_unlock(&rman->lock);
		kfree(rman);
		man->priv = NULL;
		return 0;
	}
	spin_unlock(&rman->lock);
	return -EBUSY;
}

/* Dump the allocator's block table for debugging. */
static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
			     const char *prefix)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	spin_lock(&rman->lock);
	drm_mm_debug_table(&rman->mm, prefix);
	spin_unlock(&rman->lock);
}

/* Positional init order: init, takedown, get_node, put_node, debug. */
const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
	ttm_bo_man_init,
	ttm_bo_man_takedown,
	ttm_bo_man_get_node,
	ttm_bo_man_put_node,
	ttm_bo_man_debug
};
EXPORT_SYMBOL(ttm_bo_manager_func);
gpl-2.0
ench0/android_kernel_samsung_hlte
drivers/edac/i7core_edac.c
2737
65636
/* Intel i7 core/Nehalem Memory Controller kernel module
 *
 * This driver supports the memory controllers found on the Intel
 * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
 * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
 * and Westmere-EP.
 *
 * This file may be distributed under the terms of the
 * GNU General Public License version 2 only.
 *
 * Copyright (c) 2009-2010 by:
 *	 Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * Red Hat Inc. http://www.redhat.com
 *
 * Forked and adapted from the i5400_edac driver
 *
 * Based on the following public Intel datasheets:
 * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
 * Datasheet, Volume 2:
 *	http://download.intel.com/design/processor/datashts/320835.pdf
 * Intel Xeon Processor 5500 Series Datasheet Volume 2
 *	http://www.intel.com/Assets/PDF/datasheet/321322.pdf
 * also available at:
 *	http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <asm/mce.h>
#include <asm/processor.h>
#include <asm/div64.h>

#include "edac_core.h"

/* Static vars */

/* All probed i7core devices, one entry per socket (struct i7core_dev) */
static LIST_HEAD(i7core_edac_list);
static DEFINE_MUTEX(i7core_edac_lock);
static int probed;

static int use_pci_fixup;
module_param(use_pci_fixup, int, 0444);
MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");

/*
 * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
 * registers start at bus 255, and are not reported by BIOS.
 * We currently find devices with only 2 sockets. In order to support more QPI
 * Quick Path Interconnect, just increment this number.
 */
#define MAX_SOCKET_BUSES	2

/*
 * Alter this version for the module when modifications are made
 */
#define I7CORE_REVISION    " Ver: 1.0.0"
#define EDAC_MOD_STR      "i7core_edac"

/*
 * Debug macros
 */
#define i7core_printk(level, fmt, arg...)			\
	edac_printk(level, "i7core", fmt, ##arg)

#define i7core_mc_printk(mci, level, fmt, arg...)		\
	edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)

/*
 * i7core Memory Controller Registers
 */

	/* OFFSETS for Device 0 Function 0 */

#define MC_CFG_CONTROL	0x90
  #define MC_CFG_UNLOCK		0x02
  #define MC_CFG_LOCK		0x00

	/* OFFSETS for Device 3 Function 0 */

#define MC_CONTROL	0x48
#define MC_STATUS	0x4c
#define MC_MAX_DOD	0x64

/*
 * OFFSETS for Device 3 Function 4, as indicated on Xeon 5500 datasheet:
 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 */

#define MC_TEST_ERR_RCV1	0x60
  #define DIMM2_COR_ERR(r)			((r) & 0x7fff)

#define MC_TEST_ERR_RCV0	0x64
  #define DIMM1_COR_ERR(r)			(((r) >> 16) & 0x7fff)
  #define DIMM0_COR_ERR(r)			((r) & 0x7fff)

/* OFFSETS for Device 3 Function 2, as indicated on Xeon 5500 datasheet */
#define MC_SSRCONTROL		0x48
  #define SSR_MODE_DISABLE	0x00
  #define SSR_MODE_ENABLE	0x01
  #define SSR_MODE_MASK		0x03

#define MC_SCRUB_CONTROL	0x4c
  #define STARTSCRUB		(1 << 24)
  #define SCRUBINTERVAL_MASK    0xffffff

#define MC_COR_ECC_CNT_0	0x80
#define MC_COR_ECC_CNT_1	0x84
#define MC_COR_ECC_CNT_2	0x88
#define MC_COR_ECC_CNT_3	0x8c
#define MC_COR_ECC_CNT_4	0x90
#define MC_COR_ECC_CNT_5	0x94

#define DIMM_TOP_COR_ERR(r)			(((r) >> 16) & 0x7fff)
#define DIMM_BOT_COR_ERR(r)			((r) & 0x7fff)


	/* OFFSETS for Devices 4,5 and 6 Function 0 */

#define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
  #define THREE_DIMMS_PRESENT		(1 << 24)
  #define SINGLE_QUAD_RANK_PRESENT	(1 << 23)
  #define QUAD_RANK_PRESENT		(1 << 22)
  #define REGISTERED_DIMM		(1 << 15)

#define MC_CHANNEL_MAPPER	0x60
  #define RDLCH(r, ch)		((((r) >> (3 + (ch * 6))) & 0x07) - 1)
  #define WRLCH(r, ch)		((((r) >> (ch * 6)) & 0x07) - 1)

#define MC_CHANNEL_RANK_PRESENT 0x7c
  #define RANK_PRESENT_MASK		0xffff

#define MC_CHANNEL_ADDR_MATCH	0xf0
#define MC_CHANNEL_ERROR_MASK	0xf8
#define MC_CHANNEL_ERROR_INJECT	0xfc
  #define INJECT_ADDR_PARITY	0x10
  #define INJECT_ECC		0x08
  #define MASK_CACHELINE	0x06
  #define MASK_FULL_CACHELINE	0x06
  #define MASK_MSB32_CACHELINE	0x04
  #define MASK_LSB32_CACHELINE	0x02
  #define NO_MASK_CACHELINE	0x00
  #define REPEAT_EN		0x01

	/* OFFSETS for Devices 4,5 and 6 Function 1 */

#define MC_DOD_CH_DIMM0		0x48
#define MC_DOD_CH_DIMM1		0x4c
#define MC_DOD_CH_DIMM2		0x50
  #define RANKOFFSET_MASK	((1 << 12) | (1 << 11) | (1 << 10))
  #define RANKOFFSET(x)		((x & RANKOFFSET_MASK) >> 10)
  #define DIMM_PRESENT_MASK	(1 << 9)
  #define DIMM_PRESENT(x)	(((x) & DIMM_PRESENT_MASK) >> 9)
  #define MC_DOD_NUMBANK_MASK		((1 << 8) | (1 << 7))
  #define MC_DOD_NUMBANK(x)		(((x) & MC_DOD_NUMBANK_MASK) >> 7)
  #define MC_DOD_NUMRANK_MASK		((1 << 6) | (1 << 5))
  #define MC_DOD_NUMRANK(x)		(((x) & MC_DOD_NUMRANK_MASK) >> 5)
  #define MC_DOD_NUMROW_MASK		((1 << 4) | (1 << 3) | (1 << 2))
  #define MC_DOD_NUMROW(x)		(((x) & MC_DOD_NUMROW_MASK) >> 2)
  #define MC_DOD_NUMCOL_MASK		3
  #define MC_DOD_NUMCOL(x)		((x) & MC_DOD_NUMCOL_MASK)

#define MC_RANK_PRESENT		0x7c

#define MC_SAG_CH_0	0x80
#define MC_SAG_CH_1	0x84
#define MC_SAG_CH_2	0x88
#define MC_SAG_CH_3	0x8c
#define MC_SAG_CH_4	0x90
#define MC_SAG_CH_5	0x94
#define MC_SAG_CH_6	0x98
#define MC_SAG_CH_7	0x9c

#define MC_RIR_LIMIT_CH_0	0x40
#define MC_RIR_LIMIT_CH_1	0x44
#define MC_RIR_LIMIT_CH_2	0x48
#define MC_RIR_LIMIT_CH_3	0x4C
#define MC_RIR_LIMIT_CH_4	0x50
#define MC_RIR_LIMIT_CH_5	0x54
#define MC_RIR_LIMIT_CH_6	0x58
#define MC_RIR_LIMIT_CH_7	0x5C
#define MC_RIR_LIMIT_MASK	((1 << 10) - 1)

#define MC_RIR_WAY_CH		0x80
  #define MC_RIR_WAY_OFFSET_MASK	(((1 << 14) - 1) & ~0x7)
  #define MC_RIR_WAY_RANK_MASK		0x7

/*
 * i7core structs
 */

#define NUM_CHANS 3
#define MAX_DIMMS 3		/* Max DIMMS per channel */
#define MAX_MCR_FUNC  4
#define MAX_CHAN_FUNC 3

/* Raw copies of the per-socket MC registers, read at probe time */
struct i7core_info {
	u32	mc_control;
	u32	mc_status;
	u32	max_dod;
	u32	ch_map;
};

/* Parameters for the sysfs-driven error-injection interface */
struct i7core_inject {
	int	enable;

	u32	section;
	u32	type;
	u32	eccmask;

	/* Error address mask */
	int channel, dimm, rank, bank, page, col;
};

struct i7core_channel {
	u32		ranks;
	u32		dimms;
};

/* One PCI device/function the driver needs to 'get' on each socket */
struct pci_id_descr {
	int			dev;
	int			func;
	int			dev_id;
	int			optional;
};

struct pci_id_table {
	const struct pci_id_descr	*descr;
	int				n_devs;
};

/* Per-socket device bundle, linked into i7core_edac_list */
struct i7core_dev {
	struct list_head	list;
	u8			socket;
	struct pci_dev		**pdev;	/* array of n_devs held references */
	int			n_devs;
	struct mem_ctl_info	*mci;
};

/* Driver-private data hanging off each mem_ctl_info */
struct i7core_pvt {
	struct pci_dev	*pci_noncore;
	struct pci_dev	*pci_mcr[MAX_MCR_FUNC + 1];
	struct pci_dev	*pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];

	struct i7core_dev *i7core_dev;

	struct i7core_info	info;
	struct i7core_inject	inject;
	struct i7core_channel	channel[NUM_CHANS];

	int		ce_count_available;

	/* csrow index for each (channel, dimm) pair */
	int		csrow_map[NUM_CHANS][MAX_DIMMS];

	/* ECC corrected errors counts per udimm */
	unsigned long	udimm_ce_count[MAX_DIMMS];
	int		udimm_last_ce_count[MAX_DIMMS];
	/* ECC corrected errors counts per rdimm */
	unsigned long	rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
	int		rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];

	bool		is_registered, enable_scrub;

	/* Fifo double buffers */
	struct mce		mce_entry[MCE_LOG_LEN];
	struct mce		mce_outentry[MCE_LOG_LEN];

	/* Fifo in/out counters */
	unsigned		mce_in, mce_out;

	/* Count indicator to show errors not got */
	unsigned		mce_overrun;

	/* DCLK Frequency used for computing scrub rate */
	int			dclk_freq;

	/* Struct to control EDAC polling */
	struct edac_pci_ctl_info *i7core_pci;
};

#define PCI_DESCR(device, function, device_id)	\
	.dev = (device),			\
	.func = (function),			\
	.dev_id = (device_id)

static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
		/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR)     },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD)  },
			/* Exists only for RDIMM */
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1  },
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },

		/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC)   },

		/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC)   },

		/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC)   },

		/* Generic Non-core registers */
	/*
	 * This is the PCI device on i7core and on Xeon 35xx (8086:2c41)
	 * On Xeon 55xx, however, it has a different id (8086:2c40). So,
	 * the probing code needs to test for the other address in case of
	 * failure of this one
	 */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE)  },

};

static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
	{ PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR)         },
	{ PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD)      },
	{ PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST)     },

	{ PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
	{ PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
	{ PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
	{ PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC)   },

	{ PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
	{ PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
	{ PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
	{ PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC)   },

	/*
	 * This is the PCI device has an alternate address on some
	 * processors like Core i7 860
	 */
	{ PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE)     },
};

static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
		/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2)     },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2)  },
			/* Exists only for RDIMM */
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1  },
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },

		/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2)   },

		/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2)   },

		/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2)   },

		/* Generic Non-core registers */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2)  },

};

#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
static const struct pci_id_table pci_dev_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
	PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
	PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
	{0,}			/* 0 terminated list. */
};

/*
 * pci_device_id table for which devices we are looking for
 */
static DEFINE_PCI_DEVICE_TABLE(i7core_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
	{0,}			/* 0 terminated list. */
};

/****************************************************************************
			Ancillary status routines
 ****************************************************************************/

	/* MC_CONTROL bits */
#define CH_ACTIVE(pvt, ch)	((pvt)->info.mc_control & (1 << (8 + ch)))
#define ECCx8(pvt)		((pvt)->info.mc_control & (1 << 1))

	/* MC_STATUS bits */
#define ECC_ENABLED(pvt)	((pvt)->info.mc_status & (1 << 4))
#define CH_DISABLED(pvt, ch)	((pvt)->info.mc_status & (1 << ch))

	/*
	 * MC_MAX_DOD read functions: decode the bit fields of the MC_MAX_DOD
	 * register into DIMM counts/geometry. Reserved encodings decode to
	 * -EINVAL.
	 */
static inline int numdimms(u32 dimms)
{
	return (dimms & 0x3) + 1;
}

static inline int numrank(u32 rank)
{
	static int ranks[4] = { 1, 2, 4, -EINVAL };

	return ranks[rank & 0x3];
}

static inline int numbank(u32 bank)
{
	static int banks[4] = { 4, 8, 16, -EINVAL };

	return banks[bank & 0x3];
}

static inline int numrow(u32 row)
{
	static int rows[8] = {
		1 << 12, 1 << 13, 1 << 14, 1 << 15,
		1 << 16, -EINVAL, -EINVAL, -EINVAL,
	};

	return rows[row & 0x7];
}

static inline int numcol(u32 col)
{
	static int cols[8] = {
		1 << 10, 1 << 11, 1 << 12, -EINVAL,
	};
	return cols[col & 0x3];
}

/*
 * Look up the per-socket device bundle on the global i7core_edac_list.
 * Returns NULL if the socket has not been probed yet.
 * NOTE(review): list walks/updates appear to rely on probe-time
 * serialization (i7core_edac_lock) — confirm against callers.
 */
static struct i7core_dev *get_i7core_dev(u8 socket)
{
	struct i7core_dev *i7core_dev;

	list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
		if (i7core_dev->socket == socket)
			return i7core_dev;
	}

	return NULL;
}

/*
 * Allocate a per-socket device bundle with room for table->n_devs pci_dev
 * pointers and link it onto i7core_edac_list. Returns NULL on allocation
 * failure (nothing is leaked or left on the list in that case).
 */
static struct i7core_dev *alloc_i7core_dev(u8 socket,
					   const struct pci_id_table *table)
{
	struct i7core_dev *i7core_dev;

	i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
	if (!i7core_dev)
		return NULL;

	i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
				   GFP_KERNEL);
	if (!i7core_dev->pdev) {
		kfree(i7core_dev);
		return NULL;
	}

	i7core_dev->socket = socket;
	i7core_dev->n_devs = table->n_devs;
	list_add_tail(&i7core_dev->list, &i7core_edac_list);

	return i7core_dev;
}

/*
 * Unlink and free a bundle created by alloc_i7core_dev(). Does NOT drop
 * the pci_dev references held in ->pdev[] — callers must put those first
 * (see i7core_put_devices()).
 */
static void free_i7core_dev(struct i7core_dev *i7core_dev)
{
	list_del(&i7core_dev->list);
	kfree(i7core_dev->pdev);
	kfree(i7core_dev);
}
/**************************************************************************** Memory check routines ****************************************************************************/ static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot, unsigned func) { struct i7core_dev *i7core_dev = get_i7core_dev(socket); int i; if (!i7core_dev) return NULL; for (i = 0; i < i7core_dev->n_devs; i++) { if (!i7core_dev->pdev[i]) continue; if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot && PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) { return i7core_dev->pdev[i]; } } return NULL; } /** * i7core_get_active_channels() - gets the number of channels and csrows * @socket: Quick Path Interconnect socket * @channels: Number of channels that will be returned * @csrows: Number of csrows found * * Since EDAC core needs to know in advance the number of available channels * and csrows, in order to allocate memory for csrows/channels, it is needed * to run two similar steps. At the first step, implemented on this function, * it checks the number of csrows/channels present at one socket. * this is used in order to properly allocate the size of mci components. * * It should be noticed that none of the current available datasheets explain * or even mention how csrows are seen by the memory controller. So, we need * to add a fake description for csrows. * So, this driver is attributing one DIMM memory for one csrow. 
*/ static int i7core_get_active_channels(const u8 socket, unsigned *channels, unsigned *csrows) { struct pci_dev *pdev = NULL; int i, j; u32 status, control; *channels = 0; *csrows = 0; pdev = get_pdev_slot_func(socket, 3, 0); if (!pdev) { i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n", socket); return -ENODEV; } /* Device 3 function 0 reads */ pci_read_config_dword(pdev, MC_STATUS, &status); pci_read_config_dword(pdev, MC_CONTROL, &control); for (i = 0; i < NUM_CHANS; i++) { u32 dimm_dod[3]; /* Check if the channel is active */ if (!(control & (1 << (8 + i)))) continue; /* Check if the channel is disabled */ if (status & (1 << i)) continue; pdev = get_pdev_slot_func(socket, i + 4, 1); if (!pdev) { i7core_printk(KERN_ERR, "Couldn't find socket %d " "fn %d.%d!!!\n", socket, i + 4, 1); return -ENODEV; } /* Devices 4-6 function 1 */ pci_read_config_dword(pdev, MC_DOD_CH_DIMM0, &dimm_dod[0]); pci_read_config_dword(pdev, MC_DOD_CH_DIMM1, &dimm_dod[1]); pci_read_config_dword(pdev, MC_DOD_CH_DIMM2, &dimm_dod[2]); (*channels)++; for (j = 0; j < 3; j++) { if (!DIMM_PRESENT(dimm_dod[j])) continue; (*csrows)++; } } debugf0("Number of active channels on socket %d: %d\n", socket, *channels); return 0; } static int get_dimm_config(const struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; struct csrow_info *csr; struct pci_dev *pdev; int i, j; int csrow = 0; unsigned long last_page = 0; enum edac_type mode; enum mem_type mtype; /* Get data from the MC register, function 0 */ pdev = pvt->pci_mcr[0]; if (!pdev) return -ENODEV; /* Device 3 function 0 reads */ pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control); pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status); pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod); pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map); debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n", pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status, 
pvt->info.max_dod, pvt->info.ch_map); if (ECC_ENABLED(pvt)) { debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4); if (ECCx8(pvt)) mode = EDAC_S8ECD8ED; else mode = EDAC_S4ECD4ED; } else { debugf0("ECC disabled\n"); mode = EDAC_NONE; } /* FIXME: need to handle the error codes */ debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked " "x%x x 0x%x\n", numdimms(pvt->info.max_dod), numrank(pvt->info.max_dod >> 2), numbank(pvt->info.max_dod >> 4), numrow(pvt->info.max_dod >> 6), numcol(pvt->info.max_dod >> 9)); for (i = 0; i < NUM_CHANS; i++) { u32 data, dimm_dod[3], value[8]; if (!pvt->pci_ch[i][0]) continue; if (!CH_ACTIVE(pvt, i)) { debugf0("Channel %i is not active\n", i); continue; } if (CH_DISABLED(pvt, i)) { debugf0("Channel %i is disabled\n", i); continue; } /* Devices 4-6 function 0 */ pci_read_config_dword(pvt->pci_ch[i][0], MC_CHANNEL_DIMM_INIT_PARAMS, &data); pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ? 4 : 2; if (data & REGISTERED_DIMM) mtype = MEM_RDDR3; else mtype = MEM_DDR3; #if 0 if (data & THREE_DIMMS_PRESENT) pvt->channel[i].dimms = 3; else if (data & SINGLE_QUAD_RANK_PRESENT) pvt->channel[i].dimms = 1; else pvt->channel[i].dimms = 2; #endif /* Devices 4-6 function 1 */ pci_read_config_dword(pvt->pci_ch[i][1], MC_DOD_CH_DIMM0, &dimm_dod[0]); pci_read_config_dword(pvt->pci_ch[i][1], MC_DOD_CH_DIMM1, &dimm_dod[1]); pci_read_config_dword(pvt->pci_ch[i][1], MC_DOD_CH_DIMM2, &dimm_dod[2]); debugf0("Ch%d phy rd%d, wr%d (0x%08x): " "%d ranks, %cDIMMs\n", i, RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i), data, pvt->channel[i].ranks, (data & REGISTERED_DIMM) ? 
'R' : 'U'); for (j = 0; j < 3; j++) { u32 banks, ranks, rows, cols; u32 size, npages; if (!DIMM_PRESENT(dimm_dod[j])) continue; banks = numbank(MC_DOD_NUMBANK(dimm_dod[j])); ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j])); rows = numrow(MC_DOD_NUMROW(dimm_dod[j])); cols = numcol(MC_DOD_NUMCOL(dimm_dod[j])); /* DDR3 has 8 I/O banks */ size = (rows * cols * banks * ranks) >> (20 - 3); pvt->channel[i].dimms++; debugf0("\tdimm %d %d Mb offset: %x, " "bank: %d, rank: %d, row: %#x, col: %#x\n", j, size, RANKOFFSET(dimm_dod[j]), banks, ranks, rows, cols); npages = MiB_TO_PAGES(size); csr = &mci->csrows[csrow]; csr->first_page = last_page + 1; last_page += npages; csr->last_page = last_page; csr->nr_pages = npages; csr->page_mask = 0; csr->grain = 8; csr->csrow_idx = csrow; csr->nr_channels = 1; csr->channels[0].chan_idx = i; csr->channels[0].ce_count = 0; pvt->csrow_map[i][j] = csrow; switch (banks) { case 4: csr->dtype = DEV_X4; break; case 8: csr->dtype = DEV_X8; break; case 16: csr->dtype = DEV_X16; break; default: csr->dtype = DEV_UNKNOWN; } csr->edac_mode = mode; csr->mtype = mtype; snprintf(csr->channels[0].label, sizeof(csr->channels[0].label), "CPU#%uChannel#%u_DIMM#%u", pvt->i7core_dev->socket, i, j); csrow++; } pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]); pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]); pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]); pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]); pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]); pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]); pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]); pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]); debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i); for (j = 0; j < 8; j++) debugf1("\t\t%#x\t%#x\t%#x\n", (value[j] >> 27) & 0x1, (value[j] >> 24) & 0x7, (value[j] & ((1 << 24) - 1))); } return 0; } /**************************************************************************** Error insertion routines 
****************************************************************************/ /* The i7core has independent error injection features per channel. However, to have a simpler code, we don't allow enabling error injection on more than one channel. Also, since a change at an inject parameter will be applied only at enable, we're disabling error injection on all write calls to the sysfs nodes that controls the error code injection. */ static int disable_inject(const struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; pvt->inject.enable = 0; if (!pvt->pci_ch[pvt->inject.channel][0]) return -ENODEV; pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0], MC_CHANNEL_ERROR_INJECT, 0); return 0; } /* * i7core inject inject.section * * accept and store error injection inject.section value * bit 0 - refers to the lower 32-byte half cacheline * bit 1 - refers to the upper 32-byte half cacheline */ static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci, const char *data, size_t count) { struct i7core_pvt *pvt = mci->pvt_info; unsigned long value; int rc; if (pvt->inject.enable) disable_inject(mci); rc = strict_strtoul(data, 10, &value); if ((rc < 0) || (value > 3)) return -EIO; pvt->inject.section = (u32) value; return count; } static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci, char *data) { struct i7core_pvt *pvt = mci->pvt_info; return sprintf(data, "0x%08x\n", pvt->inject.section); } /* * i7core inject.type * * accept and store error injection inject.section value * bit 0 - repeat enable - Enable error repetition * bit 1 - inject ECC error * bit 2 - inject parity error */ static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci, const char *data, size_t count) { struct i7core_pvt *pvt = mci->pvt_info; unsigned long value; int rc; if (pvt->inject.enable) disable_inject(mci); rc = strict_strtoul(data, 10, &value); if ((rc < 0) || (value > 7)) return -EIO; pvt->inject.type = (u32) value; return count; } static ssize_t 
i7core_inject_type_show(struct mem_ctl_info *mci, char *data) { struct i7core_pvt *pvt = mci->pvt_info; return sprintf(data, "0x%08x\n", pvt->inject.type); } /* * i7core_inject_inject.eccmask_store * * The type of error (UE/CE) will depend on the inject.eccmask value: * Any bits set to a 1 will flip the corresponding ECC bit * Correctable errors can be injected by flipping 1 bit or the bits within * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an * uncorrectable error to be injected. */ static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci, const char *data, size_t count) { struct i7core_pvt *pvt = mci->pvt_info; unsigned long value; int rc; if (pvt->inject.enable) disable_inject(mci); rc = strict_strtoul(data, 10, &value); if (rc < 0) return -EIO; pvt->inject.eccmask = (u32) value; return count; } static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci, char *data) { struct i7core_pvt *pvt = mci->pvt_info; return sprintf(data, "0x%08x\n", pvt->inject.eccmask); } /* * i7core_addrmatch * * The type of error (UE/CE) will depend on the inject.eccmask value: * Any bits set to a 1 will flip the corresponding ECC bit * Correctable errors can be injected by flipping 1 bit or the bits within * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an * uncorrectable error to be injected. 
*/ #define DECLARE_ADDR_MATCH(param, limit) \ static ssize_t i7core_inject_store_##param( \ struct mem_ctl_info *mci, \ const char *data, size_t count) \ { \ struct i7core_pvt *pvt; \ long value; \ int rc; \ \ debugf1("%s()\n", __func__); \ pvt = mci->pvt_info; \ \ if (pvt->inject.enable) \ disable_inject(mci); \ \ if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\ value = -1; \ else { \ rc = strict_strtoul(data, 10, &value); \ if ((rc < 0) || (value >= limit)) \ return -EIO; \ } \ \ pvt->inject.param = value; \ \ return count; \ } \ \ static ssize_t i7core_inject_show_##param( \ struct mem_ctl_info *mci, \ char *data) \ { \ struct i7core_pvt *pvt; \ \ pvt = mci->pvt_info; \ debugf1("%s() pvt=%p\n", __func__, pvt); \ if (pvt->inject.param < 0) \ return sprintf(data, "any\n"); \ else \ return sprintf(data, "%d\n", pvt->inject.param);\ } #define ATTR_ADDR_MATCH(param) \ { \ .attr = { \ .name = #param, \ .mode = (S_IRUGO | S_IWUSR) \ }, \ .show = i7core_inject_show_##param, \ .store = i7core_inject_store_##param, \ } DECLARE_ADDR_MATCH(channel, 3); DECLARE_ADDR_MATCH(dimm, 3); DECLARE_ADDR_MATCH(rank, 4); DECLARE_ADDR_MATCH(bank, 32); DECLARE_ADDR_MATCH(page, 0x10000); DECLARE_ADDR_MATCH(col, 0x4000); static int write_and_test(struct pci_dev *dev, const int where, const u32 val) { u32 read; int count; debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n", dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), where, val); for (count = 0; count < 10; count++) { if (count) msleep(100); pci_write_config_dword(dev, where, val); pci_read_config_dword(dev, where, &read); if (read == val) return 0; } i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x " "write=%08x. Read=%08x\n", dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), where, val, read); return -EINVAL; } /* * This routine prepares the Memory Controller for error injection. 
* The error will be injected when some process tries to write to the * memory that matches the given criteria. * The criteria can be set in terms of a mask where dimm, rank, bank, page * and col can be specified. * A -1 value for any of the mask items will make the MCU to ignore * that matching criteria for error injection. * * It should be noticed that the error will only happen after a write operation * on a memory that matches the condition. if REPEAT_EN is not enabled at * inject mask, then it will produce just one error. Otherwise, it will repeat * until the injectmask would be cleaned. * * FIXME: This routine assumes that MAXNUMDIMMS value of MC_MAX_DOD * is reliable enough to check if the MC is using the * three channels. However, this is not clear at the datasheet. */ static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci, const char *data, size_t count) { struct i7core_pvt *pvt = mci->pvt_info; u32 injectmask; u64 mask = 0; int rc; long enable; if (!pvt->pci_ch[pvt->inject.channel][0]) return 0; rc = strict_strtoul(data, 10, &enable); if ((rc < 0)) return 0; if (enable) { pvt->inject.enable = 1; } else { disable_inject(mci); return count; } /* Sets pvt->inject.dimm mask */ if (pvt->inject.dimm < 0) mask |= 1LL << 41; else { if (pvt->channel[pvt->inject.channel].dimms > 2) mask |= (pvt->inject.dimm & 0x3LL) << 35; else mask |= (pvt->inject.dimm & 0x1LL) << 36; } /* Sets pvt->inject.rank mask */ if (pvt->inject.rank < 0) mask |= 1LL << 40; else { if (pvt->channel[pvt->inject.channel].dimms > 2) mask |= (pvt->inject.rank & 0x1LL) << 34; else mask |= (pvt->inject.rank & 0x3LL) << 34; } /* Sets pvt->inject.bank mask */ if (pvt->inject.bank < 0) mask |= 1LL << 39; else mask |= (pvt->inject.bank & 0x15LL) << 30; /* Sets pvt->inject.page mask */ if (pvt->inject.page < 0) mask |= 1LL << 38; else mask |= (pvt->inject.page & 0xffff) << 14; /* Sets pvt->inject.column mask */ if (pvt->inject.col < 0) mask |= 1LL << 37; else mask |= (pvt->inject.col & 
0x3fff); /* * bit 0: REPEAT_EN * bits 1-2: MASK_HALF_CACHELINE * bit 3: INJECT_ECC * bit 4: INJECT_ADDR_PARITY */ injectmask = (pvt->inject.type & 1) | (pvt->inject.section & 0x3) << 1 | (pvt->inject.type & 0x6) << (3 - 1); /* Unlock writes to registers - this register is write only */ pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, 0x2); write_and_test(pvt->pci_ch[pvt->inject.channel][0], MC_CHANNEL_ADDR_MATCH, mask); write_and_test(pvt->pci_ch[pvt->inject.channel][0], MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L); write_and_test(pvt->pci_ch[pvt->inject.channel][0], MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask); write_and_test(pvt->pci_ch[pvt->inject.channel][0], MC_CHANNEL_ERROR_INJECT, injectmask); /* * This is something undocumented, based on my tests * Without writing 8 to this register, errors aren't injected. Not sure * why. */ pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, 8); debugf0("Error inject addr match 0x%016llx, ecc 0x%08x," " inject 0x%08x\n", mask, pvt->inject.eccmask, injectmask); return count; } static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci, char *data) { struct i7core_pvt *pvt = mci->pvt_info; u32 injectmask; if (!pvt->pci_ch[pvt->inject.channel][0]) return 0; pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0], MC_CHANNEL_ERROR_INJECT, &injectmask); debugf0("Inject error read: 0x%018x\n", injectmask); if (injectmask & 0x0c) pvt->inject.enable = 1; return sprintf(data, "%d\n", pvt->inject.enable); } #define DECLARE_COUNTER(param) \ static ssize_t i7core_show_counter_##param( \ struct mem_ctl_info *mci, \ char *data) \ { \ struct i7core_pvt *pvt = mci->pvt_info; \ \ debugf1("%s() \n", __func__); \ if (!pvt->ce_count_available || (pvt->is_registered)) \ return sprintf(data, "data unavailable\n"); \ return sprintf(data, "%lu\n", \ pvt->udimm_ce_count[param]); \ } #define ATTR_COUNTER(param) \ { \ .attr = { \ .name = __stringify(udimm##param), \ .mode = (S_IRUGO | S_IWUSR) \ }, \ .show = 
i7core_show_counter_##param \ } DECLARE_COUNTER(0); DECLARE_COUNTER(1); DECLARE_COUNTER(2); /* * Sysfs struct */ static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = { ATTR_ADDR_MATCH(channel), ATTR_ADDR_MATCH(dimm), ATTR_ADDR_MATCH(rank), ATTR_ADDR_MATCH(bank), ATTR_ADDR_MATCH(page), ATTR_ADDR_MATCH(col), { } /* End of list */ }; static const struct mcidev_sysfs_group i7core_inject_addrmatch = { .name = "inject_addrmatch", .mcidev_attr = i7core_addrmatch_attrs, }; static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = { ATTR_COUNTER(0), ATTR_COUNTER(1), ATTR_COUNTER(2), { .attr = { .name = NULL } } }; static const struct mcidev_sysfs_group i7core_udimm_counters = { .name = "all_channel_counts", .mcidev_attr = i7core_udimm_counters_attrs, }; static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = { { .attr = { .name = "inject_section", .mode = (S_IRUGO | S_IWUSR) }, .show = i7core_inject_section_show, .store = i7core_inject_section_store, }, { .attr = { .name = "inject_type", .mode = (S_IRUGO | S_IWUSR) }, .show = i7core_inject_type_show, .store = i7core_inject_type_store, }, { .attr = { .name = "inject_eccmask", .mode = (S_IRUGO | S_IWUSR) }, .show = i7core_inject_eccmask_show, .store = i7core_inject_eccmask_store, }, { .grp = &i7core_inject_addrmatch, }, { .attr = { .name = "inject_enable", .mode = (S_IRUGO | S_IWUSR) }, .show = i7core_inject_enable_show, .store = i7core_inject_enable_store, }, { } /* End of list */ }; static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = { { .attr = { .name = "inject_section", .mode = (S_IRUGO | S_IWUSR) }, .show = i7core_inject_section_show, .store = i7core_inject_section_store, }, { .attr = { .name = "inject_type", .mode = (S_IRUGO | S_IWUSR) }, .show = i7core_inject_type_show, .store = i7core_inject_type_store, }, { .attr = { .name = "inject_eccmask", .mode = (S_IRUGO | S_IWUSR) }, .show = i7core_inject_eccmask_show, .store = i7core_inject_eccmask_store, 
}, { .grp = &i7core_inject_addrmatch, }, { .attr = { .name = "inject_enable", .mode = (S_IRUGO | S_IWUSR) }, .show = i7core_inject_enable_show, .store = i7core_inject_enable_store, }, { .grp = &i7core_udimm_counters, }, { } /* End of list */ }; /**************************************************************************** Device initialization routines: put/get, init/exit ****************************************************************************/ /* * i7core_put_all_devices 'put' all the devices that we have * reserved via 'get' */ static void i7core_put_devices(struct i7core_dev *i7core_dev) { int i; debugf0(__FILE__ ": %s()\n", __func__); for (i = 0; i < i7core_dev->n_devs; i++) { struct pci_dev *pdev = i7core_dev->pdev[i]; if (!pdev) continue; debugf0("Removing dev %02x:%02x.%d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); pci_dev_put(pdev); } } static void i7core_put_all_devices(void) { struct i7core_dev *i7core_dev, *tmp; list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) { i7core_put_devices(i7core_dev); free_i7core_dev(i7core_dev); } } static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table) { struct pci_dev *pdev = NULL; int i; /* * On Xeon 55xx, the Intel Quick Path Arch Generic Non-core pci buses * aren't announced by acpi. 
So, we need to use a legacy scan probing * to detect them */ while (table && table->descr) { pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL); if (unlikely(!pdev)) { for (i = 0; i < MAX_SOCKET_BUSES; i++) pcibios_scan_specific_bus(255-i); } pci_dev_put(pdev); table++; } } static unsigned i7core_pci_lastbus(void) { int last_bus = 0, bus; struct pci_bus *b = NULL; while ((b = pci_find_next_bus(b)) != NULL) { bus = b->number; debugf0("Found bus %d\n", bus); if (bus > last_bus) last_bus = bus; } debugf0("Last bus %d\n", last_bus); return last_bus; } /* * i7core_get_all_devices Find and perform 'get' operation on the MCH's * device/functions we want to reference for this driver * * Need to 'get' device 16 func 1 and func 2 */ static int i7core_get_onedevice(struct pci_dev **prev, const struct pci_id_table *table, const unsigned devno, const unsigned last_bus) { struct i7core_dev *i7core_dev; const struct pci_id_descr *dev_descr = &table->descr[devno]; struct pci_dev *pdev = NULL; u8 bus = 0; u8 socket = 0; pdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_descr->dev_id, *prev); /* * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core regs * is at addr 8086:2c40, instead of 8086:2c41. 
So, we need * to probe for the alternate address in case of failure */ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev); if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev) pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT, *prev); if (!pdev) { if (*prev) { *prev = pdev; return 0; } if (dev_descr->optional) return 0; if (devno == 0) return -ENODEV; i7core_printk(KERN_INFO, "Device not found: dev %02x.%d PCI ID %04x:%04x\n", dev_descr->dev, dev_descr->func, PCI_VENDOR_ID_INTEL, dev_descr->dev_id); /* End of list, leave */ return -ENODEV; } bus = pdev->bus->number; socket = last_bus - bus; i7core_dev = get_i7core_dev(socket); if (!i7core_dev) { i7core_dev = alloc_i7core_dev(socket, table); if (!i7core_dev) { pci_dev_put(pdev); return -ENOMEM; } } if (i7core_dev->pdev[devno]) { i7core_printk(KERN_ERR, "Duplicated device for " "dev %02x:%02x.%d PCI ID %04x:%04x\n", bus, dev_descr->dev, dev_descr->func, PCI_VENDOR_ID_INTEL, dev_descr->dev_id); pci_dev_put(pdev); return -ENODEV; } i7core_dev->pdev[devno] = pdev; /* Sanity check */ if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev || PCI_FUNC(pdev->devfn) != dev_descr->func)) { i7core_printk(KERN_ERR, "Device PCI ID %04x:%04x " "has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n", PCI_VENDOR_ID_INTEL, dev_descr->dev_id, bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), bus, dev_descr->dev, dev_descr->func); return -ENODEV; } /* Be sure that the device is enabled */ if (unlikely(pci_enable_device(pdev) < 0)) { i7core_printk(KERN_ERR, "Couldn't enable " "dev %02x:%02x.%d PCI ID %04x:%04x\n", bus, dev_descr->dev, dev_descr->func, PCI_VENDOR_ID_INTEL, dev_descr->dev_id); return -ENODEV; } debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n", socket, bus, dev_descr->dev, dev_descr->func, PCI_VENDOR_ID_INTEL, dev_descr->dev_id); /* * As stated on 
drivers/pci/search.c, the reference count for * @from is always decremented if it is not %NULL. So, as we need * to get all devices up to null, we need to do a get for the device */ pci_dev_get(pdev); *prev = pdev; return 0; } static int i7core_get_all_devices(void) { int i, rc, last_bus; struct pci_dev *pdev = NULL; const struct pci_id_table *table = pci_dev_table; last_bus = i7core_pci_lastbus(); while (table && table->descr) { for (i = 0; i < table->n_devs; i++) { pdev = NULL; do { rc = i7core_get_onedevice(&pdev, table, i, last_bus); if (rc < 0) { if (i == 0) { i = table->n_devs; break; } i7core_put_all_devices(); return -ENODEV; } } while (pdev); } table++; } return 0; } static int mci_bind_devs(struct mem_ctl_info *mci, struct i7core_dev *i7core_dev) { struct i7core_pvt *pvt = mci->pvt_info; struct pci_dev *pdev; int i, func, slot; char *family; pvt->is_registered = false; pvt->enable_scrub = false; for (i = 0; i < i7core_dev->n_devs; i++) { pdev = i7core_dev->pdev[i]; if (!pdev) continue; func = PCI_FUNC(pdev->devfn); slot = PCI_SLOT(pdev->devfn); if (slot == 3) { if (unlikely(func > MAX_MCR_FUNC)) goto error; pvt->pci_mcr[func] = pdev; } else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) { if (unlikely(func > MAX_CHAN_FUNC)) goto error; pvt->pci_ch[slot - 4][func] = pdev; } else if (!slot && !func) { pvt->pci_noncore = pdev; /* Detect the processor family */ switch (pdev->device) { case PCI_DEVICE_ID_INTEL_I7_NONCORE: family = "Xeon 35xx/ i7core"; pvt->enable_scrub = false; break; case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT: family = "i7-800/i5-700"; pvt->enable_scrub = false; break; case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE: family = "Xeon 34xx"; pvt->enable_scrub = false; break; case PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT: family = "Xeon 55xx"; pvt->enable_scrub = true; break; case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2: family = "Xeon 56xx / i7-900"; pvt->enable_scrub = true; break; default: family = "unknown"; pvt->enable_scrub = false; } 
debugf0("Detected a processor type %s\n", family); } else goto error; debugf0("Associated fn %d.%d, dev = %p, socket %d\n", PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), pdev, i7core_dev->socket); if (PCI_SLOT(pdev->devfn) == 3 && PCI_FUNC(pdev->devfn) == 2) pvt->is_registered = true; } return 0; error: i7core_printk(KERN_ERR, "Device %d, function %d " "is out of the expected range\n", slot, func); return -EINVAL; } /**************************************************************************** Error check routines ****************************************************************************/ static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci, const int chan, const int dimm, const int add) { char *msg; struct i7core_pvt *pvt = mci->pvt_info; int row = pvt->csrow_map[chan][dimm], i; for (i = 0; i < add; i++) { msg = kasprintf(GFP_KERNEL, "Corrected error " "(Socket=%d channel=%d dimm=%d)", pvt->i7core_dev->socket, chan, dimm); edac_mc_handle_fbd_ce(mci, row, 0, msg); kfree (msg); } } static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci, const int chan, const int new0, const int new1, const int new2) { struct i7core_pvt *pvt = mci->pvt_info; int add0 = 0, add1 = 0, add2 = 0; /* Updates CE counters if it is not the first time here */ if (pvt->ce_count_available) { /* Updates CE counters */ add2 = new2 - pvt->rdimm_last_ce_count[chan][2]; add1 = new1 - pvt->rdimm_last_ce_count[chan][1]; add0 = new0 - pvt->rdimm_last_ce_count[chan][0]; if (add2 < 0) add2 += 0x7fff; pvt->rdimm_ce_count[chan][2] += add2; if (add1 < 0) add1 += 0x7fff; pvt->rdimm_ce_count[chan][1] += add1; if (add0 < 0) add0 += 0x7fff; pvt->rdimm_ce_count[chan][0] += add0; } else pvt->ce_count_available = 1; /* Store the new values */ pvt->rdimm_last_ce_count[chan][2] = new2; pvt->rdimm_last_ce_count[chan][1] = new1; pvt->rdimm_last_ce_count[chan][0] = new0; /*updated the edac core */ if (add0 != 0) i7core_rdimm_update_csrow(mci, chan, 0, add0); if (add1 != 0) 
i7core_rdimm_update_csrow(mci, chan, 1, add1); if (add2 != 0) i7core_rdimm_update_csrow(mci, chan, 2, add2); } static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; u32 rcv[3][2]; int i, new0, new1, new2; /*Read DEV 3: FUN 2: MC_COR_ECC_CNT regs directly*/ pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0, &rcv[0][0]); pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1, &rcv[0][1]); pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2, &rcv[1][0]); pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3, &rcv[1][1]); pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4, &rcv[2][0]); pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5, &rcv[2][1]); for (i = 0 ; i < 3; i++) { debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n", (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]); /*if the channel has 3 dimms*/ if (pvt->channel[i].dimms > 2) { new0 = DIMM_BOT_COR_ERR(rcv[i][0]); new1 = DIMM_TOP_COR_ERR(rcv[i][0]); new2 = DIMM_BOT_COR_ERR(rcv[i][1]); } else { new0 = DIMM_TOP_COR_ERR(rcv[i][0]) + DIMM_BOT_COR_ERR(rcv[i][0]); new1 = DIMM_TOP_COR_ERR(rcv[i][1]) + DIMM_BOT_COR_ERR(rcv[i][1]); new2 = 0; } i7core_rdimm_update_ce_count(mci, i, new0, new1, new2); } } /* This function is based on the device 3 function 4 registers as described on: * Intel Xeon Processor 5500 Series Datasheet Volume 2 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf * also available at: * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf */ static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; u32 rcv1, rcv0; int new0, new1, new2; if (!pvt->pci_mcr[4]) { debugf0("%s MCR registers not found\n", __func__); return; } /* Corrected test errors */ pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1); pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0); /* Store the new values */ new2 = DIMM2_COR_ERR(rcv1); new1 = 
DIMM1_COR_ERR(rcv0); new0 = DIMM0_COR_ERR(rcv0); /* Updates CE counters if it is not the first time here */ if (pvt->ce_count_available) { /* Updates CE counters */ int add0, add1, add2; add2 = new2 - pvt->udimm_last_ce_count[2]; add1 = new1 - pvt->udimm_last_ce_count[1]; add0 = new0 - pvt->udimm_last_ce_count[0]; if (add2 < 0) add2 += 0x7fff; pvt->udimm_ce_count[2] += add2; if (add1 < 0) add1 += 0x7fff; pvt->udimm_ce_count[1] += add1; if (add0 < 0) add0 += 0x7fff; pvt->udimm_ce_count[0] += add0; if (add0 | add1 | add2) i7core_printk(KERN_ERR, "New Corrected error(s): " "dimm0: +%d, dimm1: +%d, dimm2 +%d\n", add0, add1, add2); } else pvt->ce_count_available = 1; /* Store the new values */ pvt->udimm_last_ce_count[2] = new2; pvt->udimm_last_ce_count[1] = new1; pvt->udimm_last_ce_count[0] = new0; } /* * According with tables E-11 and E-12 of chapter E.3.3 of Intel 64 and IA-32 * Architectures Software Developer’s Manual Volume 3B. * Nehalem are defined as family 0x06, model 0x1a * * The MCA registers used here are the following ones: * struct mce field MCA Register * m->status MSR_IA32_MC8_STATUS * m->addr MSR_IA32_MC8_ADDR * m->misc MSR_IA32_MC8_MISC * In the case of Nehalem, the error information is masked at .status and .misc * fields */ static void i7core_mce_output_error(struct mem_ctl_info *mci, const struct mce *m) { struct i7core_pvt *pvt = mci->pvt_info; char *type, *optype, *err, *msg; unsigned long error = m->status & 0x1ff0000l; u32 optypenum = (m->status >> 4) & 0x07; u32 core_err_cnt = (m->status >> 38) & 0x7fff; u32 dimm = (m->misc >> 16) & 0x3; u32 channel = (m->misc >> 18) & 0x3; u32 syndrome = m->misc >> 32; u32 errnum = find_first_bit(&error, 32); int csrow; if (m->mcgstatus & 1) type = "FATAL"; else type = "NON_FATAL"; switch (optypenum) { case 0: optype = "generic undef request"; break; case 1: optype = "read error"; break; case 2: optype = "write error"; break; case 3: optype = "addr/cmd error"; break; case 4: optype = "scrubbing error"; break; 
default: optype = "reserved"; break; } switch (errnum) { case 16: err = "read ECC error"; break; case 17: err = "RAS ECC error"; break; case 18: err = "write parity error"; break; case 19: err = "redundacy loss"; break; case 20: err = "reserved"; break; case 21: err = "memory range error"; break; case 22: err = "RTID out of range"; break; case 23: err = "address parity error"; break; case 24: err = "byte enable parity error"; break; default: err = "unknown"; } /* FIXME: should convert addr into bank and rank information */ msg = kasprintf(GFP_ATOMIC, "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, " "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n", type, (long long) m->addr, m->cpu, dimm, channel, syndrome, core_err_cnt, (long long)m->status, (long long)m->misc, optype, err); debugf0("%s", msg); csrow = pvt->csrow_map[channel][dimm]; /* Call the helper to output message */ if (m->mcgstatus & 1) edac_mc_handle_fbd_ue(mci, csrow, 0, 0 /* FIXME: should be channel here */, msg); else if (!pvt->is_registered) edac_mc_handle_fbd_ce(mci, csrow, 0 /* FIXME: should be channel here */, msg); kfree(msg); } /* * i7core_check_error Retrieve and process errors reported by the * hardware. Called by the Core module. */ static void i7core_check_error(struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; int i; unsigned count = 0; struct mce *m; /* * MCE first step: Copy all mce errors into a temporary buffer * We use a double buffering here, to reduce the risk of * losing an error. 
*/ smp_rmb(); count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in) % MCE_LOG_LEN; if (!count) goto check_ce_error; m = pvt->mce_outentry; if (pvt->mce_in + count > MCE_LOG_LEN) { unsigned l = MCE_LOG_LEN - pvt->mce_in; memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l); smp_wmb(); pvt->mce_in = 0; count -= l; m += l; } memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count); smp_wmb(); pvt->mce_in += count; smp_rmb(); if (pvt->mce_overrun) { i7core_printk(KERN_ERR, "Lost %d memory errors\n", pvt->mce_overrun); smp_wmb(); pvt->mce_overrun = 0; } /* * MCE second step: parse errors and display */ for (i = 0; i < count; i++) i7core_mce_output_error(mci, &pvt->mce_outentry[i]); /* * Now, let's increment CE error counts */ check_ce_error: if (!pvt->is_registered) i7core_udimm_check_mc_ecc_err(mci); else i7core_rdimm_check_mc_ecc_err(mci); } /* * i7core_mce_check_error Replicates mcelog routine to get errors * This routine simply queues mcelog errors, and * return. The error itself should be handled later * by i7core_check_error. * WARNING: As this routine should be called at NMI time, extra care should * be taken to avoid deadlocks, and to be as fast as possible. 
*/ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val, void *data) { struct mce *mce = (struct mce *)data; struct i7core_dev *i7_dev; struct mem_ctl_info *mci; struct i7core_pvt *pvt; i7_dev = get_i7core_dev(mce->socketid); if (!i7_dev) return NOTIFY_BAD; mci = i7_dev->mci; pvt = mci->pvt_info; /* * Just let mcelog handle it if the error is * outside the memory controller */ if (((mce->status & 0xffff) >> 7) != 1) return NOTIFY_DONE; /* Bank 8 registers are the only ones that we know how to handle */ if (mce->bank != 8) return NOTIFY_DONE; #ifdef CONFIG_SMP /* Only handle if it is the right mc controller */ if (mce->socketid != pvt->i7core_dev->socket) return NOTIFY_DONE; #endif smp_rmb(); if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) { smp_wmb(); pvt->mce_overrun++; return NOTIFY_DONE; } /* Copy memory error at the ringbuffer */ memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce)); smp_wmb(); pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN; /* Handle fatal errors immediately */ if (mce->mcgstatus & 1) i7core_check_error(mci); /* Advise mcelog that the errors were handled */ return NOTIFY_STOP; } static struct notifier_block i7_mce_dec = { .notifier_call = i7core_mce_check_error, }; struct memdev_dmi_entry { u8 type; u8 length; u16 handle; u16 phys_mem_array_handle; u16 mem_err_info_handle; u16 total_width; u16 data_width; u16 size; u8 form; u8 device_set; u8 device_locator; u8 bank_locator; u8 memory_type; u16 type_detail; u16 speed; u8 manufacturer; u8 serial_number; u8 asset_tag; u8 part_number; u8 attributes; u32 extended_size; u16 conf_mem_clk_speed; } __attribute__((__packed__)); /* * Decode the DRAM Clock Frequency, be paranoid, make sure that all * memory devices show the same speed, and if they don't then consider * all speeds to be invalid. 
*/ static void decode_dclk(const struct dmi_header *dh, void *_dclk_freq) { int *dclk_freq = _dclk_freq; u16 dmi_mem_clk_speed; if (*dclk_freq == -1) return; if (dh->type == DMI_ENTRY_MEM_DEVICE) { struct memdev_dmi_entry *memdev_dmi_entry = (struct memdev_dmi_entry *)dh; unsigned long conf_mem_clk_speed_offset = (unsigned long)&memdev_dmi_entry->conf_mem_clk_speed - (unsigned long)&memdev_dmi_entry->type; unsigned long speed_offset = (unsigned long)&memdev_dmi_entry->speed - (unsigned long)&memdev_dmi_entry->type; /* Check that a DIMM is present */ if (memdev_dmi_entry->size == 0) return; /* * Pick the configured speed if it's available, otherwise * pick the DIMM speed, or we don't have a speed. */ if (memdev_dmi_entry->length > conf_mem_clk_speed_offset) { dmi_mem_clk_speed = memdev_dmi_entry->conf_mem_clk_speed; } else if (memdev_dmi_entry->length > speed_offset) { dmi_mem_clk_speed = memdev_dmi_entry->speed; } else { *dclk_freq = -1; return; } if (*dclk_freq == 0) { /* First pass, speed was 0 */ if (dmi_mem_clk_speed > 0) { /* Set speed if a valid speed is read */ *dclk_freq = dmi_mem_clk_speed; } else { /* Otherwise we don't have a valid speed */ *dclk_freq = -1; } } else if (*dclk_freq > 0 && *dclk_freq != dmi_mem_clk_speed) { /* * If we have a speed, check that all DIMMS are the same * speed, otherwise set the speed as invalid. */ *dclk_freq = -1; } } } /* * The default DCLK frequency is used as a fallback if we * fail to find anything reliable in the DMI. The value * is taken straight from the datasheet. */ #define DEFAULT_DCLK_FREQ 800 static int get_dclk_freq(void) { int dclk_freq = 0; dmi_walk(decode_dclk, (void *)&dclk_freq); if (dclk_freq < 1) return DEFAULT_DCLK_FREQ; return dclk_freq; } /* * set_sdram_scrub_rate This routine sets byte/sec bandwidth scrub rate * to hardware according to SCRUBINTERVAL formula * found in datasheet. 
*/ static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw) { struct i7core_pvt *pvt = mci->pvt_info; struct pci_dev *pdev; u32 dw_scrub; u32 dw_ssr; /* Get data from the MC register, function 2 */ pdev = pvt->pci_mcr[2]; if (!pdev) return -ENODEV; pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &dw_scrub); if (new_bw == 0) { /* Prepare to disable petrol scrub */ dw_scrub &= ~STARTSCRUB; /* Stop the patrol scrub engine */ write_and_test(pdev, MC_SCRUB_CONTROL, dw_scrub & ~SCRUBINTERVAL_MASK); /* Get current status of scrub rate and set bit to disable */ pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr); dw_ssr &= ~SSR_MODE_MASK; dw_ssr |= SSR_MODE_DISABLE; } else { const int cache_line_size = 64; const u32 freq_dclk_mhz = pvt->dclk_freq; unsigned long long scrub_interval; /* * Translate the desired scrub rate to a register value and * program the corresponding register value. */ scrub_interval = (unsigned long long)freq_dclk_mhz * cache_line_size * 1000000; do_div(scrub_interval, new_bw); if (!scrub_interval || scrub_interval > SCRUBINTERVAL_MASK) return -EINVAL; dw_scrub = SCRUBINTERVAL_MASK & scrub_interval; /* Start the patrol scrub engine */ pci_write_config_dword(pdev, MC_SCRUB_CONTROL, STARTSCRUB | dw_scrub); /* Get current status of scrub rate and set bit to enable */ pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr); dw_ssr &= ~SSR_MODE_MASK; dw_ssr |= SSR_MODE_ENABLE; } /* Disable or enable scrubbing */ pci_write_config_dword(pdev, MC_SSRCONTROL, dw_ssr); return new_bw; } /* * get_sdram_scrub_rate This routine convert current scrub rate value * into byte/sec bandwidth accourding to * SCRUBINTERVAL formula found in datasheet. 
*/ static int get_sdram_scrub_rate(struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; struct pci_dev *pdev; const u32 cache_line_size = 64; const u32 freq_dclk_mhz = pvt->dclk_freq; unsigned long long scrub_rate; u32 scrubval; /* Get data from the MC register, function 2 */ pdev = pvt->pci_mcr[2]; if (!pdev) return -ENODEV; /* Get current scrub control data */ pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &scrubval); /* Mask highest 8-bits to 0 */ scrubval &= SCRUBINTERVAL_MASK; if (!scrubval) return 0; /* Calculate scrub rate value into byte/sec bandwidth */ scrub_rate = (unsigned long long)freq_dclk_mhz * 1000000 * cache_line_size; do_div(scrub_rate, scrubval); return (int)scrub_rate; } static void enable_sdram_scrub_setting(struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; u32 pci_lock; /* Unlock writes to pci registers */ pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock); pci_lock &= ~0x3; pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, pci_lock | MC_CFG_UNLOCK); mci->set_sdram_scrub_rate = set_sdram_scrub_rate; mci->get_sdram_scrub_rate = get_sdram_scrub_rate; } static void disable_sdram_scrub_setting(struct mem_ctl_info *mci) { struct i7core_pvt *pvt = mci->pvt_info; u32 pci_lock; /* Lock writes to pci registers */ pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock); pci_lock &= ~0x3; pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, pci_lock | MC_CFG_LOCK); } static void i7core_pci_ctl_create(struct i7core_pvt *pvt) { pvt->i7core_pci = edac_pci_create_generic_ctl( &pvt->i7core_dev->pdev[0]->dev, EDAC_MOD_STR); if (unlikely(!pvt->i7core_pci)) i7core_printk(KERN_WARNING, "Unable to setup PCI error report via EDAC\n"); } static void i7core_pci_ctl_release(struct i7core_pvt *pvt) { if (likely(pvt->i7core_pci)) edac_pci_release_generic_ctl(pvt->i7core_pci); else i7core_printk(KERN_ERR, "Couldn't find mem_ctl_info for socket %d\n", pvt->i7core_dev->socket); pvt->i7core_pci = NULL; } 
static void i7core_unregister_mci(struct i7core_dev *i7core_dev) { struct mem_ctl_info *mci = i7core_dev->mci; struct i7core_pvt *pvt; if (unlikely(!mci || !mci->pvt_info)) { debugf0("MC: " __FILE__ ": %s(): dev = %p\n", __func__, &i7core_dev->pdev[0]->dev); i7core_printk(KERN_ERR, "Couldn't find mci handler\n"); return; } pvt = mci->pvt_info; debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", __func__, mci, &i7core_dev->pdev[0]->dev); /* Disable scrubrate setting */ if (pvt->enable_scrub) disable_sdram_scrub_setting(mci); mce_unregister_decode_chain(&i7_mce_dec); /* Disable EDAC polling */ i7core_pci_ctl_release(pvt); /* Remove MC sysfs nodes */ edac_mc_del_mc(mci->dev); debugf1("%s: free mci struct\n", mci->ctl_name); kfree(mci->ctl_name); edac_mc_free(mci); i7core_dev->mci = NULL; } static int i7core_register_mci(struct i7core_dev *i7core_dev) { struct mem_ctl_info *mci; struct i7core_pvt *pvt; int rc, channels, csrows; /* Check the number of active and not disabled channels */ rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows); if (unlikely(rc < 0)) return rc; /* allocate a new MC control structure */ mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket); if (unlikely(!mci)) return -ENOMEM; debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", __func__, mci, &i7core_dev->pdev[0]->dev); pvt = mci->pvt_info; memset(pvt, 0, sizeof(*pvt)); /* Associates i7core_dev and mci for future usage */ pvt->i7core_dev = i7core_dev; i7core_dev->mci = mci; /* * FIXME: how to handle RDDR3 at MCI level? 
It is possible to have * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different * memory channels */ mci->mtype_cap = MEM_FLAG_DDR3; mci->edac_ctl_cap = EDAC_FLAG_NONE; mci->edac_cap = EDAC_FLAG_NONE; mci->mod_name = "i7core_edac.c"; mci->mod_ver = I7CORE_REVISION; mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d", i7core_dev->socket); mci->dev_name = pci_name(i7core_dev->pdev[0]); mci->ctl_page_to_phys = NULL; /* Store pci devices at mci for faster access */ rc = mci_bind_devs(mci, i7core_dev); if (unlikely(rc < 0)) goto fail0; if (pvt->is_registered) mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs; else mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs; /* Get dimm basic config */ get_dimm_config(mci); /* record ptr to the generic device */ mci->dev = &i7core_dev->pdev[0]->dev; /* Set the function pointer to an actual operation function */ mci->edac_check = i7core_check_error; /* Enable scrubrate setting */ if (pvt->enable_scrub) enable_sdram_scrub_setting(mci); /* add this new MC control structure to EDAC's list of MCs */ if (unlikely(edac_mc_add_mc(mci))) { debugf0("MC: " __FILE__ ": %s(): failed edac_mc_add_mc()\n", __func__); /* FIXME: perhaps some code should go here that disables error * reporting if we just enabled it */ rc = -EINVAL; goto fail0; } /* Default error mask is any memory */ pvt->inject.channel = 0; pvt->inject.dimm = -1; pvt->inject.rank = -1; pvt->inject.bank = -1; pvt->inject.page = -1; pvt->inject.col = -1; /* allocating generic PCI control info */ i7core_pci_ctl_create(pvt); /* DCLK for scrub rate setting */ pvt->dclk_freq = get_dclk_freq(); mce_register_decode_chain(&i7_mce_dec); return 0; fail0: kfree(mci->ctl_name); edac_mc_free(mci); i7core_dev->mci = NULL; return rc; } /* * i7core_probe Probe for ONE instance of device to see if it is * present. 
* return: * 0 for FOUND a device * < 0 for error code */ static int __devinit i7core_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int rc, count = 0; struct i7core_dev *i7core_dev; /* get the pci devices we want to reserve for our use */ mutex_lock(&i7core_edac_lock); /* * All memory controllers are allocated at the first pass. */ if (unlikely(probed >= 1)) { mutex_unlock(&i7core_edac_lock); return -ENODEV; } probed++; rc = i7core_get_all_devices(); if (unlikely(rc < 0)) goto fail0; list_for_each_entry(i7core_dev, &i7core_edac_list, list) { count++; rc = i7core_register_mci(i7core_dev); if (unlikely(rc < 0)) goto fail1; } /* * Nehalem-EX uses a different memory controller. However, as the * memory controller is not visible on some Nehalem/Nehalem-EP, we * need to indirectly probe via a X58 PCI device. The same devices * are found on (some) Nehalem-EX. So, on those machines, the * probe routine needs to return -ENODEV, as the actual Memory * Controller registers won't be detected. */ if (!count) { rc = -ENODEV; goto fail1; } i7core_printk(KERN_INFO, "Driver loaded, %d memory controller(s) found.\n", count); mutex_unlock(&i7core_edac_lock); return 0; fail1: list_for_each_entry(i7core_dev, &i7core_edac_list, list) i7core_unregister_mci(i7core_dev); i7core_put_all_devices(); fail0: mutex_unlock(&i7core_edac_lock); return rc; } /* * i7core_remove destructor for one instance of device * */ static void __devexit i7core_remove(struct pci_dev *pdev) { struct i7core_dev *i7core_dev; debugf0(__FILE__ ": %s()\n", __func__); /* * we have a trouble here: pdev value for removal will be wrong, since * it will point to the X58 register used to detect that the machine * is a Nehalem or upper design. 
However, due to the way several PCI * devices are grouped together to provide MC functionality, we need * to use a different method for releasing the devices */ mutex_lock(&i7core_edac_lock); if (unlikely(!probed)) { mutex_unlock(&i7core_edac_lock); return; } list_for_each_entry(i7core_dev, &i7core_edac_list, list) i7core_unregister_mci(i7core_dev); /* Release PCI resources */ i7core_put_all_devices(); probed--; mutex_unlock(&i7core_edac_lock); } MODULE_DEVICE_TABLE(pci, i7core_pci_tbl); /* * i7core_driver pci_driver structure for this module * */ static struct pci_driver i7core_driver = { .name = "i7core_edac", .probe = i7core_probe, .remove = __devexit_p(i7core_remove), .id_table = i7core_pci_tbl, }; /* * i7core_init Module entry function * Try to initialize this module for its devices */ static int __init i7core_init(void) { int pci_rc; debugf2("MC: " __FILE__ ": %s()\n", __func__); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); if (use_pci_fixup) i7core_xeon_pci_fixup(pci_dev_table); pci_rc = pci_register_driver(&i7core_driver); if (pci_rc >= 0) return 0; i7core_printk(KERN_ERR, "Failed to register device with error %d.\n", pci_rc); return pci_rc; } /* * i7core_exit() Module exit function * Unregister the driver */ static void __exit i7core_exit(void) { debugf2("MC: " __FILE__ ": %s()\n", __func__); pci_unregister_driver(&i7core_driver); } module_init(i7core_init); module_exit(i7core_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>"); MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)"); MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - " I7CORE_REVISION); module_param(edac_op_state, int, 0444); MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
gpl-2.0
saeedhadi/linux-at91
arch/arm/mach-imx/devices/platform-mx2-emma.c
2737
1090
/* * Copyright (C) 2010 Pengutronix * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include "../hardware.h" #include "devices-common.h" #define imx_mx2_emmaprp_data_entry_single(soc) \ { \ .iobase = soc ## _EMMAPRP_BASE_ADDR, \ .iosize = SZ_32, \ .irq = soc ## _INT_EMMAPRP, \ } #ifdef CONFIG_SOC_IMX27 const struct imx_mx2_emma_data imx27_mx2_emmaprp_data __initconst = imx_mx2_emmaprp_data_entry_single(MX27); #endif /* ifdef CONFIG_SOC_IMX27 */ struct platform_device *__init imx_add_mx2_emmaprp( const struct imx_mx2_emma_data *data) { struct resource res[] = { { .start = data->iobase, .end = data->iobase + data->iosize - 1, .flags = IORESOURCE_MEM, }, { .start = data->irq, .end = data->irq, .flags = IORESOURCE_IRQ, }, }; return imx_add_platform_device_dmamask("m2m-emmaprp", 0, res, 2, NULL, 0, DMA_BIT_MASK(32)); }
gpl-2.0
roggin/iconia-a500-kernel
drivers/mtd/chips/map_rom.c
3505
2830
/* * Common code to handle map devices which are simple ROM * (C) 2000 Red Hat. GPL'd. */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <asm/io.h> #include <asm/byteorder.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> static int maprom_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); static int maprom_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *); static void maprom_nop (struct mtd_info *); static struct mtd_info *map_rom_probe(struct map_info *map); static int maprom_erase (struct mtd_info *mtd, struct erase_info *info); static unsigned long maprom_unmapped_area(struct mtd_info *, unsigned long, unsigned long, unsigned long); static struct mtd_chip_driver maprom_chipdrv = { .probe = map_rom_probe, .name = "map_rom", .module = THIS_MODULE }; static struct mtd_info *map_rom_probe(struct map_info *map) { struct mtd_info *mtd; mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); if (!mtd) return NULL; map->fldrv = &maprom_chipdrv; mtd->priv = map; mtd->name = map->name; mtd->type = MTD_ROM; mtd->size = map->size; mtd->get_unmapped_area = maprom_unmapped_area; mtd->read = maprom_read; mtd->write = maprom_write; mtd->sync = maprom_nop; mtd->erase = maprom_erase; mtd->flags = MTD_CAP_ROM; mtd->erasesize = map->size; mtd->writesize = 1; __module_get(THIS_MODULE); return mtd; } /* * Allow NOMMU mmap() to directly map the device (if not NULL) * - return the address to which the offset maps * - return -ENOSYS to indicate refusal to do the mapping */ static unsigned long maprom_unmapped_area(struct mtd_info *mtd, unsigned long len, unsigned long offset, unsigned long flags) { struct map_info *map = mtd->priv; return (unsigned long) map->virt + offset; } static int maprom_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct map_info *map = mtd->priv; map_copy_from(map, buf, from, len); *retlen = 
len; return 0; } static void maprom_nop(struct mtd_info *mtd) { /* Nothing to see here */ } static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { printk(KERN_NOTICE "maprom_write called\n"); return -EIO; } static int maprom_erase (struct mtd_info *mtd, struct erase_info *info) { /* We do our best 8) */ return -EROFS; } static int __init map_rom_init(void) { register_mtd_chip_driver(&maprom_chipdrv); return 0; } static void __exit map_rom_exit(void) { unregister_mtd_chip_driver(&maprom_chipdrv); } module_init(map_rom_init); module_exit(map_rom_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); MODULE_DESCRIPTION("MTD chip driver for ROM chips");
gpl-2.0
vmayoral/ubuntu-vivid
arch/sparc/prom/bootstr_32.c
4273
1304
/* * bootstr.c: Boot string/argument acquisition from the PROM. * * Copyright(C) 1995 David S. Miller (davem@caip.rutgers.edu) */ #include <linux/string.h> #include <asm/oplib.h> #include <linux/init.h> #define BARG_LEN 256 static char barg_buf[BARG_LEN] = { 0 }; static char fetched __initdata = 0; char * __init prom_getbootargs(void) { int iter; char *cp, *arg; /* This check saves us from a panic when bootfd patches args. */ if (fetched) { return barg_buf; } switch (prom_vers) { case PROM_V0: cp = barg_buf; /* Start from 1 and go over fd(0,0,0)kernel */ for (iter = 1; iter < 8; iter++) { arg = (*(romvec->pv_v0bootargs))->argv[iter]; if (arg == NULL) break; while (*arg != 0) { /* Leave place for space and null. */ if (cp >= barg_buf + BARG_LEN - 2) /* We might issue a warning here. */ break; *cp++ = *arg++; } *cp++ = ' '; if (cp >= barg_buf + BARG_LEN - 1) /* We might issue a warning here. */ break; } *cp = 0; break; case PROM_V2: case PROM_V3: /* * V3 PROM cannot supply as with more than 128 bytes * of an argument. But a smart bootstrap loader can. */ strlcpy(barg_buf, *romvec->pv_v2bootargs.bootargs, sizeof(barg_buf)); break; default: break; } fetched = 1; return barg_buf; }
gpl-2.0
627656505/linux
arch/sparc/prom/bootstr_32.c
4273
1304
/* * bootstr.c: Boot string/argument acquisition from the PROM. * * Copyright(C) 1995 David S. Miller (davem@caip.rutgers.edu) */ #include <linux/string.h> #include <asm/oplib.h> #include <linux/init.h> #define BARG_LEN 256 static char barg_buf[BARG_LEN] = { 0 }; static char fetched __initdata = 0; char * __init prom_getbootargs(void) { int iter; char *cp, *arg; /* This check saves us from a panic when bootfd patches args. */ if (fetched) { return barg_buf; } switch (prom_vers) { case PROM_V0: cp = barg_buf; /* Start from 1 and go over fd(0,0,0)kernel */ for (iter = 1; iter < 8; iter++) { arg = (*(romvec->pv_v0bootargs))->argv[iter]; if (arg == NULL) break; while (*arg != 0) { /* Leave place for space and null. */ if (cp >= barg_buf + BARG_LEN - 2) /* We might issue a warning here. */ break; *cp++ = *arg++; } *cp++ = ' '; if (cp >= barg_buf + BARG_LEN - 1) /* We might issue a warning here. */ break; } *cp = 0; break; case PROM_V2: case PROM_V3: /* * V3 PROM cannot supply as with more than 128 bytes * of an argument. But a smart bootstrap loader can. */ strlcpy(barg_buf, *romvec->pv_v2bootargs.bootargs, sizeof(barg_buf)); break; default: break; } fetched = 1; return barg_buf; }
gpl-2.0
Kuzma30/NT34K
fs/ocfs2/super.c
4529
70782
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * super.c * * load/unload driver, mount/dismount volumes * * Copyright (C) 2002, 2004 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/module.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/init.h> #include <linux/random.h> #include <linux/statfs.h> #include <linux/moduleparam.h> #include <linux/blkdev.h> #include <linux/socket.h> #include <linux/inet.h> #include <linux/parser.h> #include <linux/crc32.h> #include <linux/debugfs.h> #include <linux/mount.h> #include <linux/seq_file.h> #include <linux/quotaops.h> #include <linux/cleancache.h> #define CREATE_TRACE_POINTS #include "ocfs2_trace.h" #include <cluster/masklog.h> #include "ocfs2.h" /* this should be the only file to include a version 1 header */ #include "ocfs1_fs_compat.h" #include "alloc.h" #include "aops.h" #include "blockcheck.h" #include "dlmglue.h" #include "export.h" #include "extent_map.h" #include "heartbeat.h" #include "inode.h" #include "journal.h" #include "localalloc.h" #include "namei.h" #include "slot_map.h" #include "super.h" #include "sysfile.h" #include "uptodate.h" #include "ver.h" #include "xattr.h" #include "quota.h" #include "refcounttree.h" #include 
"suballoc.h" #include "buffer_head_io.h" static struct kmem_cache *ocfs2_inode_cachep = NULL; struct kmem_cache *ocfs2_dquot_cachep; struct kmem_cache *ocfs2_qf_chunk_cachep; /* OCFS2 needs to schedule several different types of work which * require cluster locking, disk I/O, recovery waits, etc. Since these * types of work tend to be heavy we avoid using the kernel events * workqueue and schedule on our own. */ struct workqueue_struct *ocfs2_wq = NULL; static struct dentry *ocfs2_debugfs_root = NULL; MODULE_AUTHOR("Oracle"); MODULE_LICENSE("GPL"); struct mount_options { unsigned long commit_interval; unsigned long mount_opt; unsigned int atime_quantum; signed short slot; int localalloc_opt; unsigned int resv_level; int dir_resv_level; char cluster_stack[OCFS2_STACK_LABEL_LEN + 1]; }; static int ocfs2_parse_options(struct super_block *sb, char *options, struct mount_options *mopt, int is_remount); static int ocfs2_check_set_options(struct super_block *sb, struct mount_options *options); static int ocfs2_show_options(struct seq_file *s, struct dentry *root); static void ocfs2_put_super(struct super_block *sb); static int ocfs2_mount_volume(struct super_block *sb); static int ocfs2_remount(struct super_block *sb, int *flags, char *data); static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err); static int ocfs2_initialize_mem_caches(void); static void ocfs2_free_mem_caches(void); static void ocfs2_delete_osb(struct ocfs2_super *osb); static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf); static int ocfs2_sync_fs(struct super_block *sb, int wait); static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb); static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb); static void ocfs2_release_system_inodes(struct ocfs2_super *osb); static int ocfs2_check_volume(struct ocfs2_super *osb); static int ocfs2_verify_volume(struct ocfs2_dinode *di, struct buffer_head *bh, u32 sectsize, struct ocfs2_blockcheck_stats *stats); static 
int ocfs2_initialize_super(struct super_block *sb, struct buffer_head *bh, int sector_size, struct ocfs2_blockcheck_stats *stats); static int ocfs2_get_sector(struct super_block *sb, struct buffer_head **bh, int block, int sect_size); static struct inode *ocfs2_alloc_inode(struct super_block *sb); static void ocfs2_destroy_inode(struct inode *inode); static int ocfs2_susp_quotas(struct ocfs2_super *osb, int unsuspend); static int ocfs2_enable_quotas(struct ocfs2_super *osb); static void ocfs2_disable_quotas(struct ocfs2_super *osb); static const struct super_operations ocfs2_sops = { .statfs = ocfs2_statfs, .alloc_inode = ocfs2_alloc_inode, .destroy_inode = ocfs2_destroy_inode, .drop_inode = ocfs2_drop_inode, .evict_inode = ocfs2_evict_inode, .sync_fs = ocfs2_sync_fs, .put_super = ocfs2_put_super, .remount_fs = ocfs2_remount, .show_options = ocfs2_show_options, .quota_read = ocfs2_quota_read, .quota_write = ocfs2_quota_write, }; enum { Opt_barrier, Opt_err_panic, Opt_err_ro, Opt_intr, Opt_nointr, Opt_hb_none, Opt_hb_local, Opt_hb_global, Opt_data_ordered, Opt_data_writeback, Opt_atime_quantum, Opt_slot, Opt_commit, Opt_localalloc, Opt_localflocks, Opt_stack, Opt_user_xattr, Opt_nouser_xattr, Opt_inode64, Opt_acl, Opt_noacl, Opt_usrquota, Opt_grpquota, Opt_coherency_buffered, Opt_coherency_full, Opt_resv_level, Opt_dir_resv_level, Opt_err, }; static const match_table_t tokens = { {Opt_barrier, "barrier=%u"}, {Opt_err_panic, "errors=panic"}, {Opt_err_ro, "errors=remount-ro"}, {Opt_intr, "intr"}, {Opt_nointr, "nointr"}, {Opt_hb_none, OCFS2_HB_NONE}, {Opt_hb_local, OCFS2_HB_LOCAL}, {Opt_hb_global, OCFS2_HB_GLOBAL}, {Opt_data_ordered, "data=ordered"}, {Opt_data_writeback, "data=writeback"}, {Opt_atime_quantum, "atime_quantum=%u"}, {Opt_slot, "preferred_slot=%u"}, {Opt_commit, "commit=%u"}, {Opt_localalloc, "localalloc=%d"}, {Opt_localflocks, "localflocks"}, {Opt_stack, "cluster_stack=%s"}, {Opt_user_xattr, "user_xattr"}, {Opt_nouser_xattr, "nouser_xattr"}, {Opt_inode64, 
"inode64"}, {Opt_acl, "acl"}, {Opt_noacl, "noacl"}, {Opt_usrquota, "usrquota"}, {Opt_grpquota, "grpquota"}, {Opt_coherency_buffered, "coherency=buffered"}, {Opt_coherency_full, "coherency=full"}, {Opt_resv_level, "resv_level=%u"}, {Opt_dir_resv_level, "dir_resv_level=%u"}, {Opt_err, NULL} }; #ifdef CONFIG_DEBUG_FS static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len) { struct ocfs2_cluster_connection *cconn = osb->cconn; struct ocfs2_recovery_map *rm = osb->recovery_map; struct ocfs2_orphan_scan *os = &osb->osb_orphan_scan; int i, out = 0; out += snprintf(buf + out, len - out, "%10s => Id: %-s Uuid: %-s Gen: 0x%X Label: %-s\n", "Device", osb->dev_str, osb->uuid_str, osb->fs_generation, osb->vol_label); out += snprintf(buf + out, len - out, "%10s => State: %d Flags: 0x%lX\n", "Volume", atomic_read(&osb->vol_state), osb->osb_flags); out += snprintf(buf + out, len - out, "%10s => Block: %lu Cluster: %d\n", "Sizes", osb->sb->s_blocksize, osb->s_clustersize); out += snprintf(buf + out, len - out, "%10s => Compat: 0x%X Incompat: 0x%X " "ROcompat: 0x%X\n", "Features", osb->s_feature_compat, osb->s_feature_incompat, osb->s_feature_ro_compat); out += snprintf(buf + out, len - out, "%10s => Opts: 0x%lX AtimeQuanta: %u\n", "Mount", osb->s_mount_opt, osb->s_atime_quantum); if (cconn) { out += snprintf(buf + out, len - out, "%10s => Stack: %s Name: %*s " "Version: %d.%d\n", "Cluster", (*osb->osb_cluster_stack == '\0' ? "o2cb" : osb->osb_cluster_stack), cconn->cc_namelen, cconn->cc_name, cconn->cc_version.pv_major, cconn->cc_version.pv_minor); } spin_lock(&osb->dc_task_lock); out += snprintf(buf + out, len - out, "%10s => Pid: %d Count: %lu WakeSeq: %lu " "WorkSeq: %lu\n", "DownCnvt", (osb->dc_task ? 
task_pid_nr(osb->dc_task) : -1), osb->blocked_lock_count, osb->dc_wake_sequence, osb->dc_work_sequence); spin_unlock(&osb->dc_task_lock); spin_lock(&osb->osb_lock); out += snprintf(buf + out, len - out, "%10s => Pid: %d Nodes:", "Recovery", (osb->recovery_thread_task ? task_pid_nr(osb->recovery_thread_task) : -1)); if (rm->rm_used == 0) out += snprintf(buf + out, len - out, " None\n"); else { for (i = 0; i < rm->rm_used; i++) out += snprintf(buf + out, len - out, " %d", rm->rm_entries[i]); out += snprintf(buf + out, len - out, "\n"); } spin_unlock(&osb->osb_lock); out += snprintf(buf + out, len - out, "%10s => Pid: %d Interval: %lu Needs: %d\n", "Commit", (osb->commit_task ? task_pid_nr(osb->commit_task) : -1), osb->osb_commit_interval, atomic_read(&osb->needs_checkpoint)); out += snprintf(buf + out, len - out, "%10s => State: %d TxnId: %lu NumTxns: %d\n", "Journal", osb->journal->j_state, osb->journal->j_trans_id, atomic_read(&osb->journal->j_num_trans)); out += snprintf(buf + out, len - out, "%10s => GlobalAllocs: %d LocalAllocs: %d " "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n", "Stats", atomic_read(&osb->alloc_stats.bitmap_data), atomic_read(&osb->alloc_stats.local_data), atomic_read(&osb->alloc_stats.bg_allocs), atomic_read(&osb->alloc_stats.moves), atomic_read(&osb->alloc_stats.bg_extends)); out += snprintf(buf + out, len - out, "%10s => State: %u Descriptor: %llu Size: %u bits " "Default: %u bits\n", "LocalAlloc", osb->local_alloc_state, (unsigned long long)osb->la_last_gd, osb->local_alloc_bits, osb->local_alloc_default_bits); spin_lock(&osb->osb_lock); out += snprintf(buf + out, len - out, "%10s => InodeSlot: %d StolenInodes: %d, " "MetaSlot: %d StolenMeta: %d\n", "Steal", osb->s_inode_steal_slot, atomic_read(&osb->s_num_inodes_stolen), osb->s_meta_steal_slot, atomic_read(&osb->s_num_meta_stolen)); spin_unlock(&osb->osb_lock); out += snprintf(buf + out, len - out, "OrphanScan => "); out += snprintf(buf + out, len - out, "Local: %u Global: %u ", 
os->os_count, os->os_seqno); out += snprintf(buf + out, len - out, " Last Scan: "); if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE) out += snprintf(buf + out, len - out, "Disabled\n"); else out += snprintf(buf + out, len - out, "%lu seconds ago\n", (get_seconds() - os->os_scantime.tv_sec)); out += snprintf(buf + out, len - out, "%10s => %3s %10s\n", "Slots", "Num", "RecoGen"); for (i = 0; i < osb->max_slots; ++i) { out += snprintf(buf + out, len - out, "%10s %c %3d %10d\n", " ", (i == osb->slot_num ? '*' : ' '), i, osb->slot_recovery_generations[i]); } return out; } static int ocfs2_osb_debug_open(struct inode *inode, struct file *file) { struct ocfs2_super *osb = inode->i_private; char *buf = NULL; buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buf) goto bail; i_size_write(inode, ocfs2_osb_dump(osb, buf, PAGE_SIZE)); file->private_data = buf; return 0; bail: return -ENOMEM; } static int ocfs2_debug_release(struct inode *inode, struct file *file) { kfree(file->private_data); return 0; } static ssize_t ocfs2_debug_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { return simple_read_from_buffer(buf, nbytes, ppos, file->private_data, i_size_read(file->f_mapping->host)); } #else static int ocfs2_osb_debug_open(struct inode *inode, struct file *file) { return 0; } static int ocfs2_debug_release(struct inode *inode, struct file *file) { return 0; } static ssize_t ocfs2_debug_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { return 0; } #endif /* CONFIG_DEBUG_FS */ static const struct file_operations ocfs2_osb_debug_fops = { .open = ocfs2_osb_debug_open, .release = ocfs2_debug_release, .read = ocfs2_debug_read, .llseek = generic_file_llseek, }; static int ocfs2_sync_fs(struct super_block *sb, int wait) { int status; tid_t target; struct ocfs2_super *osb = OCFS2_SB(sb); if (ocfs2_is_hard_readonly(osb)) return -EROFS; if (wait) { status = ocfs2_flush_truncate_log(osb); if (status < 0) mlog_errno(status); } else { 
ocfs2_schedule_truncate_log_flush(osb, 0); } if (jbd2_journal_start_commit(OCFS2_SB(sb)->journal->j_journal, &target)) { if (wait) jbd2_log_wait_commit(OCFS2_SB(sb)->journal->j_journal, target); } return 0; } static int ocfs2_need_system_inode(struct ocfs2_super *osb, int ino) { if (!OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb, OCFS2_FEATURE_RO_COMPAT_USRQUOTA) && (ino == USER_QUOTA_SYSTEM_INODE || ino == LOCAL_USER_QUOTA_SYSTEM_INODE)) return 0; if (!OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb, OCFS2_FEATURE_RO_COMPAT_GRPQUOTA) && (ino == GROUP_QUOTA_SYSTEM_INODE || ino == LOCAL_GROUP_QUOTA_SYSTEM_INODE)) return 0; return 1; } static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb) { struct inode *new = NULL; int status = 0; int i; new = ocfs2_iget(osb, osb->root_blkno, OCFS2_FI_FLAG_SYSFILE, 0); if (IS_ERR(new)) { status = PTR_ERR(new); mlog_errno(status); goto bail; } osb->root_inode = new; new = ocfs2_iget(osb, osb->system_dir_blkno, OCFS2_FI_FLAG_SYSFILE, 0); if (IS_ERR(new)) { status = PTR_ERR(new); mlog_errno(status); goto bail; } osb->sys_root_inode = new; for (i = OCFS2_FIRST_ONLINE_SYSTEM_INODE; i <= OCFS2_LAST_GLOBAL_SYSTEM_INODE; i++) { if (!ocfs2_need_system_inode(osb, i)) continue; new = ocfs2_get_system_file_inode(osb, i, osb->slot_num); if (!new) { ocfs2_release_system_inodes(osb); status = -EINVAL; mlog_errno(status); /* FIXME: Should ERROR_RO_FS */ mlog(ML_ERROR, "Unable to load system inode %d, " "possibly corrupt fs?", i); goto bail; } // the array now has one ref, so drop this one iput(new); } bail: if (status) mlog_errno(status); return status; } static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb) { struct inode *new = NULL; int status = 0; int i; for (i = OCFS2_LAST_GLOBAL_SYSTEM_INODE + 1; i < NUM_SYSTEM_INODES; i++) { if (!ocfs2_need_system_inode(osb, i)) continue; new = ocfs2_get_system_file_inode(osb, i, osb->slot_num); if (!new) { ocfs2_release_system_inodes(osb); status = -EINVAL; mlog(ML_ERROR, "status=%d, sysfile=%d, 
slot=%d\n", status, i, osb->slot_num); goto bail; } /* the array now has one ref, so drop this one */ iput(new); } bail: if (status) mlog_errno(status); return status; } static void ocfs2_release_system_inodes(struct ocfs2_super *osb) { int i; struct inode *inode; for (i = 0; i < NUM_GLOBAL_SYSTEM_INODES; i++) { inode = osb->global_system_inodes[i]; if (inode) { iput(inode); osb->global_system_inodes[i] = NULL; } } inode = osb->sys_root_inode; if (inode) { iput(inode); osb->sys_root_inode = NULL; } inode = osb->root_inode; if (inode) { iput(inode); osb->root_inode = NULL; } if (!osb->local_system_inodes) return; for (i = 0; i < NUM_LOCAL_SYSTEM_INODES * osb->max_slots; i++) { if (osb->local_system_inodes[i]) { iput(osb->local_system_inodes[i]); osb->local_system_inodes[i] = NULL; } } kfree(osb->local_system_inodes); osb->local_system_inodes = NULL; } /* We're allocating fs objects, use GFP_NOFS */ static struct inode *ocfs2_alloc_inode(struct super_block *sb) { struct ocfs2_inode_info *oi; oi = kmem_cache_alloc(ocfs2_inode_cachep, GFP_NOFS); if (!oi) return NULL; jbd2_journal_init_jbd_inode(&oi->ip_jinode, &oi->vfs_inode); return &oi->vfs_inode; } static void ocfs2_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(ocfs2_inode_cachep, OCFS2_I(inode)); } static void ocfs2_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, ocfs2_i_callback); } static unsigned long long ocfs2_max_file_offset(unsigned int bbits, unsigned int cbits) { unsigned int bytes = 1 << cbits; unsigned int trim = bytes; unsigned int bitshift = 32; /* * i_size and all block offsets in ocfs2 are always 64 bits * wide. i_clusters is 32 bits, in cluster-sized units. So on * 64 bit platforms, cluster size will be the limiting factor. */ #if BITS_PER_LONG == 32 # if defined(CONFIG_LBDAF) BUILD_BUG_ON(sizeof(sector_t) != 8); /* * We might be limited by page cache size. 
*/ if (bytes > PAGE_CACHE_SIZE) { bytes = PAGE_CACHE_SIZE; trim = 1; /* * Shift by 31 here so that we don't get larger than * MAX_LFS_FILESIZE */ bitshift = 31; } # else /* * We are limited by the size of sector_t. Use block size, as * that's what we expose to the VFS. */ bytes = 1 << bbits; trim = 1; bitshift = 31; # endif #endif /* * Trim by a whole cluster when we can actually approach the * on-disk limits. Otherwise we can overflow i_clusters when * an extent start is at the max offset. */ return (((unsigned long long)bytes) << bitshift) - trim; } static int ocfs2_remount(struct super_block *sb, int *flags, char *data) { int incompat_features; int ret = 0; struct mount_options parsed_options; struct ocfs2_super *osb = OCFS2_SB(sb); u32 tmp; if (!ocfs2_parse_options(sb, data, &parsed_options, 1) || !ocfs2_check_set_options(sb, &parsed_options)) { ret = -EINVAL; goto out; } tmp = OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL | OCFS2_MOUNT_HB_NONE; if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) { ret = -EINVAL; mlog(ML_ERROR, "Cannot change heartbeat mode on remount\n"); goto out; } if ((osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK) != (parsed_options.mount_opt & OCFS2_MOUNT_DATA_WRITEBACK)) { ret = -EINVAL; mlog(ML_ERROR, "Cannot change data mode on remount\n"); goto out; } /* Probably don't want this on remount; it might * mess with other nodes */ if (!(osb->s_mount_opt & OCFS2_MOUNT_INODE64) && (parsed_options.mount_opt & OCFS2_MOUNT_INODE64)) { ret = -EINVAL; mlog(ML_ERROR, "Cannot enable inode64 on remount\n"); goto out; } /* We're going to/from readonly mode. */ if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) { /* Disable quota accounting before remounting RO */ if (*flags & MS_RDONLY) { ret = ocfs2_susp_quotas(osb, 0); if (ret < 0) goto out; } /* Lock here so the check of HARD_RO and the potential * setting of SOFT_RO is atomic. 
*/ spin_lock(&osb->osb_lock); if (osb->osb_flags & OCFS2_OSB_HARD_RO) { mlog(ML_ERROR, "Remount on readonly device is forbidden.\n"); ret = -EROFS; goto unlock_osb; } if (*flags & MS_RDONLY) { sb->s_flags |= MS_RDONLY; osb->osb_flags |= OCFS2_OSB_SOFT_RO; } else { if (osb->osb_flags & OCFS2_OSB_ERROR_FS) { mlog(ML_ERROR, "Cannot remount RDWR " "filesystem due to previous errors.\n"); ret = -EROFS; goto unlock_osb; } incompat_features = OCFS2_HAS_RO_COMPAT_FEATURE(sb, ~OCFS2_FEATURE_RO_COMPAT_SUPP); if (incompat_features) { mlog(ML_ERROR, "Cannot remount RDWR because " "of unsupported optional features " "(%x).\n", incompat_features); ret = -EINVAL; goto unlock_osb; } sb->s_flags &= ~MS_RDONLY; osb->osb_flags &= ~OCFS2_OSB_SOFT_RO; } trace_ocfs2_remount(sb->s_flags, osb->osb_flags, *flags); unlock_osb: spin_unlock(&osb->osb_lock); /* Enable quota accounting after remounting RW */ if (!ret && !(*flags & MS_RDONLY)) { if (sb_any_quota_suspended(sb)) ret = ocfs2_susp_quotas(osb, 1); else ret = ocfs2_enable_quotas(osb); if (ret < 0) { /* Return back changes... */ spin_lock(&osb->osb_lock); sb->s_flags |= MS_RDONLY; osb->osb_flags |= OCFS2_OSB_SOFT_RO; spin_unlock(&osb->osb_lock); goto out; } } } if (!ret) { /* Only save off the new mount options in case of a successful * remount. */ osb->s_mount_opt = parsed_options.mount_opt; osb->s_atime_quantum = parsed_options.atime_quantum; osb->preferred_slot = parsed_options.slot; if (parsed_options.commit_interval) osb->osb_commit_interval = parsed_options.commit_interval; if (!ocfs2_is_hard_readonly(osb)) ocfs2_set_journal_params(osb); sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? 
MS_POSIXACL : 0); } out: return ret; } static int ocfs2_sb_probe(struct super_block *sb, struct buffer_head **bh, int *sector_size, struct ocfs2_blockcheck_stats *stats) { int status, tmpstat; struct ocfs1_vol_disk_hdr *hdr; struct ocfs2_dinode *di; int blksize; *bh = NULL; /* may be > 512 */ *sector_size = bdev_logical_block_size(sb->s_bdev); if (*sector_size > OCFS2_MAX_BLOCKSIZE) { mlog(ML_ERROR, "Hardware sector size too large: %d (max=%d)\n", *sector_size, OCFS2_MAX_BLOCKSIZE); status = -EINVAL; goto bail; } /* Can this really happen? */ if (*sector_size < OCFS2_MIN_BLOCKSIZE) *sector_size = OCFS2_MIN_BLOCKSIZE; /* check block zero for old format */ status = ocfs2_get_sector(sb, bh, 0, *sector_size); if (status < 0) { mlog_errno(status); goto bail; } hdr = (struct ocfs1_vol_disk_hdr *) (*bh)->b_data; if (hdr->major_version == OCFS1_MAJOR_VERSION) { mlog(ML_ERROR, "incompatible version: %u.%u\n", hdr->major_version, hdr->minor_version); status = -EINVAL; } if (memcmp(hdr->signature, OCFS1_VOLUME_SIGNATURE, strlen(OCFS1_VOLUME_SIGNATURE)) == 0) { mlog(ML_ERROR, "incompatible volume signature: %8s\n", hdr->signature); status = -EINVAL; } brelse(*bh); *bh = NULL; if (status < 0) { mlog(ML_ERROR, "This is an ocfs v1 filesystem which must be " "upgraded before mounting with ocfs v2\n"); goto bail; } /* * Now check at magic offset for 512, 1024, 2048, 4096 * blocksizes. 4096 is the maximum blocksize because it is * the minimum clustersize. 
*/ status = -EINVAL; for (blksize = *sector_size; blksize <= OCFS2_MAX_BLOCKSIZE; blksize <<= 1) { tmpstat = ocfs2_get_sector(sb, bh, OCFS2_SUPER_BLOCK_BLKNO, blksize); if (tmpstat < 0) { status = tmpstat; mlog_errno(status); break; } di = (struct ocfs2_dinode *) (*bh)->b_data; memset(stats, 0, sizeof(struct ocfs2_blockcheck_stats)); spin_lock_init(&stats->b_lock); tmpstat = ocfs2_verify_volume(di, *bh, blksize, stats); if (tmpstat < 0) { brelse(*bh); *bh = NULL; } if (tmpstat != -EAGAIN) { status = tmpstat; break; } } bail: return status; } static int ocfs2_verify_heartbeat(struct ocfs2_super *osb) { u32 hb_enabled = OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL; if (osb->s_mount_opt & hb_enabled) { if (ocfs2_mount_local(osb)) { mlog(ML_ERROR, "Cannot heartbeat on a locally " "mounted device.\n"); return -EINVAL; } if (ocfs2_userspace_stack(osb)) { mlog(ML_ERROR, "Userspace stack expected, but " "o2cb heartbeat arguments passed to mount\n"); return -EINVAL; } if (((osb->s_mount_opt & OCFS2_MOUNT_HB_GLOBAL) && !ocfs2_cluster_o2cb_global_heartbeat(osb)) || ((osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL) && ocfs2_cluster_o2cb_global_heartbeat(osb))) { mlog(ML_ERROR, "Mismatching o2cb heartbeat modes\n"); return -EINVAL; } } if (!(osb->s_mount_opt & hb_enabled)) { if (!ocfs2_mount_local(osb) && !ocfs2_is_hard_readonly(osb) && !ocfs2_userspace_stack(osb)) { mlog(ML_ERROR, "Heartbeat has to be started to mount " "a read-write clustered device.\n"); return -EINVAL; } } return 0; } /* * If we're using a userspace stack, mount should have passed * a name that matches the disk. If not, mount should not * have passed a stack. 
*/ static int ocfs2_verify_userspace_stack(struct ocfs2_super *osb, struct mount_options *mopt) { if (!ocfs2_userspace_stack(osb) && mopt->cluster_stack[0]) { mlog(ML_ERROR, "cluster stack passed to mount, but this filesystem " "does not support it\n"); return -EINVAL; } if (ocfs2_userspace_stack(osb) && strncmp(osb->osb_cluster_stack, mopt->cluster_stack, OCFS2_STACK_LABEL_LEN)) { mlog(ML_ERROR, "cluster stack passed to mount (\"%s\") does not " "match the filesystem (\"%s\")\n", mopt->cluster_stack, osb->osb_cluster_stack); return -EINVAL; } return 0; } static int ocfs2_susp_quotas(struct ocfs2_super *osb, int unsuspend) { int type; struct super_block *sb = osb->sb; unsigned int feature[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA, OCFS2_FEATURE_RO_COMPAT_GRPQUOTA}; int status = 0; for (type = 0; type < MAXQUOTAS; type++) { if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type])) continue; if (unsuspend) status = dquot_resume(sb, type); else { struct ocfs2_mem_dqinfo *oinfo; /* Cancel periodic syncing before suspending */ oinfo = sb_dqinfo(sb, type)->dqi_priv; cancel_delayed_work_sync(&oinfo->dqi_sync_work); status = dquot_suspend(sb, type); } if (status < 0) break; } if (status < 0) mlog(ML_ERROR, "Failed to suspend/unsuspend quotas on " "remount (error = %d).\n", status); return status; } static int ocfs2_enable_quotas(struct ocfs2_super *osb) { struct inode *inode[MAXQUOTAS] = { NULL, NULL }; struct super_block *sb = osb->sb; unsigned int feature[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA, OCFS2_FEATURE_RO_COMPAT_GRPQUOTA}; unsigned int ino[MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE, LOCAL_GROUP_QUOTA_SYSTEM_INODE }; int status; int type; sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NEGATIVE_USAGE; for (type = 0; type < MAXQUOTAS; type++) { if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type])) continue; inode[type] = ocfs2_get_system_file_inode(osb, ino[type], osb->slot_num); if (!inode[type]) { status = -ENOENT; goto out_quota_off; } status = 
dquot_enable(inode[type], type, QFMT_OCFS2, DQUOT_USAGE_ENABLED); if (status < 0) goto out_quota_off; } for (type = 0; type < MAXQUOTAS; type++) iput(inode[type]); return 0; out_quota_off: ocfs2_disable_quotas(osb); for (type = 0; type < MAXQUOTAS; type++) iput(inode[type]); mlog_errno(status); return status; } static void ocfs2_disable_quotas(struct ocfs2_super *osb) { int type; struct inode *inode; struct super_block *sb = osb->sb; struct ocfs2_mem_dqinfo *oinfo; /* We mostly ignore errors in this function because there's not much * we can do when we see them */ for (type = 0; type < MAXQUOTAS; type++) { if (!sb_has_quota_loaded(sb, type)) continue; /* Cancel periodic syncing before we grab dqonoff_mutex */ oinfo = sb_dqinfo(sb, type)->dqi_priv; cancel_delayed_work_sync(&oinfo->dqi_sync_work); inode = igrab(sb->s_dquot.files[type]); /* Turn off quotas. This will remove all dquot structures from * memory and so they will be automatically synced to global * quota files */ dquot_disable(sb, type, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); if (!inode) continue; iput(inode); } } /* Handle quota on quotactl */ static int ocfs2_quota_on(struct super_block *sb, int type, int format_id) { unsigned int feature[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA, OCFS2_FEATURE_RO_COMPAT_GRPQUOTA}; if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type])) return -EINVAL; return dquot_enable(sb_dqopt(sb)->files[type], type, format_id, DQUOT_LIMITS_ENABLED); } /* Handle quota off quotactl */ static int ocfs2_quota_off(struct super_block *sb, int type) { return dquot_disable(sb, type, DQUOT_LIMITS_ENABLED); } static const struct quotactl_ops ocfs2_quotactl_ops = { .quota_on_meta = ocfs2_quota_on, .quota_off = ocfs2_quota_off, .quota_sync = dquot_quota_sync, .get_info = dquot_get_dqinfo, .set_info = dquot_set_dqinfo, .get_dqblk = dquot_get_dqblk, .set_dqblk = dquot_set_dqblk, }; static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) { struct dentry *root; int 
status, sector_size; struct mount_options parsed_options; struct inode *inode = NULL; struct ocfs2_super *osb = NULL; struct buffer_head *bh = NULL; char nodestr[8]; struct ocfs2_blockcheck_stats stats; trace_ocfs2_fill_super(sb, data, silent); if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) { status = -EINVAL; goto read_super_error; } /* probe for superblock */ status = ocfs2_sb_probe(sb, &bh, &sector_size, &stats); if (status < 0) { mlog(ML_ERROR, "superblock probe failed!\n"); goto read_super_error; } status = ocfs2_initialize_super(sb, bh, sector_size, &stats); osb = OCFS2_SB(sb); if (status < 0) { mlog_errno(status); goto read_super_error; } brelse(bh); bh = NULL; if (!ocfs2_check_set_options(sb, &parsed_options)) { status = -EINVAL; goto read_super_error; } osb->s_mount_opt = parsed_options.mount_opt; osb->s_atime_quantum = parsed_options.atime_quantum; osb->preferred_slot = parsed_options.slot; osb->osb_commit_interval = parsed_options.commit_interval; ocfs2_la_set_sizes(osb, parsed_options.localalloc_opt); osb->osb_resv_level = parsed_options.resv_level; osb->osb_dir_resv_level = parsed_options.resv_level; if (parsed_options.dir_resv_level == -1) osb->osb_dir_resv_level = parsed_options.resv_level; else osb->osb_dir_resv_level = parsed_options.dir_resv_level; status = ocfs2_verify_userspace_stack(osb, &parsed_options); if (status) goto read_super_error; sb->s_magic = OCFS2_SUPER_MAGIC; sb->s_flags = (sb->s_flags & ~(MS_POSIXACL | MS_NOSEC)) | ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0); /* Hard readonly mode only if: bdev_read_only, MS_RDONLY, * heartbeat=none */ if (bdev_read_only(sb->s_bdev)) { if (!(sb->s_flags & MS_RDONLY)) { status = -EACCES; mlog(ML_ERROR, "Readonly device detected but readonly " "mount was not specified.\n"); goto read_super_error; } /* You should not be able to start a local heartbeat * on a readonly device. 
*/ if (osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL) { status = -EROFS; mlog(ML_ERROR, "Local heartbeat specified on readonly " "device.\n"); goto read_super_error; } status = ocfs2_check_journals_nolocks(osb); if (status < 0) { if (status == -EROFS) mlog(ML_ERROR, "Recovery required on readonly " "file system, but write access is " "unavailable.\n"); else mlog_errno(status); goto read_super_error; } ocfs2_set_ro_flag(osb, 1); printk(KERN_NOTICE "ocfs2: Readonly device (%s) detected. " "Cluster services will not be used for this mount. " "Recovery will be skipped.\n", osb->dev_str); } if (!ocfs2_is_hard_readonly(osb)) { if (sb->s_flags & MS_RDONLY) ocfs2_set_ro_flag(osb, 0); } status = ocfs2_verify_heartbeat(osb); if (status < 0) { mlog_errno(status); goto read_super_error; } osb->osb_debug_root = debugfs_create_dir(osb->uuid_str, ocfs2_debugfs_root); if (!osb->osb_debug_root) { status = -EINVAL; mlog(ML_ERROR, "Unable to create per-mount debugfs root.\n"); goto read_super_error; } osb->osb_ctxt = debugfs_create_file("fs_state", S_IFREG|S_IRUSR, osb->osb_debug_root, osb, &ocfs2_osb_debug_fops); if (!osb->osb_ctxt) { status = -EINVAL; mlog_errno(status); goto read_super_error; } if (ocfs2_meta_ecc(osb)) { status = ocfs2_blockcheck_stats_debugfs_install( &osb->osb_ecc_stats, osb->osb_debug_root); if (status) { mlog(ML_ERROR, "Unable to create blockcheck statistics " "files\n"); goto read_super_error; } } status = ocfs2_mount_volume(sb); if (status < 0) goto read_super_error; if (osb->root_inode) inode = igrab(osb->root_inode); if (!inode) { status = -EIO; mlog_errno(status); goto read_super_error; } root = d_make_root(inode); if (!root) { status = -ENOMEM; mlog_errno(status); goto read_super_error; } sb->s_root = root; ocfs2_complete_mount_recovery(osb); if (ocfs2_mount_local(osb)) snprintf(nodestr, sizeof(nodestr), "local"); else snprintf(nodestr, sizeof(nodestr), "%u", osb->node_num); printk(KERN_INFO "ocfs2: Mounting device (%s) on (node %s, slot %d) " "with %s data 
mode.\n", osb->dev_str, nodestr, osb->slot_num, osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK ? "writeback" : "ordered"); atomic_set(&osb->vol_state, VOLUME_MOUNTED); wake_up(&osb->osb_mount_event); /* Now we can initialize quotas because we can afford to wait * for cluster locks recovery now. That also means that truncation * log recovery can happen but that waits for proper quota setup */ if (!(sb->s_flags & MS_RDONLY)) { status = ocfs2_enable_quotas(osb); if (status < 0) { /* We have to err-out specially here because * s_root is already set */ mlog_errno(status); atomic_set(&osb->vol_state, VOLUME_DISABLED); wake_up(&osb->osb_mount_event); return status; } } ocfs2_complete_quota_recovery(osb); /* Now we wake up again for processes waiting for quotas */ atomic_set(&osb->vol_state, VOLUME_MOUNTED_QUOTAS); wake_up(&osb->osb_mount_event); /* Start this when the mount is almost sure of being successful */ ocfs2_orphan_scan_start(osb); return status; read_super_error: brelse(bh); if (osb) { atomic_set(&osb->vol_state, VOLUME_DISABLED); wake_up(&osb->osb_mount_event); ocfs2_dismount_volume(sb, 1); } if (status) mlog_errno(status); return status; } static struct dentry *ocfs2_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, ocfs2_fill_super); } static void ocfs2_kill_sb(struct super_block *sb) { struct ocfs2_super *osb = OCFS2_SB(sb); /* Failed mount? 
*/ if (!osb || atomic_read(&osb->vol_state) == VOLUME_DISABLED) goto out; /* Prevent further queueing of inode drop events */ spin_lock(&dentry_list_lock); ocfs2_set_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED); spin_unlock(&dentry_list_lock); /* Wait for work to finish and/or remove it */ cancel_work_sync(&osb->dentry_lock_work); out: kill_block_super(sb); } static struct file_system_type ocfs2_fs_type = { .owner = THIS_MODULE, .name = "ocfs2", .mount = ocfs2_mount, .kill_sb = ocfs2_kill_sb, .fs_flags = FS_REQUIRES_DEV|FS_RENAME_DOES_D_MOVE, .next = NULL }; static int ocfs2_check_set_options(struct super_block *sb, struct mount_options *options) { if (options->mount_opt & OCFS2_MOUNT_USRQUOTA && !OCFS2_HAS_RO_COMPAT_FEATURE(sb, OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) { mlog(ML_ERROR, "User quotas were requested, but this " "filesystem does not have the feature enabled.\n"); return 0; } if (options->mount_opt & OCFS2_MOUNT_GRPQUOTA && !OCFS2_HAS_RO_COMPAT_FEATURE(sb, OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) { mlog(ML_ERROR, "Group quotas were requested, but this " "filesystem does not have the feature enabled.\n"); return 0; } if (options->mount_opt & OCFS2_MOUNT_POSIX_ACL && !OCFS2_HAS_INCOMPAT_FEATURE(sb, OCFS2_FEATURE_INCOMPAT_XATTR)) { mlog(ML_ERROR, "ACL support requested but extended attributes " "feature is not enabled\n"); return 0; } /* No ACL setting specified? Use XATTR feature... */ if (!(options->mount_opt & (OCFS2_MOUNT_POSIX_ACL | OCFS2_MOUNT_NO_POSIX_ACL))) { if (OCFS2_HAS_INCOMPAT_FEATURE(sb, OCFS2_FEATURE_INCOMPAT_XATTR)) options->mount_opt |= OCFS2_MOUNT_POSIX_ACL; else options->mount_opt |= OCFS2_MOUNT_NO_POSIX_ACL; } return 1; } static int ocfs2_parse_options(struct super_block *sb, char *options, struct mount_options *mopt, int is_remount) { int status, user_stack = 0; char *p; u32 tmp; trace_ocfs2_parse_options(is_remount, options ? 
options : "(none)"); mopt->commit_interval = 0; mopt->mount_opt = OCFS2_MOUNT_NOINTR; mopt->atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM; mopt->slot = OCFS2_INVALID_SLOT; mopt->localalloc_opt = -1; mopt->cluster_stack[0] = '\0'; mopt->resv_level = OCFS2_DEFAULT_RESV_LEVEL; mopt->dir_resv_level = -1; if (!options) { status = 1; goto bail; } while ((p = strsep(&options, ",")) != NULL) { int token, option; substring_t args[MAX_OPT_ARGS]; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_hb_local: mopt->mount_opt |= OCFS2_MOUNT_HB_LOCAL; break; case Opt_hb_none: mopt->mount_opt |= OCFS2_MOUNT_HB_NONE; break; case Opt_hb_global: mopt->mount_opt |= OCFS2_MOUNT_HB_GLOBAL; break; case Opt_barrier: if (match_int(&args[0], &option)) { status = 0; goto bail; } if (option) mopt->mount_opt |= OCFS2_MOUNT_BARRIER; else mopt->mount_opt &= ~OCFS2_MOUNT_BARRIER; break; case Opt_intr: mopt->mount_opt &= ~OCFS2_MOUNT_NOINTR; break; case Opt_nointr: mopt->mount_opt |= OCFS2_MOUNT_NOINTR; break; case Opt_err_panic: mopt->mount_opt |= OCFS2_MOUNT_ERRORS_PANIC; break; case Opt_err_ro: mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC; break; case Opt_data_ordered: mopt->mount_opt &= ~OCFS2_MOUNT_DATA_WRITEBACK; break; case Opt_data_writeback: mopt->mount_opt |= OCFS2_MOUNT_DATA_WRITEBACK; break; case Opt_user_xattr: mopt->mount_opt &= ~OCFS2_MOUNT_NOUSERXATTR; break; case Opt_nouser_xattr: mopt->mount_opt |= OCFS2_MOUNT_NOUSERXATTR; break; case Opt_atime_quantum: if (match_int(&args[0], &option)) { status = 0; goto bail; } if (option >= 0) mopt->atime_quantum = option; break; case Opt_slot: option = 0; if (match_int(&args[0], &option)) { status = 0; goto bail; } if (option) mopt->slot = (s16)option; break; case Opt_commit: option = 0; if (match_int(&args[0], &option)) { status = 0; goto bail; } if (option < 0) return 0; if (option == 0) option = JBD2_DEFAULT_MAX_COMMIT_AGE; mopt->commit_interval = HZ * option; break; case Opt_localalloc: option = 0; if 
(match_int(&args[0], &option)) { status = 0; goto bail; } if (option >= 0) mopt->localalloc_opt = option; break; case Opt_localflocks: /* * Changing this during remount could race * flock() requests, or "unbalance" existing * ones (e.g., a lock is taken in one mode but * dropped in the other). If users care enough * to flip locking modes during remount, we * could add a "local" flag to individual * flock structures for proper tracking of * state. */ if (!is_remount) mopt->mount_opt |= OCFS2_MOUNT_LOCALFLOCKS; break; case Opt_stack: /* Check both that the option we were passed * is of the right length and that it is a proper * string of the right length. */ if (((args[0].to - args[0].from) != OCFS2_STACK_LABEL_LEN) || (strnlen(args[0].from, OCFS2_STACK_LABEL_LEN) != OCFS2_STACK_LABEL_LEN)) { mlog(ML_ERROR, "Invalid cluster_stack option\n"); status = 0; goto bail; } memcpy(mopt->cluster_stack, args[0].from, OCFS2_STACK_LABEL_LEN); mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0'; /* * Open code the memcmp here as we don't have * an osb to pass to * ocfs2_userspace_stack(). 
*/ if (memcmp(mopt->cluster_stack, OCFS2_CLASSIC_CLUSTER_STACK, OCFS2_STACK_LABEL_LEN)) user_stack = 1; break; case Opt_inode64: mopt->mount_opt |= OCFS2_MOUNT_INODE64; break; case Opt_usrquota: mopt->mount_opt |= OCFS2_MOUNT_USRQUOTA; break; case Opt_grpquota: mopt->mount_opt |= OCFS2_MOUNT_GRPQUOTA; break; case Opt_coherency_buffered: mopt->mount_opt |= OCFS2_MOUNT_COHERENCY_BUFFERED; break; case Opt_coherency_full: mopt->mount_opt &= ~OCFS2_MOUNT_COHERENCY_BUFFERED; break; case Opt_acl: mopt->mount_opt |= OCFS2_MOUNT_POSIX_ACL; mopt->mount_opt &= ~OCFS2_MOUNT_NO_POSIX_ACL; break; case Opt_noacl: mopt->mount_opt |= OCFS2_MOUNT_NO_POSIX_ACL; mopt->mount_opt &= ~OCFS2_MOUNT_POSIX_ACL; break; case Opt_resv_level: if (is_remount) break; if (match_int(&args[0], &option)) { status = 0; goto bail; } if (option >= OCFS2_MIN_RESV_LEVEL && option < OCFS2_MAX_RESV_LEVEL) mopt->resv_level = option; break; case Opt_dir_resv_level: if (is_remount) break; if (match_int(&args[0], &option)) { status = 0; goto bail; } if (option >= OCFS2_MIN_RESV_LEVEL && option < OCFS2_MAX_RESV_LEVEL) mopt->dir_resv_level = option; break; default: mlog(ML_ERROR, "Unrecognized mount option \"%s\" " "or missing value\n", p); status = 0; goto bail; } } if (user_stack == 0) { /* Ensure only one heartbeat mode */ tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL | OCFS2_MOUNT_HB_NONE); if (hweight32(tmp) != 1) { mlog(ML_ERROR, "Invalid heartbeat mount options\n"); status = 0; goto bail; } } status = 1; bail: return status; } static int ocfs2_show_options(struct seq_file *s, struct dentry *root) { struct ocfs2_super *osb = OCFS2_SB(root->d_sb); unsigned long opts = osb->s_mount_opt; unsigned int local_alloc_megs; if (opts & (OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL)) { seq_printf(s, ",_netdev"); if (opts & OCFS2_MOUNT_HB_LOCAL) seq_printf(s, ",%s", OCFS2_HB_LOCAL); else seq_printf(s, ",%s", OCFS2_HB_GLOBAL); } else seq_printf(s, ",%s", OCFS2_HB_NONE); if (opts & 
OCFS2_MOUNT_NOINTR) seq_printf(s, ",nointr"); if (opts & OCFS2_MOUNT_DATA_WRITEBACK) seq_printf(s, ",data=writeback"); else seq_printf(s, ",data=ordered"); if (opts & OCFS2_MOUNT_BARRIER) seq_printf(s, ",barrier=1"); if (opts & OCFS2_MOUNT_ERRORS_PANIC) seq_printf(s, ",errors=panic"); else seq_printf(s, ",errors=remount-ro"); if (osb->preferred_slot != OCFS2_INVALID_SLOT) seq_printf(s, ",preferred_slot=%d", osb->preferred_slot); seq_printf(s, ",atime_quantum=%u", osb->s_atime_quantum); if (osb->osb_commit_interval) seq_printf(s, ",commit=%u", (unsigned) (osb->osb_commit_interval / HZ)); local_alloc_megs = osb->local_alloc_bits >> (20 - osb->s_clustersize_bits); if (local_alloc_megs != ocfs2_la_default_mb(osb)) seq_printf(s, ",localalloc=%d", local_alloc_megs); if (opts & OCFS2_MOUNT_LOCALFLOCKS) seq_printf(s, ",localflocks,"); if (osb->osb_cluster_stack[0]) seq_printf(s, ",cluster_stack=%.*s", OCFS2_STACK_LABEL_LEN, osb->osb_cluster_stack); if (opts & OCFS2_MOUNT_USRQUOTA) seq_printf(s, ",usrquota"); if (opts & OCFS2_MOUNT_GRPQUOTA) seq_printf(s, ",grpquota"); if (opts & OCFS2_MOUNT_COHERENCY_BUFFERED) seq_printf(s, ",coherency=buffered"); else seq_printf(s, ",coherency=full"); if (opts & OCFS2_MOUNT_NOUSERXATTR) seq_printf(s, ",nouser_xattr"); else seq_printf(s, ",user_xattr"); if (opts & OCFS2_MOUNT_INODE64) seq_printf(s, ",inode64"); if (opts & OCFS2_MOUNT_POSIX_ACL) seq_printf(s, ",acl"); else seq_printf(s, ",noacl"); if (osb->osb_resv_level != OCFS2_DEFAULT_RESV_LEVEL) seq_printf(s, ",resv_level=%d", osb->osb_resv_level); if (osb->osb_dir_resv_level != osb->osb_resv_level) seq_printf(s, ",dir_resv_level=%d", osb->osb_resv_level); return 0; } wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ]; static int __init ocfs2_init(void) { int status, i; ocfs2_print_version(); for (i = 0; i < OCFS2_IOEND_WQ_HASH_SZ; i++) init_waitqueue_head(&ocfs2__ioend_wq[i]); status = init_ocfs2_uptodate_cache(); if (status < 0) goto out1; status = 
ocfs2_initialize_mem_caches(); if (status < 0) goto out2; ocfs2_wq = create_singlethread_workqueue("ocfs2_wq"); if (!ocfs2_wq) { status = -ENOMEM; goto out3; } ocfs2_debugfs_root = debugfs_create_dir("ocfs2", NULL); if (!ocfs2_debugfs_root) { status = -EFAULT; mlog(ML_ERROR, "Unable to create ocfs2 debugfs root.\n"); } ocfs2_set_locking_protocol(); status = register_quota_format(&ocfs2_quota_format); if (status < 0) goto out4; status = register_filesystem(&ocfs2_fs_type); if (!status) return 0; unregister_quota_format(&ocfs2_quota_format); out4: destroy_workqueue(ocfs2_wq); debugfs_remove(ocfs2_debugfs_root); out3: ocfs2_free_mem_caches(); out2: exit_ocfs2_uptodate_cache(); out1: mlog_errno(status); return status; } static void __exit ocfs2_exit(void) { if (ocfs2_wq) { flush_workqueue(ocfs2_wq); destroy_workqueue(ocfs2_wq); } unregister_quota_format(&ocfs2_quota_format); debugfs_remove(ocfs2_debugfs_root); ocfs2_free_mem_caches(); unregister_filesystem(&ocfs2_fs_type); exit_ocfs2_uptodate_cache(); } static void ocfs2_put_super(struct super_block *sb) { trace_ocfs2_put_super(sb); ocfs2_sync_blockdev(sb); ocfs2_dismount_volume(sb, 0); } static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf) { struct ocfs2_super *osb; u32 numbits, freebits; int status; struct ocfs2_dinode *bm_lock; struct buffer_head *bh = NULL; struct inode *inode = NULL; trace_ocfs2_statfs(dentry->d_sb, buf); osb = OCFS2_SB(dentry->d_sb); inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE, OCFS2_INVALID_SLOT); if (!inode) { mlog(ML_ERROR, "failed to get bitmap inode\n"); status = -EIO; goto bail; } status = ocfs2_inode_lock(inode, &bh, 0); if (status < 0) { mlog_errno(status); goto bail; } bm_lock = (struct ocfs2_dinode *) bh->b_data; numbits = le32_to_cpu(bm_lock->id1.bitmap1.i_total); freebits = numbits - le32_to_cpu(bm_lock->id1.bitmap1.i_used); buf->f_type = OCFS2_SUPER_MAGIC; buf->f_bsize = dentry->d_sb->s_blocksize; buf->f_namelen = OCFS2_MAX_FILENAME_LEN; 
buf->f_blocks = ((sector_t) numbits) * (osb->s_clustersize >> osb->sb->s_blocksize_bits); buf->f_bfree = ((sector_t) freebits) * (osb->s_clustersize >> osb->sb->s_blocksize_bits); buf->f_bavail = buf->f_bfree; buf->f_files = numbits; buf->f_ffree = freebits; buf->f_fsid.val[0] = crc32_le(0, osb->uuid_str, OCFS2_VOL_UUID_LEN) & 0xFFFFFFFFUL; buf->f_fsid.val[1] = crc32_le(0, osb->uuid_str + OCFS2_VOL_UUID_LEN, OCFS2_VOL_UUID_LEN) & 0xFFFFFFFFUL; brelse(bh); ocfs2_inode_unlock(inode, 0); status = 0; bail: if (inode) iput(inode); if (status) mlog_errno(status); return status; } static void ocfs2_inode_init_once(void *data) { struct ocfs2_inode_info *oi = data; oi->ip_flags = 0; oi->ip_open_count = 0; spin_lock_init(&oi->ip_lock); ocfs2_extent_map_init(&oi->vfs_inode); INIT_LIST_HEAD(&oi->ip_io_markers); oi->ip_dir_start_lookup = 0; atomic_set(&oi->ip_unaligned_aio, 0); init_rwsem(&oi->ip_alloc_sem); init_rwsem(&oi->ip_xattr_sem); mutex_init(&oi->ip_io_mutex); oi->ip_blkno = 0ULL; oi->ip_clusters = 0; ocfs2_resv_init_once(&oi->ip_la_data_resv); ocfs2_lock_res_init_once(&oi->ip_rw_lockres); ocfs2_lock_res_init_once(&oi->ip_inode_lockres); ocfs2_lock_res_init_once(&oi->ip_open_lockres); ocfs2_metadata_cache_init(INODE_CACHE(&oi->vfs_inode), &ocfs2_inode_caching_ops); inode_init_once(&oi->vfs_inode); } static int ocfs2_initialize_mem_caches(void) { ocfs2_inode_cachep = kmem_cache_create("ocfs2_inode_cache", sizeof(struct ocfs2_inode_info), 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD), ocfs2_inode_init_once); ocfs2_dquot_cachep = kmem_cache_create("ocfs2_dquot_cache", sizeof(struct ocfs2_dquot), 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD), NULL); ocfs2_qf_chunk_cachep = kmem_cache_create("ocfs2_qf_chunk_cache", sizeof(struct ocfs2_quota_chunk), 0, (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD), NULL); if (!ocfs2_inode_cachep || !ocfs2_dquot_cachep || !ocfs2_qf_chunk_cachep) { if (ocfs2_inode_cachep) kmem_cache_destroy(ocfs2_inode_cachep); if 
(ocfs2_dquot_cachep) kmem_cache_destroy(ocfs2_dquot_cachep); if (ocfs2_qf_chunk_cachep) kmem_cache_destroy(ocfs2_qf_chunk_cachep); return -ENOMEM; } return 0; } static void ocfs2_free_mem_caches(void) { if (ocfs2_inode_cachep) kmem_cache_destroy(ocfs2_inode_cachep); ocfs2_inode_cachep = NULL; if (ocfs2_dquot_cachep) kmem_cache_destroy(ocfs2_dquot_cachep); ocfs2_dquot_cachep = NULL; if (ocfs2_qf_chunk_cachep) kmem_cache_destroy(ocfs2_qf_chunk_cachep); ocfs2_qf_chunk_cachep = NULL; } static int ocfs2_get_sector(struct super_block *sb, struct buffer_head **bh, int block, int sect_size) { if (!sb_set_blocksize(sb, sect_size)) { mlog(ML_ERROR, "unable to set blocksize\n"); return -EIO; } *bh = sb_getblk(sb, block); if (!*bh) { mlog_errno(-EIO); return -EIO; } lock_buffer(*bh); if (!buffer_dirty(*bh)) clear_buffer_uptodate(*bh); unlock_buffer(*bh); ll_rw_block(READ, 1, bh); wait_on_buffer(*bh); if (!buffer_uptodate(*bh)) { mlog_errno(-EIO); brelse(*bh); *bh = NULL; return -EIO; } return 0; } static int ocfs2_mount_volume(struct super_block *sb) { int status = 0; int unlock_super = 0; struct ocfs2_super *osb = OCFS2_SB(sb); if (ocfs2_is_hard_readonly(osb)) goto leave; status = ocfs2_dlm_init(osb); if (status < 0) { mlog_errno(status); goto leave; } status = ocfs2_super_lock(osb, 1); if (status < 0) { mlog_errno(status); goto leave; } unlock_super = 1; /* This will load up the node map and add ourselves to it. 
*/ status = ocfs2_find_slot(osb); if (status < 0) { mlog_errno(status); goto leave; } /* load all node-local system inodes */ status = ocfs2_init_local_system_inodes(osb); if (status < 0) { mlog_errno(status); goto leave; } status = ocfs2_check_volume(osb); if (status < 0) { mlog_errno(status); goto leave; } status = ocfs2_truncate_log_init(osb); if (status < 0) mlog_errno(status); leave: if (unlock_super) ocfs2_super_unlock(osb, 1); return status; } static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err) { int tmp, hangup_needed = 0; struct ocfs2_super *osb = NULL; char nodestr[8]; trace_ocfs2_dismount_volume(sb); BUG_ON(!sb); osb = OCFS2_SB(sb); BUG_ON(!osb); debugfs_remove(osb->osb_ctxt); /* * Flush inode dropping work queue so that deletes are * performed while the filesystem is still working */ ocfs2_drop_all_dl_inodes(osb); /* Orphan scan should be stopped as early as possible */ ocfs2_orphan_scan_stop(osb); ocfs2_disable_quotas(osb); ocfs2_shutdown_local_alloc(osb); ocfs2_truncate_log_shutdown(osb); /* This will disable recovery and flush any recovery work. */ ocfs2_recovery_exit(osb); ocfs2_journal_shutdown(osb); ocfs2_sync_blockdev(sb); ocfs2_purge_refcount_trees(osb); /* No cluster connection means we've failed during mount, so skip * all the steps which depended on that to complete. */ if (osb->cconn) { tmp = ocfs2_super_lock(osb, 1); if (tmp < 0) { mlog_errno(tmp); return; } } if (osb->slot_num != OCFS2_INVALID_SLOT) ocfs2_put_slot(osb); if (osb->cconn) ocfs2_super_unlock(osb, 1); ocfs2_release_system_inodes(osb); /* * If we're dismounting due to mount error, mount.ocfs2 will clean * up heartbeat. If we're a local mount, there is no heartbeat. * If we failed before we got a uuid_str yet, we can't stop * heartbeat. Otherwise, do it. 
*/ if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str && !ocfs2_is_hard_readonly(osb)) hangup_needed = 1; if (osb->cconn) ocfs2_dlm_shutdown(osb, hangup_needed); ocfs2_blockcheck_stats_debugfs_remove(&osb->osb_ecc_stats); debugfs_remove(osb->osb_debug_root); if (hangup_needed) ocfs2_cluster_hangup(osb->uuid_str, strlen(osb->uuid_str)); atomic_set(&osb->vol_state, VOLUME_DISMOUNTED); if (ocfs2_mount_local(osb)) snprintf(nodestr, sizeof(nodestr), "local"); else snprintf(nodestr, sizeof(nodestr), "%u", osb->node_num); printk(KERN_INFO "ocfs2: Unmounting device (%s) on (node %s)\n", osb->dev_str, nodestr); ocfs2_delete_osb(osb); kfree(osb); sb->s_dev = 0; sb->s_fs_info = NULL; } static int ocfs2_setup_osb_uuid(struct ocfs2_super *osb, const unsigned char *uuid, unsigned uuid_bytes) { int i, ret; char *ptr; BUG_ON(uuid_bytes != OCFS2_VOL_UUID_LEN); osb->uuid_str = kzalloc(OCFS2_VOL_UUID_LEN * 2 + 1, GFP_KERNEL); if (osb->uuid_str == NULL) return -ENOMEM; for (i = 0, ptr = osb->uuid_str; i < OCFS2_VOL_UUID_LEN; i++) { /* print with null */ ret = snprintf(ptr, 3, "%02X", uuid[i]); if (ret != 2) /* drop super cleans up */ return -EINVAL; /* then only advance past the last char */ ptr += 2; } return 0; } /* Make sure entire volume is addressable by our journal. Requires osb_clusters_at_boot to be valid and for the journal to have been initialized by ocfs2_journal_init(). */ static int ocfs2_journal_addressable(struct ocfs2_super *osb) { int status = 0; u64 max_block = ocfs2_clusters_to_blocks(osb->sb, osb->osb_clusters_at_boot) - 1; /* 32-bit block number is always OK. */ if (max_block <= (u32)~0ULL) goto out; /* Volume is "huge", so see if our journal is new enough to support it. */ if (!(OCFS2_HAS_COMPAT_FEATURE(osb->sb, OCFS2_FEATURE_COMPAT_JBD2_SB) && jbd2_journal_check_used_features(osb->journal->j_journal, 0, 0, JBD2_FEATURE_INCOMPAT_64BIT))) { mlog(ML_ERROR, "The journal cannot address the entire volume. 
" "Enable the 'block64' journal option with tunefs.ocfs2"); status = -EFBIG; goto out; } out: return status; } static int ocfs2_initialize_super(struct super_block *sb, struct buffer_head *bh, int sector_size, struct ocfs2_blockcheck_stats *stats) { int status; int i, cbits, bbits; struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; struct inode *inode = NULL; struct ocfs2_journal *journal; __le32 uuid_net_key; struct ocfs2_super *osb; u64 total_blocks; osb = kzalloc(sizeof(struct ocfs2_super), GFP_KERNEL); if (!osb) { status = -ENOMEM; mlog_errno(status); goto bail; } sb->s_fs_info = osb; sb->s_op = &ocfs2_sops; sb->s_d_op = &ocfs2_dentry_ops; sb->s_export_op = &ocfs2_export_ops; sb->s_qcop = &ocfs2_quotactl_ops; sb->dq_op = &ocfs2_quota_operations; sb->s_xattr = ocfs2_xattr_handlers; sb->s_time_gran = 1; sb->s_flags |= MS_NOATIME; /* this is needed to support O_LARGEFILE */ cbits = le32_to_cpu(di->id2.i_super.s_clustersize_bits); bbits = le32_to_cpu(di->id2.i_super.s_blocksize_bits); sb->s_maxbytes = ocfs2_max_file_offset(bbits, cbits); osb->osb_dx_mask = (1 << (cbits - bbits)) - 1; for (i = 0; i < 3; i++) osb->osb_dx_seed[i] = le32_to_cpu(di->id2.i_super.s_dx_seed[i]); osb->osb_dx_seed[3] = le32_to_cpu(di->id2.i_super.s_uuid_hash); osb->sb = sb; /* Save off for ocfs2_rw_direct */ osb->s_sectsize_bits = blksize_bits(sector_size); BUG_ON(!osb->s_sectsize_bits); spin_lock_init(&osb->dc_task_lock); init_waitqueue_head(&osb->dc_event); osb->dc_work_sequence = 0; osb->dc_wake_sequence = 0; INIT_LIST_HEAD(&osb->blocked_lock_list); osb->blocked_lock_count = 0; spin_lock_init(&osb->osb_lock); spin_lock_init(&osb->osb_xattr_lock); ocfs2_init_steal_slots(osb); atomic_set(&osb->alloc_stats.moves, 0); atomic_set(&osb->alloc_stats.local_data, 0); atomic_set(&osb->alloc_stats.bitmap_data, 0); atomic_set(&osb->alloc_stats.bg_allocs, 0); atomic_set(&osb->alloc_stats.bg_extends, 0); /* Copy the blockcheck stats from the superblock probe */ osb->osb_ecc_stats = *stats; 
ocfs2_init_node_maps(osb); snprintf(osb->dev_str, sizeof(osb->dev_str), "%u,%u", MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev)); osb->max_slots = le16_to_cpu(di->id2.i_super.s_max_slots); if (osb->max_slots > OCFS2_MAX_SLOTS || osb->max_slots == 0) { mlog(ML_ERROR, "Invalid number of node slots (%u)\n", osb->max_slots); status = -EINVAL; goto bail; } ocfs2_orphan_scan_init(osb); status = ocfs2_recovery_init(osb); if (status) { mlog(ML_ERROR, "Unable to initialize recovery state\n"); mlog_errno(status); goto bail; } init_waitqueue_head(&osb->checkpoint_event); atomic_set(&osb->needs_checkpoint, 0); osb->s_atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM; osb->slot_num = OCFS2_INVALID_SLOT; osb->s_xattr_inline_size = le16_to_cpu( di->id2.i_super.s_xattr_inline_size); osb->local_alloc_state = OCFS2_LA_UNUSED; osb->local_alloc_bh = NULL; INIT_DELAYED_WORK(&osb->la_enable_wq, ocfs2_la_enable_worker); init_waitqueue_head(&osb->osb_mount_event); status = ocfs2_resmap_init(osb, &osb->osb_la_resmap); if (status) { mlog_errno(status); goto bail; } osb->vol_label = kmalloc(OCFS2_MAX_VOL_LABEL_LEN, GFP_KERNEL); if (!osb->vol_label) { mlog(ML_ERROR, "unable to alloc vol label\n"); status = -ENOMEM; goto bail; } osb->slot_recovery_generations = kcalloc(osb->max_slots, sizeof(*osb->slot_recovery_generations), GFP_KERNEL); if (!osb->slot_recovery_generations) { status = -ENOMEM; mlog_errno(status); goto bail; } init_waitqueue_head(&osb->osb_wipe_event); osb->osb_orphan_wipes = kcalloc(osb->max_slots, sizeof(*osb->osb_orphan_wipes), GFP_KERNEL); if (!osb->osb_orphan_wipes) { status = -ENOMEM; mlog_errno(status); goto bail; } osb->osb_rf_lock_tree = RB_ROOT; osb->s_feature_compat = le32_to_cpu(OCFS2_RAW_SB(di)->s_feature_compat); osb->s_feature_ro_compat = le32_to_cpu(OCFS2_RAW_SB(di)->s_feature_ro_compat); osb->s_feature_incompat = le32_to_cpu(OCFS2_RAW_SB(di)->s_feature_incompat); if ((i = OCFS2_HAS_INCOMPAT_FEATURE(osb->sb, ~OCFS2_FEATURE_INCOMPAT_SUPP))) { mlog(ML_ERROR, "couldn't 
mount because of unsupported " "optional features (%x).\n", i); status = -EINVAL; goto bail; } if (!(osb->sb->s_flags & MS_RDONLY) && (i = OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb, ~OCFS2_FEATURE_RO_COMPAT_SUPP))) { mlog(ML_ERROR, "couldn't mount RDWR because of " "unsupported optional features (%x).\n", i); status = -EINVAL; goto bail; } if (ocfs2_clusterinfo_valid(osb)) { osb->osb_stackflags = OCFS2_RAW_SB(di)->s_cluster_info.ci_stackflags; memcpy(osb->osb_cluster_stack, OCFS2_RAW_SB(di)->s_cluster_info.ci_stack, OCFS2_STACK_LABEL_LEN); osb->osb_cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0'; if (strlen(osb->osb_cluster_stack) != OCFS2_STACK_LABEL_LEN) { mlog(ML_ERROR, "couldn't mount because of an invalid " "cluster stack label (%s) \n", osb->osb_cluster_stack); status = -EINVAL; goto bail; } } else { /* The empty string is identical with classic tools that * don't know about s_cluster_info. */ osb->osb_cluster_stack[0] = '\0'; } get_random_bytes(&osb->s_next_generation, sizeof(u32)); /* FIXME * This should be done in ocfs2_journal_init(), but unknown * ordering issues will cause the filesystem to crash. * If anyone wants to figure out what part of the code * refers to osb->journal before ocfs2_journal_init() is run, * be my guest. 
*/ /* initialize our journal structure */ journal = kzalloc(sizeof(struct ocfs2_journal), GFP_KERNEL); if (!journal) { mlog(ML_ERROR, "unable to alloc journal\n"); status = -ENOMEM; goto bail; } osb->journal = journal; journal->j_osb = osb; atomic_set(&journal->j_num_trans, 0); init_rwsem(&journal->j_trans_barrier); init_waitqueue_head(&journal->j_checkpointed); spin_lock_init(&journal->j_lock); journal->j_trans_id = (unsigned long) 1; INIT_LIST_HEAD(&journal->j_la_cleanups); INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery); journal->j_state = OCFS2_JOURNAL_FREE; INIT_WORK(&osb->dentry_lock_work, ocfs2_drop_dl_inodes); osb->dentry_lock_list = NULL; /* get some pseudo constants for clustersize bits */ osb->s_clustersize_bits = le32_to_cpu(di->id2.i_super.s_clustersize_bits); osb->s_clustersize = 1 << osb->s_clustersize_bits; if (osb->s_clustersize < OCFS2_MIN_CLUSTERSIZE || osb->s_clustersize > OCFS2_MAX_CLUSTERSIZE) { mlog(ML_ERROR, "Volume has invalid cluster size (%d)\n", osb->s_clustersize); status = -EINVAL; goto bail; } total_blocks = ocfs2_clusters_to_blocks(osb->sb, le32_to_cpu(di->i_clusters)); status = generic_check_addressable(osb->sb->s_blocksize_bits, total_blocks); if (status) { mlog(ML_ERROR, "Volume too large " "to mount safely on this system"); status = -EFBIG; goto bail; } if (ocfs2_setup_osb_uuid(osb, di->id2.i_super.s_uuid, sizeof(di->id2.i_super.s_uuid))) { mlog(ML_ERROR, "Out of memory trying to setup our uuid.\n"); status = -ENOMEM; goto bail; } memcpy(&uuid_net_key, di->id2.i_super.s_uuid, sizeof(uuid_net_key)); strncpy(osb->vol_label, di->id2.i_super.s_label, 63); osb->vol_label[63] = '\0'; osb->root_blkno = le64_to_cpu(di->id2.i_super.s_root_blkno); osb->system_dir_blkno = le64_to_cpu(di->id2.i_super.s_system_dir_blkno); osb->first_cluster_group_blkno = le64_to_cpu(di->id2.i_super.s_first_cluster_group); osb->fs_generation = le32_to_cpu(di->i_fs_generation); osb->uuid_hash = le32_to_cpu(di->id2.i_super.s_uuid_hash); 
trace_ocfs2_initialize_super(osb->vol_label, osb->uuid_str, (unsigned long long)osb->root_blkno, (unsigned long long)osb->system_dir_blkno, osb->s_clustersize_bits); osb->osb_dlm_debug = ocfs2_new_dlm_debug(); if (!osb->osb_dlm_debug) { status = -ENOMEM; mlog_errno(status); goto bail; } atomic_set(&osb->vol_state, VOLUME_INIT); /* load root, system_dir, and all global system inodes */ status = ocfs2_init_global_system_inodes(osb); if (status < 0) { mlog_errno(status); goto bail; } /* * global bitmap */ inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE, OCFS2_INVALID_SLOT); if (!inode) { status = -EINVAL; mlog_errno(status); goto bail; } osb->bitmap_blkno = OCFS2_I(inode)->ip_blkno; osb->osb_clusters_at_boot = OCFS2_I(inode)->ip_clusters; iput(inode); osb->bitmap_cpg = ocfs2_group_bitmap_size(sb, 0, osb->s_feature_incompat) * 8; status = ocfs2_init_slot_info(osb); if (status < 0) { mlog_errno(status); goto bail; } cleancache_init_shared_fs((char *)&di->id2.i_super.s_uuid, sb); bail: return status; } /* * will return: -EAGAIN if it is ok to keep searching for superblocks * -EINVAL if there is a bad superblock * 0 on success */ static int ocfs2_verify_volume(struct ocfs2_dinode *di, struct buffer_head *bh, u32 blksz, struct ocfs2_blockcheck_stats *stats) { int status = -EAGAIN; if (memcmp(di->i_signature, OCFS2_SUPER_BLOCK_SIGNATURE, strlen(OCFS2_SUPER_BLOCK_SIGNATURE)) == 0) { /* We have to do a raw check of the feature here */ if (le32_to_cpu(di->id2.i_super.s_feature_incompat) & OCFS2_FEATURE_INCOMPAT_META_ECC) { status = ocfs2_block_check_validate(bh->b_data, bh->b_size, &di->i_check, stats); if (status) goto out; } status = -EINVAL; if ((1 << le32_to_cpu(di->id2.i_super.s_blocksize_bits)) != blksz) { mlog(ML_ERROR, "found superblock with incorrect block " "size: found %u, should be %u\n", 1 << le32_to_cpu(di->id2.i_super.s_blocksize_bits), blksz); } else if (le16_to_cpu(di->id2.i_super.s_major_rev_level) != OCFS2_MAJOR_REV_LEVEL || 
le16_to_cpu(di->id2.i_super.s_minor_rev_level) != OCFS2_MINOR_REV_LEVEL) { mlog(ML_ERROR, "found superblock with bad version: " "found %u.%u, should be %u.%u\n", le16_to_cpu(di->id2.i_super.s_major_rev_level), le16_to_cpu(di->id2.i_super.s_minor_rev_level), OCFS2_MAJOR_REV_LEVEL, OCFS2_MINOR_REV_LEVEL); } else if (bh->b_blocknr != le64_to_cpu(di->i_blkno)) { mlog(ML_ERROR, "bad block number on superblock: " "found %llu, should be %llu\n", (unsigned long long)le64_to_cpu(di->i_blkno), (unsigned long long)bh->b_blocknr); } else if (le32_to_cpu(di->id2.i_super.s_clustersize_bits) < 12 || le32_to_cpu(di->id2.i_super.s_clustersize_bits) > 20) { mlog(ML_ERROR, "bad cluster size found: %u\n", 1 << le32_to_cpu(di->id2.i_super.s_clustersize_bits)); } else if (!le64_to_cpu(di->id2.i_super.s_root_blkno)) { mlog(ML_ERROR, "bad root_blkno: 0\n"); } else if (!le64_to_cpu(di->id2.i_super.s_system_dir_blkno)) { mlog(ML_ERROR, "bad system_dir_blkno: 0\n"); } else if (le16_to_cpu(di->id2.i_super.s_max_slots) > OCFS2_MAX_SLOTS) { mlog(ML_ERROR, "Superblock slots found greater than file system " "maximum: found %u, max %u\n", le16_to_cpu(di->id2.i_super.s_max_slots), OCFS2_MAX_SLOTS); } else { /* found it! */ status = 0; } } out: if (status && status != -EAGAIN) mlog_errno(status); return status; } static int ocfs2_check_volume(struct ocfs2_super *osb) { int status; int dirty; int local; struct ocfs2_dinode *local_alloc = NULL; /* only used if we * recover * ourselves. */ /* Init our journal object. */ status = ocfs2_journal_init(osb->journal, &dirty); if (status < 0) { mlog(ML_ERROR, "Could not initialize journal!\n"); goto finally; } /* Now that journal has been initialized, check to make sure entire volume is addressable. */ status = ocfs2_journal_addressable(osb); if (status) goto finally; /* If the journal was unmounted cleanly then we don't want to * recover anything. 
Otherwise, journal_load will do that * dirty work for us :) */ if (!dirty) { status = ocfs2_journal_wipe(osb->journal, 0); if (status < 0) { mlog_errno(status); goto finally; } } else { printk(KERN_NOTICE "ocfs2: File system on device (%s) was not " "unmounted cleanly, recovering it.\n", osb->dev_str); } local = ocfs2_mount_local(osb); /* will play back anything left in the journal. */ status = ocfs2_journal_load(osb->journal, local, dirty); if (status < 0) { mlog(ML_ERROR, "ocfs2 journal load failed! %d\n", status); goto finally; } if (dirty) { /* recover my local alloc if we didn't unmount cleanly. */ status = ocfs2_begin_local_alloc_recovery(osb, osb->slot_num, &local_alloc); if (status < 0) { mlog_errno(status); goto finally; } /* we complete the recovery process after we've marked * ourselves as mounted. */ } status = ocfs2_load_local_alloc(osb); if (status < 0) { mlog_errno(status); goto finally; } if (dirty) { /* Recovery will be completed after we've mounted the * rest of the volume. */ osb->dirty = 1; osb->local_alloc_copy = local_alloc; local_alloc = NULL; } /* go through each journal, trylock it and if you get the * lock, and it's marked as dirty, set the bit in the recover * map and launch a recovery thread for it. */ status = ocfs2_mark_dead_nodes(osb); if (status < 0) { mlog_errno(status); goto finally; } status = ocfs2_compute_replay_slots(osb); if (status < 0) mlog_errno(status); finally: if (local_alloc) kfree(local_alloc); if (status) mlog_errno(status); return status; } /* * The routine gets called from dismount or close whenever a dismount on * volume is requested and the osb open count becomes 1. * It will remove the osb from the global list and also free up all the * initialized resources and fileobject. 
*/ static void ocfs2_delete_osb(struct ocfs2_super *osb) { /* This function assumes that the caller has the main osb resource */ ocfs2_free_slot_info(osb); kfree(osb->osb_orphan_wipes); kfree(osb->slot_recovery_generations); /* FIXME * This belongs in journal shutdown, but because we have to * allocate osb->journal at the start of ocfs2_initialize_osb(), * we free it here. */ kfree(osb->journal); if (osb->local_alloc_copy) kfree(osb->local_alloc_copy); kfree(osb->uuid_str); ocfs2_put_dlm_debug(osb->osb_dlm_debug); memset(osb, 0, sizeof(struct ocfs2_super)); } /* Put OCFS2 into a readonly state, or (if the user specifies it), * panic(). We do not support continue-on-error operation. */ static void ocfs2_handle_error(struct super_block *sb) { struct ocfs2_super *osb = OCFS2_SB(sb); if (osb->s_mount_opt & OCFS2_MOUNT_ERRORS_PANIC) panic("OCFS2: (device %s): panic forced after error\n", sb->s_id); ocfs2_set_osb_flag(osb, OCFS2_OSB_ERROR_FS); if (sb->s_flags & MS_RDONLY && (ocfs2_is_soft_readonly(osb) || ocfs2_is_hard_readonly(osb))) return; printk(KERN_CRIT "File system is now read-only due to the potential " "of on-disk corruption. Please run fsck.ocfs2 once the file " "system is unmounted.\n"); sb->s_flags |= MS_RDONLY; ocfs2_set_ro_flag(osb, 0); } static char error_buf[1024]; void __ocfs2_error(struct super_block *sb, const char *function, const char *fmt, ...) { va_list args; va_start(args, fmt); vsnprintf(error_buf, sizeof(error_buf), fmt, args); va_end(args); /* Not using mlog here because we want to show the actual * function the error came from. */ printk(KERN_CRIT "OCFS2: ERROR (device %s): %s: %s\n", sb->s_id, function, error_buf); ocfs2_handle_error(sb); } /* Handle critical errors. This is intentionally more drastic than * ocfs2_handle_error, so we only use for things like journal errors, * etc. */ void __ocfs2_abort(struct super_block* sb, const char *function, const char *fmt, ...) 
{ va_list args; va_start(args, fmt); vsnprintf(error_buf, sizeof(error_buf), fmt, args); va_end(args); printk(KERN_CRIT "OCFS2: abort (device %s): %s: %s\n", sb->s_id, function, error_buf); /* We don't have the cluster support yet to go straight to * hard readonly in here. Until then, we want to keep * ocfs2_abort() so that we can at least mark critical * errors. * * TODO: This should abort the journal and alert other nodes * that our slot needs recovery. */ /* Force a panic(). This stinks, but it's better than letting * things continue without having a proper hard readonly * here. */ if (!ocfs2_mount_local(OCFS2_SB(sb))) OCFS2_SB(sb)->s_mount_opt |= OCFS2_MOUNT_ERRORS_PANIC; ocfs2_handle_error(sb); } /* * Void signal blockers, because in-kernel sigprocmask() only fails * when SIG_* is wrong. */ void ocfs2_block_signals(sigset_t *oldset) { int rc; sigset_t blocked; sigfillset(&blocked); rc = sigprocmask(SIG_BLOCK, &blocked, oldset); BUG_ON(rc); } void ocfs2_unblock_signals(sigset_t *oldset) { int rc = sigprocmask(SIG_SETMASK, oldset, NULL); BUG_ON(rc); } module_init(ocfs2_init); module_exit(ocfs2_exit);
gpl-2.0
RenderBroken/msm8974_motox2014_render_kernel
arch/frv/kernel/irq-mb93493.c
7345
3617
/* irq-mb93493.c: MB93493 companion chip interrupt handler
 *
 * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/irc-regs.h>
#include <asm/mb93493-irqs.h>
#include <asm/mb93493-regs.h>

/* Per-source routing bit: selects which of the two IQSR registers
 * (i.e. which CPU interrupt output) this MB93493 source drives. */
#define IRQ_ROUTE_ONE(X) (X##_ROUTE << (X - IRQ_BASE_MB93493))

#define IRQ_ROUTING \
	(IRQ_ROUTE_ONE(IRQ_MB93493_VDC)		| \
	 IRQ_ROUTE_ONE(IRQ_MB93493_VCC)		| \
	 IRQ_ROUTE_ONE(IRQ_MB93493_AUDIO_OUT)	| \
	 IRQ_ROUTE_ONE(IRQ_MB93493_I2C_0)	| \
	 IRQ_ROUTE_ONE(IRQ_MB93493_I2C_1)	| \
	 IRQ_ROUTE_ONE(IRQ_MB93493_USB)		| \
	 IRQ_ROUTE_ONE(IRQ_MB93493_LOCAL_BUS)	| \
	 IRQ_ROUTE_ONE(IRQ_MB93493_PCMCIA)	| \
	 IRQ_ROUTE_ONE(IRQ_MB93493_GPIO)	| \
	 IRQ_ROUTE_ONE(IRQ_MB93493_AUDIO_IN))

/*
 * daughter board PIC operations
 * - there is no way to ACK interrupts in the MB93493 chip
 */

/* Mask one MB93493 source: clear its enable bit (bit 16..31) in the
 * IQSR register that this source is routed to. */
static void frv_mb93493_mask(struct irq_data *d)
{
	uint32_t iqsr;
	volatile void *piqsr;

	if (IRQ_ROUTING & (1 << (d->irq - IRQ_BASE_MB93493)))
		piqsr = __addr_MB93493_IQSR(1);
	else
		piqsr = __addr_MB93493_IQSR(0);

	iqsr = readl(piqsr);
	iqsr &= ~(1 << (d->irq - IRQ_BASE_MB93493 + 16));
	writel(iqsr, piqsr);
}

/* No-op: the chip has no ACK mechanism (see comment above). */
static void frv_mb93493_ack(struct irq_data *d)
{
}

/* Unmask one MB93493 source: set its enable bit in the routed IQSR. */
static void frv_mb93493_unmask(struct irq_data *d)
{
	uint32_t iqsr;
	volatile void *piqsr;

	if (IRQ_ROUTING & (1 << (d->irq - IRQ_BASE_MB93493)))
		piqsr = __addr_MB93493_IQSR(1);
	else
		piqsr = __addr_MB93493_IQSR(0);

	iqsr = readl(piqsr);
	iqsr |= 1 << (d->irq - IRQ_BASE_MB93493 + 16);
	writel(iqsr, piqsr);
}

/* NOTE(review): the name string reads "mb93093" while everything else
 * in this file says mb93493 — looks like a typo, but it is a
 * user-visible runtime string (/proc/interrupts); confirm before
 * changing. */
static struct irq_chip frv_mb93493_pic = {
	.name		= "mb93093",
	.irq_ack	= frv_mb93493_ack,
	.irq_mask	= frv_mb93493_mask,
	.irq_mask_ack	= frv_mb93493_mask,
	.irq_unmask	= frv_mb93493_unmask,
};

/*
 * MB93493 PIC interrupt handler
 */
static irqreturn_t mb93493_interrupt(int irq, void *_piqsr)
{
	volatile void *piqsr = _piqsr;
	uint32_t iqsr;

	iqsr = readl(piqsr);
	/* keep only sources that are both pending (low 16) and
	 * enabled (high 16) */
	iqsr = iqsr & (iqsr >> 16) & 0xffff;

	/* poll all the triggered IRQs */
	while (iqsr) {
		int irq;

		/* FRV scan finds the highest set bit; convert to a bit
		 * index and dispatch through the generic IRQ layer */
		asm("scan %1,gr0,%0" : "=r"(irq) : "r"(iqsr));
		irq = 31 - irq;
		iqsr &= ~(1 << irq);

		generic_handle_irq(IRQ_BASE_MB93493 + irq);
	}

	return IRQ_HANDLED;
}

/*
 * define an interrupt action for each MB93493 PIC output
 * - use dev_id to indicate the MB93493 PIC input to output mappings
 */
static struct irqaction mb93493_irq[2]  = {
	[0] = {
		.handler	= mb93493_interrupt,
		.flags		= IRQF_DISABLED | IRQF_SHARED,
		.name		= "mb93493.0",
		.dev_id		= (void *) __addr_MB93493_IQSR(0),
	},
	[1] = {
		.handler	= mb93493_interrupt,
		.flags		= IRQF_DISABLED | IRQF_SHARED,
		.name		= "mb93493.1",
		.dev_id		= (void *) __addr_MB93493_IQSR(1),
	}
};

/*
 * initialise the motherboard MB93493's PIC
 */
void __init mb93493_init(void)
{
	int irq;

	/* register the 11 MB93493 sources with the generic IRQ core */
	for (irq = IRQ_BASE_MB93493 + 0; irq <= IRQ_BASE_MB93493 + 10; irq++)
		irq_set_chip_and_handler(irq, &frv_mb93493_pic,
					 handle_edge_irq);

	/* the MB93493 drives external IRQ inputs on the CPU PIC */
	setup_irq(IRQ_CPU_MB93493_0, &mb93493_irq[0]);
	setup_irq(IRQ_CPU_MB93493_1, &mb93493_irq[1]);
}
gpl-2.0
GameTheory-/android_kernel_lge_f6mt
drivers/scsi/qla4xxx/ql4_bsg.c
7601
13774
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c)  2011 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_bsg.h"

/* BSG vendor command: read a range of the adapter flash into the
 * reply payload.  Serialised against other flash ops via
 * ha->flash_state. */
static int
qla4xxx_read_flash(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t offset = 0;
	uint32_t length = 0;
	dma_addr_t flash_dma;
	uint8_t *flash = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (ha->flash_state != QLFLASH_WAITING) {
		ql4_printk(KERN_ERR, ha, "%s: another flash operation "
			   "active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	ha->flash_state = QLFLASH_READING;
	/* vendor_cmd[1] carries the flash byte offset; the read size is
	 * taken from the reply payload */
	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	length = bsg_job->reply_payload.payload_len;

	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
				   GFP_KERNEL);
	if (!flash) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_flash(ha, flash_dma, offset, length);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    flash, length);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
	/* always release the single-slot flash "lock" */
	ha->flash_state = QLFLASH_WAITING;
	return rval;
}

/* BSG vendor command: write the request payload to the adapter flash
 * at vendor_cmd[1], with write options in vendor_cmd[2]. */
static int
qla4xxx_update_flash(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t length = 0;
	uint32_t offset = 0;
	uint32_t options = 0;
	dma_addr_t flash_dma;
	uint8_t *flash = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (ha->flash_state != QLFLASH_WAITING) {
		ql4_printk(KERN_ERR, ha, "%s: another flash operation "
			   "active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	ha->flash_state = QLFLASH_WRITING;
	length = bsg_job->request_payload.payload_len;
	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	options = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
				   GFP_KERNEL);
	if (!flash) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt, flash, length);

	rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
	ha->flash_state = QLFLASH_WAITING;
	return rval;
}

/* BSG vendor command: query IP state for ACB index vendor_cmd[1] /
 * IP index vendor_cmd[2]; returns the raw mailbox registers in the
 * reply payload. */
static int
qla4xxx_get_acb_state(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t status[MBOX_REG_COUNT];
	uint32_t acb_idx;
	uint32_t ip_idx;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (bsg_job->reply_payload.payload_len < sizeof(status)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n",
			   __func__, bsg_job->reply_payload.payload_len);
		rval = -EINVAL;
		goto leave;
	}

	acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
	rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    status, sizeof(status));
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
leave:
	return rval;
}

/* BSG vendor command: read NVRAM (40xx adapters only) from offset
 * vendor_cmd[1] into the reply payload; bounds-checked against the
 * per-family NVRAM size. */
static int
qla4xxx_read_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 40xx adapters are supported */
	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->reply_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
			   " nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    nvram, len);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
	return rval;
}

/* BSG vendor command: write the request payload to NVRAM at offset
 * vendor_cmd[1] (40xx adapters only), with the same size bound as
 * qla4xxx_read_nvram(). */
static int
qla4xxx_update_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->request_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
			   " nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash " "data\n", __func__); rval = -ENOMEM; goto leave; } sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, nvram, len); rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len); if (rval) { ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__); bsg_reply->result = DID_ERROR << 16; rval = -EIO; } else bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma); leave: return rval; } static int qla4xxx_restore_defaults(struct bsg_job *bsg_job) { struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); struct scsi_qla_host *ha = to_qla_host(host); struct iscsi_bsg_request *bsg_req = bsg_job->request; struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; uint32_t region = 0; uint32_t field0 = 0; uint32_t field1 = 0; int rval = -EINVAL; bsg_reply->reply_payload_rcv_len = 0; if (unlikely(pci_channel_offline(ha->pdev))) goto leave; if (is_qla4010(ha)) goto leave; if (ql4xxx_reset_active(ha)) { ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); rval = -EBUSY; goto leave; } region = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3]; rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1); if (rval) { ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__); bsg_reply->result = DID_ERROR << 16; rval = -EIO; } else bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); leave: return rval; } static int qla4xxx_bsg_get_acb(struct bsg_job *bsg_job) { struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); struct scsi_qla_host *ha = to_qla_host(host); struct iscsi_bsg_request *bsg_req = bsg_job->request; struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; uint32_t acb_type = 0; uint32_t len = 0; dma_addr_t acb_dma; 
/* qla4xxx_bsg_get_acb(), continued: remaining locals */
	uint8_t *acb = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->reply_payload.payload_len;
	/* reply buffer must be able to hold a whole ACB */
	if (len < sizeof(struct addr_ctrl_blk)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n",
			   __func__, len);
		rval = -EINVAL;
		goto leave;
	}

	acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL);
	if (!acb) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    acb, len);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma);
leave:
	return rval;
}

/**
 * qla4xxx_process_vendor_specific - handle vendor specific bsg request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Dispatches on vendor_cmd[0] to the handler above; unknown commands
 * are completed with DID_ERROR and -ENOSYS.
 **/
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
{
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);

	switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
	case QLISCSI_VND_READ_FLASH:
		return qla4xxx_read_flash(bsg_job);

	case QLISCSI_VND_UPDATE_FLASH:
		return qla4xxx_update_flash(bsg_job);

	case QLISCSI_VND_GET_ACB_STATE:
		return qla4xxx_get_acb_state(bsg_job);

	case QLISCSI_VND_READ_NVRAM:
		return qla4xxx_read_nvram(bsg_job);

	case QLISCSI_VND_UPDATE_NVRAM:
		return qla4xxx_update_nvram(bsg_job);

	case QLISCSI_VND_RESTORE_DEFAULTS:
		return qla4xxx_restore_defaults(bsg_job);

	case QLISCSI_VND_GET_ACB:
		return qla4xxx_bsg_get_acb(bsg_job);

	default:
		/* NOTE(review): this logs msgcode, though the value that
		 * was actually switched on is vendor_cmd[0] — confirm
		 * which was intended before changing */
		ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: "
			   "0x%x\n", __func__, bsg_req->msgcode);
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
		bsg_job_done(bsg_job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
		return -ENOSYS;
	}
}

/**
 * qla4xxx_bsg_request - handle bsg request from ISCSI transport
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Entry point from the iSCSI transport; only host vendor messages
 * are supported.
 */
int qla4xxx_bsg_request(struct bsg_job *bsg_job)
{
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);

	switch (bsg_req->msgcode) {
	case ISCSI_BSG_HST_VENDOR:
		return qla4xxx_process_vendor_specific(bsg_job);

	default:
		ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n",
			   __func__, bsg_req->msgcode);
	}
	return -ENOSYS;
}
gpl-2.0
kbehren/android_kernel_lenovo_msm8226
drivers/ide/sis5513.c
8113
18551
/* * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer * Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz> * Copyright (C) 2007-2009 Bartlomiej Zolnierkiewicz * * May be copied or modified under the terms of the GNU General Public License * * * Thanks : * * SiS Taiwan : for direct support and hardware. * Daniela Engert : for initial ATA100 advices and numerous others. * John Fremlin, Manfred Spraul, Dave Morgan, Peter Kjellerstedt : * for checking code correctness, providing patches. * * * Original tests and design on the SiS620 chipset. * ATA100 tests and design on the SiS735 chipset. * ATA16/33 support from specs * ATA133 support for SiS961/962 by L.C. Chang <lcchang@sis.com.tw> * ATA133 961/962/963 fixes by Vojtech Pavlik <vojtech@suse.cz> * * Documentation: * SiS chipset documentation available under NDA to companies only * (not to individuals). */ /* * The original SiS5513 comes from a SiS5511/55112/5513 chipset. The original * SiS5513 was also used in the SiS5596/5513 chipset. Thus if we see a SiS5511 * or SiS5596, we can assume we see the first MWDMA-16 capable SiS5513 chip. * * Later SiS chipsets integrated the 5513 functionality into the NorthBridge, * starting with SiS5571 and up to SiS745. The PCI ID didn't change, though. We * can figure out that we have a more modern and more capable 5513 by looking * for the respective NorthBridge IDs. * * Even later (96x family) SiS chipsets use the MuTIOL link and place the 5513 * into the SouthBrige. Here we cannot rely on looking up the NorthBridge PCI * ID, while the now ATA-133 capable 5513 still has the same PCI ID. * Fortunately the 5513 can be 'unmasked' by fiddling with some config space * bits, changing its device id to the true one - 5517 for 961 and 5518 for * 962/963. 
*/ #include <linux/types.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/ide.h> #define DRV_NAME "sis5513" /* registers layout and init values are chipset family dependent */ #define ATA_16 0x01 #define ATA_33 0x02 #define ATA_66 0x03 #define ATA_100a 0x04 /* SiS730/SiS550 is ATA100 with ATA66 layout */ #define ATA_100 0x05 #define ATA_133a 0x06 /* SiS961b with 133 support */ #define ATA_133 0x07 /* SiS962/963 */ static u8 chipset_family; /* * Devices supported */ static const struct { const char *name; u16 host_id; u8 chipset_family; u8 flags; } SiSHostChipInfo[] = { { "SiS968", PCI_DEVICE_ID_SI_968, ATA_133 }, { "SiS966", PCI_DEVICE_ID_SI_966, ATA_133 }, { "SiS965", PCI_DEVICE_ID_SI_965, ATA_133 }, { "SiS745", PCI_DEVICE_ID_SI_745, ATA_100 }, { "SiS735", PCI_DEVICE_ID_SI_735, ATA_100 }, { "SiS733", PCI_DEVICE_ID_SI_733, ATA_100 }, { "SiS635", PCI_DEVICE_ID_SI_635, ATA_100 }, { "SiS633", PCI_DEVICE_ID_SI_633, ATA_100 }, { "SiS730", PCI_DEVICE_ID_SI_730, ATA_100a }, { "SiS550", PCI_DEVICE_ID_SI_550, ATA_100a }, { "SiS640", PCI_DEVICE_ID_SI_640, ATA_66 }, { "SiS630", PCI_DEVICE_ID_SI_630, ATA_66 }, { "SiS620", PCI_DEVICE_ID_SI_620, ATA_66 }, { "SiS540", PCI_DEVICE_ID_SI_540, ATA_66 }, { "SiS530", PCI_DEVICE_ID_SI_530, ATA_66 }, { "SiS5600", PCI_DEVICE_ID_SI_5600, ATA_33 }, { "SiS5598", PCI_DEVICE_ID_SI_5598, ATA_33 }, { "SiS5597", PCI_DEVICE_ID_SI_5597, ATA_33 }, { "SiS5591/2", PCI_DEVICE_ID_SI_5591, ATA_33 }, { "SiS5582", PCI_DEVICE_ID_SI_5582, ATA_33 }, { "SiS5581", PCI_DEVICE_ID_SI_5581, ATA_33 }, { "SiS5596", PCI_DEVICE_ID_SI_5596, ATA_16 }, { "SiS5571", PCI_DEVICE_ID_SI_5571, ATA_16 }, { "SiS5517", PCI_DEVICE_ID_SI_5517, ATA_16 }, { "SiS551x", PCI_DEVICE_ID_SI_5511, ATA_16 }, }; /* Cycle time bits and values vary across chip dma capabilities These three arrays hold the register layout and the values to set. 
Indexed by chipset_family and (dma_mode - XFER_UDMA_0) */ /* {0, ATA_16, ATA_33, ATA_66, ATA_100a, ATA_100, ATA_133} */ static u8 cycle_time_offset[] = { 0, 0, 5, 4, 4, 0, 0 }; static u8 cycle_time_range[] = { 0, 0, 2, 3, 3, 4, 4 }; static u8 cycle_time_value[][XFER_UDMA_6 - XFER_UDMA_0 + 1] = { { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */ { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */ { 3, 2, 1, 0, 0, 0, 0 }, /* ATA_33 */ { 7, 5, 3, 2, 1, 0, 0 }, /* ATA_66 */ { 7, 5, 3, 2, 1, 0, 0 }, /* ATA_100a (730 specific), different cycle_time range and offset */ { 11, 7, 5, 4, 2, 1, 0 }, /* ATA_100 */ { 15, 10, 7, 5, 3, 2, 1 }, /* ATA_133a (earliest 691 southbridges) */ { 15, 10, 7, 5, 3, 2, 1 }, /* ATA_133 */ }; /* CRC Valid Setup Time vary across IDE clock setting 33/66/100/133 See SiS962 data sheet for more detail */ static u8 cvs_time_value[][XFER_UDMA_6 - XFER_UDMA_0 + 1] = { { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */ { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */ { 2, 1, 1, 0, 0, 0, 0 }, { 4, 3, 2, 1, 0, 0, 0 }, { 4, 3, 2, 1, 0, 0, 0 }, { 6, 4, 3, 1, 1, 1, 0 }, { 9, 6, 4, 2, 2, 2, 2 }, { 9, 6, 4, 2, 2, 2, 2 }, }; /* Initialize time, Active time, Recovery time vary across IDE clock settings. 
These 3 arrays hold the register value for PIO0/1/2/3/4 and DMA0/1/2 mode in order */ static u8 ini_time_value[][8] = { { 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0 }, { 2, 1, 0, 0, 0, 1, 0, 0 }, { 4, 3, 1, 1, 1, 3, 1, 1 }, { 4, 3, 1, 1, 1, 3, 1, 1 }, { 6, 4, 2, 2, 2, 4, 2, 2 }, { 9, 6, 3, 3, 3, 6, 3, 3 }, { 9, 6, 3, 3, 3, 6, 3, 3 }, }; static u8 act_time_value[][8] = { { 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0 }, { 9, 9, 9, 2, 2, 7, 2, 2 }, { 19, 19, 19, 5, 4, 14, 5, 4 }, { 19, 19, 19, 5, 4, 14, 5, 4 }, { 28, 28, 28, 7, 6, 21, 7, 6 }, { 38, 38, 38, 10, 9, 28, 10, 9 }, { 38, 38, 38, 10, 9, 28, 10, 9 }, }; static u8 rco_time_value[][8] = { { 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0 }, { 9, 2, 0, 2, 0, 7, 1, 1 }, { 19, 5, 1, 5, 2, 16, 3, 2 }, { 19, 5, 1, 5, 2, 16, 3, 2 }, { 30, 9, 3, 9, 4, 25, 6, 4 }, { 40, 12, 4, 12, 5, 34, 12, 5 }, { 40, 12, 4, 12, 5, 34, 12, 5 }, }; /* * Printing configuration */ /* Used for chipset type printing at boot time */ static char *chipset_capability[] = { "ATA", "ATA 16", "ATA 33", "ATA 66", "ATA 100 (1st gen)", "ATA 100 (2nd gen)", "ATA 133 (1st gen)", "ATA 133 (2nd gen)" }; /* * Configuration functions */ static u8 sis_ata133_get_base(ide_drive_t *drive) { struct pci_dev *dev = to_pci_dev(drive->hwif->dev); u32 reg54 = 0; pci_read_config_dword(dev, 0x54, &reg54); return ((reg54 & 0x40000000) ? 
0x70 : 0x40) + drive->dn * 4;
}

/* Program PIO/MWDMA timings on ATA_16/33/66/100a layouts: one 16-bit
 * register per drive at 0x40 + 2*dn. */
static void sis_ata16_program_timings(ide_drive_t *drive, const u8 mode)
{
	struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
	u16 t1 = 0;
	u8 drive_pci = 0x40 + drive->dn * 2;

	const u16 pio_timings[]   = { 0x000, 0x607, 0x404, 0x303, 0x301 };
	const u16 mwdma_timings[] = { 0x008, 0x302, 0x301 };

	pci_read_config_word(dev, drive_pci, &t1);

	/* clear active/recovery timings */
	t1 &= ~0x070f;
	if (mode >= XFER_MW_DMA_0) {
		if (chipset_family > ATA_16)
			t1 &= ~0x8000;	/* disable UDMA */
		t1 |= mwdma_timings[mode - XFER_MW_DMA_0];
	} else
		t1 |= pio_timings[mode - XFER_PIO_0];

	pci_write_config_word(dev, drive_pci, t1);
}

/* Program PIO/MWDMA timings on ATA_100/133a layouts: two 8-bit
 * registers per drive (UDMA enable at +0, active/recovery at +1). */
static void sis_ata100_program_timings(ide_drive_t *drive, const u8 mode)
{
	struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
	u8 t1, drive_pci = 0x40 + drive->dn * 2;

	/* timing bits: 7:4 active 3:0 recovery */
	const u8 pio_timings[]   = { 0x00, 0x67, 0x44, 0x33, 0x31 };
	const u8 mwdma_timings[] = { 0x08, 0x32, 0x31 };

	if (mode >= XFER_MW_DMA_0) {
		u8 t2 = 0;

		pci_read_config_byte(dev, drive_pci, &t2);
		t2 &= ~0x80;	/* disable UDMA */
		pci_write_config_byte(dev, drive_pci, t2);

		t1 = mwdma_timings[mode - XFER_MW_DMA_0];
	} else
		t1 = pio_timings[mode - XFER_PIO_0];

	pci_write_config_byte(dev, drive_pci + 1, t1);
}

/* Program PIO/MWDMA timings on ATA_133 layouts: one 32-bit register
 * per drive holding initialize/active/recovery fields; the table row
 * depends on whether the channel runs on the 100 or 133 MHz clock
 * (bit 3). */
static void sis_ata133_program_timings(ide_drive_t *drive, const u8 mode)
{
	struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
	u32 t1 = 0;
	u8 drive_pci = sis_ata133_get_base(drive), clk, idx;

	pci_read_config_dword(dev, drive_pci, &t1);

	/* keep only the fields we don't own */
	t1 &= 0xc0c00fff;
	clk = (t1 & 0x08) ? ATA_133 : ATA_100;
	if (mode >= XFER_MW_DMA_0) {
		t1 &= ~0x04;	/* disable UDMA */
		idx = mode - XFER_MW_DMA_0 + 5;
	} else
		idx = mode - XFER_PIO_0;
	t1 |= ini_time_value[clk][idx] << 12;
	t1 |= act_time_value[clk][idx] << 16;
	t1 |= rco_time_value[clk][idx] << 24;

	pci_write_config_dword(dev, drive_pci, t1);
}

/* Dispatch to the register layout matching the detected family. */
static void sis_program_timings(ide_drive_t *drive, const u8 mode)
{
	if (chipset_family < ATA_100)		/* ATA_16/33/66/100a */
		sis_ata16_program_timings(drive, mode);
	else if (chipset_family < ATA_133)	/* ATA_100/133a */
		sis_ata100_program_timings(drive, mode);
	else					/* ATA_133 */
		sis_ata133_program_timings(drive, mode);
}

/* Enable read/write prefetch+postwrite for disks, disable it for
 * other media (register 0x4b, two bits per drive). */
static void config_drive_art_rwp(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	u8 reg4bh = 0;
	u8 rw_prefetch = 0;

	pci_read_config_byte(dev, 0x4b, &reg4bh);

	rw_prefetch = reg4bh & ~(0x11 << drive->dn);

	if (drive->media == ide_disk)
		rw_prefetch |= 0x11 << drive->dn;

	/* avoid the config write when nothing changes */
	if (reg4bh != rw_prefetch)
		pci_write_config_byte(dev, 0x4b, rw_prefetch);
}

/* ->set_pio_mode hook. */
static void sis_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	config_drive_art_rwp(drive);
	sis_program_timings(drive, drive->pio_mode);
}

/* Program UDMA timings on ATA_133 layouts (cycle time + CRC valid
 * setup time), again keyed on the per-channel clock bit. */
static void sis_ata133_program_udma_timings(ide_drive_t *drive, const u8 mode)
{
	struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
	u32 regdw = 0;
	u8 drive_pci = sis_ata133_get_base(drive), clk, idx;

	pci_read_config_dword(dev, drive_pci, &regdw);

	regdw |= 0x04;	/* enable UDMA */
	regdw &= 0xfffff00f;
	/* check if ATA133 enable */
	clk = (regdw & 0x08) ? ATA_133 : ATA_100;
	idx = mode - XFER_UDMA_0;
	regdw |= cycle_time_value[clk][idx] << 4;
	regdw |= cvs_time_value[clk][idx] << 8;

	pci_write_config_dword(dev, drive_pci, regdw);
}

/* Program UDMA timings on pre-133 layouts; cycle-time field position
 * and width come from the per-family offset/range tables. */
static void sis_ata33_program_udma_timings(ide_drive_t *drive, const u8 mode)
{
	struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
	u8 drive_pci = 0x40 + drive->dn * 2, reg = 0, i = chipset_family;

	pci_read_config_byte(dev, drive_pci + 1, &reg);

	/* force the UDMA bit on if we want to use UDMA */
	reg |= 0x80;
	/* clean reg cycle time bits */
	reg &= ~((0xff >> (8 - cycle_time_range[i])) << cycle_time_offset[i]);
	/* set reg cycle time bits */
	reg |= cycle_time_value[i][mode - XFER_UDMA_0] << cycle_time_offset[i];

	pci_write_config_byte(dev, drive_pci + 1, reg);
}

/* Dispatch UDMA programming by family. */
static void sis_program_udma_timings(ide_drive_t *drive, const u8 mode)
{
	if (chipset_family >= ATA_133)	/* ATA_133 */
		sis_ata133_program_udma_timings(drive, mode);
	else				/* ATA_33/66/100a/100/133a */
		sis_ata33_program_udma_timings(drive, mode);
}

/* ->set_dma_mode hook: UDMA and MWDMA take different paths. */
static void sis_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	const u8 speed = drive->dma_mode;

	if (speed >= XFER_UDMA_0)
		sis_program_udma_timings(drive, speed);
	else
		sis_program_timings(drive, speed);
}

/* ->udma_filter hook for ATA_133 parts: cap at UDMA5 when the
 * channel's 133 MHz clock bit is off. */
static u8 sis_ata133_udma_filter(ide_drive_t *drive)
{
	struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
	u32 regdw = 0;
	u8 drive_pci = sis_ata133_get_base(drive);

	pci_read_config_dword(dev, drive_pci, &regdw);

	/* if ATA133 disable, we should not set speed above UDMA5 */
	return (regdw & 0x08) ? ATA_UDMA6 : ATA_UDMA5;
}

/* Detect which SiS family this 5513 function belongs to, first by
 * NorthBridge ID, then via the 96x unmasking quirks.  Returns the
 * family (0 on failure) and records it in chipset_family. */
static int __devinit sis_find_family(struct pci_dev *dev)
{
	struct pci_dev *host;
	int i = 0;

	chipset_family = 0;

	for (i = 0; i < ARRAY_SIZE(SiSHostChipInfo) && !chipset_family; i++) {

		host = pci_get_device(PCI_VENDOR_ID_SI, SiSHostChipInfo[i].host_id, NULL);

		if (!host)
			continue;

		chipset_family = SiSHostChipInfo[i].chipset_family;

		/* Special case for SiS630 : 630S/ET is ATA_100a */
		if (SiSHostChipInfo[i].host_id == PCI_DEVICE_ID_SI_630) {
			if (host->revision >= 0x30)
				chipset_family = ATA_100a;
		}
		pci_dev_put(host);

		printk(KERN_INFO DRV_NAME " %s: %s %s controller\n",
			pci_name(dev), SiSHostChipInfo[i].name,
			chipset_capability[chipset_family]);
	}

	if (!chipset_family) { /* Belongs to pci-quirks */

		u32 idemisc;
		u16 trueid;

		/* Disable ID masking and register remapping */
		pci_read_config_dword(dev, 0x54, &idemisc);
		pci_write_config_dword(dev, 0x54, (idemisc & 0x7fffffff));
		pci_read_config_word(dev, PCI_DEVICE_ID, &trueid);
		pci_write_config_dword(dev, 0x54, idemisc);

		if (trueid == 0x5518) {
			printk(KERN_INFO DRV_NAME " %s: SiS 962/963 MuTIOL IDE UDMA133 controller\n",
				pci_name(dev));
			chipset_family = ATA_133;

			/* Check for 5513 compatibility mapping
			 * We must use this, else the port enabled code will fail,
			 * as it expects the enablebits at 0x4a.
*/ if ((idemisc & 0x40000000) == 0) { pci_write_config_dword(dev, 0x54, idemisc | 0x40000000); printk(KERN_INFO DRV_NAME " %s: Switching to 5513 register mapping\n", pci_name(dev)); } } } if (!chipset_family) { /* Belongs to pci-quirks */ struct pci_dev *lpc_bridge; u16 trueid; u8 prefctl; u8 idecfg; pci_read_config_byte(dev, 0x4a, &idecfg); pci_write_config_byte(dev, 0x4a, idecfg | 0x10); pci_read_config_word(dev, PCI_DEVICE_ID, &trueid); pci_write_config_byte(dev, 0x4a, idecfg); if (trueid == 0x5517) { /* SiS 961/961B */ lpc_bridge = pci_get_slot(dev->bus, 0x10); /* Bus 0, Dev 2, Fn 0 */ pci_read_config_byte(dev, 0x49, &prefctl); pci_dev_put(lpc_bridge); if (lpc_bridge->revision == 0x10 && (prefctl & 0x80)) { printk(KERN_INFO DRV_NAME " %s: SiS 961B MuTIOL IDE UDMA133 controller\n", pci_name(dev)); chipset_family = ATA_133a; } else { printk(KERN_INFO DRV_NAME " %s: SiS 961 MuTIOL IDE UDMA100 controller\n", pci_name(dev)); chipset_family = ATA_100; } } } return chipset_family; } static int init_chipset_sis5513(struct pci_dev *dev) { /* Make general config ops here 1/ tell IDE channels to operate in Compatibility mode only 2/ tell old chips to allow per drive IDE timings */ u8 reg; u16 regw; switch (chipset_family) { case ATA_133: /* SiS962 operation mode */ pci_read_config_word(dev, 0x50, &regw); if (regw & 0x08) pci_write_config_word(dev, 0x50, regw&0xfff7); pci_read_config_word(dev, 0x52, &regw); if (regw & 0x08) pci_write_config_word(dev, 0x52, regw&0xfff7); break; case ATA_133a: case ATA_100: /* Fixup latency */ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80); /* Set compatibility bit */ pci_read_config_byte(dev, 0x49, &reg); if (!(reg & 0x01)) pci_write_config_byte(dev, 0x49, reg|0x01); break; case ATA_100a: case ATA_66: /* Fixup latency */ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x10); /* On ATA_66 chips the bit was elsewhere */ pci_read_config_byte(dev, 0x52, &reg); if (!(reg & 0x04)) pci_write_config_byte(dev, 0x52, reg|0x04); break; case ATA_33: 
/* On ATA_33 we didn't have a single bit to set */
		pci_read_config_byte(dev, 0x09, &reg);
		if ((reg & 0x0f) != 0x00)
			pci_write_config_byte(dev, 0x09, reg&0xf0);
		/* fall through: ATA_33 also needs the per-drive timing
		 * enable bit set below */
	case ATA_16:
		/* force per drive recovery and active timings
		   needed on ATA_33 and below chips */
		pci_read_config_byte(dev, 0x52, &reg);
		if (!(reg & 0x08))
			pci_write_config_byte(dev, 0x52, reg|0x08);
		break;
	}

	return 0;
}

/* Laptops whose short 40-wire cables are known to be UDMA-safe,
 * identified by subsystem IDs. */
struct sis_laptop {
	u16 device;
	u16 subvendor;
	u16 subdevice;
};

static const struct sis_laptop sis_laptop[] = {
	/* devid, subvendor, subdev */
	{ 0x5513, 0x1043, 0x1107 },	/* ASUS A6K */
	{ 0x5513, 0x1734, 0x105f },	/* FSC Amilo A1630 */
	{ 0x5513, 0x1071, 0x8640 },	/* EasyNote K5305 */
	/* end marker */
	{ 0, }
};

/* ->cable_detect hook: whitelist the laptops above, otherwise read
 * the family-specific 80-wire detect bit. */
static u8 sis_cable_detect(ide_hwif_t *hwif)
{
	struct pci_dev *pdev = to_pci_dev(hwif->dev);
	const struct sis_laptop *lap = &sis_laptop[0];
	u8 ata66 = 0;

	while (lap->device) {
		if (lap->device == pdev->device &&
		    lap->subvendor == pdev->subsystem_vendor &&
		    lap->subdevice == pdev->subsystem_device)
			return ATA_CBL_PATA40_SHORT;
		lap++;
	}

	if (chipset_family >= ATA_133) {
		u16 regw = 0;
		u16 reg_addr = hwif->channel ? 0x52: 0x50;
		pci_read_config_word(pdev, reg_addr, &regw);
		ata66 = (regw & 0x8000) ? 0 : 1;
	} else if (chipset_family >= ATA_66) {
		u8 reg48h = 0;
		u8 mask = hwif->channel ? 0x20 : 0x10;
		pci_read_config_byte(pdev, 0x48, &reg48h);
		ata66 = (reg48h & mask) ? 0 : 1;
	}

	return ata66 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
}

static const struct ide_port_ops sis_port_ops = {
	.set_pio_mode		= sis_set_pio_mode,
	.set_dma_mode		= sis_set_dma_mode,
	.cable_detect		= sis_cable_detect,
};

/* Same as sis_port_ops plus the UDMA filter only ATA_133 needs. */
static const struct ide_port_ops sis_ata133_port_ops = {
	.set_pio_mode		= sis_set_pio_mode,
	.set_dma_mode		= sis_set_dma_mode,
	.udma_filter		= sis_ata133_udma_filter,
	.cable_detect		= sis_cable_detect,
};

static const struct ide_port_info sis5513_chipset __devinitdata = {
	.name		= DRV_NAME,
	.init_chipset	= init_chipset_sis5513,
	.enablebits	= { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
	.host_flags	= IDE_HFLAG_NO_AUTODMA,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
};

/* PCI probe: detect the family, pick the matching port ops and UDMA
 * mask, then hand off to the generic IDE PCI init. */
static int __devinit sis5513_init_one(struct pci_dev *dev,
				      const struct pci_device_id *id)
{
	struct ide_port_info d = sis5513_chipset;
	u8 udma_rates[] = { 0x00, 0x00, 0x07, 0x1f, 0x3f, 0x3f, 0x7f, 0x7f };
	int rc;

	rc = pci_enable_device(dev);
	if (rc)
		return rc;

	if (sis_find_family(dev) == 0)
		return -ENOTSUPP;

	if (chipset_family >= ATA_133)
		d.port_ops = &sis_ata133_port_ops;
	else
		d.port_ops = &sis_port_ops;

	d.udma_mask = udma_rates[chipset_family];

	return ide_pci_init_one(dev, &d, NULL);
}

static void __devexit sis5513_remove(struct pci_dev *dev)
{
	ide_pci_remove(dev);
	pci_disable_device(dev);
}

static const struct pci_device_id sis5513_pci_tbl[] = {
	{ PCI_VDEVICE(SI, PCI_DEVICE_ID_SI_5513), 0 },
	{ PCI_VDEVICE(SI, PCI_DEVICE_ID_SI_5518), 0 },
	{ PCI_VDEVICE(SI, PCI_DEVICE_ID_SI_1180), 0 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, sis5513_pci_tbl);

static struct pci_driver sis5513_pci_driver = {
	.name		= "SIS_IDE",
	.id_table	= sis5513_pci_tbl,
	.probe		= sis5513_init_one,
	.remove		= __devexit_p(sis5513_remove),
	.suspend	= ide_pci_suspend,
	.resume		= ide_pci_resume,
};

static int __init sis5513_ide_init(void)
{
	return ide_pci_register_driver(&sis5513_pci_driver);
}

static void __exit sis5513_ide_exit(void)
{
	pci_unregister_driver(&sis5513_pci_driver);
}

module_init(sis5513_ide_init);
module_exit(sis5513_ide_exit);

MODULE_AUTHOR("Lionel Bouton, L C Chang, Andre Hedrick, Vojtech Pavlik");
MODULE_DESCRIPTION("PCI driver module for SIS IDE");
MODULE_LICENSE("GPL");
gpl-2.0
rmbq/bubba_kernel_3.0
drivers/staging/vt6655/card.c
8113
68241
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * File: card.c * Purpose: Provide functions to setup NIC operation mode * Functions: * s_vSafeResetTx - Rest Tx * CARDvSetRSPINF - Set RSPINF * vUpdateIFS - Update slotTime,SIFS,DIFS, and EIFS * CARDvUpdateBasicTopRate - Update BasicTopRate * CARDbAddBasicRate - Add to BasicRateSet * CARDbSetBasicRate - Set Basic Tx Rate * CARDbIsOFDMinBasicRate - Check if any OFDM rate is in BasicRateSet * CARDvSetLoopbackMode - Set Loopback mode * CARDbSoftwareReset - Sortware reset NIC * CARDqGetTSFOffset - Caculate TSFOffset * CARDbGetCurrentTSF - Read Current NIC TSF counter * CARDqGetNextTBTT - Caculate Next Beacon TSF counter * CARDvSetFirstNextTBTT - Set NIC Beacon time * CARDvUpdateNextTBTT - Sync. NIC Beacon time * CARDbRadioPowerOff - Turn Off NIC Radio Power * CARDbRadioPowerOn - Turn On NIC Radio Power * CARDbSetWEPMode - Set NIC Wep mode * CARDbSetTxPower - Set NIC tx power * * Revision History: * 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec. * 08-26-2003 Kyle Hsu: Modify the defination type of dwIoBase. * 09-01-2003 Bryan YC Fan: Add vUpdateIFS(). 
* */ #include "tmacro.h" #include "card.h" #include "baseband.h" #include "mac.h" #include "desc.h" #include "rf.h" #include "vntwifi.h" #include "power.h" #include "key.h" #include "rc4.h" #include "country.h" #include "channel.h" /*--------------------- Static Definitions -------------------------*/ //static int msglevel =MSG_LEVEL_DEBUG; static int msglevel =MSG_LEVEL_INFO; #define C_SIFS_A 16 // micro sec. #define C_SIFS_BG 10 #define C_EIFS 80 // micro sec. #define C_SLOT_SHORT 9 // micro sec. #define C_SLOT_LONG 20 #define C_CWMIN_A 15 // slot time #define C_CWMIN_B 31 #define C_CWMAX 1023 // slot time #define WAIT_BEACON_TX_DOWN_TMO 3 // Times //1M, 2M, 5M, 11M, 18M, 24M, 36M, 54M static unsigned char abyDefaultSuppRatesG[] = {WLAN_EID_SUPP_RATES, 8, 0x02, 0x04, 0x0B, 0x16, 0x24, 0x30, 0x48, 0x6C}; //6M, 9M, 12M, 48M static unsigned char abyDefaultExtSuppRatesG[] = {WLAN_EID_EXTSUPP_RATES, 4, 0x0C, 0x12, 0x18, 0x60}; //6M, 9M, 12M, 18M, 24M, 36M, 48M, 54M static unsigned char abyDefaultSuppRatesA[] = {WLAN_EID_SUPP_RATES, 8, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C}; //1M, 2M, 5M, 11M, static unsigned char abyDefaultSuppRatesB[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16}; /*--------------------- Static Variables --------------------------*/ const unsigned short cwRXBCNTSFOff[MAX_RATE] = {17, 17, 17, 17, 34, 23, 17, 11, 8, 5, 4, 3}; /*--------------------- Static Functions --------------------------*/ static void s_vCaculateOFDMRParameter( unsigned char byRate, CARD_PHY_TYPE ePHYType, unsigned char *pbyTxRate, unsigned char *pbyRsvTime ); /*--------------------- Export Functions --------------------------*/ /* * Description: Caculate TxRate and RsvTime fields for RSPINF in OFDM mode. 
* * Parameters: * In: * wRate - Tx Rate * byPktType - Tx Packet type * Out: * pbyTxRate - pointer to RSPINF TxRate field * pbyRsvTime - pointer to RSPINF RsvTime field * * Return Value: none * */ static void s_vCaculateOFDMRParameter ( unsigned char byRate, CARD_PHY_TYPE ePHYType, unsigned char *pbyTxRate, unsigned char *pbyRsvTime ) { switch (byRate) { case RATE_6M : if (ePHYType == PHY_TYPE_11A) {//5GHZ *pbyTxRate = 0x9B; *pbyRsvTime = 44; } else { *pbyTxRate = 0x8B; *pbyRsvTime = 50; } break; case RATE_9M : if (ePHYType == PHY_TYPE_11A) {//5GHZ *pbyTxRate = 0x9F; *pbyRsvTime = 36; } else { *pbyTxRate = 0x8F; *pbyRsvTime = 42; } break; case RATE_12M : if (ePHYType == PHY_TYPE_11A) {//5GHZ *pbyTxRate = 0x9A; *pbyRsvTime = 32; } else { *pbyTxRate = 0x8A; *pbyRsvTime = 38; } break; case RATE_18M : if (ePHYType == PHY_TYPE_11A) {//5GHZ *pbyTxRate = 0x9E; *pbyRsvTime = 28; } else { *pbyTxRate = 0x8E; *pbyRsvTime = 34; } break; case RATE_36M : if (ePHYType == PHY_TYPE_11A) {//5GHZ *pbyTxRate = 0x9D; *pbyRsvTime = 24; } else { *pbyTxRate = 0x8D; *pbyRsvTime = 30; } break; case RATE_48M : if (ePHYType == PHY_TYPE_11A) {//5GHZ *pbyTxRate = 0x98; *pbyRsvTime = 24; } else { *pbyTxRate = 0x88; *pbyRsvTime = 30; } break; case RATE_54M : if (ePHYType == PHY_TYPE_11A) {//5GHZ *pbyTxRate = 0x9C; *pbyRsvTime = 24; } else { *pbyTxRate = 0x8C; *pbyRsvTime = 30; } break; case RATE_24M : default : if (ePHYType == PHY_TYPE_11A) {//5GHZ *pbyTxRate = 0x99; *pbyRsvTime = 28; } else { *pbyTxRate = 0x89; *pbyRsvTime = 34; } break; } } /* * Description: Set RSPINF * * Parameters: * In: * pDevice - The adapter to be set * Out: * none * * Return Value: None. 
* */ static void s_vSetRSPINF (PSDevice pDevice, CARD_PHY_TYPE ePHYType, void *pvSupportRateIEs, void *pvExtSupportRateIEs) { unsigned char byServ = 0, bySignal = 0; // For CCK unsigned short wLen = 0; unsigned char byTxRate = 0, byRsvTime = 0; // For OFDM //Set to Page1 MACvSelectPage1(pDevice->PortOffset); //RSPINF_b_1 BBvCaculateParameter(pDevice, 14, VNTWIFIbyGetACKTxRate(RATE_1M, pvSupportRateIEs, pvExtSupportRateIEs), PK_TYPE_11B, &wLen, &byServ, &bySignal ); VNSvOutPortD(pDevice->PortOffset + MAC_REG_RSPINF_B_1, MAKEDWORD(wLen,MAKEWORD(bySignal,byServ))); ///RSPINF_b_2 BBvCaculateParameter(pDevice, 14, VNTWIFIbyGetACKTxRate(RATE_2M, pvSupportRateIEs, pvExtSupportRateIEs), PK_TYPE_11B, &wLen, &byServ, &bySignal ); VNSvOutPortD(pDevice->PortOffset + MAC_REG_RSPINF_B_2, MAKEDWORD(wLen,MAKEWORD(bySignal,byServ))); //RSPINF_b_5 BBvCaculateParameter(pDevice, 14, VNTWIFIbyGetACKTxRate(RATE_5M, pvSupportRateIEs, pvExtSupportRateIEs), PK_TYPE_11B, &wLen, &byServ, &bySignal ); VNSvOutPortD(pDevice->PortOffset + MAC_REG_RSPINF_B_5, MAKEDWORD(wLen,MAKEWORD(bySignal,byServ))); //RSPINF_b_11 BBvCaculateParameter(pDevice, 14, VNTWIFIbyGetACKTxRate(RATE_11M, pvSupportRateIEs, pvExtSupportRateIEs), PK_TYPE_11B, &wLen, &byServ, &bySignal ); VNSvOutPortD(pDevice->PortOffset + MAC_REG_RSPINF_B_11, MAKEDWORD(wLen,MAKEWORD(bySignal,byServ))); //RSPINF_a_6 s_vCaculateOFDMRParameter(RATE_6M, ePHYType, &byTxRate, &byRsvTime); VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_6, MAKEWORD(byTxRate,byRsvTime)); //RSPINF_a_9 s_vCaculateOFDMRParameter(RATE_9M, ePHYType, &byTxRate, &byRsvTime); VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_9, MAKEWORD(byTxRate,byRsvTime)); //RSPINF_a_12 s_vCaculateOFDMRParameter(RATE_12M, ePHYType, &byTxRate, &byRsvTime); VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_12, MAKEWORD(byTxRate,byRsvTime)); //RSPINF_a_18 s_vCaculateOFDMRParameter(RATE_18M, ePHYType, &byTxRate, &byRsvTime); VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_18, 
MAKEWORD(byTxRate,byRsvTime)); //RSPINF_a_24 s_vCaculateOFDMRParameter(RATE_24M, ePHYType, &byTxRate, &byRsvTime); VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_24, MAKEWORD(byTxRate,byRsvTime)); //RSPINF_a_36 s_vCaculateOFDMRParameter( VNTWIFIbyGetACKTxRate(RATE_36M, pvSupportRateIEs, pvExtSupportRateIEs), ePHYType, &byTxRate, &byRsvTime); VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_36, MAKEWORD(byTxRate,byRsvTime)); //RSPINF_a_48 s_vCaculateOFDMRParameter( VNTWIFIbyGetACKTxRate(RATE_48M, pvSupportRateIEs, pvExtSupportRateIEs), ePHYType, &byTxRate, &byRsvTime); VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_48, MAKEWORD(byTxRate,byRsvTime)); //RSPINF_a_54 s_vCaculateOFDMRParameter( VNTWIFIbyGetACKTxRate(RATE_54M, pvSupportRateIEs, pvExtSupportRateIEs), ePHYType, &byTxRate, &byRsvTime); VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_54, MAKEWORD(byTxRate,byRsvTime)); //RSPINF_a_72 VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_72, MAKEWORD(byTxRate,byRsvTime)); //Set to Page0 MACvSelectPage0(pDevice->PortOffset); } /*--------------------- Export Functions --------------------------*/ /* * Description: Card Send packet function * * Parameters: * In: * pDeviceHandler - The adapter to be set * pPacket - Packet buffer pointer * ePktType - Packet type * uLength - Packet length * Out: * none * * Return Value: true if succeeded; false if failed. 
* */ /* bool CARDbSendPacket (void *pDeviceHandler, void *pPacket, CARD_PKT_TYPE ePktType, unsigned int uLength) { PSDevice pDevice = (PSDevice) pDeviceHandler; if (ePktType == PKT_TYPE_802_11_MNG) { return TXbTD0Send(pDevice, pPacket, uLength); } else if (ePktType == PKT_TYPE_802_11_BCN) { return TXbBeaconSend(pDevice, pPacket, uLength); } if (ePktType == PKT_TYPE_802_11_DATA) { return TXbTD1Send(pDevice, pPacket, uLength); } return (true); } */ /* * Description: Get Card short preamble option value * * Parameters: * In: * pDevice - The adapter to be set * Out: * none * * Return Value: true if short preamble; otherwise false * */ bool CARDbIsShortPreamble (void *pDeviceHandler) { PSDevice pDevice = (PSDevice) pDeviceHandler; if (pDevice->byPreambleType == 0) { return(false); } return(true); } /* * Description: Get Card short slot time option value * * Parameters: * In: * pDevice - The adapter to be set * Out: * none * * Return Value: true if short slot time; otherwise false * */ bool CARDbIsShorSlotTime (void *pDeviceHandler) { PSDevice pDevice = (PSDevice) pDeviceHandler; return(pDevice->bShortSlotTime); } /* * Description: Update IFS * * Parameters: * In: * pDevice - The adapter to be set * Out: * none * * Return Value: None. 
* */ bool CARDbSetPhyParameter (void *pDeviceHandler, CARD_PHY_TYPE ePHYType, unsigned short wCapInfo, unsigned char byERPField, void *pvSupportRateIEs, void *pvExtSupportRateIEs) { PSDevice pDevice = (PSDevice) pDeviceHandler; unsigned char byCWMaxMin = 0; unsigned char bySlot = 0; unsigned char bySIFS = 0; unsigned char byDIFS = 0; unsigned char byData; // PWLAN_IE_SUPP_RATES pRates = NULL; PWLAN_IE_SUPP_RATES pSupportRates = (PWLAN_IE_SUPP_RATES) pvSupportRateIEs; PWLAN_IE_SUPP_RATES pExtSupportRates = (PWLAN_IE_SUPP_RATES) pvExtSupportRateIEs; //Set SIFS, DIFS, EIFS, SlotTime, CwMin if (ePHYType == PHY_TYPE_11A) { if (pSupportRates == NULL) { pSupportRates = (PWLAN_IE_SUPP_RATES) abyDefaultSuppRatesA; } if (pDevice->byRFType == RF_AIROHA7230) { // AL7230 use single PAPE and connect to PAPE_2.4G MACvSetBBType(pDevice->PortOffset, BB_TYPE_11G); pDevice->abyBBVGA[0] = 0x20; pDevice->abyBBVGA[2] = 0x10; pDevice->abyBBVGA[3] = 0x10; BBbReadEmbeded(pDevice->PortOffset, 0xE7, &byData); if (byData == 0x1C) { BBbWriteEmbeded(pDevice->PortOffset, 0xE7, pDevice->abyBBVGA[0]); } } else if (pDevice->byRFType == RF_UW2452) { MACvSetBBType(pDevice->PortOffset, BB_TYPE_11A); pDevice->abyBBVGA[0] = 0x18; BBbReadEmbeded(pDevice->PortOffset, 0xE7, &byData); if (byData == 0x14) { BBbWriteEmbeded(pDevice->PortOffset, 0xE7, pDevice->abyBBVGA[0]); BBbWriteEmbeded(pDevice->PortOffset, 0xE1, 0x57); } } else { MACvSetBBType(pDevice->PortOffset, BB_TYPE_11A); } BBbWriteEmbeded(pDevice->PortOffset, 0x88, 0x03); bySlot = C_SLOT_SHORT; bySIFS = C_SIFS_A; byDIFS = C_SIFS_A + 2*C_SLOT_SHORT; byCWMaxMin = 0xA4; } else if (ePHYType == PHY_TYPE_11B) { if (pSupportRates == NULL) { pSupportRates = (PWLAN_IE_SUPP_RATES) abyDefaultSuppRatesB; } MACvSetBBType(pDevice->PortOffset, BB_TYPE_11B); if (pDevice->byRFType == RF_AIROHA7230) { pDevice->abyBBVGA[0] = 0x1C; pDevice->abyBBVGA[2] = 0x00; pDevice->abyBBVGA[3] = 0x00; BBbReadEmbeded(pDevice->PortOffset, 0xE7, &byData); if (byData == 0x20) { 
BBbWriteEmbeded(pDevice->PortOffset, 0xE7, pDevice->abyBBVGA[0]); } } else if (pDevice->byRFType == RF_UW2452) { pDevice->abyBBVGA[0] = 0x14; BBbReadEmbeded(pDevice->PortOffset, 0xE7, &byData); if (byData == 0x18) { BBbWriteEmbeded(pDevice->PortOffset, 0xE7, pDevice->abyBBVGA[0]); BBbWriteEmbeded(pDevice->PortOffset, 0xE1, 0xD3); } } BBbWriteEmbeded(pDevice->PortOffset, 0x88, 0x02); bySlot = C_SLOT_LONG; bySIFS = C_SIFS_BG; byDIFS = C_SIFS_BG + 2*C_SLOT_LONG; byCWMaxMin = 0xA5; } else {// PK_TYPE_11GA & PK_TYPE_11GB if (pSupportRates == NULL) { pSupportRates = (PWLAN_IE_SUPP_RATES) abyDefaultSuppRatesG; pExtSupportRates = (PWLAN_IE_SUPP_RATES) abyDefaultExtSuppRatesG; } MACvSetBBType(pDevice->PortOffset, BB_TYPE_11G); if (pDevice->byRFType == RF_AIROHA7230) { pDevice->abyBBVGA[0] = 0x1C; pDevice->abyBBVGA[2] = 0x00; pDevice->abyBBVGA[3] = 0x00; BBbReadEmbeded(pDevice->PortOffset, 0xE7, &byData); if (byData == 0x20) { BBbWriteEmbeded(pDevice->PortOffset, 0xE7, pDevice->abyBBVGA[0]); } } else if (pDevice->byRFType == RF_UW2452) { pDevice->abyBBVGA[0] = 0x14; BBbReadEmbeded(pDevice->PortOffset, 0xE7, &byData); if (byData == 0x18) { BBbWriteEmbeded(pDevice->PortOffset, 0xE7, pDevice->abyBBVGA[0]); BBbWriteEmbeded(pDevice->PortOffset, 0xE1, 0xD3); } } BBbWriteEmbeded(pDevice->PortOffset, 0x88, 0x08); bySIFS = C_SIFS_BG; if(VNTWIFIbIsShortSlotTime(wCapInfo)) { bySlot = C_SLOT_SHORT; byDIFS = C_SIFS_BG + 2*C_SLOT_SHORT; } else { bySlot = C_SLOT_LONG; byDIFS = C_SIFS_BG + 2*C_SLOT_LONG; } if (VNTWIFIbyGetMaxSupportRate(pSupportRates, pExtSupportRates) > RATE_11M) { byCWMaxMin = 0xA4; } else { byCWMaxMin = 0xA5; } if (pDevice->bProtectMode != VNTWIFIbIsProtectMode(byERPField)) { pDevice->bProtectMode = VNTWIFIbIsProtectMode(byERPField); if (pDevice->bProtectMode) { MACvEnableProtectMD(pDevice->PortOffset); } else { MACvDisableProtectMD(pDevice->PortOffset); } } if (pDevice->bBarkerPreambleMd != VNTWIFIbIsBarkerMode(byERPField)) { pDevice->bBarkerPreambleMd = 
VNTWIFIbIsBarkerMode(byERPField); if (pDevice->bBarkerPreambleMd) { MACvEnableBarkerPreambleMd(pDevice->PortOffset); } else { MACvDisableBarkerPreambleMd(pDevice->PortOffset); } } } if (pDevice->byRFType == RF_RFMD2959) { // bcs TX_PE will reserve 3 us // hardware's processing time here is 2 us. bySIFS -= 3; byDIFS -= 3; //{{ RobertYu: 20041202 //// TX_PE will reserve 3 us for MAX2829 A mode only, it is for better TX throughput //// MAC will need 2 us to process, so the SIFS, DIFS can be shorter by 2 us. } if (pDevice->bySIFS != bySIFS) { pDevice->bySIFS = bySIFS; VNSvOutPortB(pDevice->PortOffset + MAC_REG_SIFS, pDevice->bySIFS); } if (pDevice->byDIFS != byDIFS) { pDevice->byDIFS = byDIFS; VNSvOutPortB(pDevice->PortOffset + MAC_REG_DIFS, pDevice->byDIFS); } if (pDevice->byEIFS != C_EIFS) { pDevice->byEIFS = C_EIFS; VNSvOutPortB(pDevice->PortOffset + MAC_REG_EIFS, pDevice->byEIFS); } if (pDevice->bySlot != bySlot) { pDevice->bySlot = bySlot; VNSvOutPortB(pDevice->PortOffset + MAC_REG_SLOT, pDevice->bySlot); if (pDevice->bySlot == C_SLOT_SHORT) { pDevice->bShortSlotTime = true; } else { pDevice->bShortSlotTime = false; } BBvSetShortSlotTime(pDevice); } if (pDevice->byCWMaxMin != byCWMaxMin) { pDevice->byCWMaxMin = byCWMaxMin; VNSvOutPortB(pDevice->PortOffset + MAC_REG_CWMAXMIN0, pDevice->byCWMaxMin); } if (VNTWIFIbIsShortPreamble(wCapInfo)) { pDevice->byPreambleType = pDevice->byShortPreamble; } else { pDevice->byPreambleType = 0; } s_vSetRSPINF(pDevice, ePHYType, pSupportRates, pExtSupportRates); pDevice->eCurrentPHYType = ePHYType; // set for NDIS OID_802_11SUPPORTED_RATES return (true); } /* * Description: Sync. TSF counter to BSS * Get TSF offset and write to HW * * Parameters: * In: * pDevice - The adapter to be sync. 
* byRxRate - data rate of receive beacon * qwBSSTimestamp - Rx BCN's TSF * qwLocalTSF - Local TSF * Out: * none * * Return Value: none * */ bool CARDbUpdateTSF (void *pDeviceHandler, unsigned char byRxRate, QWORD qwBSSTimestamp, QWORD qwLocalTSF) { PSDevice pDevice = (PSDevice) pDeviceHandler; QWORD qwTSFOffset; HIDWORD(qwTSFOffset) = 0; LODWORD(qwTSFOffset) = 0; if ((HIDWORD(qwBSSTimestamp) != HIDWORD(qwLocalTSF)) || (LODWORD(qwBSSTimestamp) != LODWORD(qwLocalTSF))) { qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp, qwLocalTSF); // adjust TSF // HW's TSF add TSF Offset reg VNSvOutPortD(pDevice->PortOffset + MAC_REG_TSFOFST, LODWORD(qwTSFOffset)); VNSvOutPortD(pDevice->PortOffset + MAC_REG_TSFOFST + 4, HIDWORD(qwTSFOffset)); MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TFTCTL, TFTCTL_TSFSYNCEN); } return(true); } /* * Description: Set NIC TSF counter for first Beacon time * Get NEXTTBTT from adjusted TSF and Beacon Interval * * Parameters: * In: * pDevice - The adapter to be set. 
* wBeaconInterval - Beacon Interval * Out: * none * * Return Value: true if succeed; otherwise false * */ bool CARDbSetBeaconPeriod (void *pDeviceHandler, unsigned short wBeaconInterval) { PSDevice pDevice = (PSDevice) pDeviceHandler; unsigned int uBeaconInterval = 0; unsigned int uLowNextTBTT = 0; unsigned int uHighRemain = 0; unsigned int uLowRemain = 0; QWORD qwNextTBTT; HIDWORD(qwNextTBTT) = 0; LODWORD(qwNextTBTT) = 0; CARDbGetCurrentTSF(pDevice->PortOffset, &qwNextTBTT); //Get Local TSF counter uBeaconInterval = wBeaconInterval * 1024; // Next TBTT = ((local_current_TSF / beacon_interval) + 1 ) * beacon_interval uLowNextTBTT = (LODWORD(qwNextTBTT) >> 10) << 10; uLowRemain = (uLowNextTBTT) % uBeaconInterval; // high dword (mod) bcn uHighRemain = (((0xffffffff % uBeaconInterval) + 1) * HIDWORD(qwNextTBTT)) % uBeaconInterval; uLowRemain = (uHighRemain + uLowRemain) % uBeaconInterval; uLowRemain = uBeaconInterval - uLowRemain; // check if carry when add one beacon interval if ((~uLowNextTBTT) < uLowRemain) { HIDWORD(qwNextTBTT) ++ ; } LODWORD(qwNextTBTT) = uLowNextTBTT + uLowRemain; // set HW beacon interval VNSvOutPortW(pDevice->PortOffset + MAC_REG_BI, wBeaconInterval); pDevice->wBeaconInterval = wBeaconInterval; // Set NextTBTT VNSvOutPortD(pDevice->PortOffset + MAC_REG_NEXTTBTT, LODWORD(qwNextTBTT)); VNSvOutPortD(pDevice->PortOffset + MAC_REG_NEXTTBTT + 4, HIDWORD(qwNextTBTT)); MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN); return(true); } /* * Description: Card Stop Hardware Tx * * Parameters: * In: * pDeviceHandler - The adapter to be set * ePktType - Packet type to stop * Out: * none * * Return Value: true if all data packet complete; otherwise false. 
* */ bool CARDbStopTxPacket (void *pDeviceHandler, CARD_PKT_TYPE ePktType) { PSDevice pDevice = (PSDevice) pDeviceHandler; if (ePktType == PKT_TYPE_802_11_ALL) { pDevice->bStopBeacon = true; pDevice->bStopTx0Pkt = true; pDevice->bStopDataPkt = true; } else if (ePktType == PKT_TYPE_802_11_BCN) { pDevice->bStopBeacon = true; } else if (ePktType == PKT_TYPE_802_11_MNG) { pDevice->bStopTx0Pkt = true; } else if (ePktType == PKT_TYPE_802_11_DATA) { pDevice->bStopDataPkt = true; } if (pDevice->bStopBeacon == true) { if (pDevice->bIsBeaconBufReadySet == true) { if (pDevice->cbBeaconBufReadySetCnt < WAIT_BEACON_TX_DOWN_TMO) { pDevice->cbBeaconBufReadySetCnt ++; return(false); } } pDevice->bIsBeaconBufReadySet = false; pDevice->cbBeaconBufReadySetCnt = 0; MACvRegBitsOff(pDevice->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX); } // wait all TD0 complete if (pDevice->bStopTx0Pkt == true) { if (pDevice->iTDUsed[TYPE_TXDMA0] != 0){ return(false); } } // wait all Data TD complete if (pDevice->bStopDataPkt == true) { if (pDevice->iTDUsed[TYPE_AC0DMA] != 0){ return(false); } } return(true); } /* * Description: Card Start Hardware Tx * * Parameters: * In: * pDeviceHandler - The adapter to be set * ePktType - Packet type to start * Out: * none * * Return Value: true if success; false if failed. 
* */ bool CARDbStartTxPacket (void *pDeviceHandler, CARD_PKT_TYPE ePktType) { PSDevice pDevice = (PSDevice) pDeviceHandler; if (ePktType == PKT_TYPE_802_11_ALL) { pDevice->bStopBeacon = false; pDevice->bStopTx0Pkt = false; pDevice->bStopDataPkt = false; } else if (ePktType == PKT_TYPE_802_11_BCN) { pDevice->bStopBeacon = false; } else if (ePktType == PKT_TYPE_802_11_MNG) { pDevice->bStopTx0Pkt = false; } else if (ePktType == PKT_TYPE_802_11_DATA) { pDevice->bStopDataPkt = false; } if ((pDevice->bStopBeacon == false) && (pDevice->bBeaconBufReady == true) && (pDevice->eOPMode == OP_MODE_ADHOC)) { MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX); } return(true); } /* * Description: Card Set BSSID value * * Parameters: * In: * pDeviceHandler - The adapter to be set * pbyBSSID - pointer to BSSID field * bAdhoc - flag to indicate IBSS * Out: * none * * Return Value: true if success; false if failed. * */ bool CARDbSetBSSID(void *pDeviceHandler, unsigned char *pbyBSSID, CARD_OP_MODE eOPMode) { PSDevice pDevice = (PSDevice) pDeviceHandler; MACvWriteBSSIDAddress(pDevice->PortOffset, pbyBSSID); memcpy(pDevice->abyBSSID, pbyBSSID, WLAN_BSSID_LEN); if (eOPMode == OP_MODE_ADHOC) { MACvRegBitsOn(pDevice->PortOffset, MAC_REG_HOSTCR, HOSTCR_ADHOC); } else { MACvRegBitsOff(pDevice->PortOffset, MAC_REG_HOSTCR, HOSTCR_ADHOC); } if (eOPMode == OP_MODE_AP) { MACvRegBitsOn(pDevice->PortOffset, MAC_REG_HOSTCR, HOSTCR_AP); } else { MACvRegBitsOff(pDevice->PortOffset, MAC_REG_HOSTCR, HOSTCR_AP); } if (eOPMode == OP_MODE_UNKNOWN) { MACvRegBitsOff(pDevice->PortOffset, MAC_REG_RCR, RCR_BSSID); pDevice->bBSSIDFilter = false; pDevice->byRxMode &= ~RCR_BSSID; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wcmd: rx_mode = %x\n", pDevice->byRxMode ); } else { if (is_zero_ether_addr(pDevice->abyBSSID) == false) { MACvRegBitsOn(pDevice->PortOffset, MAC_REG_RCR, RCR_BSSID); pDevice->bBSSIDFilter = true; pDevice->byRxMode |= RCR_BSSID; } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wmgr: rx_mode = %x\n", 
pDevice->byRxMode ); } // Adopt BSS state in Adapter Device Object pDevice->eOPMode = eOPMode; return(true); } /* * Description: Card indicate status * * Parameters: * In: * pDeviceHandler - The adapter to be set * eStatus - Status * Out: * none * * Return Value: true if success; false if failed. * */ /* * Description: Save Assoc info. contain in assoc. response frame * * Parameters: * In: * pDevice - The adapter to be set * wCapabilityInfo - Capability information * wStatus - Status code * wAID - Assoc. ID * uLen - Length of IEs * pbyIEs - pointer to IEs * Out: * none * * Return Value: true if succeed; otherwise false * */ bool CARDbSetTxDataRate( void *pDeviceHandler, unsigned short wDataRate ) { PSDevice pDevice = (PSDevice) pDeviceHandler; pDevice->wCurrentRate = wDataRate; return(true); } /*+ * * Routine Description: * Consider to power down when no more packets to tx or rx. * * Parameters: * In: * pDevice - The adapter to be set * Out: * none * * Return Value: true if power down success; otherwise false * -*/ bool CARDbPowerDown( void *pDeviceHandler ) { PSDevice pDevice = (PSDevice)pDeviceHandler; unsigned int uIdx; // check if already in Doze mode if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) return true; // Froce PSEN on MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN); // check if all TD are empty, for (uIdx = 0; uIdx < TYPE_MAXTD; uIdx ++) { if (pDevice->iTDUsed[uIdx] != 0) return false; } MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_GO2DOZE); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Go to Doze ZZZZZZZZZZZZZZZ\n"); return true; } /* * Description: Turn off Radio power * * Parameters: * In: * pDevice - The adapter to be turned off * Out: * none * * Return Value: true if success; otherwise false * */ bool CARDbRadioPowerOff (void *pDeviceHandler) { PSDevice pDevice = (PSDevice) pDeviceHandler; bool bResult = true; if (pDevice->bRadioOff == true) return true; switch (pDevice->byRFType) { case RF_RFMD2959: 
MACvWordRegBitsOff(pDevice->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_TXPEINV); MACvWordRegBitsOn(pDevice->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE1); break; case RF_AIROHA: case RF_AL2230S: case RF_AIROHA7230: //RobertYu:20050104 MACvWordRegBitsOff(pDevice->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE2); MACvWordRegBitsOff(pDevice->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); break; } MACvRegBitsOff(pDevice->PortOffset, MAC_REG_HOSTCR, HOSTCR_RXON); BBvSetDeepSleep(pDevice->PortOffset, pDevice->byLocalID); pDevice->bRadioOff = true; //2007-0409-03,<Add> by chester printk("chester power off\n"); MACvRegBitsOn(pDevice->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); //LED issue return bResult; } /* * Description: Turn on Radio power * * Parameters: * In: * pDevice - The adapter to be turned on * Out: * none * * Return Value: true if success; otherwise false * */ bool CARDbRadioPowerOn (void *pDeviceHandler) { PSDevice pDevice = (PSDevice) pDeviceHandler; bool bResult = true; printk("chester power on\n"); if (pDevice->bRadioControlOff == true){ if (pDevice->bHWRadioOff == true) printk("chester bHWRadioOff\n"); if (pDevice->bRadioControlOff == true) printk("chester bRadioControlOff\n"); return false;} if (pDevice->bRadioOff == false) { printk("chester pbRadioOff\n"); return true;} BBvExitDeepSleep(pDevice->PortOffset, pDevice->byLocalID); MACvRegBitsOn(pDevice->PortOffset, MAC_REG_HOSTCR, HOSTCR_RXON); switch (pDevice->byRFType) { case RF_RFMD2959: MACvWordRegBitsOn(pDevice->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_TXPEINV); MACvWordRegBitsOff(pDevice->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE1); break; case RF_AIROHA: case RF_AL2230S: case RF_AIROHA7230: //RobertYu:20050104 MACvWordRegBitsOn(pDevice->PortOffset, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPE3)); break; } pDevice->bRadioOff = false; // 2007-0409-03,<Add> by chester printk("chester power on\n"); MACvRegBitsOff(pDevice->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); //LED issue 
return bResult; } bool CARDbRemoveKey (void *pDeviceHandler, unsigned char *pbyBSSID) { PSDevice pDevice = (PSDevice) pDeviceHandler; KeybRemoveAllKey(&(pDevice->sKey), pbyBSSID, pDevice->PortOffset); return (true); } /* * * Description: * Add BSSID in PMKID Candidate list. * * Parameters: * In: * hDeviceContext - device structure point * pbyBSSID - BSSID address for adding * wRSNCap - BSS's RSN capability * Out: * none * * Return Value: none. * -*/ bool CARDbAdd_PMKID_Candidate ( void *pDeviceHandler, unsigned char *pbyBSSID, bool bRSNCapExist, unsigned short wRSNCap ) { PSDevice pDevice = (PSDevice) pDeviceHandler; PPMKID_CANDIDATE pCandidateList; unsigned int ii = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"bAdd_PMKID_Candidate START: (%d)\n", (int)pDevice->gsPMKIDCandidate.NumCandidates); if (pDevice->gsPMKIDCandidate.NumCandidates >= MAX_PMKIDLIST) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFlush_PMKID_Candidate: 3\n"); memset(&pDevice->gsPMKIDCandidate, 0, sizeof(SPMKIDCandidateEvent)); } for (ii = 0; ii < 6; ii++) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02X ", *(pbyBSSID + ii)); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); // Update Old Candidate for (ii = 0; ii < pDevice->gsPMKIDCandidate.NumCandidates; ii++) { pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[ii]; if ( !memcmp(pCandidateList->BSSID, pbyBSSID, ETH_ALEN)) { if ((bRSNCapExist == true) && (wRSNCap & BIT0)) { pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED; } else { pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED); } return true; } } // New Candidate pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[pDevice->gsPMKIDCandidate.NumCandidates]; if ((bRSNCapExist == true) && (wRSNCap & BIT0)) { pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED; } else { pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED); } memcpy(pCandidateList->BSSID, pbyBSSID, ETH_ALEN); pDevice->gsPMKIDCandidate.NumCandidates++; 
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"NumCandidates:%d\n", (int)pDevice->gsPMKIDCandidate.NumCandidates); return true; } void * CARDpGetCurrentAddress ( void *pDeviceHandler ) { PSDevice pDevice = (PSDevice) pDeviceHandler; return (pDevice->abyCurrentNetAddr); } /* * * Description: * Start Spectrum Measure defined in 802.11h * * Parameters: * In: * hDeviceContext - device structure point * Out: * none * * Return Value: none. * -*/ bool CARDbStartMeasure ( void *pDeviceHandler, void *pvMeasureEIDs, unsigned int uNumOfMeasureEIDs ) { PSDevice pDevice = (PSDevice) pDeviceHandler; PWLAN_IE_MEASURE_REQ pEID = (PWLAN_IE_MEASURE_REQ) pvMeasureEIDs; QWORD qwCurrTSF; QWORD qwStartTSF; bool bExpired = true; unsigned short wDuration = 0; if ((pEID == NULL) || (uNumOfMeasureEIDs == 0)) { return (true); } CARDbGetCurrentTSF(pDevice->PortOffset, &qwCurrTSF); if (pDevice->bMeasureInProgress == true) { pDevice->bMeasureInProgress = false; VNSvOutPortB(pDevice->PortOffset + MAC_REG_RCR, pDevice->byOrgRCR); MACvSelectPage1(pDevice->PortOffset); VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, pDevice->dwOrgMAR0); VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR4, pDevice->dwOrgMAR4); // clear measure control MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_EN); MACvSelectPage0(pDevice->PortOffset); set_channel(pDevice, pDevice->byOrgChannel); MACvSelectPage1(pDevice->PortOffset); MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE); MACvSelectPage0(pDevice->PortOffset); } pDevice->uNumOfMeasureEIDs = uNumOfMeasureEIDs; do { pDevice->pCurrMeasureEID = pEID; pEID++; pDevice->uNumOfMeasureEIDs--; if (pDevice->byLocalID > REV_ID_VT3253_B1) { HIDWORD(qwStartTSF) = HIDWORD(*((PQWORD) (pDevice->pCurrMeasureEID->sReq.abyStartTime))); LODWORD(qwStartTSF) = LODWORD(*((PQWORD) (pDevice->pCurrMeasureEID->sReq.abyStartTime))); wDuration = *((unsigned short *) (pDevice->pCurrMeasureEID->sReq.abyDuration)); wDuration += 1; // 1 TU for channel switching if 
((LODWORD(qwStartTSF) == 0) && (HIDWORD(qwStartTSF) == 0)) { // start immediately by setting start TSF == current TSF + 2 TU LODWORD(qwStartTSF) = LODWORD(qwCurrTSF) + 2048; HIDWORD(qwStartTSF) = HIDWORD(qwCurrTSF); if (LODWORD(qwCurrTSF) > LODWORD(qwStartTSF)) { HIDWORD(qwStartTSF)++; } bExpired = false; break; } else { // start at setting start TSF - 1TU(for channel switching) if (LODWORD(qwStartTSF) < 1024) { HIDWORD(qwStartTSF)--; } LODWORD(qwStartTSF) -= 1024; } if ((HIDWORD(qwCurrTSF) < HIDWORD(qwStartTSF)) || ((HIDWORD(qwCurrTSF) == HIDWORD(qwStartTSF)) && (LODWORD(qwCurrTSF) < LODWORD(qwStartTSF))) ) { bExpired = false; break; } VNTWIFIbMeasureReport( pDevice->pMgmt, false, pDevice->pCurrMeasureEID, MEASURE_MODE_LATE, pDevice->byBasicMap, pDevice->byCCAFraction, pDevice->abyRPIs ); } else { // hardware do not support measure VNTWIFIbMeasureReport( pDevice->pMgmt, false, pDevice->pCurrMeasureEID, MEASURE_MODE_INCAPABLE, pDevice->byBasicMap, pDevice->byCCAFraction, pDevice->abyRPIs ); } } while (pDevice->uNumOfMeasureEIDs != 0); if (bExpired == false) { MACvSelectPage1(pDevice->PortOffset); VNSvOutPortD(pDevice->PortOffset + MAC_REG_MSRSTART, LODWORD(qwStartTSF)); VNSvOutPortD(pDevice->PortOffset + MAC_REG_MSRSTART + 4, HIDWORD(qwStartTSF)); VNSvOutPortW(pDevice->PortOffset + MAC_REG_MSRDURATION, wDuration); MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_EN); MACvSelectPage0(pDevice->PortOffset); } else { // all measure start time expired we should complete action VNTWIFIbMeasureReport( pDevice->pMgmt, true, NULL, 0, pDevice->byBasicMap, pDevice->byCCAFraction, pDevice->abyRPIs ); } return (true); } /* * * Description: * Do Channel Switch defined in 802.11h * * Parameters: * In: * hDeviceContext - device structure point * Out: * none * * Return Value: none. 
* -*/
bool
CARDbChannelSwitch (
	void *pDeviceHandler,
	unsigned char byMode,
	unsigned char byNewChannel,
	unsigned char byCount
)
{
	PSDevice	pDevice = (PSDevice) pDeviceHandler;
	bool		bResult = true;

	if (byCount == 0) {
		/* count == 0: switch immediately, tell the MLME layer, and
		 * pause TX (MSRCTL1_TXPAUSE) until traffic is resumed */
		bResult = set_channel(pDevice, byNewChannel);
		VNTWIFIbChannelSwitch(pDevice->pMgmt, byNewChannel);
		MACvSelectPage1(pDevice->PortOffset);
		MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
		MACvSelectPage0(pDevice->PortOffset);
		return (bResult);
	}
	/* deferred switch: remember target; the beacon/TBTT path counts down */
	pDevice->byChannelSwitchCount = byCount;
	pDevice->byNewChannel = byNewChannel;
	pDevice->bChannelSwitch = true;
	if (byMode == 1) {
		/* mode 1 = "no transmissions until switch" per 802.11h CSA */
		bResult = CARDbStopTxPacket(pDevice, PKT_TYPE_802_11_ALL);
	}
	return (bResult);
}

/*
 *
 * Description:
 *      Handle Quiet EID defined in 802.11h; queues one quiet interval
 *      into pDevice->sQuiet[] (ring of MAX_QUIET_COUNT entries).
 *
 * Parameters:
 *  In:
 *      hDeviceContext - device structure point
 *  Out:
 *      none
 *
 * Return Value: none.
 *
 * -*/
bool
CARDbSetQuiet (
	void *pDeviceHandler,
	bool bResetQuiet,
	unsigned char byQuietCount,
	unsigned char byQuietPeriod,
	unsigned short wQuietDuration,
	unsigned short wQuietOffset
)
{
	PSDevice	pDevice = (PSDevice) pDeviceHandler;
	unsigned int	ii = 0;

	if (bResetQuiet == true) {
		/* drop all pending quiet intervals and disable quiet mode */
		MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, (MSRCTL_QUIETTXCHK | MSRCTL_QUIETEN));
		for (ii = 0; ii < MAX_QUIET_COUNT; ii++) {
			pDevice->sQuiet[ii].bEnable = false;
		}
		pDevice->uQuietEnqueue = 0;
		pDevice->bEnableFirstQuiet = false;
		pDevice->bQuietEnable = false;
		pDevice->byQuietStartCount = byQuietCount;
	}
	if (pDevice->sQuiet[pDevice->uQuietEnqueue].bEnable == false) {
		/* enqueue: start time is expressed in TU relative to now:
		 * byQuietCount beacon intervals plus wQuietOffset */
		pDevice->sQuiet[pDevice->uQuietEnqueue].bEnable = true;
		pDevice->sQuiet[pDevice->uQuietEnqueue].byPeriod = byQuietPeriod;
		pDevice->sQuiet[pDevice->uQuietEnqueue].wDuration = wQuietDuration;
		pDevice->sQuiet[pDevice->uQuietEnqueue].dwStartTime = (unsigned long) byQuietCount;
		pDevice->sQuiet[pDevice->uQuietEnqueue].dwStartTime *= pDevice->wBeaconInterval;
		pDevice->sQuiet[pDevice->uQuietEnqueue].dwStartTime += wQuietOffset;
		pDevice->uQuietEnqueue++;
		pDevice->uQuietEnqueue %= MAX_QUIET_COUNT;
		if (pDevice->byQuietStartCount < byQuietCount) {
			pDevice->byQuietStartCount = byQuietCount;
		}
	} else {
		/* ring full: silently drop — we can not handle more Quiet EIDs */
	}
	return (true);
}

/*
 *
 * Description:
 *      Do Quiet, It will be called by either ISR (after start)
 *      or VNTWIFI (before start) so do not need SPINLOCK.
 *      Picks the earliest pending quiet interval and programs the
 *      MAC quiet registers for it.
 *
 * Parameters:
 *  In:
 *      hDeviceContext - device structure point
 *  Out:
 *      none
 *
 * Return Value: false when the chosen interval already expired (caller
 *               should invoke this function again), true otherwise.
 *
 * -*/
bool
CARDbStartQuiet (
	void *pDeviceHandler
)
{
	PSDevice	pDevice = (PSDevice) pDeviceHandler;
	unsigned int	ii = 0;
	unsigned long	dwStartTime = 0xFFFFFFFF;
	unsigned int	uCurrentQuietIndex = 0;
	unsigned long	dwNextTime = 0;
	unsigned long	dwGap = 0;
	unsigned long	dwDuration = 0;

	/* find the enabled quiet element with the earliest start time */
	for (ii = 0; ii < MAX_QUIET_COUNT; ii++) {
		if ((pDevice->sQuiet[ii].bEnable == true) &&
		    (dwStartTime > pDevice->sQuiet[ii].dwStartTime)) {
			dwStartTime = pDevice->sQuiet[ii].dwStartTime;
			uCurrentQuietIndex = ii;
		}
	}
	if (dwStartTime == 0xFFFFFFFF) {
		// no more quiet
		pDevice->bQuietEnable = false;
		MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, (MSRCTL_QUIETTXCHK | MSRCTL_QUIETEN));
	} else {
		if (pDevice->bQuietEnable == false) {
			// first quiet
			pDevice->byQuietStartCount--;
			dwNextTime = pDevice->sQuiet[uCurrentQuietIndex].dwStartTime;
			dwNextTime %= pDevice->wBeaconInterval;
			MACvSelectPage1(pDevice->PortOffset);
			VNSvOutPortW(pDevice->PortOffset + MAC_REG_QUIETINIT, (unsigned short) dwNextTime);
			VNSvOutPortW(pDevice->PortOffset + MAC_REG_QUIETDUR, (unsigned short) pDevice->sQuiet[uCurrentQuietIndex].wDuration);
			if (pDevice->byQuietStartCount == 0) {
				pDevice->bEnableFirstQuiet = false;
				MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, (MSRCTL_QUIETTXCHK | MSRCTL_QUIETEN));
			} else {
				pDevice->bEnableFirstQuiet = true;
			}
			MACvSelectPage0(pDevice->PortOffset);
		} else {
			if (pDevice->dwCurrentQuietEndTime > pDevice->sQuiet[uCurrentQuietIndex].dwStartTime) {
				// overlap with previous Quiet
				dwGap = pDevice->dwCurrentQuietEndTime - pDevice->sQuiet[uCurrentQuietIndex].dwStartTime;
				if (dwGap >= pDevice->sQuiet[uCurrentQuietIndex].wDuration) {
					// return false to indicate next quiet expired, should call this function again
					return (false);
				}
				dwDuration = pDevice->sQuiet[uCurrentQuietIndex].wDuration - dwGap;
				dwGap = 0;
			} else {
				dwGap = pDevice->sQuiet[uCurrentQuietIndex].dwStartTime - pDevice->dwCurrentQuietEndTime;
				dwDuration = pDevice->sQuiet[uCurrentQuietIndex].wDuration;
			}
			// set GAP and Next duration
			MACvSelectPage1(pDevice->PortOffset);
			VNSvOutPortW(pDevice->PortOffset + MAC_REG_QUIETGAP, (unsigned short) dwGap);
			VNSvOutPortW(pDevice->PortOffset + MAC_REG_QUIETDUR, (unsigned short) dwDuration);
			MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_QUIETRPT);
			MACvSelectPage0(pDevice->PortOffset);
		}
		pDevice->bQuietEnable = true;
		pDevice->dwCurrentQuietEndTime = pDevice->sQuiet[uCurrentQuietIndex].dwStartTime;
		pDevice->dwCurrentQuietEndTime += pDevice->sQuiet[uCurrentQuietIndex].wDuration;
		if (pDevice->sQuiet[uCurrentQuietIndex].byPeriod == 0) {
			// not periodic: disable current quiet element
			pDevice->sQuiet[uCurrentQuietIndex].bEnable = false;
		} else {
			// set next period start time
			dwNextTime = (unsigned long) pDevice->sQuiet[uCurrentQuietIndex].byPeriod;
			dwNextTime *= pDevice->wBeaconInterval;
			pDevice->sQuiet[uCurrentQuietIndex].dwStartTime = dwNextTime;
		}
		if (pDevice->dwCurrentQuietEndTime > 0x80010000) {
			// decrement all times to avoid 32-bit wrap around
			for (ii = 0; ii < MAX_QUIET_COUNT; ii++) {
				if (pDevice->sQuiet[ii].bEnable == true) {
					pDevice->sQuiet[ii].dwStartTime -= 0x80000000;
				}
			}
			pDevice->dwCurrentQuietEndTime -= 0x80000000;
		}
	}
	return (true);
}

/*
 *
 * Description:
 *      Set Local Power Constraint
 *
 * Parameters:
 *  In:
 *      hDeviceContext - device structure point
 *  Out:
 *      none
 *
 * Return Value: none.
* -*/ void CARDvSetPowerConstraint ( void *pDeviceHandler, unsigned char byChannel, char byPower ) { PSDevice pDevice = (PSDevice) pDeviceHandler; if (byChannel > CB_MAX_CHANNEL_24G) { if (pDevice->bCountryInfo5G == true) { pDevice->abyLocalPwr[byChannel] = pDevice->abyRegPwr[byChannel] - byPower; } } else { if (pDevice->bCountryInfo24G == true) { pDevice->abyLocalPwr[byChannel] = pDevice->abyRegPwr[byChannel] - byPower; } } } /* * * Description: * Set Local Power Constraint * * Parameters: * In: * hDeviceContext - device structure point * Out: * none * * Return Value: none. * -*/ void CARDvGetPowerCapability ( void *pDeviceHandler, unsigned char *pbyMinPower, unsigned char *pbyMaxPower ) { PSDevice pDevice = (PSDevice) pDeviceHandler; unsigned char byDec = 0; *pbyMaxPower = pDevice->abyOFDMDefaultPwr[pDevice->byCurrentCh]; byDec = pDevice->abyOFDMPwrTbl[pDevice->byCurrentCh]; if (pDevice->byRFType == RF_UW2452) { byDec *= 3; byDec >>= 1; } else { byDec <<= 1; } *pbyMinPower = pDevice->abyOFDMDefaultPwr[pDevice->byCurrentCh] - byDec; } /* * * Description: * Get Current Tx Power * * Parameters: * In: * hDeviceContext - device structure point * Out: * none * * Return Value: none. 
* */
/* Return the current TX power in dBm. */
char
CARDbyGetTransmitPower (
	void *pDeviceHandler
)
{
	PSDevice	pDevice = (PSDevice) pDeviceHandler;

	return (pDevice->byCurPwrdBm);
}

//xxx
/* Reset both TX descriptor rings: give all descriptors back to the host
 * and reprogram the MAC's TX/beacon descriptor base addresses. */
void
CARDvSafeResetTx (
	void *pDeviceHandler
)
{
	PSDevice	pDevice = (PSDevice) pDeviceHandler;
	unsigned int	uu;
	PSTxDesc	pCurrTD;

	// initialize TD index
	pDevice->apTailTD[0] = pDevice->apCurrTD[0] = &(pDevice->apTD0Rings[0]);
	pDevice->apTailTD[1] = pDevice->apCurrTD[1] = &(pDevice->apTD1Rings[0]);

	for (uu = 0; uu < TYPE_MAXTD; uu ++)
		pDevice->iTDUsed[uu] = 0;

	for (uu = 0; uu < pDevice->sOpts.nTxDescs[0]; uu++) {
		pCurrTD = &(pDevice->apTD0Rings[uu]);
		pCurrTD->m_td0TD0.f1Owner = OWNED_BY_HOST;
		// init all Tx Packet pointer to NULL
	}
	for (uu = 0; uu < pDevice->sOpts.nTxDescs[1]; uu++) {
		pCurrTD = &(pDevice->apTD1Rings[uu]);
		pCurrTD->m_td0TD0.f1Owner = OWNED_BY_HOST;
		// init all Tx Packet pointer to NULL
	}

	// set MAC TD pointer
	MACvSetCurrTXDescAddr(TYPE_TXDMA0, pDevice->PortOffset, (pDevice->td0_pool_dma));
	MACvSetCurrTXDescAddr(TYPE_AC0DMA, pDevice->PortOffset, (pDevice->td1_pool_dma));
	// set MAC Beacon TX pointer
	MACvSetCurrBCNTxDescAddr(pDevice->PortOffset, (pDevice->tx_beacon_dma));
}

/*+
 *
 * Description:
 *      Reset Rx: hand every RX descriptor back to the NIC and reprogram
 *      the MAC's RX descriptor base addresses.
 *
 * Parameters:
 *  In:
 *      pDevice     - Pointer to the adapter
 *  Out:
 *      none
 *
 * Return Value: none
 *
 * -*/
void
CARDvSafeResetRx (
	void *pDeviceHandler
)
{
	PSDevice	pDevice = (PSDevice) pDeviceHandler;
	unsigned int	uu;
	PSRxDesc	pDesc;

	// initialize RD index
	pDevice->pCurrRD[0]=&(pDevice->aRD0Ring[0]);
	pDevice->pCurrRD[1]=&(pDevice->aRD1Ring[0]);

	// init state, all RD is chip's
	for (uu = 0; uu < pDevice->sOpts.nRxDescs0; uu++) {
		pDesc =&(pDevice->aRD0Ring[uu]);
		pDesc->m_rd0RD0.wResCount = (unsigned short)(pDevice->rx_buf_sz);
		pDesc->m_rd0RD0.f1Owner=OWNED_BY_NIC;
		pDesc->m_rd1RD1.wReqCount = (unsigned short)(pDevice->rx_buf_sz);
	}

	// init state, all RD is chip's
	for (uu = 0; uu < pDevice->sOpts.nRxDescs1; uu++) {
		pDesc =&(pDevice->aRD1Ring[uu]);
		pDesc->m_rd0RD0.wResCount = (unsigned short)(pDevice->rx_buf_sz);
		pDesc->m_rd0RD0.f1Owner=OWNED_BY_NIC;
		pDesc->m_rd1RD1.wReqCount = (unsigned short)(pDevice->rx_buf_sz);
	}

	pDevice->cbDFCB = CB_MAX_RX_FRAG;
	pDevice->cbFreeDFCB = pDevice->cbDFCB;

	// set perPkt mode
	MACvRx0PerPktMode(pDevice->PortOffset);
	MACvRx1PerPktMode(pDevice->PortOffset);
	// set MAC RD pointer
	MACvSetCurrRx0DescAddr(pDevice->PortOffset, pDevice->rd0_pool_dma);
	MACvSetCurrRx1DescAddr(pDevice->PortOffset, pDevice->rd1_pool_dma);
}

/*
 * Description: Get response Control frame rate in CCK mode.
 *              Walks down from wRateIdx to the highest CCK rate that is
 *              in the basic rate set; falls back to 1M.
 *
 * Parameters:
 *  In:
 *      pDevice             - The adapter to be set
 *      wRateIdx            - Receiving data rate
 *  Out:
 *      none
 *
 * Return Value: response Control frame rate
 *
 */
unsigned short CARDwGetCCKControlRate(void *pDeviceHandler, unsigned short wRateIdx)
{
	PSDevice	pDevice = (PSDevice) pDeviceHandler;
	unsigned int ui = (unsigned int) wRateIdx;

	while (ui > RATE_1M) {
		if (pDevice->wBasicRate & ((unsigned short)1 << ui)) {
			return (unsigned short)ui;
		}
		ui --;
	}
	return (unsigned short)RATE_1M;
}

/*
 * Description: Get response Control frame rate in OFDM mode.
 *              Walks down from wRateIdx to the highest OFDM rate in the
 *              basic rate set; without any OFDM basic rate, clamps to 24M.
 *
 * Parameters:
 *  In:
 *      pDevice             - The adapter to be set
 *      wRateIdx            - Receiving data rate
 *  Out:
 *      none
 *
 * Return Value: response Control frame rate
 *
 */
unsigned short CARDwGetOFDMControlRate (void *pDeviceHandler, unsigned short wRateIdx)
{
	PSDevice	pDevice = (PSDevice) pDeviceHandler;
	unsigned int ui = (unsigned int) wRateIdx;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BASIC RATE: %X\n", pDevice->wBasicRate);

	if (!CARDbIsOFDMinBasicRate((void *)pDevice)) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"CARDwGetOFDMControlRate:(NO OFDM) %d\n", wRateIdx);
		if (wRateIdx > RATE_24M)
			wRateIdx = RATE_24M;
		return wRateIdx;
	}
	while (ui > RATE_11M) {
		if (pDevice->wBasicRate & ((unsigned short)1 << ui)) {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"CARDwGetOFDMControlRate : %d\n", ui);
			return (unsigned short)ui;
		}
		ui --;
	}
	/* NOTE(review): log text says "6M" but the fallback returned here is
	 * RATE_24M — the message is stale, the return value is authoritative */
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"CARDwGetOFDMControlRate: 6M\n");
	return (unsigned short)RATE_24M;
}

/*
 * Description: Set RSPINF
 *
 * Parameters:
 *  In:
 *
pDevice             - The adapter to be set
 *  Out:
 *      none
 *
 * Return Value: None.
 *
 */
/* Program the MAC RSPINF (response info) registers for every CCK and OFDM
 * rate, using the current basic-rate set to pick response control rates. */
void CARDvSetRSPINF (void *pDeviceHandler, CARD_PHY_TYPE ePHYType)
{
	PSDevice	pDevice = (PSDevice) pDeviceHandler;
	unsigned char	byServ = 0x00, bySignal = 0x00; //For CCK
	unsigned short	wLen = 0x0000;
	unsigned char	byTxRate, byRsvTime;            //For OFDM

	//Set to Page1
	MACvSelectPage1(pDevice->PortOffset);

	//RSPINF_b_1
	BBvCaculateParameter(pDevice,
	                     14,
	                     CARDwGetCCKControlRate((void *)pDevice, RATE_1M),
	                     PK_TYPE_11B,
	                     &wLen,
	                     &byServ,
	                     &bySignal
	);
	VNSvOutPortD(pDevice->PortOffset + MAC_REG_RSPINF_B_1, MAKEDWORD(wLen,MAKEWORD(bySignal,byServ)));
	///RSPINF_b_2
	BBvCaculateParameter(pDevice,
	                     14,
	                     CARDwGetCCKControlRate((void *)pDevice, RATE_2M),
	                     PK_TYPE_11B,
	                     &wLen,
	                     &byServ,
	                     &bySignal
	);
	VNSvOutPortD(pDevice->PortOffset + MAC_REG_RSPINF_B_2, MAKEDWORD(wLen,MAKEWORD(bySignal,byServ)));
	//RSPINF_b_5
	BBvCaculateParameter(pDevice,
	                     14,
	                     CARDwGetCCKControlRate((void *)pDevice, RATE_5M),
	                     PK_TYPE_11B,
	                     &wLen,
	                     &byServ,
	                     &bySignal
	);
	VNSvOutPortD(pDevice->PortOffset + MAC_REG_RSPINF_B_5, MAKEDWORD(wLen,MAKEWORD(bySignal,byServ)));
	//RSPINF_b_11
	BBvCaculateParameter(pDevice,
	                     14,
	                     CARDwGetCCKControlRate((void *)pDevice, RATE_11M),
	                     PK_TYPE_11B,
	                     &wLen,
	                     &byServ,
	                     &bySignal
	);
	VNSvOutPortD(pDevice->PortOffset + MAC_REG_RSPINF_B_11, MAKEDWORD(wLen,MAKEWORD(bySignal,byServ)));
	/* mandatory OFDM rates 6/9/12/18/24M are programmed directly */
	//RSPINF_a_6
	s_vCaculateOFDMRParameter(RATE_6M,
	                          ePHYType,
	                          &byTxRate,
	                          &byRsvTime);
	VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_6, MAKEWORD(byTxRate,byRsvTime));
	//RSPINF_a_9
	s_vCaculateOFDMRParameter(RATE_9M,
	                          ePHYType,
	                          &byTxRate,
	                          &byRsvTime);
	VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_9, MAKEWORD(byTxRate,byRsvTime));
	//RSPINF_a_12
	s_vCaculateOFDMRParameter(RATE_12M,
	                          ePHYType,
	                          &byTxRate,
	                          &byRsvTime);
	VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_12, MAKEWORD(byTxRate,byRsvTime));
	//RSPINF_a_18
	s_vCaculateOFDMRParameter(RATE_18M,
	                          ePHYType,
	                          &byTxRate,
	                          &byRsvTime);
	VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_18, MAKEWORD(byTxRate,byRsvTime));
	//RSPINF_a_24
	s_vCaculateOFDMRParameter(RATE_24M,
	                          ePHYType,
	                          &byTxRate,
	                          &byRsvTime);
	VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_24, MAKEWORD(byTxRate,byRsvTime));
	/* optional rates go through CARDwGetOFDMControlRate() */
	//RSPINF_a_36
	s_vCaculateOFDMRParameter(CARDwGetOFDMControlRate((void *)pDevice, RATE_36M),
	                          ePHYType,
	                          &byTxRate,
	                          &byRsvTime);
	VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_36, MAKEWORD(byTxRate,byRsvTime));
	//RSPINF_a_48
	s_vCaculateOFDMRParameter(CARDwGetOFDMControlRate((void *)pDevice, RATE_48M),
	                          ePHYType,
	                          &byTxRate,
	                          &byRsvTime);
	VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_48, MAKEWORD(byTxRate,byRsvTime));
	//RSPINF_a_54
	s_vCaculateOFDMRParameter(CARDwGetOFDMControlRate((void *)pDevice, RATE_54M),
	                          ePHYType,
	                          &byTxRate,
	                          &byRsvTime);
	VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_54, MAKEWORD(byTxRate,byRsvTime));
	//RSPINF_a_72
	/* NOTE(review): deliberately reuses RATE_54M — no RATE_72M index
	 * exists in this driver; the 72M slot mirrors 54M — confirm */
	s_vCaculateOFDMRParameter(CARDwGetOFDMControlRate((void *)pDevice, RATE_54M),
	                          ePHYType,
	                          &byTxRate,
	                          &byRsvTime);
	VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_72, MAKEWORD(byTxRate,byRsvTime));
	//Set to Page0
	MACvSelectPage0(pDevice->PortOffset);
}

/*
 * Description: Update IFS
 *
 * Parameters:
 *  In:
 *      pDevice             - The adapter to be set
 *  Out:
 *      none
 *
 * Return Value: None.
* */
void vUpdateIFS (void *pDeviceHandler)
{
	//Set SIFS, DIFS, EIFS, SlotTime, CwMin
	PSDevice	pDevice = (PSDevice) pDeviceHandler;
	unsigned char	byMaxMin = 0;

	if (pDevice->byPacketType==PK_TYPE_11A) { //11a
		pDevice->uSlot = C_SLOT_SHORT;
		pDevice->uSIFS = C_SIFS_A;
		pDevice->uDIFS = C_SIFS_A + 2*C_SLOT_SHORT;
		pDevice->uCwMin = C_CWMIN_A;
		byMaxMin = 4;
	} else if (pDevice->byPacketType==PK_TYPE_11B) { //11b
		pDevice->uSlot = C_SLOT_LONG;
		pDevice->uSIFS = C_SIFS_BG;
		pDevice->uDIFS = C_SIFS_BG + 2*C_SLOT_LONG;
		pDevice->uCwMin = C_CWMIN_B;
		byMaxMin = 5;
	} else { // PK_TYPE_11GA & PK_TYPE_11GB
		pDevice->uSIFS = C_SIFS_BG;
		if (pDevice->bShortSlotTime) {
			pDevice->uSlot = C_SLOT_SHORT;
		} else {
			pDevice->uSlot = C_SLOT_LONG;
		}
		pDevice->uDIFS = C_SIFS_BG + 2*pDevice->uSlot;
		if (pDevice->wBasicRate & 0x0150) { //0000 0001 0101 0000: 24M,12M,6M in basic rate set
			pDevice->uCwMin = C_CWMIN_A;
			byMaxMin = 4;
		} else {
			pDevice->uCwMin = C_CWMIN_B;
			byMaxMin = 5;
		}
	}
	pDevice->uCwMax = C_CWMAX;
	pDevice->uEIFS = C_EIFS;
	if (pDevice->byRFType == RF_RFMD2959) {
		// bcs TX_PE will reserve 3 us
		VNSvOutPortB(pDevice->PortOffset + MAC_REG_SIFS, (unsigned char)(pDevice->uSIFS - 3));
		VNSvOutPortB(pDevice->PortOffset + MAC_REG_DIFS, (unsigned char)(pDevice->uDIFS - 3));
	} else {
		VNSvOutPortB(pDevice->PortOffset + MAC_REG_SIFS, (unsigned char)pDevice->uSIFS);
		VNSvOutPortB(pDevice->PortOffset + MAC_REG_DIFS, (unsigned char)pDevice->uDIFS);
	}
	VNSvOutPortB(pDevice->PortOffset + MAC_REG_EIFS, (unsigned char)pDevice->uEIFS);
	VNSvOutPortB(pDevice->PortOffset + MAC_REG_SLOT, (unsigned char)pDevice->uSlot);
	byMaxMin |= 0xA0; // 0xA0 = 1010 0000 (not "1010 1111" as the old comment claimed): CwMax index bits, C_CWMAX = 1023
	VNSvOutPortB(pDevice->PortOffset + MAC_REG_CWMAXMIN0, (unsigned char)byMaxMin);
}

/* Recompute the highest basic OFDM and CCK rates from wBasicRate. */
void CARDvUpdateBasicTopRate (void *pDeviceHandler)
{
	PSDevice	pDevice = (PSDevice) pDeviceHandler;
	unsigned char byTopOFDM = RATE_24M, byTopCCK = RATE_1M;
	unsigned char ii;

	//Determines the highest basic rate.
	for (ii = RATE_54M; ii >= RATE_6M; ii --) {
		if ( (pDevice->wBasicRate) & ((unsigned short)(1<<ii)) ) {
			byTopOFDM = ii;
			break;
		}
	}
	pDevice->byTopOFDMBasicRate = byTopOFDM;

	/* CCK scan counts down to RATE_1M; loop exits via the explicit
	 * RATE_1M check because ii is unsigned */
	for (ii = RATE_11M;; ii --) {
		if ( (pDevice->wBasicRate) & ((unsigned short)(1<<ii)) ) {
			byTopCCK = ii;
			break;
		}
		if (ii == RATE_1M)
			break;
	}
	pDevice->byTopCCKBasicRate = byTopCCK;
}

/*
 * Description: Set NIC Tx Basic Rate
 *
 * Parameters:
 *  In:
 *      pDevice             - The adapter to be set
 *      wBasicRate          - Basic Rate to be set
 *  Out:
 *      none
 *
 * Return Value: true if succeeded; false if failed.
 *
 */
bool CARDbAddBasicRate (void *pDeviceHandler, unsigned short wRateIdx)
{
	PSDevice	pDevice = (PSDevice) pDeviceHandler;
	unsigned short wRate = (unsigned short)(1<<wRateIdx);

	pDevice->wBasicRate |= wRate;

	//Determines the highest basic rate.
	CARDvUpdateBasicTopRate((void *)pDevice);

	return(true);
}

/* Return true when any OFDM rate (6M..54M) is in the basic rate set. */
bool CARDbIsOFDMinBasicRate (void *pDeviceHandler)
{
	PSDevice	pDevice = (PSDevice) pDeviceHandler;
	int ii;

	for (ii = RATE_54M; ii >= RATE_6M; ii --) {
		if ((pDevice->wBasicRate) & ((unsigned short)(1<<ii)))
			return true;
	}
	return false;
}

/* Derive the packet type from the BB type and the basic rate set. */
unsigned char CARDbyGetPktType (void *pDeviceHandler)
{
	PSDevice	pDevice = (PSDevice) pDeviceHandler;

	if (pDevice->byBBType == BB_TYPE_11A || pDevice->byBBType == BB_TYPE_11B) {
		return (unsigned char)pDevice->byBBType;
	} else if (CARDbIsOFDMinBasicRate((void *)pDevice)) {
		return PK_TYPE_11GA;
	} else {
		return PK_TYPE_11GB;
	}
}

/*
 * Description: Set NIC Loopback mode
 *
 * Parameters:
 *  In:
 *      pDevice             - The adapter to be set
 *      wLoopbackMode       - Loopback mode to be set
 *  Out:
 *      none
 *
 * Return Value: none
 *
 */
void CARDvSetLoopbackMode (unsigned long dwIoBase, unsigned short wLoopbackMode)
{
	switch(wLoopbackMode) {
	case CARD_LB_NONE:
	case CARD_LB_MAC:
	case CARD_LB_PHY:
		break;
	default:
		ASSERT(false);
		break;
	}
	// set MAC loopback
	MACvSetLoopbackMode(dwIoBase, LOBYTE(wLoopbackMode));
	// set Baseband loopback
}

/*
 * Description: Software Reset NIC
 *
 * Parameters:
 *  In:
 *      pDevice             - The adapter to be reset
* Out:
 *      none
 *
 * Return Value: none
 *
 */
bool CARDbSoftwareReset (void *pDeviceHandler)
{
	PSDevice	pDevice = (PSDevice) pDeviceHandler;

	// reset MAC
	if (!MACbSafeSoftwareReset(pDevice->PortOffset))
		return false;

	return true;
}

/*
 * Description: Caculate TSF offset of two TSF input
 *              Get TSF Offset from RxBCN's TSF and local TSF
 *
 * Parameters:
 *  In:
 *      pDevice         - The adapter to be sync.
 *      qwTSF1          - Rx BCN's TSF
 *      qwTSF2          - Local TSF
 *  Out:
 *      none
 *
 * Return Value: TSF Offset value
 *
 */
QWORD CARDqGetTSFOffset (unsigned char byRxRate, QWORD qwTSF1, QWORD qwTSF2)
{
	QWORD	qwTSFOffset;
	unsigned short wRxBcnTSFOffst= 0;

	HIDWORD(qwTSFOffset) = 0;
	LODWORD(qwTSFOffset) = 0;
	/* compensate the local TSF by the rate-dependent beacon RX delay
	 * before subtracting (64-bit math done on two 32-bit halves) */
	wRxBcnTSFOffst = cwRXBCNTSFOff[byRxRate%MAX_RATE];
	(qwTSF2).u.dwLowDword += (unsigned long)(wRxBcnTSFOffst);
	if ((qwTSF2).u.dwLowDword < (unsigned long)(wRxBcnTSFOffst)) {
		/* low-dword addition wrapped: carry into the high dword */
		(qwTSF2).u.dwHighDword++;
	}
	LODWORD(qwTSFOffset) = LODWORD(qwTSF1) - LODWORD(qwTSF2);
	if (LODWORD(qwTSF1) < LODWORD(qwTSF2)) {
		// if borrow needed
		HIDWORD(qwTSFOffset) = HIDWORD(qwTSF1) - HIDWORD(qwTSF2) - 1 ;
	} else {
		HIDWORD(qwTSFOffset) = HIDWORD(qwTSF1) - HIDWORD(qwTSF2);
	};
	return (qwTSFOffset);
}

/*
 * Description: Read NIC TSF counter
 *              Get local TSF counter
 *
 * Parameters:
 *  In:
 *      pDevice         - The adapter to be read
 *  Out:
 *      qwCurrTSF       - Current TSF counter
 *
 * Return Value: true if success; otherwise false
 *
 */
bool CARDbGetCurrentTSF (unsigned long dwIoBase, PQWORD pqwCurrTSF)
{
	unsigned short ww;
	unsigned char byData;

	/* latch the TSF counter, then poll until the hardware clears the
	 * read-request bit (bounded by W_MAX_TIMEOUT iterations) */
	MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD);
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		VNSvInPortB(dwIoBase + MAC_REG_TFTCTL, &byData);
		if ( !(byData & TFTCTL_TSFCNTRRD))
			break;
	}
	if (ww == W_MAX_TIMEOUT)
		return(false);
	VNSvInPortD(dwIoBase + MAC_REG_TSFCNTR, &LODWORD(*pqwCurrTSF));
	VNSvInPortD(dwIoBase + MAC_REG_TSFCNTR + 4, &HIDWORD(*pqwCurrTSF));
	return(true);
}

/*
 * Description: Get NEXTTBTT from adjusted TSF and Beacon Interval
 *
 * Parameters:
 *  In:
 *      qwTSF           - Current TSF counter
 *      wBeaconInterval - Beacon Interval
 *  Out:
 *      none
 *
 * Return Value: TSF value of next Beacon
 *
 */
QWORD CARDqGetNextTBTT (QWORD qwTSF, unsigned short wBeaconInterval)
{
	unsigned int	uLowNextTBTT;
	unsigned int	uHighRemain, uLowRemain;
	unsigned int	uBeaconInterval;

	uBeaconInterval = wBeaconInterval * 1024; /* TU -> microseconds */
	// Next TBTT = ((local_current_TSF / beacon_interval) + 1 ) * beacon_interval
	uLowNextTBTT = (LODWORD(qwTSF) >> 10) << 10; /* round down to TU boundary */
	uLowRemain = (uLowNextTBTT) % uBeaconInterval;
	// uHighRemain = ((0x80000000 % uBeaconInterval)* 2 * HIDWORD(qwTSF))
	//		% uBeaconInterval;
	/* (2^32 mod interval) * high-dword, folded mod interval — 64-bit
	 * remainder computed with 32-bit arithmetic */
	uHighRemain = (((0xffffffff % uBeaconInterval) + 1) * HIDWORD(qwTSF))
		      % uBeaconInterval;
	uLowRemain = (uHighRemain + uLowRemain) % uBeaconInterval;
	uLowRemain = uBeaconInterval - uLowRemain;

	// check if carry when add one beacon interval
	if ((~uLowNextTBTT) < uLowRemain)
		HIDWORD(qwTSF) ++ ;
	LODWORD(qwTSF) = uLowNextTBTT + uLowRemain;

	return (qwTSF);
}

/*
 * Description: Set NIC TSF counter for first Beacon time
 *              Get NEXTTBTT from adjusted TSF and Beacon Interval
 *
 * Parameters:
 *  In:
 *      dwIoBase        - IO Base
 *      wBeaconInterval - Beacon Interval
 *  Out:
 *      none
 *
 * Return Value: none
 *
 */
void CARDvSetFirstNextTBTT (unsigned long dwIoBase, unsigned short wBeaconInterval)
{
	QWORD	qwNextTBTT;

	HIDWORD(qwNextTBTT) = 0;
	LODWORD(qwNextTBTT) = 0;
	CARDbGetCurrentTSF(dwIoBase, &qwNextTBTT); //Get Local TSF counter
	qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
	// Set NextTBTT
	VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT, LODWORD(qwNextTBTT));
	VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT + 4, HIDWORD(qwNextTBTT));
	MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
	//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Card:First Next TBTT[%8xh:%8xh] \n", HIDWORD(qwNextTBTT), LODWORD(qwNextTBTT));
	return;
}

/*
 * Description: Sync NIC TSF counter for Beacon time
 *              Get NEXTTBTT and write to HW
 *
 * Parameters:
 *  In:
 *      pDevice         - The adapter to be set
 *
qwTSF - Current TSF counter * wBeaconInterval - Beacon Interval * Out: * none * * Return Value: none * */ void CARDvUpdateNextTBTT (unsigned long dwIoBase, QWORD qwTSF, unsigned short wBeaconInterval) { qwTSF = CARDqGetNextTBTT(qwTSF, wBeaconInterval); // Set NextTBTT VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT, LODWORD(qwTSF)); VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT + 4, HIDWORD(qwTSF)); MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Card:Update Next TBTT[%8xh:%8xh] \n", (unsigned int) HIDWORD(qwTSF), (unsigned int) LODWORD(qwTSF)); return; }
gpl-2.0
sunios/android_kernel_xiaomi_msm8x74pro
drivers/staging/usbip/userspace/src/usbip_unbind.c
8369
4583
/*
 * Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
 *               2005-2007 Takahiro Hirofuchi
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <sysfs/libsysfs.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <getopt.h>

#include "usbip_common.h"
#include "utils.h"
#include "usbip.h"

static const char usbip_unbind_usage_string[] =
	"usbip unbind <args>\n"
	"    -b, --busid=<busid>    Unbind " USBIP_HOST_DRV_NAME ".ko from "
	"device on <busid>\n";

void usbip_unbind_usage(void)
{
	printf("usage: %s", usbip_unbind_usage_string);
}

/*
 * Unbind the device on <busid> from the usbip-host driver and hand it
 * back to its default driver.
 *
 * Returns 0 on success, -1 on any failure. All failure paths release the
 * driver handle and any allocated attribute copy (the previous version
 * leaked the open sysfs driver handle on several early returns, and
 * copied only the FIRST byte of the attribute value before writing `len`
 * bytes back, pushing uninitialized heap memory into sysfs).
 */
static int unbind_device(char *busid)
{
	char bus_type[] = "usb";
	struct sysfs_driver *usbip_host_drv;
	struct sysfs_device *dev;
	struct dlist *devlist;
	int verified = 0;
	int rc, ret = -1;

	char attr_name[] = "bConfigurationValue";
	char sysfs_mntpath[SYSFS_PATH_MAX];
	char busid_attr_path[SYSFS_PATH_MAX];
	struct sysfs_attribute *busid_attr;
	char *val = NULL;
	int len;

	/* verify the busid device is using usbip-host */
	usbip_host_drv = sysfs_open_driver(bus_type, USBIP_HOST_DRV_NAME);
	if (!usbip_host_drv) {
		err("could not open %s driver: %s", USBIP_HOST_DRV_NAME,
		    strerror(errno));
		return -1;
	}

	devlist = sysfs_get_driver_devices(usbip_host_drv);
	if (!devlist) {
		err("%s is not in use by any devices", USBIP_HOST_DRV_NAME);
		goto err_close_usbip_host_drv;
	}

	dlist_for_each_data(devlist, dev, struct sysfs_device) {
		if (!strncmp(busid, dev->name, strlen(busid)) &&
		    !strncmp(dev->driver_name, USBIP_HOST_DRV_NAME,
			     strlen(USBIP_HOST_DRV_NAME))) {
			verified = 1;
			break;
		}
	}

	if (!verified) {
		err("device on busid %s is not using %s", busid,
		    USBIP_HOST_DRV_NAME);
		goto err_close_usbip_host_drv;
	}

	/*
	 * NOTE: A read and write of an attribute value of the device busid
	 * refers to must be done to start probing. That way a rebind of the
	 * default driver for the device occurs.
	 *
	 * This seems very hackish and adds a lot of pointless code. I think it
	 * should be done in the kernel by the driver after del_match_busid is
	 * finished!
	 */
	rc = sysfs_get_mnt_path(sysfs_mntpath, SYSFS_PATH_MAX);
	if (rc < 0) {
		err("sysfs must be mounted: %s", strerror(errno));
		/* was `return -1`, which leaked usbip_host_drv */
		goto err_close_usbip_host_drv;
	}

	snprintf(busid_attr_path, sizeof(busid_attr_path), "%s/%s/%s/%s/%s/%s",
		 sysfs_mntpath, SYSFS_BUS_NAME, bus_type, SYSFS_DEVICES_NAME,
		 busid, attr_name);

	/* read a device attribute */
	busid_attr = sysfs_open_attribute(busid_attr_path);
	if (!busid_attr) {
		err("could not open %s/%s: %s", busid, attr_name,
		    strerror(errno));
		goto err_close_usbip_host_drv;
	}

	if (sysfs_read_attribute(busid_attr) < 0) {
		err("problem reading attribute: %s", strerror(errno));
		sysfs_close_attribute(busid_attr);
		goto err_out;
	}

	len = busid_attr->len;
	val = malloc(len);
	if (!val) {
		err("out of memory");
		sysfs_close_attribute(busid_attr);
		goto err_out;
	}
	/* copy the WHOLE attribute value; `*val = *busid_attr->value`
	 * copied only the first byte */
	memcpy(val, busid_attr->value, len);
	sysfs_close_attribute(busid_attr);

	/* notify driver of unbind */
	rc = modify_match_busid(busid, 0);
	if (rc < 0) {
		err("unable to unbind device on %s", busid);
		goto err_out;
	}

	/* write the device attribute back to trigger re-probing */
	busid_attr = sysfs_open_attribute(busid_attr_path);
	if (!busid_attr) {
		err("could not open %s/%s: %s", busid, attr_name,
		    strerror(errno));
		/* was `return -1`, which leaked val and usbip_host_drv */
		goto err_out;
	}

	rc = sysfs_write_attribute(busid_attr, val, len);
	if (rc < 0) {
		err("problem writing attribute: %s", strerror(errno));
		sysfs_close_attribute(busid_attr);
		goto err_out;
	}
	sysfs_close_attribute(busid_attr);

	ret = 0;
	printf("unbind device on busid %s: complete\n", busid);

err_out:
	free(val);
err_close_usbip_host_drv:
	sysfs_close_driver(usbip_host_drv);

	return ret;
}

/* Command entry point: parse -b/--busid and unbind that device. */
int usbip_unbind(int argc, char *argv[])
{
	static const struct option opts[] = {
		{ "busid", required_argument, NULL, 'b' },
		{ NULL,    0,                 NULL,  0  }
	};

	int opt;
	int ret = -1;

	for (;;) {
		opt = getopt_long(argc, argv, "b:", opts, NULL);

		if (opt == -1)
			break;

		switch (opt) {
		case 'b':
			ret = unbind_device(optarg);
			goto out;
		default:
			goto err_out;
		}
	}

err_out:
	usbip_unbind_usage();
out:
	return ret;
}
gpl-2.0
yaymalaga/yayPrime_kernel
drivers/s390/char/monwriter.c
10417
9781
/*
 * Character device driver for writing z/VM *MONITOR service records.
 *
 * Copyright IBM Corp. 2006, 2009
 *
 * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com>
 */

#define KMSG_COMPONENT "monwriter"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/ctype.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/appldata.h>
#include <asm/monwriter.h>

#define MONWRITE_MAX_DATALEN	4010	/* upper bound for hdr->datalen */

static int mon_max_bufs = 255;	/* module param: max concurrent sample buffers */
static int mon_buf_count;	/* current number of sample (non-event) buffers */

/* One monitor record buffer, linked into its owner's mon_private list. */
struct mon_buf {
	struct list_head list;
	struct monwrite_hdr hdr;
	int diag_done;		/* set once the start record was submitted */
	char *data;
};

static LIST_HEAD(mon_priv_list);

/* Per-open-file state; writes are a header/data streaming state machine. */
struct mon_private {
	struct list_head priv_list;	/* node in global mon_priv_list */
	struct list_head list;		/* this file's mon_buf list */
	struct monwrite_hdr hdr;	/* header currently being assembled */
	size_t hdr_to_read;		/* bytes of hdr still expected */
	size_t data_to_read;		/* bytes of record data still expected */
	struct mon_buf *current_buf;
	struct mutex thread_mutex;	/* serializes writers on this file */
};

/*
 * helper functions
 */

/* Submit one record to z/VM via the APPLDATA diagnose; maps diagnose
 * return codes to errno values. */
static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
{
	struct appldata_product_id id;
	int rc;

	strcpy(id.prod_nr, "LNXAPPL");
	id.prod_fn = myhdr->applid;
	id.record_nr = myhdr->record_num;
	id.version_nr = myhdr->version;
	id.release_nr = myhdr->release;
	id.mod_lvl = myhdr->mod_level;
	rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen);
	if (rc <= 0)
		return rc;
	pr_err("Writing monitor data failed with rc=%i\n", rc);
	if (rc == 5)	/* rc 5: not authorized */
		return -EPERM;
	return -EINVAL;
}

/* Find an existing buffer matching monhdr's identity fields; for
 * MONWRITE_STOP_INTERVAL any mon_function matches. */
static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
					 struct monwrite_hdr *monhdr)
{
	struct mon_buf *entry, *next;

	list_for_each_entry_safe(entry, next, &monpriv->list, list)
		if ((entry->hdr.mon_function == monhdr->mon_function ||
		     monhdr->mon_function == MONWRITE_STOP_INTERVAL) &&
		    entry->hdr.applid == monhdr->applid &&
		    entry->hdr.record_num == monhdr->record_num &&
		    entry->hdr.version == monhdr->version &&
		    entry->hdr.release == monhdr->release &&
		    entry->hdr.mod_level == monhdr->mod_level)
			return entry;

	return NULL;
}

/* A complete header has arrived: validate it, then either stop and free
 * an existing interval buffer, or allocate a new buffer for the record
 * data that follows. Sets monpriv->current_buf (NULL after a stop). */
static int monwrite_new_hdr(struct mon_private *monpriv)
{
	struct monwrite_hdr *monhdr = &monpriv->hdr;
	struct mon_buf *monbuf;
	int rc = 0;

	if (monhdr->datalen > MONWRITE_MAX_DATALEN ||
	    monhdr->mon_function > MONWRITE_START_CONFIG ||
	    monhdr->hdrlen != sizeof(struct monwrite_hdr))
		return -EINVAL;
	monbuf = NULL;
	if (monhdr->mon_function != MONWRITE_GEN_EVENT)
		monbuf = monwrite_find_hdr(monpriv, monhdr);
	if (monbuf) {
		if (monhdr->mon_function == MONWRITE_STOP_INTERVAL) {
			monhdr->datalen = monbuf->hdr.datalen;
			rc = monwrite_diag(monhdr, monbuf->data,
					   APPLDATA_STOP_REC);
			list_del(&monbuf->list);
			mon_buf_count--;
			kfree(monbuf->data);
			kfree(monbuf);
			monbuf = NULL;
		}
	} else if (monhdr->mon_function != MONWRITE_STOP_INTERVAL) {
		if (mon_buf_count >= mon_max_bufs)
			return -ENOSPC;
		monbuf = kzalloc(sizeof(struct mon_buf), GFP_KERNEL);
		if (!monbuf)
			return -ENOMEM;
		/* GFP_DMA: buffer is handed to the diagnose instruction */
		monbuf->data = kzalloc(monhdr->datalen,
				       GFP_KERNEL | GFP_DMA);
		if (!monbuf->data) {
			kfree(monbuf);
			return -ENOMEM;
		}
		monbuf->hdr = *monhdr;
		list_add_tail(&monbuf->list, &monpriv->list);
		/* event buffers are transient and not counted */
		if (monhdr->mon_function != MONWRITE_GEN_EVENT)
			mon_buf_count++;
	}
	monpriv->current_buf = monbuf;
	return rc;
}

/* The record payload is complete: submit the appropriate start/event
 * record. Interval/config starts are submitted only once (diag_done);
 * event buffers are freed immediately after submission. */
static int monwrite_new_data(struct mon_private *monpriv)
{
	struct monwrite_hdr *monhdr = &monpriv->hdr;
	struct mon_buf *monbuf = monpriv->current_buf;
	int rc = 0;

	switch (monhdr->mon_function) {
	case MONWRITE_START_INTERVAL:
		if (!monbuf->diag_done) {
			rc = monwrite_diag(monhdr, monbuf->data,
					   APPLDATA_START_INTERVAL_REC);
			monbuf->diag_done = 1;
		}
		break;
	case MONWRITE_START_CONFIG:
		if (!monbuf->diag_done) {
			rc = monwrite_diag(monhdr, monbuf->data,
					   APPLDATA_START_CONFIG_REC);
			monbuf->diag_done = 1;
		}
		break;
	case MONWRITE_GEN_EVENT:
		rc = monwrite_diag(monhdr, monbuf->data,
				   APPLDATA_GEN_EVENT_REC);
		list_del(&monpriv->current_buf->list);
		kfree(monpriv->current_buf->data);
		kfree(monpriv->current_buf);
		monpriv->current_buf = NULL;
		break;
	default:
		/* monhdr->mon_function is checked in monwrite_new_hdr */
		BUG();
	}
	return rc;
}

/*
 * file operations
 */

static int monwrite_open(struct inode *inode, struct file *filp)
{
	struct mon_private *monpriv;

	monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
	if (!monpriv)
		return -ENOMEM;
	INIT_LIST_HEAD(&monpriv->list);
	monpriv->hdr_to_read = sizeof(monpriv->hdr);
	mutex_init(&monpriv->thread_mutex);
	filp->private_data = monpriv;
	list_add_tail(&monpriv->priv_list, &mon_priv_list);
	return nonseekable_open(inode, filp);
}

/* Stop all still-active sample records and free everything this file owns. */
static int monwrite_close(struct inode *inode, struct file *filp)
{
	struct mon_private *monpriv = filp->private_data;
	struct mon_buf *entry, *next;

	list_for_each_entry_safe(entry, next, &monpriv->list, list) {
		if (entry->hdr.mon_function != MONWRITE_GEN_EVENT)
			monwrite_diag(&entry->hdr, entry->data,
				      APPLDATA_STOP_REC);
		mon_buf_count--;
		list_del(&entry->list);
		kfree(entry->data);
		kfree(entry);
	}
	list_del(&monpriv->priv_list);
	kfree(monpriv);
	return 0;
}

/* Consume a byte stream of alternating monwrite_hdr / payload chunks;
 * partial headers and payloads across write() calls are supported. */
static ssize_t monwrite_write(struct file *filp, const char __user *data,
			      size_t count, loff_t *ppos)
{
	struct mon_private *monpriv = filp->private_data;
	size_t len, written;
	void *to;
	int rc;

	mutex_lock(&monpriv->thread_mutex);
	for (written = 0; written < count; ) {
		if (monpriv->hdr_to_read) {
			len = min(count - written, monpriv->hdr_to_read);
			to = (char *) &monpriv->hdr +
				sizeof(monpriv->hdr) - monpriv->hdr_to_read;
			if (copy_from_user(to, data + written, len)) {
				rc = -EFAULT;
				goto out_error;
			}
			monpriv->hdr_to_read -= len;
			written += len;
			if (monpriv->hdr_to_read > 0)
				continue;	/* header still incomplete */
			rc = monwrite_new_hdr(monpriv);
			if (rc)
				goto out_error;
			/* a stop leaves current_buf NULL: no payload follows */
			monpriv->data_to_read = monpriv->current_buf ?
				monpriv->current_buf->hdr.datalen : 0;
		}

		if (monpriv->data_to_read) {
			len = min(count - written, monpriv->data_to_read);
			to = monpriv->current_buf->data +
				monpriv->hdr.datalen - monpriv->data_to_read;
			if (copy_from_user(to, data + written, len)) {
				rc = -EFAULT;
				goto out_error;
			}
			monpriv->data_to_read -= len;
			written += len;
			if (monpriv->data_to_read > 0)
				continue;	/* payload still incomplete */
			rc = monwrite_new_data(monpriv);
			if (rc)
				goto out_error;
		}
		monpriv->hdr_to_read = sizeof(monpriv->hdr);
	}
	mutex_unlock(&monpriv->thread_mutex);
	return written;

out_error:
	/* reset the state machine so the next write starts a fresh header */
	monpriv->data_to_read = 0;
	monpriv->hdr_to_read = sizeof(struct monwrite_hdr);
	mutex_unlock(&monpriv->thread_mutex);
	return rc;
}

static const struct file_operations monwrite_fops = {
	.owner	 = THIS_MODULE,
	.open	 = &monwrite_open,
	.release = &monwrite_close,
	.write	 = &monwrite_write,
	.llseek  = noop_llseek,
};

static struct miscdevice mon_dev = {
	.name	= "monwriter",
	.fops	= &monwrite_fops,
	.minor	= MISC_DYNAMIC_MINOR,
};

/*
 * suspend/resume
 */

/* Tell z/VM to stop all active sample records before hibernation. */
static int monwriter_freeze(struct device *dev)
{
	struct mon_private *monpriv;
	struct mon_buf *monbuf;

	list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
		list_for_each_entry(monbuf, &monpriv->list, list) {
			if (monbuf->hdr.mon_function != MONWRITE_GEN_EVENT)
				monwrite_diag(&monbuf->hdr, monbuf->data,
					      APPLDATA_STOP_REC);
		}
	}
	return 0;
}

/* Re-submit start records for every buffer after resume. */
static int monwriter_restore(struct device *dev)
{
	struct mon_private *monpriv;
	struct mon_buf *monbuf;

	list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
		list_for_each_entry(monbuf, &monpriv->list, list) {
			if (monbuf->hdr.mon_function ==
			    MONWRITE_START_INTERVAL)
				monwrite_diag(&monbuf->hdr, monbuf->data,
					      APPLDATA_START_INTERVAL_REC);
			if (monbuf->hdr.mon_function ==
			    MONWRITE_START_CONFIG)
				monwrite_diag(&monbuf->hdr, monbuf->data,
					      APPLDATA_START_CONFIG_REC);
		}
	}
	return 0;
}

static int monwriter_thaw(struct device *dev)
{
	return monwriter_restore(dev);
}

static const struct dev_pm_ops monwriter_pm_ops = {
	.freeze		= monwriter_freeze,
	.thaw		= monwriter_thaw,
	.restore	= monwriter_restore,
};

static struct platform_driver monwriter_pdrv = {
	.driver = {
		.name	= "monwriter",
		.owner	= THIS_MODULE,
		.pm	= &monwriter_pm_ops,
	},
};

static struct platform_device *monwriter_pdev;

/*
 * module init/exit
 */

static int __init mon_init(void)
{
	int rc;

	if (!MACHINE_IS_VM)
		return -ENODEV;

	rc = platform_driver_register(&monwriter_pdrv);
	if (rc)
		return rc;

	monwriter_pdev = platform_device_register_simple("monwriter", -1, NULL,
							0);
	if (IS_ERR(monwriter_pdev)) {
		rc = PTR_ERR(monwriter_pdev);
		goto out_driver;
	}

	/*
	 * misc_register() has to be the last action in module_init(), because
	 * file operations will be available right after this.
	 */
	rc = misc_register(&mon_dev);
	if (rc)
		goto out_device;
	return 0;

out_device:
	platform_device_unregister(monwriter_pdev);
out_driver:
	platform_driver_unregister(&monwriter_pdrv);
	return rc;
}

static void __exit mon_exit(void)
{
	misc_deregister(&mon_dev);
	platform_device_unregister(monwriter_pdev);
	platform_driver_unregister(&monwriter_pdrv);
}

module_init(mon_init);
module_exit(mon_exit);

module_param_named(max_bufs, mon_max_bufs, int, 0644);
MODULE_PARM_DESC(max_bufs, "Maximum number of sample monitor data buffers "
		 "that can be active at one time");

MODULE_AUTHOR("Melissa Howland <Melissa.Howland@us.ibm.com>");
MODULE_DESCRIPTION("Character device driver for writing z/VM "
		   "APPLDATA monitor records.");
MODULE_LICENSE("GPL");
gpl-2.0
GalaxyTab4/aosparadox_kernel_samsung_msm8226
net/llc/llc_core.c
11697
4256
/* * llc_core.c - Minimum needed routines for sap handling and module init/exit * * Copyright (c) 1997 by Procom Technology, Inc. * 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program can be redistributed or modified under the terms of the * GNU General Public License as published by the Free Software Foundation. * This program is distributed without any warranty or implied warranty * of merchantability or fitness for a particular purpose. * * See the GNU General Public License for more details. */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/init.h> #include <net/net_namespace.h> #include <net/llc.h> LIST_HEAD(llc_sap_list); DEFINE_SPINLOCK(llc_sap_list_lock); /** * llc_sap_alloc - allocates and initializes sap. * * Allocates and initializes sap. */ static struct llc_sap *llc_sap_alloc(void) { struct llc_sap *sap = kzalloc(sizeof(*sap), GFP_ATOMIC); int i; if (sap) { /* sap->laddr.mac - leave as a null, it's filled by bind */ sap->state = LLC_SAP_STATE_ACTIVE; spin_lock_init(&sap->sk_lock); for (i = 0; i < LLC_SK_LADDR_HASH_ENTRIES; i++) INIT_HLIST_NULLS_HEAD(&sap->sk_laddr_hash[i], i); atomic_set(&sap->refcnt, 1); } return sap; } static struct llc_sap *__llc_sap_find(unsigned char sap_value) { struct llc_sap* sap; list_for_each_entry(sap, &llc_sap_list, node) if (sap->laddr.lsap == sap_value) goto out; sap = NULL; out: return sap; } /** * llc_sap_find - searchs a SAP in station * @sap_value: sap to be found * * Searchs for a sap in the sap list of the LLC's station upon the sap ID. * If the sap is found it will be refcounted and the user will have to do * a llc_sap_put after use. * Returns the sap or %NULL if not found. 
*/ struct llc_sap *llc_sap_find(unsigned char sap_value) { struct llc_sap *sap; rcu_read_lock_bh(); sap = __llc_sap_find(sap_value); if (sap) llc_sap_hold(sap); rcu_read_unlock_bh(); return sap; } /** * llc_sap_open - open interface to the upper layers. * @lsap: SAP number. * @func: rcv func for datalink protos * * Interface function to upper layer. Each one who wants to get a SAP * (for example NetBEUI) should call this function. Returns the opened * SAP for success, NULL for failure. */ struct llc_sap *llc_sap_open(unsigned char lsap, int (*func)(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)) { struct llc_sap *sap = NULL; spin_lock_bh(&llc_sap_list_lock); if (__llc_sap_find(lsap)) /* SAP already exists */ goto out; sap = llc_sap_alloc(); if (!sap) goto out; sap->laddr.lsap = lsap; sap->rcv_func = func; list_add_tail_rcu(&sap->node, &llc_sap_list); out: spin_unlock_bh(&llc_sap_list_lock); return sap; } /** * llc_sap_close - close interface for upper layers. * @sap: SAP to be closed. * * Close interface function to upper layer. Each one who wants to * close an open SAP (for example NetBEUI) should call this function. * Removes this sap from the list of saps in the station and then * frees the memory for this sap. 
*/ void llc_sap_close(struct llc_sap *sap) { WARN_ON(sap->sk_count); spin_lock_bh(&llc_sap_list_lock); list_del_rcu(&sap->node); spin_unlock_bh(&llc_sap_list_lock); synchronize_rcu(); kfree(sap); } static struct packet_type llc_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_802_2), .func = llc_rcv, }; static struct packet_type llc_tr_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_TR_802_2), .func = llc_rcv, }; static int __init llc_init(void) { dev_add_pack(&llc_packet_type); dev_add_pack(&llc_tr_packet_type); return 0; } static void __exit llc_exit(void) { dev_remove_pack(&llc_packet_type); dev_remove_pack(&llc_tr_packet_type); } module_init(llc_init); module_exit(llc_exit); EXPORT_SYMBOL(llc_sap_list); EXPORT_SYMBOL(llc_sap_list_lock); EXPORT_SYMBOL(llc_sap_find); EXPORT_SYMBOL(llc_sap_open); EXPORT_SYMBOL(llc_sap_close); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Procom 1997, Jay Schullist 2001, Arnaldo C. Melo 2001-2003"); MODULE_DESCRIPTION("LLC IEEE 802.2 core support");
gpl-2.0
diorahman/linux
net/bridge/netfilter/ebt_limit.c
13745
3247
/* * ebt_limit * * Authors: * Tom Marshall <tommy@home.tig-grr.com> * * Mostly copied from netfilter's ipt_limit.c, see that file for * more explanation * * September, 2003 * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/netdevice.h> #include <linux/spinlock.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_bridge/ebtables.h> #include <linux/netfilter_bridge/ebt_limit.h> static DEFINE_SPINLOCK(limit_lock); #define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24)) #define _POW2_BELOW2(x) ((x)|((x)>>1)) #define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2)) #define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4)) #define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8)) #define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16)) #define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1) #define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ) static bool ebt_limit_mt(const struct sk_buff *skb, struct xt_action_param *par) { struct ebt_limit_info *info = (void *)par->matchinfo; unsigned long now = jiffies; spin_lock_bh(&limit_lock); info->credit += (now - xchg(&info->prev, now)) * CREDITS_PER_JIFFY; if (info->credit > info->credit_cap) info->credit = info->credit_cap; if (info->credit >= info->cost) { /* We're not limited. */ info->credit -= info->cost; spin_unlock_bh(&limit_lock); return true; } spin_unlock_bh(&limit_lock); return false; } /* Precision saver. */ static u_int32_t user2credits(u_int32_t user) { /* If multiplying would overflow... */ if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY)) /* Divide first. */ return (user / EBT_LIMIT_SCALE) * HZ * CREDITS_PER_JIFFY; return (user * HZ * CREDITS_PER_JIFFY) / EBT_LIMIT_SCALE; } static int ebt_limit_mt_check(const struct xt_mtchk_param *par) { struct ebt_limit_info *info = par->matchinfo; /* Check for overflow. 
*/ if (info->burst == 0 || user2credits(info->avg * info->burst) < user2credits(info->avg)) { pr_info("overflow, try lower: %u/%u\n", info->avg, info->burst); return -EINVAL; } /* User avg in seconds * EBT_LIMIT_SCALE: convert to jiffies * 128. */ info->prev = jiffies; info->credit = user2credits(info->avg * info->burst); info->credit_cap = user2credits(info->avg * info->burst); info->cost = user2credits(info->avg); return 0; } #ifdef CONFIG_COMPAT /* * no conversion function needed -- * only avg/burst have meaningful values in userspace. */ struct ebt_compat_limit_info { compat_uint_t avg, burst; compat_ulong_t prev; compat_uint_t credit, credit_cap, cost; }; #endif static struct xt_match ebt_limit_mt_reg __read_mostly = { .name = "limit", .revision = 0, .family = NFPROTO_BRIDGE, .match = ebt_limit_mt, .checkentry = ebt_limit_mt_check, .matchsize = sizeof(struct ebt_limit_info), #ifdef CONFIG_COMPAT .compatsize = sizeof(struct ebt_compat_limit_info), #endif .me = THIS_MODULE, }; static int __init ebt_limit_init(void) { return xt_register_match(&ebt_limit_mt_reg); } static void __exit ebt_limit_fini(void) { xt_unregister_match(&ebt_limit_mt_reg); } module_init(ebt_limit_init); module_exit(ebt_limit_fini); MODULE_DESCRIPTION("Ebtables: Rate-limit match"); MODULE_LICENSE("GPL");
gpl-2.0
Jason-Lam/linux-am335x
kernel/rcutree_trace.c
178
14243
/* * Read-Copy Update tracing for classic implementation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright IBM Corporation, 2008 * * Papers: http://www.rdrop.com/users/paulmck/RCU * * For detailed explanation of Read-Copy Update mechanism see - * Documentation/RCU * */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/rcupdate.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/atomic.h> #include <linux/bitops.h> #include <linux/module.h> #include <linux/completion.h> #include <linux/moduleparam.h> #include <linux/percpu.h> #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/mutex.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #define RCU_TREE_NONCORE #include "rcutree.h" #ifdef CONFIG_RCU_BOOST static char convert_kthread_status(unsigned int kthread_status) { if (kthread_status > RCU_KTHREAD_MAX) return '?'; return "SRWOY"[kthread_status]; } #endif /* #ifdef CONFIG_RCU_BOOST */ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) { if (!rdp->beenonline) return; seq_printf(m, "%3d%cc=%lu g=%lu pq=%d pgp=%lu qp=%d", rdp->cpu, cpu_is_offline(rdp->cpu) ? '!' 
: ' ', rdp->completed, rdp->gpnum, rdp->passed_quiesce, rdp->passed_quiesce_gpnum, rdp->qs_pending); #ifdef CONFIG_NO_HZ seq_printf(m, " dt=%d/%d/%d df=%lu", atomic_read(&rdp->dynticks->dynticks), rdp->dynticks->dynticks_nesting, rdp->dynticks->dynticks_nmi_nesting, rdp->dynticks_fqs); #endif /* #ifdef CONFIG_NO_HZ */ seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi); seq_printf(m, " ql=%ld qs=%c%c%c%c", rdp->qlen, ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != rdp->nxttail[RCU_NEXT_TAIL]], ".R"[rdp->nxttail[RCU_WAIT_TAIL] != rdp->nxttail[RCU_NEXT_READY_TAIL]], ".W"[rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_WAIT_TAIL]], ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]); #ifdef CONFIG_RCU_BOOST seq_printf(m, " kt=%d/%c/%d ktl=%x", per_cpu(rcu_cpu_has_work, rdp->cpu), convert_kthread_status(per_cpu(rcu_cpu_kthread_status, rdp->cpu)), per_cpu(rcu_cpu_kthread_cpu, rdp->cpu), per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff); #endif /* #ifdef CONFIG_RCU_BOOST */ seq_printf(m, " b=%ld", rdp->blimit); seq_printf(m, " ci=%lu co=%lu ca=%lu\n", rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); } #define PRINT_RCU_DATA(name, func, m) \ do { \ int _p_r_d_i; \ \ for_each_possible_cpu(_p_r_d_i) \ func(m, &per_cpu(name, _p_r_d_i)); \ } while (0) static int show_rcudata(struct seq_file *m, void *unused) { #ifdef CONFIG_TREE_PREEMPT_RCU seq_puts(m, "rcu_preempt:\n"); PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data, m); #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ seq_puts(m, "rcu_sched:\n"); PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data, m); seq_puts(m, "rcu_bh:\n"); PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data, m); return 0; } static int rcudata_open(struct inode *inode, struct file *file) { return single_open(file, show_rcudata, NULL); } static const struct file_operations rcudata_fops = { .owner = THIS_MODULE, .open = rcudata_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void 
print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) { if (!rdp->beenonline) return; seq_printf(m, "%d,%s,%lu,%lu,%d,%lu,%d", rdp->cpu, cpu_is_offline(rdp->cpu) ? "\"N\"" : "\"Y\"", rdp->completed, rdp->gpnum, rdp->passed_quiesce, rdp->passed_quiesce_gpnum, rdp->qs_pending); #ifdef CONFIG_NO_HZ seq_printf(m, ",%d,%d,%d,%lu", atomic_read(&rdp->dynticks->dynticks), rdp->dynticks->dynticks_nesting, rdp->dynticks->dynticks_nmi_nesting, rdp->dynticks_fqs); #endif /* #ifdef CONFIG_NO_HZ */ seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi); seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen, ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != rdp->nxttail[RCU_NEXT_TAIL]], ".R"[rdp->nxttail[RCU_WAIT_TAIL] != rdp->nxttail[RCU_NEXT_READY_TAIL]], ".W"[rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_WAIT_TAIL]], ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]); #ifdef CONFIG_RCU_BOOST seq_printf(m, ",%d,\"%c\"", per_cpu(rcu_cpu_has_work, rdp->cpu), convert_kthread_status(per_cpu(rcu_cpu_kthread_status, rdp->cpu))); #endif /* #ifdef CONFIG_RCU_BOOST */ seq_printf(m, ",%ld", rdp->blimit); seq_printf(m, ",%lu,%lu,%lu\n", rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); } static int show_rcudata_csv(struct seq_file *m, void *unused) { seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pgp\",\"pq\","); #ifdef CONFIG_NO_HZ seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\","); #endif /* #ifdef CONFIG_NO_HZ */ seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\""); #ifdef CONFIG_RCU_BOOST seq_puts(m, "\"kt\",\"ktl\""); #endif /* #ifdef CONFIG_RCU_BOOST */ seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n"); #ifdef CONFIG_TREE_PREEMPT_RCU seq_puts(m, "\"rcu_preempt:\"\n"); PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m); #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ seq_puts(m, "\"rcu_sched:\"\n"); PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data_csv, m); seq_puts(m, "\"rcu_bh:\"\n"); PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data_csv, m); return 0; } 
static int rcudata_csv_open(struct inode *inode, struct file *file) { return single_open(file, show_rcudata_csv, NULL); } static const struct file_operations rcudata_csv_fops = { .owner = THIS_MODULE, .open = rcudata_csv_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #ifdef CONFIG_RCU_BOOST static void print_one_rcu_node_boost(struct seq_file *m, struct rcu_node *rnp) { seq_printf(m, "%d:%d tasks=%c%c%c%c kt=%c ntb=%lu neb=%lu nnb=%lu " "j=%04x bt=%04x\n", rnp->grplo, rnp->grphi, "T."[list_empty(&rnp->blkd_tasks)], "N."[!rnp->gp_tasks], "E."[!rnp->exp_tasks], "B."[!rnp->boost_tasks], convert_kthread_status(rnp->boost_kthread_status), rnp->n_tasks_boosted, rnp->n_exp_boosts, rnp->n_normal_boosts, (int)(jiffies & 0xffff), (int)(rnp->boost_time & 0xffff)); seq_printf(m, "%s: nt=%lu egt=%lu bt=%lu nb=%lu ny=%lu nos=%lu\n", " balk", rnp->n_balk_blkd_tasks, rnp->n_balk_exp_gp_tasks, rnp->n_balk_boost_tasks, rnp->n_balk_notblocked, rnp->n_balk_notyet, rnp->n_balk_nos); } static int show_rcu_node_boost(struct seq_file *m, void *unused) { struct rcu_node *rnp; rcu_for_each_leaf_node(&rcu_preempt_state, rnp) print_one_rcu_node_boost(m, rnp); return 0; } static int rcu_node_boost_open(struct inode *inode, struct file *file) { return single_open(file, show_rcu_node_boost, NULL); } static const struct file_operations rcu_node_boost_fops = { .owner = THIS_MODULE, .open = rcu_node_boost_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* * Create the rcuboost debugfs entry. Standard error return. */ static int rcu_boost_trace_create_file(struct dentry *rcudir) { return !debugfs_create_file("rcuboost", 0444, rcudir, NULL, &rcu_node_boost_fops); } #else /* #ifdef CONFIG_RCU_BOOST */ static int rcu_boost_trace_create_file(struct dentry *rcudir) { return 0; /* There cannot be an error if we didn't create it! 
*/ } #endif /* #else #ifdef CONFIG_RCU_BOOST */ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) { unsigned long gpnum; int level = 0; struct rcu_node *rnp; gpnum = rsp->gpnum; seq_printf(m, "c=%lu g=%lu s=%d jfq=%ld j=%x " "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu\n", rsp->completed, gpnum, rsp->signaled, (long)(rsp->jiffies_force_qs - jiffies), (int)(jiffies & 0xffff), rsp->n_force_qs, rsp->n_force_qs_ngp, rsp->n_force_qs - rsp->n_force_qs_ngp, rsp->n_force_qs_lh); for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) { if (rnp->level != level) { seq_puts(m, "\n"); level = rnp->level; } seq_printf(m, "%lx/%lx %c%c>%c %d:%d ^%d ", rnp->qsmask, rnp->qsmaskinit, ".G"[rnp->gp_tasks != NULL], ".E"[rnp->exp_tasks != NULL], ".T"[!list_empty(&rnp->blkd_tasks)], rnp->grplo, rnp->grphi, rnp->grpnum); } seq_puts(m, "\n"); } static int show_rcuhier(struct seq_file *m, void *unused) { #ifdef CONFIG_TREE_PREEMPT_RCU seq_puts(m, "rcu_preempt:\n"); print_one_rcu_state(m, &rcu_preempt_state); #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ seq_puts(m, "rcu_sched:\n"); print_one_rcu_state(m, &rcu_sched_state); seq_puts(m, "rcu_bh:\n"); print_one_rcu_state(m, &rcu_bh_state); return 0; } static int rcuhier_open(struct inode *inode, struct file *file) { return single_open(file, show_rcuhier, NULL); } static const struct file_operations rcuhier_fops = { .owner = THIS_MODULE, .open = rcuhier_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void show_one_rcugp(struct seq_file *m, struct rcu_state *rsp) { unsigned long flags; unsigned long completed; unsigned long gpnum; unsigned long gpage; unsigned long gpmax; struct rcu_node *rnp = &rsp->node[0]; raw_spin_lock_irqsave(&rnp->lock, flags); completed = rsp->completed; gpnum = rsp->gpnum; if (rsp->completed == rsp->gpnum) gpage = 0; else gpage = jiffies - rsp->gp_start; gpmax = rsp->gp_max; raw_spin_unlock_irqrestore(&rnp->lock, flags); seq_printf(m, "%s: completed=%ld 
gpnum=%lu age=%ld max=%ld\n", rsp->name, completed, gpnum, gpage, gpmax); } static int show_rcugp(struct seq_file *m, void *unused) { #ifdef CONFIG_TREE_PREEMPT_RCU show_one_rcugp(m, &rcu_preempt_state); #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ show_one_rcugp(m, &rcu_sched_state); show_one_rcugp(m, &rcu_bh_state); return 0; } static int rcugp_open(struct inode *inode, struct file *file) { return single_open(file, show_rcugp, NULL); } static const struct file_operations rcugp_fops = { .owner = THIS_MODULE, .open = rcugp_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp) { seq_printf(m, "%3d%cnp=%ld " "qsp=%ld rpq=%ld cbr=%ld cng=%ld " "gpc=%ld gps=%ld nf=%ld nn=%ld\n", rdp->cpu, cpu_is_offline(rdp->cpu) ? '!' : ' ', rdp->n_rcu_pending, rdp->n_rp_qs_pending, rdp->n_rp_report_qs, rdp->n_rp_cb_ready, rdp->n_rp_cpu_needs_gp, rdp->n_rp_gp_completed, rdp->n_rp_gp_started, rdp->n_rp_need_fqs, rdp->n_rp_need_nothing); } static void print_rcu_pendings(struct seq_file *m, struct rcu_state *rsp) { int cpu; struct rcu_data *rdp; for_each_possible_cpu(cpu) { rdp = per_cpu_ptr(rsp->rda, cpu); if (rdp->beenonline) print_one_rcu_pending(m, rdp); } } static int show_rcu_pending(struct seq_file *m, void *unused) { #ifdef CONFIG_TREE_PREEMPT_RCU seq_puts(m, "rcu_preempt:\n"); print_rcu_pendings(m, &rcu_preempt_state); #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ seq_puts(m, "rcu_sched:\n"); print_rcu_pendings(m, &rcu_sched_state); seq_puts(m, "rcu_bh:\n"); print_rcu_pendings(m, &rcu_bh_state); return 0; } static int rcu_pending_open(struct inode *inode, struct file *file) { return single_open(file, show_rcu_pending, NULL); } static const struct file_operations rcu_pending_fops = { .owner = THIS_MODULE, .open = rcu_pending_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int show_rcutorture(struct seq_file *m, void *unused) { seq_printf(m, "rcutorture 
test sequence: %lu %s\n", rcutorture_testseq >> 1, (rcutorture_testseq & 0x1) ? "(test in progress)" : ""); seq_printf(m, "rcutorture update version number: %lu\n", rcutorture_vernum); return 0; } static int rcutorture_open(struct inode *inode, struct file *file) { return single_open(file, show_rcutorture, NULL); } static const struct file_operations rcutorture_fops = { .owner = THIS_MODULE, .open = rcutorture_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static struct dentry *rcudir; static int __init rcutree_trace_init(void) { struct dentry *retval; rcudir = debugfs_create_dir("rcu", NULL); if (!rcudir) goto free_out; retval = debugfs_create_file("rcudata", 0444, rcudir, NULL, &rcudata_fops); if (!retval) goto free_out; retval = debugfs_create_file("rcudata.csv", 0444, rcudir, NULL, &rcudata_csv_fops); if (!retval) goto free_out; if (rcu_boost_trace_create_file(rcudir)) goto free_out; retval = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops); if (!retval) goto free_out; retval = debugfs_create_file("rcuhier", 0444, rcudir, NULL, &rcuhier_fops); if (!retval) goto free_out; retval = debugfs_create_file("rcu_pending", 0444, rcudir, NULL, &rcu_pending_fops); if (!retval) goto free_out; retval = debugfs_create_file("rcutorture", 0444, rcudir, NULL, &rcutorture_fops); if (!retval) goto free_out; return 0; free_out: debugfs_remove_recursive(rcudir); return 1; } static void __exit rcutree_trace_cleanup(void) { debugfs_remove_recursive(rcudir); } module_init(rcutree_trace_init); module_exit(rcutree_trace_cleanup); MODULE_AUTHOR("Paul E. McKenney"); MODULE_DESCRIPTION("Read-Copy Update tracing for hierarchical implementation"); MODULE_LICENSE("GPL");
gpl-2.0
mephistophilis/samsung_nowplus_kernel
drivers/acpi/acpica/exmisc.c
946
20207
/****************************************************************************** * * Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes * *****************************************************************************/ /* * Copyright (C) 2000 - 2010, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acinterp.h" #include "amlcode.h" #include "amlresrc.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exmisc") /******************************************************************************* * * FUNCTION: acpi_ex_get_object_reference * * PARAMETERS: obj_desc - Create a reference to this object * return_desc - Where to store the reference * walk_state - Current state * * RETURN: Status * * DESCRIPTION: Obtain and return a "reference" to the target object * Common code for the ref_of_op and the cond_ref_of_op. 
* ******************************************************************************/ acpi_status acpi_ex_get_object_reference(union acpi_operand_object *obj_desc, union acpi_operand_object **return_desc, struct acpi_walk_state *walk_state) { union acpi_operand_object *reference_obj; union acpi_operand_object *referenced_obj; ACPI_FUNCTION_TRACE_PTR(ex_get_object_reference, obj_desc); *return_desc = NULL; switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) { case ACPI_DESC_TYPE_OPERAND: if (obj_desc->common.type != ACPI_TYPE_LOCAL_REFERENCE) { return_ACPI_STATUS(AE_AML_OPERAND_TYPE); } /* * Must be a reference to a Local or Arg */ switch (obj_desc->reference.class) { case ACPI_REFCLASS_LOCAL: case ACPI_REFCLASS_ARG: case ACPI_REFCLASS_DEBUG: /* The referenced object is the pseudo-node for the local/arg */ referenced_obj = obj_desc->reference.object; break; default: ACPI_ERROR((AE_INFO, "Unknown Reference Class 0x%2.2X", obj_desc->reference.class)); return_ACPI_STATUS(AE_AML_INTERNAL); } break; case ACPI_DESC_TYPE_NAMED: /* * A named reference that has already been resolved to a Node */ referenced_obj = obj_desc; break; default: ACPI_ERROR((AE_INFO, "Invalid descriptor type 0x%X", ACPI_GET_DESCRIPTOR_TYPE(obj_desc))); return_ACPI_STATUS(AE_TYPE); } /* Create a new reference object */ reference_obj = acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_REFERENCE); if (!reference_obj) { return_ACPI_STATUS(AE_NO_MEMORY); } reference_obj->reference.class = ACPI_REFCLASS_REFOF; reference_obj->reference.object = referenced_obj; *return_desc = reference_obj; ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Object %p Type [%s], returning Reference %p\n", obj_desc, acpi_ut_get_object_type_name(obj_desc), *return_desc)); return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ex_concat_template * * PARAMETERS: Operand0 - First source object * Operand1 - Second source object * actual_return_desc - Where to place the return object * 
walk_state - Current walk state * * RETURN: Status * * DESCRIPTION: Concatenate two resource templates * ******************************************************************************/ acpi_status acpi_ex_concat_template(union acpi_operand_object *operand0, union acpi_operand_object *operand1, union acpi_operand_object **actual_return_desc, struct acpi_walk_state *walk_state) { acpi_status status; union acpi_operand_object *return_desc; u8 *new_buf; u8 *end_tag; acpi_size length0; acpi_size length1; acpi_size new_length; ACPI_FUNCTION_TRACE(ex_concat_template); /* * Find the end_tag descriptor in each resource template. * Note1: returned pointers point TO the end_tag, not past it. * Note2: zero-length buffers are allowed; treated like one end_tag */ /* Get the length of the first resource template */ status = acpi_ut_get_resource_end_tag(operand0, &end_tag); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } length0 = ACPI_PTR_DIFF(end_tag, operand0->buffer.pointer); /* Get the length of the second resource template */ status = acpi_ut_get_resource_end_tag(operand1, &end_tag); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } length1 = ACPI_PTR_DIFF(end_tag, operand1->buffer.pointer); /* Combine both lengths, minimum size will be 2 for end_tag */ new_length = length0 + length1 + sizeof(struct aml_resource_end_tag); /* Create a new buffer object for the result (with one end_tag) */ return_desc = acpi_ut_create_buffer_object(new_length); if (!return_desc) { return_ACPI_STATUS(AE_NO_MEMORY); } /* * Copy the templates to the new buffer, 0 first, then 1 follows. One * end_tag descriptor is copied from Operand1. 
*/ new_buf = return_desc->buffer.pointer; ACPI_MEMCPY(new_buf, operand0->buffer.pointer, length0); ACPI_MEMCPY(new_buf + length0, operand1->buffer.pointer, length1); /* Insert end_tag and set the checksum to zero, means "ignore checksum" */ new_buf[new_length - 1] = 0; new_buf[new_length - 2] = ACPI_RESOURCE_NAME_END_TAG | 1; /* Return the completed resource template */ *actual_return_desc = return_desc; return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ex_do_concatenate * * PARAMETERS: Operand0 - First source object * Operand1 - Second source object * actual_return_desc - Where to place the return object * walk_state - Current walk state * * RETURN: Status * * DESCRIPTION: Concatenate two objects OF THE SAME TYPE. * ******************************************************************************/ acpi_status acpi_ex_do_concatenate(union acpi_operand_object *operand0, union acpi_operand_object *operand1, union acpi_operand_object **actual_return_desc, struct acpi_walk_state *walk_state) { union acpi_operand_object *local_operand1 = operand1; union acpi_operand_object *return_desc; char *new_buf; acpi_status status; ACPI_FUNCTION_TRACE(ex_do_concatenate); /* * Convert the second operand if necessary. The first operand * determines the type of the second operand, (See the Data Types * section of the ACPI specification.) Both object types are * guaranteed to be either Integer/String/Buffer by the operand * resolution mechanism. 
*/ switch (operand0->common.type) { case ACPI_TYPE_INTEGER: status = acpi_ex_convert_to_integer(operand1, &local_operand1, 16); break; case ACPI_TYPE_STRING: status = acpi_ex_convert_to_string(operand1, &local_operand1, ACPI_IMPLICIT_CONVERT_HEX); break; case ACPI_TYPE_BUFFER: status = acpi_ex_convert_to_buffer(operand1, &local_operand1); break; default: ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X", operand0->common.type)); status = AE_AML_INTERNAL; } if (ACPI_FAILURE(status)) { goto cleanup; } /* * Both operands are now known to be the same object type * (Both are Integer, String, or Buffer), and we can now perform the * concatenation. */ /* * There are three cases to handle: * * 1) Two Integers concatenated to produce a new Buffer * 2) Two Strings concatenated to produce a new String * 3) Two Buffers concatenated to produce a new Buffer */ switch (operand0->common.type) { case ACPI_TYPE_INTEGER: /* Result of two Integers is a Buffer */ /* Need enough buffer space for two integers */ return_desc = acpi_ut_create_buffer_object((acpi_size) ACPI_MUL_2 (acpi_gbl_integer_byte_width)); if (!return_desc) { status = AE_NO_MEMORY; goto cleanup; } new_buf = (char *)return_desc->buffer.pointer; /* Copy the first integer, LSB first */ ACPI_MEMCPY(new_buf, &operand0->integer.value, acpi_gbl_integer_byte_width); /* Copy the second integer (LSB first) after the first */ ACPI_MEMCPY(new_buf + acpi_gbl_integer_byte_width, &local_operand1->integer.value, acpi_gbl_integer_byte_width); break; case ACPI_TYPE_STRING: /* Result of two Strings is a String */ return_desc = acpi_ut_create_string_object(((acpi_size) operand0->string. 
length + local_operand1-> string.length)); if (!return_desc) { status = AE_NO_MEMORY; goto cleanup; } new_buf = return_desc->string.pointer; /* Concatenate the strings */ ACPI_STRCPY(new_buf, operand0->string.pointer); ACPI_STRCPY(new_buf + operand0->string.length, local_operand1->string.pointer); break; case ACPI_TYPE_BUFFER: /* Result of two Buffers is a Buffer */ return_desc = acpi_ut_create_buffer_object(((acpi_size) operand0->buffer. length + local_operand1-> buffer.length)); if (!return_desc) { status = AE_NO_MEMORY; goto cleanup; } new_buf = (char *)return_desc->buffer.pointer; /* Concatenate the buffers */ ACPI_MEMCPY(new_buf, operand0->buffer.pointer, operand0->buffer.length); ACPI_MEMCPY(new_buf + operand0->buffer.length, local_operand1->buffer.pointer, local_operand1->buffer.length); break; default: /* Invalid object type, should not happen here */ ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X", operand0->common.type)); status = AE_AML_INTERNAL; goto cleanup; } *actual_return_desc = return_desc; cleanup: if (local_operand1 != operand1) { acpi_ut_remove_reference(local_operand1); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_do_math_op * * PARAMETERS: Opcode - AML opcode * Integer0 - Integer operand #0 * Integer1 - Integer operand #1 * * RETURN: Integer result of the operation * * DESCRIPTION: Execute a math AML opcode. The purpose of having all of the * math functions here is to prevent a lot of pointer dereferencing * to obtain the operands. 
* ******************************************************************************/ u64 acpi_ex_do_math_op(u16 opcode, u64 integer0, u64 integer1) { ACPI_FUNCTION_ENTRY(); switch (opcode) { case AML_ADD_OP: /* Add (Integer0, Integer1, Result) */ return (integer0 + integer1); case AML_BIT_AND_OP: /* And (Integer0, Integer1, Result) */ return (integer0 & integer1); case AML_BIT_NAND_OP: /* NAnd (Integer0, Integer1, Result) */ return (~(integer0 & integer1)); case AML_BIT_OR_OP: /* Or (Integer0, Integer1, Result) */ return (integer0 | integer1); case AML_BIT_NOR_OP: /* NOr (Integer0, Integer1, Result) */ return (~(integer0 | integer1)); case AML_BIT_XOR_OP: /* XOr (Integer0, Integer1, Result) */ return (integer0 ^ integer1); case AML_MULTIPLY_OP: /* Multiply (Integer0, Integer1, Result) */ return (integer0 * integer1); case AML_SHIFT_LEFT_OP: /* shift_left (Operand, shift_count, Result) */ /* * We need to check if the shiftcount is larger than the integer bit * width since the behavior of this is not well-defined in the C language. */ if (integer1 >= acpi_gbl_integer_bit_width) { return (0); } return (integer0 << integer1); case AML_SHIFT_RIGHT_OP: /* shift_right (Operand, shift_count, Result) */ /* * We need to check if the shiftcount is larger than the integer bit * width since the behavior of this is not well-defined in the C language. */ if (integer1 >= acpi_gbl_integer_bit_width) { return (0); } return (integer0 >> integer1); case AML_SUBTRACT_OP: /* Subtract (Integer0, Integer1, Result) */ return (integer0 - integer1); default: return (0); } } /******************************************************************************* * * FUNCTION: acpi_ex_do_logical_numeric_op * * PARAMETERS: Opcode - AML opcode * Integer0 - Integer operand #0 * Integer1 - Integer operand #1 * logical_result - TRUE/FALSE result of the operation * * RETURN: Status * * DESCRIPTION: Execute a logical "Numeric" AML opcode. 
For these Numeric * operators (LAnd and LOr), both operands must be integers. * * Note: cleanest machine code seems to be produced by the code * below, rather than using statements of the form: * Result = (Integer0 && Integer1); * ******************************************************************************/ acpi_status acpi_ex_do_logical_numeric_op(u16 opcode, u64 integer0, u64 integer1, u8 *logical_result) { acpi_status status = AE_OK; u8 local_result = FALSE; ACPI_FUNCTION_TRACE(ex_do_logical_numeric_op); switch (opcode) { case AML_LAND_OP: /* LAnd (Integer0, Integer1) */ if (integer0 && integer1) { local_result = TRUE; } break; case AML_LOR_OP: /* LOr (Integer0, Integer1) */ if (integer0 || integer1) { local_result = TRUE; } break; default: status = AE_AML_INTERNAL; break; } /* Return the logical result and status */ *logical_result = local_result; return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_do_logical_op * * PARAMETERS: Opcode - AML opcode * Operand0 - operand #0 * Operand1 - operand #1 * logical_result - TRUE/FALSE result of the operation * * RETURN: Status * * DESCRIPTION: Execute a logical AML opcode. The purpose of having all of the * functions here is to prevent a lot of pointer dereferencing * to obtain the operands and to simplify the generation of the * logical value. For the Numeric operators (LAnd and LOr), both * operands must be integers. For the other logical operators, * operands can be any combination of Integer/String/Buffer. The * first operand determines the type to which the second operand * will be converted. 
* * Note: cleanest machine code seems to be produced by the code * below, rather than using statements of the form: * Result = (Operand0 == Operand1); * ******************************************************************************/ acpi_status acpi_ex_do_logical_op(u16 opcode, union acpi_operand_object *operand0, union acpi_operand_object *operand1, u8 * logical_result) { union acpi_operand_object *local_operand1 = operand1; u64 integer0; u64 integer1; u32 length0; u32 length1; acpi_status status = AE_OK; u8 local_result = FALSE; int compare; ACPI_FUNCTION_TRACE(ex_do_logical_op); /* * Convert the second operand if necessary. The first operand * determines the type of the second operand, (See the Data Types * section of the ACPI 3.0+ specification.) Both object types are * guaranteed to be either Integer/String/Buffer by the operand * resolution mechanism. */ switch (operand0->common.type) { case ACPI_TYPE_INTEGER: status = acpi_ex_convert_to_integer(operand1, &local_operand1, 16); break; case ACPI_TYPE_STRING: status = acpi_ex_convert_to_string(operand1, &local_operand1, ACPI_IMPLICIT_CONVERT_HEX); break; case ACPI_TYPE_BUFFER: status = acpi_ex_convert_to_buffer(operand1, &local_operand1); break; default: status = AE_AML_INTERNAL; break; } if (ACPI_FAILURE(status)) { goto cleanup; } /* * Two cases: 1) Both Integers, 2) Both Strings or Buffers */ if (operand0->common.type == ACPI_TYPE_INTEGER) { /* * 1) Both operands are of type integer * Note: local_operand1 may have changed above */ integer0 = operand0->integer.value; integer1 = local_operand1->integer.value; switch (opcode) { case AML_LEQUAL_OP: /* LEqual (Operand0, Operand1) */ if (integer0 == integer1) { local_result = TRUE; } break; case AML_LGREATER_OP: /* LGreater (Operand0, Operand1) */ if (integer0 > integer1) { local_result = TRUE; } break; case AML_LLESS_OP: /* LLess (Operand0, Operand1) */ if (integer0 < integer1) { local_result = TRUE; } break; default: status = AE_AML_INTERNAL; break; } } else { /* 
* 2) Both operands are Strings or both are Buffers * Note: Code below takes advantage of common Buffer/String * object fields. local_operand1 may have changed above. Use * memcmp to handle nulls in buffers. */ length0 = operand0->buffer.length; length1 = local_operand1->buffer.length; /* Lexicographic compare: compare the data bytes */ compare = ACPI_MEMCMP(operand0->buffer.pointer, local_operand1->buffer.pointer, (length0 > length1) ? length1 : length0); switch (opcode) { case AML_LEQUAL_OP: /* LEqual (Operand0, Operand1) */ /* Length and all bytes must be equal */ if ((length0 == length1) && (compare == 0)) { /* Length and all bytes match ==> TRUE */ local_result = TRUE; } break; case AML_LGREATER_OP: /* LGreater (Operand0, Operand1) */ if (compare > 0) { local_result = TRUE; goto cleanup; /* TRUE */ } if (compare < 0) { goto cleanup; /* FALSE */ } /* Bytes match (to shortest length), compare lengths */ if (length0 > length1) { local_result = TRUE; } break; case AML_LLESS_OP: /* LLess (Operand0, Operand1) */ if (compare > 0) { goto cleanup; /* FALSE */ } if (compare < 0) { local_result = TRUE; goto cleanup; /* TRUE */ } /* Bytes match (to shortest length), compare lengths */ if (length0 < length1) { local_result = TRUE; } break; default: status = AE_AML_INTERNAL; break; } } cleanup: /* New object was created if implicit conversion performed - delete */ if (local_operand1 != operand1) { acpi_ut_remove_reference(local_operand1); } /* Return the logical result and status */ *logical_result = local_result; return_ACPI_STATUS(status); }
gpl-2.0
rt-linux/linux
drivers/media/dvb-frontends/stb6100.c
1714
16646
/* STB6100 Silicon Tuner Copyright (C) Manu Abraham (abraham.manu@gmail.com) Copyright (C) ST Microelectronics This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/string.h> #include "dvb_frontend.h" #include "stb6100.h" static unsigned int verbose; module_param(verbose, int, 0644); /* Max transfer size done by I2C transfer functions */ #define MAX_XFER_SIZE 64 #define FE_ERROR 0 #define FE_NOTICE 1 #define FE_INFO 2 #define FE_DEBUG 3 #define dprintk(x, y, z, format, arg...) 
do { \ if (z) { \ if ((x > FE_ERROR) && (x > y)) \ printk(KERN_ERR "%s: " format "\n", __func__ , ##arg); \ else if ((x > FE_NOTICE) && (x > y)) \ printk(KERN_NOTICE "%s: " format "\n", __func__ , ##arg); \ else if ((x > FE_INFO) && (x > y)) \ printk(KERN_INFO "%s: " format "\n", __func__ , ##arg); \ else if ((x > FE_DEBUG) && (x > y)) \ printk(KERN_DEBUG "%s: " format "\n", __func__ , ##arg); \ } else { \ if (x > y) \ printk(format, ##arg); \ } \ } while (0) struct stb6100_lkup { u32 val_low; u32 val_high; u8 reg; }; static int stb6100_release(struct dvb_frontend *fe); static const struct stb6100_lkup lkup[] = { { 0, 950000, 0x0a }, { 950000, 1000000, 0x0a }, { 1000000, 1075000, 0x0c }, { 1075000, 1200000, 0x00 }, { 1200000, 1300000, 0x01 }, { 1300000, 1370000, 0x02 }, { 1370000, 1470000, 0x04 }, { 1470000, 1530000, 0x05 }, { 1530000, 1650000, 0x06 }, { 1650000, 1800000, 0x08 }, { 1800000, 1950000, 0x0a }, { 1950000, 2150000, 0x0c }, { 2150000, 9999999, 0x0c }, { 0, 0, 0x00 } }; /* Register names for easy debugging. */ static const char *stb6100_regnames[] = { [STB6100_LD] = "LD", [STB6100_VCO] = "VCO", [STB6100_NI] = "NI", [STB6100_NF_LSB] = "NF", [STB6100_K] = "K", [STB6100_G] = "G", [STB6100_F] = "F", [STB6100_DLB] = "DLB", [STB6100_TEST1] = "TEST1", [STB6100_FCCK] = "FCCK", [STB6100_LPEN] = "LPEN", [STB6100_TEST3] = "TEST3", }; /* Template for normalisation, i.e. setting unused or undocumented * bits as required according to the documentation. */ struct stb6100_regmask { u8 mask; u8 set; }; static const struct stb6100_regmask stb6100_template[] = { [STB6100_LD] = { 0xff, 0x00 }, [STB6100_VCO] = { 0xff, 0x00 }, [STB6100_NI] = { 0xff, 0x00 }, [STB6100_NF_LSB] = { 0xff, 0x00 }, [STB6100_K] = { 0xc7, 0x38 }, [STB6100_G] = { 0xef, 0x10 }, [STB6100_F] = { 0x1f, 0xc0 }, [STB6100_DLB] = { 0x38, 0xc4 }, [STB6100_TEST1] = { 0x00, 0x8f }, [STB6100_FCCK] = { 0x40, 0x0d }, [STB6100_LPEN] = { 0xf0, 0x0b }, [STB6100_TEST3] = { 0x00, 0xde }, }; /* * Currently unused. 
Some boards might need it in the future */ static inline void stb6100_normalise_regs(u8 regs[]) { int i; for (i = 0; i < STB6100_NUMREGS; i++) regs[i] = (regs[i] & stb6100_template[i].mask) | stb6100_template[i].set; } static int stb6100_read_regs(struct stb6100_state *state, u8 regs[]) { int rc; struct i2c_msg msg = { .addr = state->config->tuner_address, .flags = I2C_M_RD, .buf = regs, .len = STB6100_NUMREGS }; rc = i2c_transfer(state->i2c, &msg, 1); if (unlikely(rc != 1)) { dprintk(verbose, FE_ERROR, 1, "Read (0x%x) err, rc=[%d]", state->config->tuner_address, rc); return -EREMOTEIO; } if (unlikely(verbose > FE_DEBUG)) { int i; dprintk(verbose, FE_DEBUG, 1, " Read from 0x%02x", state->config->tuner_address); for (i = 0; i < STB6100_NUMREGS; i++) dprintk(verbose, FE_DEBUG, 1, " %s: 0x%02x", stb6100_regnames[i], regs[i]); } return 0; } static int stb6100_read_reg(struct stb6100_state *state, u8 reg) { u8 regs[STB6100_NUMREGS]; struct i2c_msg msg = { .addr = state->config->tuner_address + reg, .flags = I2C_M_RD, .buf = regs, .len = 1 }; i2c_transfer(state->i2c, &msg, 1); if (unlikely(reg >= STB6100_NUMREGS)) { dprintk(verbose, FE_ERROR, 1, "Invalid register offset 0x%x", reg); return -EINVAL; } if (unlikely(verbose > FE_DEBUG)) { dprintk(verbose, FE_DEBUG, 1, " Read from 0x%02x", state->config->tuner_address); dprintk(verbose, FE_DEBUG, 1, " %s: 0x%02x", stb6100_regnames[reg], regs[0]); } return (unsigned int)regs[0]; } static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int start, int len) { int rc; u8 cmdbuf[MAX_XFER_SIZE]; struct i2c_msg msg = { .addr = state->config->tuner_address, .flags = 0, .buf = cmdbuf, .len = len + 1 }; if (1 + len > sizeof(cmdbuf)) { printk(KERN_WARNING "%s: i2c wr: len=%d is too big!\n", KBUILD_MODNAME, len); return -EINVAL; } if (unlikely(start < 1 || start + len > STB6100_NUMREGS)) { dprintk(verbose, FE_ERROR, 1, "Invalid register range %d:%d", start, len); return -EINVAL; } memcpy(&cmdbuf[1], buf, len); 
cmdbuf[0] = start; if (unlikely(verbose > FE_DEBUG)) { int i; dprintk(verbose, FE_DEBUG, 1, " Write @ 0x%02x: [%d:%d]", state->config->tuner_address, start, len); for (i = 0; i < len; i++) dprintk(verbose, FE_DEBUG, 1, " %s: 0x%02x", stb6100_regnames[start + i], buf[i]); } rc = i2c_transfer(state->i2c, &msg, 1); if (unlikely(rc != 1)) { dprintk(verbose, FE_ERROR, 1, "(0x%x) write err [%d:%d], rc=[%d]", (unsigned int)state->config->tuner_address, start, len, rc); return -EREMOTEIO; } return 0; } static int stb6100_write_reg(struct stb6100_state *state, u8 reg, u8 data) { if (unlikely(reg >= STB6100_NUMREGS)) { dprintk(verbose, FE_ERROR, 1, "Invalid register offset 0x%x", reg); return -EREMOTEIO; } data = (data & stb6100_template[reg].mask) | stb6100_template[reg].set; return stb6100_write_reg_range(state, &data, reg, 1); } static int stb6100_get_status(struct dvb_frontend *fe, u32 *status) { int rc; struct stb6100_state *state = fe->tuner_priv; rc = stb6100_read_reg(state, STB6100_LD); if (rc < 0) { dprintk(verbose, FE_ERROR, 1, "%s failed", __func__); return rc; } return (rc & STB6100_LD_LOCK) ? 
TUNER_STATUS_LOCKED : 0; } static int stb6100_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth) { int rc; u8 f; struct stb6100_state *state = fe->tuner_priv; rc = stb6100_read_reg(state, STB6100_F); if (rc < 0) return rc; f = rc & STB6100_F_F; state->status.bandwidth = (f + 5) * 2000; /* x2 for ZIF */ *bandwidth = state->bandwidth = state->status.bandwidth * 1000; dprintk(verbose, FE_DEBUG, 1, "bandwidth = %u Hz", state->bandwidth); return 0; } static int stb6100_set_bandwidth(struct dvb_frontend *fe, u32 bandwidth) { u32 tmp; int rc; struct stb6100_state *state = fe->tuner_priv; dprintk(verbose, FE_DEBUG, 1, "set bandwidth to %u Hz", bandwidth); bandwidth /= 2; /* ZIF */ if (bandwidth >= 36000000) /* F[4:0] BW/2 max =31+5=36 mhz for F=31 */ tmp = 31; else if (bandwidth <= 5000000) /* bw/2 min = 5Mhz for F=0 */ tmp = 0; else /* if 5 < bw/2 < 36 */ tmp = (bandwidth + 500000) / 1000000 - 5; /* Turn on LPF bandwidth setting clock control, * set bandwidth, wait 10ms, turn off. */ rc = stb6100_write_reg(state, STB6100_FCCK, 0x0d | STB6100_FCCK_FCCK); if (rc < 0) return rc; rc = stb6100_write_reg(state, STB6100_F, 0xc0 | tmp); if (rc < 0) return rc; msleep(5); /* This is dangerous as another (related) thread may start */ rc = stb6100_write_reg(state, STB6100_FCCK, 0x0d); if (rc < 0) return rc; msleep(10); /* This is dangerous as another (related) thread may start */ return 0; } static int stb6100_get_frequency(struct dvb_frontend *fe, u32 *frequency) { int rc; u32 nint, nfrac, fvco; int psd2, odiv; struct stb6100_state *state = fe->tuner_priv; u8 regs[STB6100_NUMREGS]; rc = stb6100_read_regs(state, regs); if (rc < 0) return rc; odiv = (regs[STB6100_VCO] & STB6100_VCO_ODIV) >> STB6100_VCO_ODIV_SHIFT; psd2 = (regs[STB6100_K] & STB6100_K_PSD2) >> STB6100_K_PSD2_SHIFT; nint = regs[STB6100_NI]; nfrac = ((regs[STB6100_K] & STB6100_K_NF_MSB) << 8) | regs[STB6100_NF_LSB]; fvco = (nfrac * state->reference >> (9 - psd2)) + (nint * state->reference << psd2); *frequency = 
state->frequency = fvco >> (odiv + 1); dprintk(verbose, FE_DEBUG, 1, "frequency = %u kHz, odiv = %u, psd2 = %u, fxtal = %u kHz, fvco = %u kHz, N(I) = %u, N(F) = %u", state->frequency, odiv, psd2, state->reference, fvco, nint, nfrac); return 0; } static int stb6100_set_frequency(struct dvb_frontend *fe, u32 frequency) { int rc; const struct stb6100_lkup *ptr; struct stb6100_state *state = fe->tuner_priv; struct dtv_frontend_properties *p = &fe->dtv_property_cache; u32 srate = 0, fvco, nint, nfrac; u8 regs[STB6100_NUMREGS]; u8 g, psd2, odiv; dprintk(verbose, FE_DEBUG, 1, "Version 2010-8-14 13:51"); if (fe->ops.get_frontend) { dprintk(verbose, FE_DEBUG, 1, "Get frontend parameters"); fe->ops.get_frontend(fe); } srate = p->symbol_rate; /* Set up tuner cleanly, LPF calibration on */ rc = stb6100_write_reg(state, STB6100_FCCK, 0x4d | STB6100_FCCK_FCCK); if (rc < 0) return rc; /* allow LPF calibration */ /* PLL Loop disabled, bias on, VCO on, synth on */ regs[STB6100_LPEN] = 0xeb; rc = stb6100_write_reg(state, STB6100_LPEN, regs[STB6100_LPEN]); if (rc < 0) return rc; /* Program the registers with their data values */ /* VCO divide ratio (LO divide ratio, VCO prescaler enable). */ if (frequency <= 1075000) odiv = 1; else odiv = 0; /* VCO enabled, search clock off as per LL3.7, 3.4.1 */ regs[STB6100_VCO] = 0xe0 | (odiv << STB6100_VCO_ODIV_SHIFT); /* OSM */ for (ptr = lkup; (ptr->val_high != 0) && !CHKRANGE(frequency, ptr->val_low, ptr->val_high); ptr++); if (ptr->val_high == 0) { printk(KERN_ERR "%s: frequency out of range: %u kHz\n", __func__, frequency); return -EINVAL; } regs[STB6100_VCO] = (regs[STB6100_VCO] & ~STB6100_VCO_OSM) | ptr->reg; rc = stb6100_write_reg(state, STB6100_VCO, regs[STB6100_VCO]); if (rc < 0) return rc; if ((frequency > 1075000) && (frequency <= 1325000)) psd2 = 0; else psd2 = 1; /* F(VCO) = F(LO) * (ODIV == 0 ? 2 : 4) */ fvco = frequency << (1 + odiv); /* N(I) = floor(f(VCO) / (f(XTAL) * (PSD2 ? 
2 : 1))) */ nint = fvco / (state->reference << psd2); /* N(F) = round(f(VCO) / f(XTAL) * (PSD2 ? 2 : 1) - N(I)) * 2 ^ 9 */ nfrac = DIV_ROUND_CLOSEST((fvco - (nint * state->reference << psd2)) << (9 - psd2), state->reference); /* NI */ regs[STB6100_NI] = nint; rc = stb6100_write_reg(state, STB6100_NI, regs[STB6100_NI]); if (rc < 0) return rc; /* NF */ regs[STB6100_NF_LSB] = nfrac; rc = stb6100_write_reg(state, STB6100_NF_LSB, regs[STB6100_NF_LSB]); if (rc < 0) return rc; /* K */ regs[STB6100_K] = (0x38 & ~STB6100_K_PSD2) | (psd2 << STB6100_K_PSD2_SHIFT); regs[STB6100_K] = (regs[STB6100_K] & ~STB6100_K_NF_MSB) | ((nfrac >> 8) & STB6100_K_NF_MSB); rc = stb6100_write_reg(state, STB6100_K, regs[STB6100_K]); if (rc < 0) return rc; /* G Baseband gain. */ if (srate >= 15000000) g = 9; /* +4 dB */ else if (srate >= 5000000) g = 11; /* +8 dB */ else g = 14; /* +14 dB */ regs[STB6100_G] = (0x10 & ~STB6100_G_G) | g; regs[STB6100_G] &= ~STB6100_G_GCT; /* mask GCT */ regs[STB6100_G] |= (1 << 5); /* 2Vp-p Mode */ rc = stb6100_write_reg(state, STB6100_G, regs[STB6100_G]); if (rc < 0) return rc; /* F we don't write as it is set up in BW set */ /* DLB set DC servo loop BW to 160Hz (LLA 3.8 / 2.1) */ regs[STB6100_DLB] = 0xcc; rc = stb6100_write_reg(state, STB6100_DLB, regs[STB6100_DLB]); if (rc < 0) return rc; dprintk(verbose, FE_DEBUG, 1, "frequency = %u, srate = %u, g = %u, odiv = %u, psd2 = %u, fxtal = %u, osm = %u, fvco = %u, N(I) = %u, N(F) = %u", frequency, srate, (unsigned int)g, (unsigned int)odiv, (unsigned int)psd2, state->reference, ptr->reg, fvco, nint, nfrac); /* Set up the test registers */ regs[STB6100_TEST1] = 0x8f; rc = stb6100_write_reg(state, STB6100_TEST1, regs[STB6100_TEST1]); if (rc < 0) return rc; regs[STB6100_TEST3] = 0xde; rc = stb6100_write_reg(state, STB6100_TEST3, regs[STB6100_TEST3]); if (rc < 0) return rc; /* Bring up tuner according to LLA 3.7 3.4.1, step 2 */ regs[STB6100_LPEN] = 0xfb; /* PLL Loop enabled, bias on, VCO on, synth on */ rc = 
stb6100_write_reg(state, STB6100_LPEN, regs[STB6100_LPEN]); if (rc < 0) return rc; msleep(2); /* Bring up tuner according to LLA 3.7 3.4.1, step 3 */ regs[STB6100_VCO] &= ~STB6100_VCO_OCK; /* VCO fast search */ rc = stb6100_write_reg(state, STB6100_VCO, regs[STB6100_VCO]); if (rc < 0) return rc; msleep(10); /* This is dangerous as another (related) thread may start */ /* wait for LO to lock */ regs[STB6100_VCO] &= ~STB6100_VCO_OSCH; /* vco search disabled */ regs[STB6100_VCO] |= STB6100_VCO_OCK; /* search clock off */ rc = stb6100_write_reg(state, STB6100_VCO, regs[STB6100_VCO]); if (rc < 0) return rc; rc = stb6100_write_reg(state, STB6100_FCCK, 0x0d); if (rc < 0) return rc; /* Stop LPF calibration */ msleep(10); /* This is dangerous as another (related) thread may start */ /* wait for stabilisation, (should not be necessary) */ return 0; } static int stb6100_sleep(struct dvb_frontend *fe) { /* TODO: power down */ return 0; } static int stb6100_init(struct dvb_frontend *fe) { struct stb6100_state *state = fe->tuner_priv; struct tuner_state *status = &state->status; status->tunerstep = 125000; status->ifreq = 0; status->refclock = 27000000; /* Hz */ status->iqsense = 1; status->bandwidth = 36000; /* kHz */ state->bandwidth = status->bandwidth * 1000; /* Hz */ state->reference = status->refclock / 1000; /* kHz */ /* Set default bandwidth. 
Modified, PN 13-May-10 */ return 0; } static int stb6100_get_state(struct dvb_frontend *fe, enum tuner_param param, struct tuner_state *state) { switch (param) { case DVBFE_TUNER_FREQUENCY: stb6100_get_frequency(fe, &state->frequency); break; case DVBFE_TUNER_TUNERSTEP: break; case DVBFE_TUNER_IFFREQ: break; case DVBFE_TUNER_BANDWIDTH: stb6100_get_bandwidth(fe, &state->bandwidth); break; case DVBFE_TUNER_REFCLOCK: break; default: break; } return 0; } static int stb6100_set_state(struct dvb_frontend *fe, enum tuner_param param, struct tuner_state *state) { struct stb6100_state *tstate = fe->tuner_priv; switch (param) { case DVBFE_TUNER_FREQUENCY: stb6100_set_frequency(fe, state->frequency); tstate->frequency = state->frequency; break; case DVBFE_TUNER_TUNERSTEP: break; case DVBFE_TUNER_IFFREQ: break; case DVBFE_TUNER_BANDWIDTH: stb6100_set_bandwidth(fe, state->bandwidth); tstate->bandwidth = state->bandwidth; break; case DVBFE_TUNER_REFCLOCK: break; default: break; } return 0; } static struct dvb_tuner_ops stb6100_ops = { .info = { .name = "STB6100 Silicon Tuner", .frequency_min = 950000, .frequency_max = 2150000, .frequency_step = 0, }, .init = stb6100_init, .sleep = stb6100_sleep, .get_status = stb6100_get_status, .get_state = stb6100_get_state, .set_state = stb6100_set_state, .release = stb6100_release }; struct dvb_frontend *stb6100_attach(struct dvb_frontend *fe, const struct stb6100_config *config, struct i2c_adapter *i2c) { struct stb6100_state *state = NULL; state = kzalloc(sizeof (struct stb6100_state), GFP_KERNEL); if (!state) return NULL; state->config = config; state->i2c = i2c; state->frontend = fe; state->reference = config->refclock / 1000; /* kHz */ fe->tuner_priv = state; fe->ops.tuner_ops = stb6100_ops; printk("%s: Attaching STB6100 \n", __func__); return fe; } static int stb6100_release(struct dvb_frontend *fe) { struct stb6100_state *state = fe->tuner_priv; fe->tuner_priv = NULL; kfree(state); return 0; } EXPORT_SYMBOL(stb6100_attach); 
MODULE_PARM_DESC(verbose, "Set Verbosity level"); MODULE_AUTHOR("Manu Abraham"); MODULE_DESCRIPTION("STB6100 Silicon tuner"); MODULE_LICENSE("GPL");
gpl-2.0
proximo256/kernel_samsung_exynos7420
drivers/media/pci/cx88/cx88-input.c
2738
16734
/* * * Device driver for GPIO attached remote control interfaces * on Conexant 2388x based TV/DVB cards. * * Copyright (c) 2003 Pavel Machek * Copyright (c) 2004 Gerd Knorr * Copyright (c) 2004, 2005 Chris Pascoe * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/hrtimer.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/module.h> #include "cx88.h" #include <media/rc-core.h> #define MODULE_NAME "cx88xx" /* ---------------------------------------------------------------------- */ struct cx88_IR { struct cx88_core *core; struct rc_dev *dev; int users; char name[32]; char phys[32]; /* sample from gpio pin 16 */ u32 sampling; /* poll external decoder */ int polling; struct hrtimer timer; u32 gpio_addr; u32 last_gpio; u32 mask_keycode; u32 mask_keydown; u32 mask_keyup; }; static unsigned ir_samplerate = 4; module_param(ir_samplerate, uint, 0444); MODULE_PARM_DESC(ir_samplerate, "IR samplerate in kHz, 1 - 20, default 4"); static int ir_debug; module_param(ir_debug, int, 0644); /* debug level [IR] */ MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]"); #define ir_dprintk(fmt, arg...) if (ir_debug) \ printk(KERN_DEBUG "%s IR: " fmt , ir->core->name , ##arg) #define dprintk(fmt, arg...) 
if (ir_debug) \ printk(KERN_DEBUG "cx88 IR: " fmt , ##arg) /* ---------------------------------------------------------------------- */ static void cx88_ir_handle_key(struct cx88_IR *ir) { struct cx88_core *core = ir->core; u32 gpio, data, auxgpio; /* read gpio value */ gpio = cx_read(ir->gpio_addr); switch (core->boardnr) { case CX88_BOARD_NPGTECH_REALTV_TOP10FM: /* This board apparently uses a combination of 2 GPIO to represent the keys. Additionally, the second GPIO can be used for parity. Example: for key "5" gpio = 0x758, auxgpio = 0xe5 or 0xf5 for key "Power" gpio = 0x758, auxgpio = 0xed or 0xfd */ auxgpio = cx_read(MO_GP1_IO); /* Take out the parity part */ gpio=(gpio & 0x7fd) + (auxgpio & 0xef); break; case CX88_BOARD_WINFAST_DTV1000: case CX88_BOARD_WINFAST_DTV1800H: case CX88_BOARD_WINFAST_DTV1800H_XC4000: case CX88_BOARD_WINFAST_DTV2000H_PLUS: case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL: case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F36: case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F43: gpio = (gpio & 0x6ff) | ((cx_read(MO_GP1_IO) << 8) & 0x900); auxgpio = gpio; break; default: auxgpio = gpio; } if (ir->polling) { if (ir->last_gpio == auxgpio) return; ir->last_gpio = auxgpio; } /* extract data */ data = ir_extract_bits(gpio, ir->mask_keycode); ir_dprintk("irq gpio=0x%x code=%d | %s%s%s\n", gpio, data, ir->polling ? "poll" : "irq", (gpio & ir->mask_keydown) ? " down" : "", (gpio & ir->mask_keyup) ? 
" up" : ""); if (ir->core->boardnr == CX88_BOARD_NORWOOD_MICRO) { u32 gpio_key = cx_read(MO_GP0_IO); data = (data << 4) | ((gpio_key & 0xf0) >> 4); rc_keydown(ir->dev, data, 0); } else if (ir->mask_keydown) { /* bit set on keydown */ if (gpio & ir->mask_keydown) rc_keydown_notimeout(ir->dev, data, 0); else rc_keyup(ir->dev); } else if (ir->mask_keyup) { /* bit cleared on keydown */ if (0 == (gpio & ir->mask_keyup)) rc_keydown_notimeout(ir->dev, data, 0); else rc_keyup(ir->dev); } else { /* can't distinguish keydown/up :-/ */ rc_keydown_notimeout(ir->dev, data, 0); rc_keyup(ir->dev); } } static enum hrtimer_restart cx88_ir_work(struct hrtimer *timer) { unsigned long missed; struct cx88_IR *ir = container_of(timer, struct cx88_IR, timer); cx88_ir_handle_key(ir); missed = hrtimer_forward_now(&ir->timer, ktime_set(0, ir->polling * 1000000)); if (missed > 1) ir_dprintk("Missed ticks %ld\n", missed - 1); return HRTIMER_RESTART; } static int __cx88_ir_start(void *priv) { struct cx88_core *core = priv; struct cx88_IR *ir; if (!core || !core->ir) return -EINVAL; ir = core->ir; if (ir->polling) { hrtimer_init(&ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ir->timer.function = cx88_ir_work; hrtimer_start(&ir->timer, ktime_set(0, ir->polling * 1000000), HRTIMER_MODE_REL); } if (ir->sampling) { core->pci_irqmask |= PCI_INT_IR_SMPINT; cx_write(MO_DDS_IO, 0x33F286 * ir_samplerate); /* samplerate */ cx_write(MO_DDSCFG_IO, 0x5); /* enable */ } return 0; } static void __cx88_ir_stop(void *priv) { struct cx88_core *core = priv; struct cx88_IR *ir; if (!core || !core->ir) return; ir = core->ir; if (ir->sampling) { cx_write(MO_DDSCFG_IO, 0x0); core->pci_irqmask &= ~PCI_INT_IR_SMPINT; } if (ir->polling) hrtimer_cancel(&ir->timer); } int cx88_ir_start(struct cx88_core *core) { if (core->ir->users) return __cx88_ir_start(core); return 0; } void cx88_ir_stop(struct cx88_core *core) { if (core->ir->users) __cx88_ir_stop(core); } static int cx88_ir_open(struct rc_dev *rc) { struct cx88_core 
*core = rc->priv; core->ir->users++; return __cx88_ir_start(core); } static void cx88_ir_close(struct rc_dev *rc) { struct cx88_core *core = rc->priv; core->ir->users--; if (!core->ir->users) __cx88_ir_stop(core); } /* ---------------------------------------------------------------------- */ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci) { struct cx88_IR *ir; struct rc_dev *dev; char *ir_codes = NULL; u64 rc_type = RC_BIT_OTHER; int err = -ENOMEM; u32 hardware_mask = 0; /* For devices with a hardware mask, when * used with a full-code IR table */ ir = kzalloc(sizeof(*ir), GFP_KERNEL); dev = rc_allocate_device(); if (!ir || !dev) goto err_out_free; ir->dev = dev; /* detect & configure */ switch (core->boardnr) { case CX88_BOARD_DNTV_LIVE_DVB_T: case CX88_BOARD_KWORLD_DVB_T: case CX88_BOARD_KWORLD_DVB_T_CX22702: ir_codes = RC_MAP_DNTV_LIVE_DVB_T; ir->gpio_addr = MO_GP1_IO; ir->mask_keycode = 0x1f; ir->mask_keyup = 0x60; ir->polling = 50; /* ms */ break; case CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1: ir_codes = RC_MAP_CINERGY_1400; ir->sampling = 0xeb04; /* address */ break; case CX88_BOARD_HAUPPAUGE: case CX88_BOARD_HAUPPAUGE_DVB_T1: case CX88_BOARD_HAUPPAUGE_NOVASE2_S1: case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1: case CX88_BOARD_HAUPPAUGE_HVR1100: case CX88_BOARD_HAUPPAUGE_HVR3000: case CX88_BOARD_HAUPPAUGE_HVR4000: case CX88_BOARD_HAUPPAUGE_HVR4000LITE: case CX88_BOARD_PCHDTV_HD3000: case CX88_BOARD_PCHDTV_HD5500: case CX88_BOARD_HAUPPAUGE_IRONLY: ir_codes = RC_MAP_HAUPPAUGE; ir->sampling = 1; break; case CX88_BOARD_WINFAST_DTV2000H: case CX88_BOARD_WINFAST_DTV2000H_J: case CX88_BOARD_WINFAST_DTV1800H: case CX88_BOARD_WINFAST_DTV1800H_XC4000: case CX88_BOARD_WINFAST_DTV2000H_PLUS: ir_codes = RC_MAP_WINFAST; ir->gpio_addr = MO_GP0_IO; ir->mask_keycode = 0x8f8; ir->mask_keyup = 0x100; ir->polling = 50; /* ms */ break; case CX88_BOARD_WINFAST2000XP_EXPERT: case CX88_BOARD_WINFAST_DTV1000: case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL: case 
CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F36: case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F43: ir_codes = RC_MAP_WINFAST; ir->gpio_addr = MO_GP0_IO; ir->mask_keycode = 0x8f8; ir->mask_keyup = 0x100; ir->polling = 1; /* ms */ break; case CX88_BOARD_IODATA_GVBCTV7E: ir_codes = RC_MAP_IODATA_BCTV7E; ir->gpio_addr = MO_GP0_IO; ir->mask_keycode = 0xfd; ir->mask_keydown = 0x02; ir->polling = 5; /* ms */ break; case CX88_BOARD_PROLINK_PLAYTVPVR: case CX88_BOARD_PIXELVIEW_PLAYTV_ULTRA_PRO: /* * It seems that this hardware is paired with NEC extended * address 0x866b. So, unfortunately, its usage with other * IR's with different address won't work. Still, there are * other IR's from the same manufacturer that works, like the * 002-T mini RC, provided with newer PV hardware */ ir_codes = RC_MAP_PIXELVIEW_MK12; ir->gpio_addr = MO_GP1_IO; ir->mask_keyup = 0x80; ir->polling = 10; /* ms */ hardware_mask = 0x3f; /* Hardware returns only 6 bits from command part */ break; case CX88_BOARD_PROLINK_PV_8000GT: case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME: ir_codes = RC_MAP_PIXELVIEW_NEW; ir->gpio_addr = MO_GP1_IO; ir->mask_keycode = 0x3f; ir->mask_keyup = 0x80; ir->polling = 1; /* ms */ break; case CX88_BOARD_KWORLD_LTV883: ir_codes = RC_MAP_PIXELVIEW; ir->gpio_addr = MO_GP1_IO; ir->mask_keycode = 0x1f; ir->mask_keyup = 0x60; ir->polling = 1; /* ms */ break; case CX88_BOARD_ADSTECH_DVB_T_PCI: ir_codes = RC_MAP_ADSTECH_DVB_T_PCI; ir->gpio_addr = MO_GP1_IO; ir->mask_keycode = 0xbf; ir->mask_keyup = 0x40; ir->polling = 50; /* ms */ break; case CX88_BOARD_MSI_TVANYWHERE_MASTER: ir_codes = RC_MAP_MSI_TVANYWHERE; ir->gpio_addr = MO_GP1_IO; ir->mask_keycode = 0x1f; ir->mask_keyup = 0x40; ir->polling = 1; /* ms */ break; case CX88_BOARD_AVERTV_303: case CX88_BOARD_AVERTV_STUDIO_303: ir_codes = RC_MAP_AVERTV_303; ir->gpio_addr = MO_GP2_IO; ir->mask_keycode = 0xfb; ir->mask_keydown = 0x02; ir->polling = 50; /* ms */ break; case CX88_BOARD_OMICOM_SS4_PCI: case CX88_BOARD_SATTRADE_ST4200: case 
CX88_BOARD_TBS_8920: case CX88_BOARD_TBS_8910: case CX88_BOARD_PROF_7300: case CX88_BOARD_PROF_7301: case CX88_BOARD_PROF_6200: ir_codes = RC_MAP_TBS_NEC; ir->sampling = 0xff00; /* address */ break; case CX88_BOARD_TEVII_S464: case CX88_BOARD_TEVII_S460: case CX88_BOARD_TEVII_S420: ir_codes = RC_MAP_TEVII_NEC; ir->sampling = 0xff00; /* address */ break; case CX88_BOARD_DNTV_LIVE_DVB_T_PRO: ir_codes = RC_MAP_DNTV_LIVE_DVBT_PRO; ir->sampling = 0xff00; /* address */ break; case CX88_BOARD_NORWOOD_MICRO: ir_codes = RC_MAP_NORWOOD; ir->gpio_addr = MO_GP1_IO; ir->mask_keycode = 0x0e; ir->mask_keyup = 0x80; ir->polling = 50; /* ms */ break; case CX88_BOARD_NPGTECH_REALTV_TOP10FM: ir_codes = RC_MAP_NPGTECH; ir->gpio_addr = MO_GP0_IO; ir->mask_keycode = 0xfa; ir->polling = 50; /* ms */ break; case CX88_BOARD_PINNACLE_PCTV_HD_800i: ir_codes = RC_MAP_PINNACLE_PCTV_HD; ir->sampling = 1; break; case CX88_BOARD_POWERCOLOR_REAL_ANGEL: ir_codes = RC_MAP_POWERCOLOR_REAL_ANGEL; ir->gpio_addr = MO_GP2_IO; ir->mask_keycode = 0x7e; ir->polling = 100; /* ms */ break; case CX88_BOARD_TWINHAN_VP1027_DVBS: ir_codes = RC_MAP_TWINHAN_VP1027_DVBS; rc_type = RC_BIT_NEC; ir->sampling = 0xff00; /* address */ break; } if (!ir_codes) { err = -ENODEV; goto err_out_free; } /* * The usage of mask_keycode were very convenient, due to several * reasons. Among others, the scancode tables were using the scancode * as the index elements. So, the less bits it was used, the smaller * the table were stored. After the input changes, the better is to use * the full scancodes, since it allows replacing the IR remote by * another one. Unfortunately, there are still some hardware, like * Pixelview Ultra Pro, where only part of the scancode is sent via * GPIO. So, there's no way to get the full scancode. Due to that, * hardware_mask were introduced here: it represents those hardware * that has such limits. 
*/ if (hardware_mask && !ir->mask_keycode) ir->mask_keycode = hardware_mask; /* init input device */ snprintf(ir->name, sizeof(ir->name), "cx88 IR (%s)", core->board.name); snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", pci_name(pci)); dev->input_name = ir->name; dev->input_phys = ir->phys; dev->input_id.bustype = BUS_PCI; dev->input_id.version = 1; if (pci->subsystem_vendor) { dev->input_id.vendor = pci->subsystem_vendor; dev->input_id.product = pci->subsystem_device; } else { dev->input_id.vendor = pci->vendor; dev->input_id.product = pci->device; } dev->dev.parent = &pci->dev; dev->map_name = ir_codes; dev->driver_name = MODULE_NAME; dev->priv = core; dev->open = cx88_ir_open; dev->close = cx88_ir_close; dev->scanmask = hardware_mask; if (ir->sampling) { dev->driver_type = RC_DRIVER_IR_RAW; dev->timeout = 10 * 1000 * 1000; /* 10 ms */ } else { dev->driver_type = RC_DRIVER_SCANCODE; dev->allowed_protos = rc_type; } ir->core = core; core->ir = ir; /* all done */ err = rc_register_device(dev); if (err) goto err_out_free; return 0; err_out_free: rc_free_device(dev); core->ir = NULL; kfree(ir); return err; } int cx88_ir_fini(struct cx88_core *core) { struct cx88_IR *ir = core->ir; /* skip detach on non attached boards */ if (NULL == ir) return 0; cx88_ir_stop(core); rc_unregister_device(ir->dev); kfree(ir); /* done */ core->ir = NULL; return 0; } /* ---------------------------------------------------------------------- */ void cx88_ir_irq(struct cx88_core *core) { struct cx88_IR *ir = core->ir; u32 samples; unsigned todo, bits; struct ir_raw_event ev; if (!ir || !ir->sampling) return; /* * Samples are stored in a 32 bit register, oldest sample in * the msb. A set bit represents space and an unset bit * represents a pulse. */ samples = cx_read(MO_SAMPLE_IO); if (samples == 0xff && ir->dev->idle) return; init_ir_raw_event(&ev); for (todo = 32; todo > 0; todo -= bits) { ev.pulse = samples & 0x80000000 ? false : true; bits = min(todo, 32U - fls(ev.pulse ? 
samples : ~samples)); ev.duration = (bits * (NSEC_PER_SEC / 1000)) / ir_samplerate; ir_raw_event_store_with_filter(ir->dev, &ev); samples <<= bits; } ir_raw_event_handle(ir->dev); } static int get_key_pvr2000(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw) { int flags, code; /* poll IR chip */ flags = i2c_smbus_read_byte_data(ir->c, 0x10); if (flags < 0) { dprintk("read error\n"); return 0; } /* key pressed ? */ if (0 == (flags & 0x80)) return 0; /* read actual key code */ code = i2c_smbus_read_byte_data(ir->c, 0x00); if (code < 0) { dprintk("read error\n"); return 0; } dprintk("IR Key/Flags: (0x%02x/0x%02x)\n", code & 0xff, flags & 0xff); *ir_key = code & 0xff; *ir_raw = code; return 1; } void cx88_i2c_init_ir(struct cx88_core *core) { struct i2c_board_info info; const unsigned short default_addr_list[] = { 0x18, 0x6b, 0x71, I2C_CLIENT_END }; const unsigned short pvr2000_addr_list[] = { 0x18, 0x1a, I2C_CLIENT_END }; const unsigned short *addr_list = default_addr_list; const unsigned short *addrp; /* Instantiate the IR receiver device, if present */ if (0 != core->i2c_rc) return; memset(&info, 0, sizeof(struct i2c_board_info)); strlcpy(info.type, "ir_video", I2C_NAME_SIZE); switch (core->boardnr) { case CX88_BOARD_LEADTEK_PVR2000: addr_list = pvr2000_addr_list; core->init_data.name = "cx88 Leadtek PVR 2000 remote"; core->init_data.type = RC_BIT_UNKNOWN; core->init_data.get_key = get_key_pvr2000; core->init_data.ir_codes = RC_MAP_EMPTY; break; } /* * We can't call i2c_new_probed_device() because it uses * quick writes for probing and at least some RC receiver * devices only reply to reads. 
* Also, Hauppauge XVR needs to be specified, as address 0x71 * conflicts with another remote type used with saa7134 */ for (addrp = addr_list; *addrp != I2C_CLIENT_END; addrp++) { info.platform_data = NULL; memset(&core->init_data, 0, sizeof(core->init_data)); if (*addrp == 0x71) { /* Hauppauge XVR */ core->init_data.name = "cx88 Hauppauge XVR remote"; core->init_data.ir_codes = RC_MAP_HAUPPAUGE; core->init_data.type = RC_BIT_RC5; core->init_data.internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR; info.platform_data = &core->init_data; } if (i2c_smbus_xfer(&core->i2c_adap, *addrp, 0, I2C_SMBUS_READ, 0, I2C_SMBUS_QUICK, NULL) >= 0) { info.addr = *addrp; i2c_new_device(&core->i2c_adap, &info); break; } } } /* ---------------------------------------------------------------------- */ MODULE_AUTHOR("Gerd Knorr, Pavel Machek, Chris Pascoe"); MODULE_DESCRIPTION("input driver for cx88 GPIO-based IR remote controls"); MODULE_LICENSE("GPL");
gpl-2.0
stedman420/android_kernel_zte_hera
drivers/video/msm/mdp_dma_s.c
3506
4600
/* Copyright (c) 2008-2009, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/hrtimer.h> #include <mach/hardware.h> #include <linux/io.h> #include <asm/system.h> #include <asm/mach-types.h> #include <linux/semaphore.h> #include <linux/spinlock.h> #include <linux/fb.h> #include "mdp.h" #include "msm_fb.h" static void mdp_dma_s_update_lcd(struct msm_fb_data_type *mfd) { MDPIBUF *iBuf = &mfd->ibuf; int mddi_dest = FALSE; uint32 outBpp = iBuf->bpp; uint32 dma_s_cfg_reg; uint8 *src; struct msm_fb_panel_data *pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data; dma_s_cfg_reg = DMA_PACK_TIGHT | DMA_PACK_ALIGN_LSB | DMA_OUT_SEL_AHB | DMA_IBUF_NONCONTIGUOUS; if (mfd->fb_imgType == MDP_BGR_565) dma_s_cfg_reg |= DMA_PACK_PATTERN_BGR; else dma_s_cfg_reg |= DMA_PACK_PATTERN_RGB; if (outBpp == 4) dma_s_cfg_reg |= DMA_IBUF_C3ALPHA_EN; if (outBpp == 2) dma_s_cfg_reg |= DMA_IBUF_FORMAT_RGB565; if (mfd->panel_info.pdest != DISPLAY_2) { printk(KERN_ERR "error: non-secondary type through dma_s!\n"); return; } if (mfd->panel_info.type == MDDI_PANEL || mfd->panel_info.type == EXT_MDDI_PANEL) { dma_s_cfg_reg |= DMA_OUT_SEL_MDDI; mddi_dest = TRUE; } else { dma_s_cfg_reg |= DMA_AHBM_LCD_SEL_SECONDARY; outp32(MDP_EBI2_LCD1, mfd->data_port_phys); } src = (uint8 *) iBuf->buf; /* starting input address */ src += (iBuf->dma_x + iBuf->dma_y * iBuf->ibuf_width) * outBpp; /* MDP 
cmd block enable */ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE); /* PIXELSIZE */ if (mfd->panel_info.type == MDDI_PANEL) { MDP_OUTP(MDP_BASE + 0xa0004, (iBuf->dma_h << 16 | iBuf->dma_w)); MDP_OUTP(MDP_BASE + 0xa0008, src); /* ibuf address */ MDP_OUTP(MDP_BASE + 0xa000c, iBuf->ibuf_width * outBpp);/* ystride */ } else { MDP_OUTP(MDP_BASE + 0xb0004, (iBuf->dma_h << 16 | iBuf->dma_w)); MDP_OUTP(MDP_BASE + 0xb0008, src); /* ibuf address */ MDP_OUTP(MDP_BASE + 0xb000c, iBuf->ibuf_width * outBpp);/* ystride */ } if (mfd->panel_info.bpp == 18) { dma_s_cfg_reg |= DMA_DSTC0G_6BITS | /* 666 18BPP */ DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS; } else { dma_s_cfg_reg |= DMA_DSTC0G_6BITS | /* 565 16BPP */ DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS; } if (mddi_dest) { if (mfd->panel_info.type == MDDI_PANEL) { MDP_OUTP(MDP_BASE + 0xa0010, (iBuf->dma_y << 16) | iBuf->dma_x); MDP_OUTP(MDP_BASE + 0x00090, 1); } else { MDP_OUTP(MDP_BASE + 0xb0010, (iBuf->dma_y << 16) | iBuf->dma_x); MDP_OUTP(MDP_BASE + 0x00090, 2); } MDP_OUTP(MDP_BASE + 0x00094, (MDDI_VDO_PACKET_DESC << 16) | mfd->panel_info.mddi.vdopkt); } else { /* setting LCDC write window */ pdata->set_rect(iBuf->dma_x, iBuf->dma_y, iBuf->dma_w, iBuf->dma_h); } if (mfd->panel_info.type == MDDI_PANEL) MDP_OUTP(MDP_BASE + 0xa0000, dma_s_cfg_reg); else MDP_OUTP(MDP_BASE + 0xb0000, dma_s_cfg_reg); /* MDP cmd block disable */ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE); if (mfd->panel_info.type == MDDI_PANEL) mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd); else mdp_pipe_kickoff(MDP_DMA_E_TERM, mfd); } void mdp_dma_s_update(struct msm_fb_data_type *mfd) { down(&mfd->dma->mutex); if ((mfd) && (!mfd->dma->busy) && (mfd->panel_power_on)) { down(&mfd->sem); mdp_enable_irq(MDP_DMA_S_TERM); if (mfd->panel_info.type == MDDI_PANEL) mdp_enable_irq(MDP_DMA_S_TERM); else mdp_enable_irq(MDP_DMA_E_TERM); mfd->dma->busy = TRUE; INIT_COMPLETION(mfd->dma->comp); mfd->ibuf_flushed = TRUE; mdp_dma_s_update_lcd(mfd); up(&mfd->sem); /* wait until DMA 
finishes the current job */ wait_for_completion_killable(&mfd->dma->comp); if (mfd->panel_info.type == MDDI_PANEL) mdp_disable_irq(MDP_DMA_S_TERM); else mdp_disable_irq(MDP_DMA_E_TERM); /* signal if pan function is waiting for the update completion */ if (mfd->pan_waiting) { mfd->pan_waiting = FALSE; complete(&mfd->pan_comp); } } up(&mfd->dma->mutex); }
gpl-2.0
daeiron/LGD855_kernel
drivers/hid/hid-zpff.c
3762
4091
/* * Force feedback support for Zeroplus based devices * * Copyright (c) 2005, 2006 Anssi Hannula <anssi.hannula@gmail.com> */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/hid.h> #include <linux/input.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/module.h> #include "hid-ids.h" #ifdef CONFIG_ZEROPLUS_FF #include "usbhid/usbhid.h" struct zpff_device { struct hid_report *report; }; static int zpff_play(struct input_dev *dev, void *data, struct ff_effect *effect) { struct hid_device *hid = input_get_drvdata(dev); struct zpff_device *zpff = data; int left, right; /* * The following is specified the other way around in the Zeroplus * datasheet but the order below is correct for the XFX Executioner; * however it is possible that the XFX Executioner is an exception */ left = effect->u.rumble.strong_magnitude; right = effect->u.rumble.weak_magnitude; dbg_hid("called with 0x%04x 0x%04x\n", left, right); left = left * 0x7f / 0xffff; right = right * 0x7f / 0xffff; zpff->report->field[2]->value[0] = left; zpff->report->field[3]->value[0] = right; dbg_hid("running with 0x%02x 0x%02x\n", left, right); usbhid_submit_report(hid, zpff->report, USB_DIR_OUT); return 0; } static int zpff_init(struct hid_device *hid) { struct zpff_device *zpff; struct hid_report *report; struct hid_input *hidinput = 
list_entry(hid->inputs.next, struct hid_input, list); struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct input_dev *dev = hidinput->input; int error; if (list_empty(report_list)) { hid_err(hid, "no output report found\n"); return -ENODEV; } report = list_entry(report_list->next, struct hid_report, list); if (report->maxfield < 4) { hid_err(hid, "not enough fields in report\n"); return -ENODEV; } zpff = kzalloc(sizeof(struct zpff_device), GFP_KERNEL); if (!zpff) return -ENOMEM; set_bit(FF_RUMBLE, dev->ffbit); error = input_ff_create_memless(dev, zpff, zpff_play); if (error) { kfree(zpff); return error; } zpff->report = report; zpff->report->field[0]->value[0] = 0x00; zpff->report->field[1]->value[0] = 0x02; zpff->report->field[2]->value[0] = 0x00; zpff->report->field[3]->value[0] = 0x00; usbhid_submit_report(hid, zpff->report, USB_DIR_OUT); hid_info(hid, "force feedback for Zeroplus based devices by Anssi Hannula <anssi.hannula@gmail.com>\n"); return 0; } #else static inline int zpff_init(struct hid_device *hid) { return 0; } #endif static int zp_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF); if (ret) { hid_err(hdev, "hw start failed\n"); goto err; } zpff_init(hdev); return 0; err: return ret; } static const struct hid_device_id zp_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, { } }; MODULE_DEVICE_TABLE(hid, zp_devices); static struct hid_driver zp_driver = { .name = "zeroplus", .id_table = zp_devices, .probe = zp_probe, }; static int __init zp_init(void) { return hid_register_driver(&zp_driver); } static void __exit zp_exit(void) { hid_unregister_driver(&zp_driver); } module_init(zp_init); module_exit(zp_exit); MODULE_LICENSE("GPL");
gpl-2.0
pacerom/kernel_google_msm
drivers/staging/wlags49_h2/wl_priv.c
4274
64944
/******************************************************************************* * Agere Systems Inc. * Wireless device driver for Linux (wlags49). * * Copyright (c) 1998-2003 Agere Systems Inc. * All rights reserved. * http://www.agere.com * * Initially developed by TriplePoint, Inc. * http://www.triplepoint.com * *------------------------------------------------------------------------------ * * This file defines handling routines for the private IOCTLs * *------------------------------------------------------------------------------ * * SOFTWARE LICENSE * * This software is provided subject to the following terms and conditions, * which you should read carefully before using the software. Using this * software indicates your acceptance of these terms and conditions. If you do * not agree with these terms and conditions, do not use the software. * * Copyright © 2003 Agere Systems Inc. * All rights reserved. * * Redistribution and use in source or binary forms, with or without * modifications, are permitted provided that the following conditions are met: * * . Redistributions of source code must retain the above copyright notice, this * list of conditions and the following Disclaimer as comments in the code as * well as in the documentation and/or other materials provided with the * distribution. * * . Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following Disclaimer in the documentation * and/or other materials provided with the distribution. * * . Neither the name of Agere Systems Inc. nor the names of the contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Disclaimer * * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
ANY * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * ******************************************************************************/ /******************************************************************************* * include files ******************************************************************************/ #include <wl_version.h> #include <linux/if_arp.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/delay.h> #include <asm/uaccess.h> #include <debug.h> #include <hcf.h> #include <hcfdef.h> #include <wl_if.h> #include <wl_internal.h> #include <wl_enc.h> #include <wl_main.h> #include <wl_priv.h> #include <wl_util.h> #include <wl_netdev.h> int wvlan_uil_connect( struct uilreq *urq, struct wl_private *lp ); int wvlan_uil_disconnect( struct uilreq *urq, struct wl_private *lp ); int wvlan_uil_action( struct uilreq *urq, struct wl_private *lp ); int wvlan_uil_block( struct uilreq *urq, struct wl_private *lp ); int wvlan_uil_unblock( struct uilreq *urq, struct wl_private *lp ); int wvlan_uil_send_diag_msg( struct uilreq *urq, struct wl_private *lp ); int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp ); int wvlan_uil_get_info( struct uilreq *urq, struct wl_private *lp ); int cfg_driver_info( struct uilreq *urq, struct wl_private *lp ); int cfg_driver_identity( struct uilreq *urq, struct wl_private *lp ); 
/******************************************************************************* * global variables ******************************************************************************/ #if DBG extern dbg_info_t *DbgInfo; #endif // DBG /* If USE_UIL is not defined, then none of the UIL Interface code below will be included in the build */ #ifdef USE_UIL /******************************************************************************* * wvlan_uil() ******************************************************************************* * * DESCRIPTION: * * The handler function for the UIL interface. * * PARAMETERS: * * urq - a pointer to the UIL request buffer * lp - a pointer to the device's private adapter structure * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wvlan_uil( struct uilreq *urq, struct wl_private *lp ) { int ioctl_ret = 0; /*------------------------------------------------------------------------*/ DBG_FUNC( "wvlan_uil" ); DBG_ENTER( DbgInfo ); switch( urq->command ) { case UIL_FUN_CONNECT: DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL -- WVLAN2_UIL_CONNECT\n"); ioctl_ret = wvlan_uil_connect( urq, lp ); break; case UIL_FUN_DISCONNECT: DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL -- WVLAN2_UIL_DISCONNECT\n"); ioctl_ret = wvlan_uil_disconnect( urq, lp ); break; case UIL_FUN_ACTION: DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL -- WVLAN2_UIL_ACTION\n" ); ioctl_ret = wvlan_uil_action( urq, lp ); break; case UIL_FUN_SEND_DIAG_MSG: DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL -- WVLAN2_UIL_SEND_DIAG_MSG\n"); ioctl_ret = wvlan_uil_send_diag_msg( urq, lp ); break; case UIL_FUN_GET_INFO: DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL -- WVLAN2_UIL_GET_INFO\n"); ioctl_ret = wvlan_uil_get_info( urq, lp ); break; case UIL_FUN_PUT_INFO: DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL -- WVLAN2_UIL_PUT_INFO\n"); ioctl_ret = wvlan_uil_put_info( urq, lp ); break; default: DBG_TRACE(DbgInfo, "IOCTL: 
WVLAN2_IOCTL_UIL -- UNSUPPORTED UIL CODE: 0x%X", urq->command ); ioctl_ret = -EOPNOTSUPP; break; } DBG_LEAVE( DbgInfo ); return ioctl_ret; } // wvlan_uil /*============================================================================*/ /******************************************************************************* * wvlan_uil_connect() ******************************************************************************* * * DESCRIPTION: * * Connect to the UIL in order to make a request. * * PARAMETERS: * * urq - a pointer to the UIL request buffer * lp - a pointer to the device's private adapter structure * * RETURNS: * * UIL_SUCCESS * UIL_ERR_xxx value otherwise * ******************************************************************************/ int wvlan_uil_connect( struct uilreq *urq, struct wl_private *lp ) { int result = 0; /*------------------------------------------------------------------------*/ DBG_FUNC( "wvlan_uil_connect" ); DBG_ENTER( DbgInfo ); if( !( lp->flags & WVLAN2_UIL_CONNECTED )) { lp->flags |= WVLAN2_UIL_CONNECTED; urq->hcfCtx = &( lp->hcfCtx ); urq->result = UIL_SUCCESS; } else { DBG_WARNING( DbgInfo, "UIL_ERR_IN_USE\n" ); urq->result = UIL_ERR_IN_USE; } DBG_LEAVE( DbgInfo ); return result; } // wvlan_uil_connect /*============================================================================*/ /******************************************************************************* * wvlan_uil_disconnect() ******************************************************************************* * * DESCRIPTION: * * Disonnect from the UIL after a request has been completed. 
* * PARAMETERS: * * urq - a pointer to the UIL request buffer * lp - a pointer to the device's private adapter structure * * RETURNS: * * UIL_SUCCESS * UIL_ERR_xxx value otherwise * ******************************************************************************/ int wvlan_uil_disconnect( struct uilreq *urq, struct wl_private *lp ) { int result = 0; /*------------------------------------------------------------------------*/ DBG_FUNC( "wvlan_uil_disconnect" ); DBG_ENTER( DbgInfo ); if( urq->hcfCtx == &( lp->hcfCtx )) { if (lp->flags & WVLAN2_UIL_CONNECTED) { lp->flags &= ~WVLAN2_UIL_CONNECTED; /* if (lp->flags & WVLAN2_UIL_BUSY) { lp->flags &= ~WVLAN2_UIL_BUSY; netif_start_queue(lp->dev); } */ } urq->hcfCtx = NULL; urq->result = UIL_SUCCESS; } else { DBG_ERROR( DbgInfo, "UIL_ERR_WRONG_IFB\n" ); urq->result = UIL_ERR_WRONG_IFB; } DBG_LEAVE( DbgInfo ); return result; } // wvlan_uil_disconnect /*============================================================================*/ /******************************************************************************* * wvlan_uil_action() ******************************************************************************* * * DESCRIPTION: * * Handler for the UIL_ACT_xxx subcodes associated with UIL_FUN_ACTION * * PARAMETERS: * * urq - a pointer to the UIL request buffer * lp - a pointer to the device's private adapter structure * * RETURNS: * * UIL_SUCCESS * UIL_ERR_xxx value otherwise * ******************************************************************************/ int wvlan_uil_action( struct uilreq *urq, struct wl_private *lp ) { int result = 0; ltv_t *ltv; /*------------------------------------------------------------------------*/ DBG_FUNC( "wvlan_uil_action" ); DBG_ENTER( DbgInfo ); if( urq->hcfCtx == &( lp->hcfCtx )) { /* Make sure there's an LTV in the request buffer */ ltv = (ltv_t *)urq->data; if( ltv != NULL ) { /* Switch on the Type field of the LTV contained in the request buffer */ switch( ltv->typ ) { case UIL_ACT_BLOCK: 
DBG_TRACE( DbgInfo, "UIL_ACT_BLOCK\n" ); result = wvlan_uil_block( urq, lp ); break; case UIL_ACT_UNBLOCK: DBG_TRACE( DbgInfo, "UIL_ACT_UNBLOCK\n" ); result = wvlan_uil_unblock( urq, lp ); break; case UIL_ACT_SCAN: DBG_TRACE( DbgInfo, "UIL_ACT_SCAN\n" ); urq->result = hcf_action( &( lp->hcfCtx ), MDD_ACT_SCAN ); break; case UIL_ACT_APPLY: DBG_TRACE( DbgInfo, "UIL_ACT_APPLY\n" ); urq->result = wl_apply( lp ); break; case UIL_ACT_RESET: DBG_TRACE( DbgInfo, "UIL_ACT_RESET\n" ); urq->result = wl_go( lp ); break; default: DBG_WARNING( DbgInfo, "Unknown action code: 0x%x\n", ltv->typ ); break; } } else { DBG_ERROR( DbgInfo, "Bad LTV for this action\n" ); urq->result = UIL_ERR_LEN; } } else { DBG_ERROR( DbgInfo, "UIL_ERR_WRONG_IFB\n" ); urq->result = UIL_ERR_WRONG_IFB; } DBG_LEAVE( DbgInfo ); return result; } // wvlan_uil_action /*============================================================================*/ /******************************************************************************* * wvlan_uil_block() ******************************************************************************* * * DESCRIPTION: * * Sets a block in the driver to prevent access to the card by other * processes. 
* * PARAMETERS: * * urq - a pointer to the UIL request buffer * lp - a pointer to the device's private adapter structure * * RETURNS: * * UIL_SUCCESS * UIL_ERR_xxx value otherwise * ******************************************************************************/ int wvlan_uil_block( struct uilreq *urq, struct wl_private *lp ) { int result = 0; /*------------------------------------------------------------------------*/ DBG_FUNC( "wvlan_uil_block" ); DBG_ENTER( DbgInfo ); if( urq->hcfCtx == &( lp->hcfCtx )) { if( capable( CAP_NET_ADMIN )) { lp->flags |= WVLAN2_UIL_BUSY; netif_stop_queue(lp->dev); WL_WDS_NETIF_STOP_QUEUE( lp ); urq->result = UIL_SUCCESS; } else { DBG_ERROR( DbgInfo, "EPERM\n" ); urq->result = UIL_FAILURE; result = -EPERM; } } else { DBG_ERROR( DbgInfo, "UIL_ERR_WRONG_IFB\n" ); urq->result = UIL_ERR_WRONG_IFB; } DBG_LEAVE( DbgInfo ); return result; } // wvlan_uil_block /*============================================================================*/ /******************************************************************************* * wvlan_uil_unblock() ******************************************************************************* * * DESCRIPTION: * * Unblocks the driver to restore access to the card by other processes. 
* * PARAMETERS: * * urq - a pointer to the UIL request buffer * lp - a pointer to the device's private adapter structure * * RETURNS: * * UIL_SUCCESS * UIL_ERR_xxx value otherwise * ******************************************************************************/ int wvlan_uil_unblock( struct uilreq *urq, struct wl_private *lp ) { int result = 0; /*------------------------------------------------------------------------*/ DBG_FUNC( "wvlan_uil_unblock" ); DBG_ENTER( DbgInfo ); if( urq->hcfCtx == &( lp->hcfCtx )) { if( capable( CAP_NET_ADMIN )) { if (lp->flags & WVLAN2_UIL_BUSY) { lp->flags &= ~WVLAN2_UIL_BUSY; netif_wake_queue(lp->dev); WL_WDS_NETIF_WAKE_QUEUE( lp ); } } else { DBG_ERROR( DbgInfo, "EPERM\n" ); urq->result = UIL_FAILURE; result = -EPERM; } } else { DBG_ERROR( DbgInfo, "UIL_ERR_WRONG_IFB\n" ); urq->result = UIL_ERR_WRONG_IFB; } DBG_LEAVE( DbgInfo ); return result; } // wvlan_uil_unblock /*============================================================================*/ /******************************************************************************* * wvlan_uil_send_diag_msg() ******************************************************************************* * * DESCRIPTION: * * Sends a diagnostic message to the card. 
* * PARAMETERS: * * urq - a pointer to the UIL request buffer * lp - a pointer to the device's private adapter structure * * RETURNS: * * UIL_SUCCESS * UIL_ERR_xxx value otherwise * ******************************************************************************/ int wvlan_uil_send_diag_msg( struct uilreq *urq, struct wl_private *lp ) { int result = 0; DESC_STRCT Descp[1]; /*------------------------------------------------------------------------*/ DBG_FUNC( "wvlan_uil_send_diag_msg" ); DBG_ENTER( DbgInfo ); if( urq->hcfCtx == &( lp->hcfCtx )) { if( capable( CAP_NET_ADMIN )) { if ((urq->data != NULL) && (urq->len != 0)) { if (lp->hcfCtx.IFB_RscInd != 0) { u_char *data; // Verify the user buffer result = verify_area(VERIFY_READ, urq->data, urq->len); if (result != 0) { DBG_ERROR( DbgInfo, "verify_area failed, result: %d\n", result ); urq->result = UIL_FAILURE; DBG_LEAVE( DbgInfo ); return result; } data = kmalloc(urq->len, GFP_KERNEL); if (data != NULL) { memset( Descp, 0, sizeof( DESC_STRCT )); memcpy( data, urq->data, urq->len ); Descp[0].buf_addr = (wci_bufp)data; Descp[0].BUF_CNT = urq->len; Descp[0].next_desc_addr = 0; // terminate list hcf_send_msg( &(lp->hcfCtx), &Descp[0], HCF_PORT_0 ); kfree( data ); } else { DBG_ERROR( DbgInfo, "ENOMEM\n" ); urq->result = UIL_FAILURE; result = -ENOMEM; DBG_LEAVE( DbgInfo ); return result; } } else { urq->result = UIL_ERR_BUSY; } } else { urq->result = UIL_FAILURE; } } else { DBG_ERROR( DbgInfo, "EPERM\n" ); urq->result = UIL_FAILURE; result = -EPERM; } } else { DBG_ERROR( DbgInfo, "UIL_ERR_WRONG_IFB\n" ); urq->result = UIL_ERR_WRONG_IFB; } DBG_LEAVE( DbgInfo ); return result; } // wvlan_uil_send_diag_msg /*============================================================================*/ /******************************************************************************* * wvlan_uil_put_info() ******************************************************************************* * * DESCRIPTION: * * Sends a specific RID directly to the 
driver to set configuration info. * * PARAMETERS: * * urq - a pointer to the UIL request buffer * lp - a pointer to the device's private adapter structure * * RETURNS: * * UIL_SUCCESS * UIL_ERR_xxx value otherwise * ******************************************************************************/ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp ) { int result = 0; ltv_t *pLtv; bool_t ltvAllocated = FALSE; ENCSTRCT sEncryption; #ifdef USE_WDS hcf_16 hcfPort = HCF_PORT_0; #endif /* USE_WDS */ /*------------------------------------------------------------------------*/ DBG_FUNC( "wvlan_uil_put_info" ); DBG_ENTER( DbgInfo ); if( urq->hcfCtx == &( lp->hcfCtx )) { if( capable( CAP_NET_ADMIN )) { if(( urq->data != NULL ) && ( urq->len != 0 )) { /* Make sure that we have at least a command and length to send. */ if( urq->len < ( sizeof( hcf_16 ) * 2 )) { urq->len = sizeof( lp->ltvRecord ); urq->result = UIL_ERR_LEN; DBG_ERROR( DbgInfo, "No Length/Type in LTV!!!\n" ); DBG_ERROR( DbgInfo, "UIL_ERR_LEN\n" ); DBG_LEAVE( DbgInfo ); return result; } /* Verify the user buffer */ result = verify_area( VERIFY_READ, urq->data, urq->len ); if( result != 0 ) { urq->result = UIL_FAILURE; DBG_ERROR( DbgInfo, "verify_area(), VERIFY_READ FAILED\n" ); DBG_LEAVE( DbgInfo ); return result; } /* Get only the command and length information. */ copy_from_user( &( lp->ltvRecord ), urq->data, sizeof( hcf_16 ) * 2 ); /* Make sure the incoming LTV record length is within the bounds of the IOCTL length */ if((( lp->ltvRecord.len + 1 ) * sizeof( hcf_16 )) > urq->len ) { urq->len = sizeof( lp->ltvRecord ); urq->result = UIL_ERR_LEN; DBG_ERROR( DbgInfo, "UIL_ERR_LEN\n" ); DBG_LEAVE( DbgInfo ); return result; } /* If the requested length is greater than the size of our local LTV record, try to allocate it from the kernel stack. Otherwise, we just use our local LTV record. 
*/ if( urq->len > sizeof( lp->ltvRecord )) { pLtv = kmalloc(urq->len, GFP_KERNEL); if (pLtv != NULL) { ltvAllocated = TRUE; } else { DBG_ERROR( DbgInfo, "Alloc FAILED\n" ); urq->len = sizeof( lp->ltvRecord ); urq->result = UIL_ERR_LEN; result = -ENOMEM; DBG_LEAVE( DbgInfo ); return result; } } else { pLtv = &( lp->ltvRecord ); } /* Copy the data from the user's buffer into the local LTV record data area. */ copy_from_user( pLtv, urq->data, urq->len ); /* We need to snoop the commands to see if there is anything we need to store for the purposes of a reset or start/stop sequence. Perform endian translation as needed */ switch( pLtv->typ ) { case CFG_CNF_PORT_TYPE: lp->PortType = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; case CFG_CNF_OWN_MAC_ADDR: /* TODO: determine if we are going to store anything based on this */ break; case CFG_CNF_OWN_CHANNEL: lp->Channel = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; /* CFG_CNF_OWN_SSID currently same as CNF_DESIRED_SSID. Do we need separate storage for this? 
*/ //case CFG_CNF_OWN_SSID: case CFG_CNF_OWN_ATIM_WINDOW: lp->atimWindow = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; case CFG_CNF_SYSTEM_SCALE: lp->DistanceBetweenAPs = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); case CFG_CNF_MAX_DATA_LEN: /* TODO: determine if we are going to store anything based on this */ break; case CFG_CNF_PM_ENABLED: lp->PMEnabled = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; case CFG_CNF_MCAST_RX: lp->MulticastReceive = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; case CFG_CNF_MAX_SLEEP_DURATION: lp->MaxSleepDuration = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; case CFG_CNF_HOLDOVER_DURATION: lp->holdoverDuration = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; case CFG_CNF_OWN_NAME: memset( lp->StationName, 0, sizeof( lp->StationName )); memcpy( (void *)lp->StationName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]); pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; case CFG_CNF_LOAD_BALANCING: lp->loadBalancing = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; case CFG_CNF_MEDIUM_DISTRIBUTION: lp->mediumDistribution = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; #ifdef WARP case CFG_CNF_TX_POW_LVL: lp->txPowLevel = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; //case CFG_CNF_SHORT_RETRY_LIMIT: // Short Retry Limit //case 0xFC33: // Long Retry Limit case CFG_SUPPORTED_RATE_SET_CNTL: // Supported Rate Set Control lp->srsc[0] = pLtv->u.u16[0]; lp->srsc[1] = pLtv->u.u16[1]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); pLtv->u.u16[1] = CNV_INT_TO_LITTLE( pLtv->u.u16[1] ); break; case CFG_BASIC_RATE_SET_CNTL: // Basic Rate Set Control lp->brsc[0] = pLtv->u.u16[0]; lp->brsc[1] = pLtv->u.u16[1]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); 
pLtv->u.u16[1] = CNV_INT_TO_LITTLE( pLtv->u.u16[1] ); break; case CFG_CNF_CONNECTION_CNTL: lp->connectionControl = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; //case CFG_PROBE_DATA_RATE: #endif // HERMES25 #if 1 //;? (HCF_TYPE) & HCF_TYPE_AP //;?should we restore this to allow smaller memory footprint case CFG_CNF_OWN_DTIM_PERIOD: lp->DTIMPeriod = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; #ifdef WARP case CFG_CNF_OWN_BEACON_INTERVAL: // Own Beacon Interval lp->ownBeaconInterval = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; #endif // WARP case CFG_COEXISTENSE_BEHAVIOUR: // Coexistence behavior lp->coexistence = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; #ifdef USE_WDS case CFG_CNF_WDS_ADDR1: memcpy( &lp->wds_port[0].wdsAddress, &pLtv->u.u8[0], ETH_ALEN ); hcfPort = HCF_PORT_1; break; case CFG_CNF_WDS_ADDR2: memcpy( &lp->wds_port[1].wdsAddress, &pLtv->u.u8[0], ETH_ALEN ); hcfPort = HCF_PORT_2; break; case CFG_CNF_WDS_ADDR3: memcpy( &lp->wds_port[2].wdsAddress, &pLtv->u.u8[0], ETH_ALEN ); hcfPort = HCF_PORT_3; break; case CFG_CNF_WDS_ADDR4: memcpy( &lp->wds_port[3].wdsAddress, &pLtv->u.u8[0], ETH_ALEN ); hcfPort = HCF_PORT_4; break; case CFG_CNF_WDS_ADDR5: memcpy( &lp->wds_port[4].wdsAddress, &pLtv->u.u8[0], ETH_ALEN ); hcfPort = HCF_PORT_5; break; case CFG_CNF_WDS_ADDR6: memcpy( &lp->wds_port[5].wdsAddress, &pLtv->u.u8[0], ETH_ALEN ); hcfPort = HCF_PORT_6; break; #endif /* USE_WDS */ case CFG_CNF_MCAST_PM_BUF: lp->multicastPMBuffering = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; case CFG_CNF_REJECT_ANY: lp->RejectAny = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; #endif case CFG_CNF_ENCRYPTION: lp->EnableEncryption = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; case CFG_CNF_AUTHENTICATION: lp->authentication = pLtv->u.u16[0]; 
pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; #if 1 //;? (HCF_TYPE) & HCF_TYPE_AP //;?should we restore this to allow smaller memory footprint //case CFG_CNF_EXCL_UNENCRYPTED: //lp->ExcludeUnencrypted = pLtv->u.u16[0]; //pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); //break; case CFG_CNF_MCAST_RATE: /* TODO: determine if we are going to store anything based on this */ break; case CFG_CNF_INTRA_BSS_RELAY: lp->intraBSSRelay = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; #endif case CFG_CNF_MICRO_WAVE: /* TODO: determine if we are going to store anything based on this */ break; //case CFG_CNF_LOAD_BALANCING: /* TODO: determine if we are going to store anything based on this */ //break; //case CFG_CNF_MEDIUM_DISTRIBUTION: /* TODO: determine if we are going to store anything based on this */ //break; //case CFG_CNF_RX_ALL_GROUP_ADDRESS: // TODO: determine if we are going to store anything based on this //break; //case CFG_CNF_COUNTRY_INFO: /* TODO: determine if we are going to store anything based on this */ //break; case CFG_CNF_OWN_SSID: //case CNF_DESIRED_SSID: case CFG_DESIRED_SSID: memset( lp->NetworkName, 0, sizeof( lp->NetworkName )); memcpy( (void *)lp->NetworkName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0] ); pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); /* take care of the special network name "ANY" case */ if(( strlen( &pLtv->u.u8[2] ) == 0 ) || ( strcmp( &pLtv->u.u8[2], "ANY" ) == 0 ) || ( strcmp( &pLtv->u.u8[2], "any" ) == 0 )) { /* set the SSID_STRCT llen field (u16[0]) to zero, and the effectually null the string u8[2] */ pLtv->u.u16[0] = 0; pLtv->u.u8[2] = 0; } break; case CFG_GROUP_ADDR: /* TODO: determine if we are going to store anything based on this */ break; case CFG_CREATE_IBSS: lp->CreateIBSS = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; case CFG_RTS_THRH: lp->RTSThreshold = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); 
break; case CFG_TX_RATE_CNTL: lp->TxRateControl[0] = pLtv->u.u16[0]; lp->TxRateControl[1] = pLtv->u.u16[1]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); pLtv->u.u16[1] = CNV_INT_TO_LITTLE( pLtv->u.u16[1] ); break; case CFG_PROMISCUOUS_MODE: /* TODO: determine if we are going to store anything based on this */ break; //case CFG_WAKE_ON_LAN: /* TODO: determine if we are going to store anything based on this */ //break; #if 1 //;? #if (HCF_TYPE) & HCF_TYPE_AP //;?should we restore this to allow smaller memory footprint case CFG_RTS_THRH0: lp->RTSThreshold = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; case CFG_TX_RATE_CNTL0: //;?no idea what this should be, get going so comment it out lp->TxRateControl = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; #ifdef USE_WDS case CFG_RTS_THRH1: lp->wds_port[0].rtsThreshold = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); hcfPort = HCF_PORT_1; break; case CFG_RTS_THRH2: lp->wds_port[1].rtsThreshold = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); hcfPort = HCF_PORT_2; break; case CFG_RTS_THRH3: lp->wds_port[2].rtsThreshold = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); hcfPort = HCF_PORT_3; break; case CFG_RTS_THRH4: lp->wds_port[3].rtsThreshold = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); hcfPort = HCF_PORT_4; break; case CFG_RTS_THRH5: lp->wds_port[4].rtsThreshold = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); hcfPort = HCF_PORT_5; break; case CFG_RTS_THRH6: lp->wds_port[5].rtsThreshold = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); hcfPort = HCF_PORT_6; break; case CFG_TX_RATE_CNTL1: lp->wds_port[0].txRateCntl = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); hcfPort = HCF_PORT_1; break; case CFG_TX_RATE_CNTL2: lp->wds_port[1].txRateCntl = pLtv->u.u16[0]; pLtv->u.u16[0] = 
CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); hcfPort = HCF_PORT_2; break; case CFG_TX_RATE_CNTL3: lp->wds_port[2].txRateCntl = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); hcfPort = HCF_PORT_3; break; case CFG_TX_RATE_CNTL4: lp->wds_port[3].txRateCntl = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); hcfPort = HCF_PORT_4; break; case CFG_TX_RATE_CNTL5: lp->wds_port[4].txRateCntl = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); hcfPort = HCF_PORT_5; break; case CFG_TX_RATE_CNTL6: lp->wds_port[5].txRateCntl = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); hcfPort = HCF_PORT_6; break; #endif /* USE_WDS */ #endif /* (HCF_TYPE) & HCF_TYPE_AP */ case CFG_DEFAULT_KEYS: { CFG_DEFAULT_KEYS_STRCT *pKeys = (CFG_DEFAULT_KEYS_STRCT *)pLtv; pKeys->key[0].len = CNV_INT_TO_LITTLE( pKeys->key[0].len ); pKeys->key[1].len = CNV_INT_TO_LITTLE( pKeys->key[1].len ); pKeys->key[2].len = CNV_INT_TO_LITTLE( pKeys->key[2].len ); pKeys->key[3].len = CNV_INT_TO_LITTLE( pKeys->key[3].len ); memcpy( (void *)&(lp->DefaultKeys), (void *)pKeys, sizeof( CFG_DEFAULT_KEYS_STRCT )); } break; case CFG_TX_KEY_ID: lp->TransmitKeyID = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; case CFG_SCAN_SSID: /* TODO: determine if we are going to store anything based on this */ break; case CFG_TICK_TIME: /* TODO: determine if we are going to store anything based on this */ break; /* these RIDS are Info RIDs, and should they be allowed for puts??? 
*/ case CFG_MAX_LOAD_TIME: case CFG_DL_BUF: //case CFG_HSI_SUP_RANGE: case CFG_NIC_SERIAL_NUMBER: case CFG_NIC_IDENTITY: case CFG_NIC_MFI_SUP_RANGE: case CFG_NIC_CFI_SUP_RANGE: case CFG_NIC_TEMP_TYPE: case CFG_NIC_PROFILE: case CFG_FW_IDENTITY: case CFG_FW_SUP_RANGE: case CFG_MFI_ACT_RANGES_STA: case CFG_CFI_ACT_RANGES_STA: case CFG_PORT_STAT: case CFG_CUR_SSID: case CFG_CUR_BSSID: case CFG_COMMS_QUALITY: case CFG_CUR_TX_RATE: case CFG_CUR_BEACON_INTERVAL: case CFG_CUR_SCALE_THRH: case CFG_PROTOCOL_RSP_TIME: case CFG_CUR_SHORT_RETRY_LIMIT: case CFG_CUR_LONG_RETRY_LIMIT: case CFG_MAX_TX_LIFETIME: case CFG_MAX_RX_LIFETIME: case CFG_CF_POLLABLE: case CFG_AUTHENTICATION_ALGORITHMS: case CFG_PRIVACY_OPT_IMPLEMENTED: //case CFG_CURRENT_REMOTE_RATES: //case CFG_CURRENT_USED_RATES: //case CFG_CURRENT_SYSTEM_SCALE: //case CFG_CURRENT_TX_RATE1: //case CFG_CURRENT_TX_RATE2: //case CFG_CURRENT_TX_RATE3: //case CFG_CURRENT_TX_RATE4: //case CFG_CURRENT_TX_RATE5: //case CFG_CURRENT_TX_RATE6: case CFG_NIC_MAC_ADDR: case CFG_PCF_INFO: //case CFG_CURRENT_COUNTRY_INFO: case CFG_PHY_TYPE: case CFG_CUR_CHANNEL: //case CFG_CURRENT_POWER_STATE: //case CFG_CCAMODE: case CFG_SUPPORTED_DATA_RATES: break; case CFG_AP_MODE: //;? lp->DownloadFirmware = ( pLtv->u.u16[0] ) + 1; DBG_ERROR( DbgInfo, "set CFG_AP_MODE no longer supported\n" ); break; case CFG_ENCRYPT_STRING: /* TODO: ENDIAN TRANSLATION HERE??? */ memset( lp->szEncryption, 0, sizeof( lp->szEncryption )); memcpy( (void *)lp->szEncryption, (void *)&pLtv->u.u8[0], ( pLtv->len * sizeof( hcf_16 )) ); wl_wep_decode( CRYPT_CODE, &sEncryption, lp->szEncryption ); /* the Linux driver likes to use 1-4 for the key IDs, and then convert to 0-3 when sending to the card. The Windows code base used 0-3 in the API DLL, which was ported to Linux. For the sake of the user experience, we decided to keep 0-3 as the numbers used in the DLL; and will perform the +1 conversion here. 
We could have converted the entire Linux driver, but this is less obtrusive. This may be a "todo" to convert the whole driver */ lp->TransmitKeyID = sEncryption.wTxKeyID + 1; lp->EnableEncryption = sEncryption.wEnabled; memcpy( &lp->DefaultKeys, &sEncryption.EncStr, sizeof( CFG_DEFAULT_KEYS_STRCT )); break; /*case CFG_COUNTRY_STRING: memset( lp->countryString, 0, sizeof( lp->countryString )); memcpy( (void *)lp->countryString, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]); break; */ case CFG_DRIVER_ENABLE: lp->driverEnable = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; case CFG_WOLAS_ENABLE: lp->wolasEnable = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; case CFG_SET_WPA_AUTH_KEY_MGMT_SUITE: lp->AuthKeyMgmtSuite = pLtv->u.u16[0]; pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; case CFG_DISASSOCIATE_ADDR: pLtv->u.u16[ETH_ALEN / 2] = CNV_INT_TO_LITTLE( pLtv->u.u16[ETH_ALEN / 2] ); break; case CFG_ADD_TKIP_DEFAULT_KEY: case CFG_REMOVE_TKIP_DEFAULT_KEY: /* Endian convert the Tx Key Information */ pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] ); break; case CFG_ADD_TKIP_MAPPED_KEY: break; case CFG_REMOVE_TKIP_MAPPED_KEY: break; /* some RIDs just can't be put */ case CFG_MB_INFO: case CFG_IFB: default: break; } /* This code will prevent Static Configuration Entities from being sent to the card, as they require a call to UIL_ACT_APPLY to take effect. 
Dynamic Entities will be sent immediately */ switch( pLtv->typ ) { case CFG_CNF_PORT_TYPE: case CFG_CNF_OWN_MAC_ADDR: case CFG_CNF_OWN_CHANNEL: case CFG_CNF_OWN_SSID: case CFG_CNF_OWN_ATIM_WINDOW: case CFG_CNF_SYSTEM_SCALE: case CFG_CNF_MAX_DATA_LEN: case CFG_CNF_PM_ENABLED: case CFG_CNF_MCAST_RX: case CFG_CNF_MAX_SLEEP_DURATION: case CFG_CNF_HOLDOVER_DURATION: case CFG_CNF_OWN_NAME: case CFG_CNF_LOAD_BALANCING: case CFG_CNF_MEDIUM_DISTRIBUTION: #ifdef WARP case CFG_CNF_TX_POW_LVL: case CFG_CNF_CONNECTION_CNTL: //case CFG_PROBE_DATA_RATE: #endif // HERMES25 #if 1 //;? (HCF_TYPE) & HCF_TYPE_AP //;?should we restore this to allow smaller memory footprint case CFG_CNF_OWN_DTIM_PERIOD: #ifdef WARP case CFG_CNF_OWN_BEACON_INTERVAL: // Own Beacon Interval #endif // WARP #ifdef USE_WDS case CFG_CNF_WDS_ADDR1: case CFG_CNF_WDS_ADDR2: case CFG_CNF_WDS_ADDR3: case CFG_CNF_WDS_ADDR4: case CFG_CNF_WDS_ADDR5: case CFG_CNF_WDS_ADDR6: #endif case CFG_CNF_MCAST_PM_BUF: case CFG_CNF_REJECT_ANY: #endif case CFG_CNF_ENCRYPTION: case CFG_CNF_AUTHENTICATION: #if 1 //;? 
(HCF_TYPE) & HCF_TYPE_AP //;?should we restore this to allow smaller memory footprint case CFG_CNF_EXCL_UNENCRYPTED: case CFG_CNF_MCAST_RATE: case CFG_CNF_INTRA_BSS_RELAY: #endif case CFG_CNF_MICRO_WAVE: //case CFG_CNF_LOAD_BALANCING: //case CFG_CNF_MEDIUM_DISTRIBUTION: //case CFG_CNF_RX_ALL_GROUP_ADDRESS: //case CFG_CNF_COUNTRY_INFO: //case CFG_COUNTRY_STRING: case CFG_AP_MODE: case CFG_ENCRYPT_STRING: //case CFG_DRIVER_ENABLE: case CFG_WOLAS_ENABLE: case CFG_MB_INFO: case CFG_IFB: break; /* Deal with this dynamic MSF RID, as it's required for WPA */ case CFG_DRIVER_ENABLE: if( lp->driverEnable ) { //hcf_cntl_port( &( lp->hcfCtx ), // HCF_PORT_ENABLE | HCF_PORT_0 ); // //hcf_cntl( &( lp->hcfCtx ), // // HCF_PORT_ENABLE | HCF_PORT_0 ); //hcf_cntl( &( lp->hcfCtx ), HCF_CNTL_ENABLE ); // //hcf_cntl( &( lp->hcfCtx ), HCF_CNTL_CONNECT ); hcf_cntl( &( lp->hcfCtx ), HCF_CNTL_ENABLE | HCF_PORT_0 ); hcf_cntl( &( lp->hcfCtx ), HCF_CNTL_CONNECT ); } else { //hcf_cntl_port( &( lp->hcfCtx ), // HCF_PORT_DISABLE | HCF_PORT_0 ); // //hcf_cntl( &( lp->hcfCtx ), // // HCF_PORT_DISABLE | HCF_PORT_0 ); //hcf_cntl( &( lp->hcfCtx ), HCF_CNTL_DISABLE ); // //hcf_cntl( &( lp->hcfCtx ), HCF_CNTL_DISCONNECT ); hcf_cntl( &( lp->hcfCtx ), HCF_CNTL_DISABLE | HCF_PORT_0 ); hcf_cntl( &( lp->hcfCtx ), HCF_CNTL_DISCONNECT ); } break; default: wl_act_int_off( lp ); urq->result = hcf_put_info(&(lp->hcfCtx), (LTVP) pLtv); wl_act_int_on( lp ); break; } if( ltvAllocated ) { kfree( pLtv ); } } else { urq->result = UIL_FAILURE; } } else { DBG_ERROR( DbgInfo, "EPERM\n" ); urq->result = UIL_FAILURE; result = -EPERM; } } else { DBG_ERROR( DbgInfo, "UIL_ERR_WRONG_IFB\n" ); urq->result = UIL_ERR_WRONG_IFB; } DBG_LEAVE( DbgInfo ); return result; } // wvlan_uil_put_info /*============================================================================*/ /******************************************************************************* * wvlan_uil_get_info() 
******************************************************************************* * * DESCRIPTION: * * Sends a specific RID directly to the driver to retrieve configuration * info. * * PARAMETERS: * * urq - a pointer to the UIL request buffer * lp - a pointer to the device's private adapter structure * * RETURNS: * * UIL_SUCCESS * UIL_ERR_xxx value otherwise * ******************************************************************************/ int wvlan_uil_get_info( struct uilreq *urq, struct wl_private *lp ) { int result = 0; int i; /*------------------------------------------------------------------------*/ DBG_FUNC( "wvlan_uil_get_info" ); DBG_ENTER( DbgInfo ); if( urq->hcfCtx == &( lp->hcfCtx )) { if(( urq->data != NULL ) && ( urq->len != 0 )) { ltv_t *pLtv; bool_t ltvAllocated = FALSE; /* Make sure that we have at least a command and length */ if( urq->len < ( sizeof( hcf_16 ) * 2 )) { urq->len = sizeof( lp->ltvRecord ); DBG_ERROR( DbgInfo, "No Length/Type in LTV!!!\n" ); DBG_ERROR( DbgInfo, "UIL_ERR_LEN\n" ); urq->result = UIL_ERR_LEN; DBG_LEAVE( DbgInfo ); return result; } /* Verify the user's LTV record header. */ result = verify_area( VERIFY_READ, urq->data, sizeof( hcf_16 ) * 2 ); if( result != 0 ) { DBG_ERROR( DbgInfo, "verify_area(), VERIFY_READ FAILED\n" ); urq->result = UIL_FAILURE; DBG_LEAVE( DbgInfo ); return result; } /* Get only the command and length information. */ result = copy_from_user( &( lp->ltvRecord ), urq->data, sizeof( hcf_16 ) * 2 ); /* Make sure the incoming LTV record length is within the bounds of the IOCTL length. 
*/ if((( lp->ltvRecord.len + 1 ) * sizeof( hcf_16 )) > urq->len ) { DBG_ERROR( DbgInfo, "Incoming LTV too big\n" ); urq->len = sizeof( lp->ltvRecord ); urq->result = UIL_ERR_LEN; DBG_LEAVE( DbgInfo ); return result; } /* Determine if hcf_get_info() is needed or not */ switch ( lp->ltvRecord.typ ) { case CFG_NIC_IDENTITY: memcpy( &lp->ltvRecord.u.u8[0], &lp->NICIdentity, sizeof( lp->NICIdentity )); break; case CFG_PRI_IDENTITY: memcpy( &lp->ltvRecord.u.u8[0], &lp->PrimaryIdentity, sizeof( lp->PrimaryIdentity )); break; case CFG_AP_MODE: DBG_ERROR( DbgInfo, "set CFG_AP_MODE no longer supported, so is get useful ????\n" ); lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP; break; //case CFG_DRV_INFO: case CFG_ENCRYPT_STRING: case CFG_COUNTRY_STRING: case CFG_DRIVER_ENABLE: case CFG_WOLAS_ENABLE: // TODO: determine if we're going to support these urq->result = UIL_FAILURE; break; case CFG_DRV_INFO: DBG_TRACE( DbgInfo, "Intercept CFG_DRV_INFO\n" ); result = cfg_driver_info( urq, lp ); break; case CFG_DRV_IDENTITY: DBG_TRACE( DbgInfo, "Intercept CFG_DRV_IDENTITY\n" ); result = cfg_driver_identity( urq, lp ); break; case CFG_IFB: /* IFB can be a security hole */ if( !capable( CAP_NET_ADMIN )) { result = -EPERM; break; } /* Else fall through to the default */ case CFG_FW_IDENTITY: // For Hermes-1, this is cached default: /* Verify the user buffer */ result = verify_area( VERIFY_WRITE, urq->data, urq->len ); if( result != 0 ) { DBG_ERROR( DbgInfo, "verify_area(), VERIFY_WRITE FAILED\n" ); urq->result = UIL_FAILURE; break; } /* If the requested length is greater than the size of our local LTV record, try to allocate it from the kernel stack. Otherwise, we just use our local LTV record. */ if( urq->len > sizeof( lp->ltvRecord )) { pLtv = kmalloc(urq->len, GFP_KERNEL); if (pLtv != NULL) { ltvAllocated = TRUE; /* Copy the command/length information into the new buffer. 
*/ memcpy( pLtv, &( lp->ltvRecord ), sizeof( hcf_16 ) * 2 ); } else { urq->len = sizeof( lp->ltvRecord ); urq->result = UIL_ERR_LEN; DBG_ERROR( DbgInfo, "kmalloc FAILED\n" ); DBG_ERROR( DbgInfo, "UIL_ERR_LEN\n" ); result = -ENOMEM; break; } } else { pLtv = &( lp->ltvRecord ); } wl_act_int_off( lp ); urq->result = hcf_get_info( &( lp->hcfCtx ), (LTVP) pLtv ); wl_act_int_on( lp ); // Copy the LTV into the user's buffer. //copy_to_user( urq->data, pLtv, urq->len ); //if( ltvAllocated ) //{ // kfree( pLtv ); //} //urq->result = UIL_SUCCESS; break; } /* Handle endian conversion of special fields */ switch( lp->ltvRecord.typ ) { /* simple int gets just need the first hcf_16 byte flipped */ case CFG_CNF_PORT_TYPE: case CFG_CNF_OWN_CHANNEL: case CFG_CNF_OWN_ATIM_WINDOW: case CFG_CNF_SYSTEM_SCALE: case CFG_CNF_MAX_DATA_LEN: case CFG_CNF_PM_ENABLED: case CFG_CNF_MCAST_RX: case CFG_CNF_MAX_SLEEP_DURATION: case CFG_CNF_HOLDOVER_DURATION: case CFG_CNF_OWN_DTIM_PERIOD: case CFG_CNF_MCAST_PM_BUF: case CFG_CNF_REJECT_ANY: case CFG_CNF_ENCRYPTION: case CFG_CNF_AUTHENTICATION: case CFG_CNF_EXCL_UNENCRYPTED: case CFG_CNF_INTRA_BSS_RELAY: case CFG_CNF_MICRO_WAVE: case CFG_CNF_LOAD_BALANCING: case CFG_CNF_MEDIUM_DISTRIBUTION: #ifdef WARP case CFG_CNF_TX_POW_LVL: case CFG_CNF_CONNECTION_CNTL: case CFG_CNF_OWN_BEACON_INTERVAL: // Own Beacon Interval case CFG_COEXISTENSE_BEHAVIOUR: // Coexistence Behavior //case CFG_CNF_RX_ALL_GROUP_ADDRESS: #endif // HERMES25 case CFG_CREATE_IBSS: case CFG_RTS_THRH: case CFG_PROMISCUOUS_MODE: //case CFG_WAKE_ON_LAN: case CFG_RTS_THRH0: case CFG_RTS_THRH1: case CFG_RTS_THRH2: case CFG_RTS_THRH3: case CFG_RTS_THRH4: case CFG_RTS_THRH5: case CFG_RTS_THRH6: case CFG_TX_RATE_CNTL0: case CFG_TX_RATE_CNTL1: case CFG_TX_RATE_CNTL2: case CFG_TX_RATE_CNTL3: case CFG_TX_RATE_CNTL4: case CFG_TX_RATE_CNTL5: case CFG_TX_RATE_CNTL6: case CFG_TX_KEY_ID: case CFG_TICK_TIME: case CFG_MAX_LOAD_TIME: case CFG_NIC_TEMP_TYPE: case CFG_PORT_STAT: case CFG_CUR_TX_RATE: case 
CFG_CUR_BEACON_INTERVAL: case CFG_PROTOCOL_RSP_TIME: case CFG_CUR_SHORT_RETRY_LIMIT: case CFG_CUR_LONG_RETRY_LIMIT: case CFG_MAX_TX_LIFETIME: case CFG_MAX_RX_LIFETIME: case CFG_CF_POLLABLE: case CFG_PRIVACY_OPT_IMPLEMENTED: //case CFG_CURRENT_REMOTE_RATES: //case CFG_CURRENT_USED_RATES: //case CFG_CURRENT_SYSTEM_SCALE: //case CFG_CURRENT_TX_RATE1: //case CFG_CURRENT_TX_RATE2: //case CFG_CURRENT_TX_RATE3: //case CFG_CURRENT_TX_RATE4: //case CFG_CURRENT_TX_RATE5: //case CFG_CURRENT_TX_RATE6: case CFG_PHY_TYPE: case CFG_CUR_CHANNEL: //case CFG_CURRENT_POWER_STATE: //case CFG_CCAMODE: // lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[0] ); // break; /* name string gets just need the first hcf_16 byte flipped (length of string) */ case CFG_CNF_OWN_SSID: case CFG_CNF_OWN_NAME: //case CNF_DESIRED_SSID: case CFG_DESIRED_SSID: case CFG_SCAN_SSID: case CFG_CUR_SSID: lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[0] ); break; /* non-length counted strings need no byte flipping */ case CFG_CNF_OWN_MAC_ADDR: /* this case is no longer valid: CFG_CNF_WDS_ADDR */ case CFG_CNF_WDS_ADDR1: case CFG_CNF_WDS_ADDR2: case CFG_CNF_WDS_ADDR3: case CFG_CNF_WDS_ADDR4: case CFG_CNF_WDS_ADDR5: case CFG_CNF_WDS_ADDR6: case CFG_GROUP_ADDR: case CFG_NIC_SERIAL_NUMBER: case CFG_CUR_BSSID: case CFG_NIC_MAC_ADDR: case CFG_SUPPORTED_DATA_RATES: /* need to ensure we can treat this as a string */ break; //case CFG_CNF_COUNTRY_INFO: /* special case, see page 75 of 022486, Rev C. */ //case CFG_CURRENT_COUNTRY_INFO: /* special case, see page 101 of 022486, Rev C. 
*/ /* lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[0] ); lp->ltvRecord.u.u16[3] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[3] ); for( i = 4; i < lp->ltvRecord.len; i++ ) { lp->ltvRecord.u.u16[i] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[i] ); } break; */ case CFG_DEFAULT_KEYS: { CFG_DEFAULT_KEYS_STRCT *pKeys = (CFG_DEFAULT_KEYS_STRCT *)&lp->ltvRecord.u.u8[0]; pKeys[0].len = CNV_INT_TO_LITTLE( pKeys[0].len ); pKeys[1].len = CNV_INT_TO_LITTLE( pKeys[1].len ); pKeys[2].len = CNV_INT_TO_LITTLE( pKeys[2].len ); pKeys[3].len = CNV_INT_TO_LITTLE( pKeys[3].len ); } break; case CFG_CNF_MCAST_RATE: case CFG_TX_RATE_CNTL: case CFG_SUPPORTED_RATE_SET_CNTL: // Supported Rate Set Control case CFG_BASIC_RATE_SET_CNTL: // Basic Rate Set Control lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[0] ); lp->ltvRecord.u.u16[1] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[1] ); break; case CFG_DL_BUF: case CFG_NIC_IDENTITY: case CFG_COMMS_QUALITY: case CFG_PCF_INFO: lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[0] ); lp->ltvRecord.u.u16[1] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[1] ); lp->ltvRecord.u.u16[2] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[2] ); break; case CFG_FW_IDENTITY: lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[0] ); lp->ltvRecord.u.u16[1] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[1] ); lp->ltvRecord.u.u16[2] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[2] ); lp->ltvRecord.u.u16[3] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[3] ); break; //case CFG_HSI_SUP_RANGE: case CFG_NIC_MFI_SUP_RANGE: case CFG_NIC_CFI_SUP_RANGE: case CFG_NIC_PROFILE: case CFG_FW_SUP_RANGE: lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[0] ); lp->ltvRecord.u.u16[1] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[1] ); lp->ltvRecord.u.u16[2] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[2] ); lp->ltvRecord.u.u16[3] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[3] ); lp->ltvRecord.u.u16[4] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[4] ); break; case 
CFG_MFI_ACT_RANGES_STA: case CFG_CFI_ACT_RANGES_STA: case CFG_CUR_SCALE_THRH: case CFG_AUTHENTICATION_ALGORITHMS: for( i = 0; i < ( lp->ltvRecord.len - 1 ); i++ ) { lp->ltvRecord.u.u16[i] = CNV_INT_TO_LITTLE( lp->ltvRecord.u.u16[i] ); } break; /* done at init time, and endian handled then */ case CFG_PRI_IDENTITY: break; case CFG_MB_INFO: //wvlanEndianTranslateMailbox( pLtv ); break; /* MSF and HCF RIDS */ case CFG_IFB: case CFG_DRV_INFO: case CFG_AP_MODE: case CFG_ENCRYPT_STRING: case CFG_COUNTRY_STRING: case CFG_DRIVER_ENABLE: case CFG_WOLAS_ENABLE: default: break; } // Copy the LTV into the user's buffer. copy_to_user( urq->data, &( lp->ltvRecord ), urq->len ); if( ltvAllocated ) { kfree( &( lp->ltvRecord )); } urq->result = UIL_SUCCESS; } else { urq->result = UIL_FAILURE; } } else { DBG_ERROR( DbgInfo, "UIL_ERR_WRONG_IFB\n" ); urq->result = UIL_ERR_WRONG_IFB; } DBG_LEAVE( DbgInfo ); return result; } // wvlan_uil_get_info /*============================================================================*/ /******************************************************************************* * cfg_driver_info() ******************************************************************************* * * DESCRIPTION: * * Retrieves driver information. * * PARAMETERS: * * urq - a pointer to the UIL request buffer * lp - a pointer to the device's private adapter structure * * RETURNS: * * UIL_SUCCESS * UIL_ERR_xxx value otherwise * ******************************************************************************/ int cfg_driver_info( struct uilreq *urq, struct wl_private *lp ) { int result = 0; /*------------------------------------------------------------------------*/ DBG_FUNC( "cfg_driver_info" ); DBG_ENTER( DbgInfo ); /* Make sure that user buffer can handle the driver information buffer */ if( urq->len < sizeof( lp->driverInfo )) { urq->len = sizeof( lp->driverInfo ); urq->result = UIL_ERR_LEN; DBG_LEAVE( DbgInfo ); return result; } /* Verify the user buffer. 
*/ result = verify_area( VERIFY_WRITE, urq->data, sizeof( lp->driverInfo )); if( result != 0 ) { urq->result = UIL_FAILURE; DBG_LEAVE( DbgInfo ); return result; } lp->driverInfo.card_stat = lp->hcfCtx.IFB_CardStat; // Copy the driver information into the user's buffer. urq->result = UIL_SUCCESS; copy_to_user( urq->data, &( lp->driverInfo ), sizeof( lp->driverInfo )); DBG_LEAVE( DbgInfo ); return result; } // cfg_driver_info /*============================================================================*/ /******************************************************************************* * cfg_driver_identity() ******************************************************************************* * * DESCRIPTION: * * Retrieves ID information from the card. * * PARAMETERS: * * urq - a pointer to the UIL request buffer * lp - a pointer to the device's private adapter structure * * RETURNS: * * UIL_SUCCESS * UIL_ERR_xxx value otherwise * ******************************************************************************/ int cfg_driver_identity( struct uilreq *urq, struct wl_private *lp ) { int result = 0; /*------------------------------------------------------------------------*/ DBG_FUNC( "wvlan_driver_identity" ); DBG_ENTER( DbgInfo ); /* Make sure that user buffer can handle the driver identity structure. */ if( urq->len < sizeof( lp->driverIdentity )) { urq->len = sizeof( lp->driverIdentity ); urq->result = UIL_ERR_LEN; DBG_LEAVE( DbgInfo ); return result; } /* Verify the user buffer. */ result = verify_area( VERIFY_WRITE, urq->data, sizeof( lp->driverIdentity )); if( result != 0 ) { urq->result = UIL_FAILURE; DBG_LEAVE( DbgInfo ); return result; } /* Copy the driver identity into the user's buffer. 
*/ urq->result = UIL_SUCCESS; copy_to_user( urq->data, &( lp->driverIdentity ), sizeof( lp->driverIdentity )); DBG_LEAVE( DbgInfo ); return result; } // cfg_driver_identity /*============================================================================*/ #endif /* USE_UIL */ /* If WIRELESS_EXT is not defined, then the functions that follow will not be included in the build. */ /* NOTE: Are these still even needed? */ #ifdef WIRELESS_EXT /******************************************************************************* * wvlan_set_netname() ******************************************************************************* * * DESCRIPTION: * * Set the ESSID of the card. * * PARAMETERS: * * wrq - a pointer to the wireless request buffer * lp - a pointer to the device's private adapter structure * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wvlan_set_netname(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct wl_private *lp = wl_priv(dev); unsigned long flags; int ret = 0; /*------------------------------------------------------------------------*/ DBG_FUNC( "wvlan_set_netname" ); DBG_ENTER( DbgInfo ); wl_lock(lp, &flags); memset( lp->NetworkName, 0, sizeof( lp->NetworkName )); memcpy( lp->NetworkName, extra, wrqu->data.length); /* Commit the adapter parameters */ wl_apply(lp); wl_unlock(lp, &flags); DBG_LEAVE( DbgInfo ); return ret; } // wvlan_set_netname /*============================================================================*/ /******************************************************************************* * wvlan_get_netname() ******************************************************************************* * * DESCRIPTION: * * Get the ESSID of the card. 
* * PARAMETERS: * * wrq - a pointer to the wireless request buffer * lp - a pointer to the device's private adapter structure * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wvlan_get_netname(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct wl_private *lp = wl_priv(dev); unsigned long flags; int ret = 0; int status = -1; wvName_t *pName; /*------------------------------------------------------------------------*/ DBG_FUNC( "wvlan_get_netname" ); DBG_ENTER( DbgInfo ); wl_lock(lp, &flags); /* Get the current network name */ lp->ltvRecord.len = 1 + ( sizeof( *pName ) / sizeof( hcf_16 )); lp->ltvRecord.typ = CFG_CUR_SSID; status = hcf_get_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord )); if( status == HCF_SUCCESS ) { pName = (wvName_t *)&( lp->ltvRecord.u.u32 ); memset(extra, '\0', HCF_MAX_NAME_LEN); wrqu->data.length = pName->length; memcpy(extra, pName->name, pName->length); } else { ret = -EFAULT; } wl_unlock(lp, &flags); DBG_LEAVE( DbgInfo ); return ret; } // wvlan_get_netname /*============================================================================*/ /******************************************************************************* * wvlan_set_station_nickname() ******************************************************************************* * * DESCRIPTION: * * Set the card's station nickname. 
* * PARAMETERS: * * wrq - a pointer to the wireless request buffer * lp - a pointer to the device's private adapter structure * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wvlan_set_station_nickname(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct wl_private *lp = wl_priv(dev); unsigned long flags; int ret = 0; /*------------------------------------------------------------------------*/ DBG_FUNC( "wvlan_set_station_nickname" ); DBG_ENTER( DbgInfo ); wl_lock(lp, &flags); memset( lp->StationName, 0, sizeof( lp->StationName )); memcpy( lp->StationName, extra, wrqu->data.length); /* Commit the adapter parameters */ wl_apply( lp ); wl_unlock(lp, &flags); DBG_LEAVE( DbgInfo ); return ret; } // wvlan_set_station_nickname /*============================================================================*/ /******************************************************************************* * wvlan_get_station_nickname() ******************************************************************************* * * DESCRIPTION: * * Get the card's station nickname. 
* * PARAMETERS: * * wrq - a pointer to the wireless request buffer * lp - a pointer to the device's private adapter structure * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wvlan_get_station_nickname(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct wl_private *lp = wl_priv(dev); unsigned long flags; int ret = 0; int status = -1; wvName_t *pName; /*------------------------------------------------------------------------*/ DBG_FUNC( "wvlan_get_station_nickname" ); DBG_ENTER( DbgInfo ); wl_lock( lp, &flags ); /* Get the current station name */ lp->ltvRecord.len = 1 + ( sizeof( *pName ) / sizeof( hcf_16 )); lp->ltvRecord.typ = CFG_CNF_OWN_NAME; status = hcf_get_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord )); if( status == HCF_SUCCESS ) { pName = (wvName_t *)&( lp->ltvRecord.u.u32 ); memset(extra, '\0', HCF_MAX_NAME_LEN); wrqu->data.length = pName->length; memcpy(extra, pName->name, pName->length); } else { ret = -EFAULT; } wl_unlock(lp, &flags); //out: DBG_LEAVE( DbgInfo ); return ret; } // wvlan_get_station_nickname /*============================================================================*/ /******************************************************************************* * wvlan_set_porttype() ******************************************************************************* * * DESCRIPTION: * * Set the card's porttype * * PARAMETERS: * * wrq - a pointer to the wireless request buffer * lp - a pointer to the device's private adapter structure * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wvlan_set_porttype(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct wl_private *lp = wl_priv(dev); unsigned long flags; int ret = 0; hcf_16 portType; 
/*------------------------------------------------------------------------*/ DBG_FUNC( "wvlan_set_porttype" ); DBG_ENTER( DbgInfo ); wl_lock(lp, &flags); /* Validate the new value */ portType = *((__u32 *)extra); if( !(( portType == 1 ) || ( portType == 3 ))) { ret = -EINVAL; goto out_unlock; } lp->PortType = portType; /* Commit the adapter parameters */ wl_apply( lp ); out_unlock: wl_unlock(lp, &flags); //out: DBG_LEAVE( DbgInfo ); return ret; } /*============================================================================*/ /******************************************************************************* * wvlan_get_porttype() ******************************************************************************* * * DESCRIPTION: * * Get the card's porttype * * PARAMETERS: * * wrq - a pointer to the wireless request buffer * lp - a pointer to the device's private adapter structure * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wvlan_get_porttype(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct wl_private *lp = wl_priv(dev); unsigned long flags; int ret = 0; int status = -1; hcf_16 *pPortType; __u32 *pData = (__u32 *)extra; /*------------------------------------------------------------------------*/ DBG_FUNC( "wvlan_get_porttype" ); DBG_ENTER( DbgInfo ); wl_lock( lp, &flags ); /* Get the current port type */ lp->ltvRecord.len = 1 + ( sizeof( *pPortType ) / sizeof( hcf_16 )); lp->ltvRecord.typ = CFG_CNF_PORT_TYPE; status = hcf_get_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord )); if( status == HCF_SUCCESS ) { pPortType = (hcf_16 *)&( lp->ltvRecord.u.u32 ); *pData = CNV_LITTLE_TO_INT( *pPortType ); } else { ret = -EFAULT; } wl_unlock(lp, &flags); //out: DBG_LEAVE( DbgInfo ); return ret; } // wvlan_get_porttype /*============================================================================*/ #endif // WIRELESS_EXT #ifdef USE_RTS 
/******************************************************************************* * wvlan_rts() ******************************************************************************* * * DESCRIPTION: * * IOCTL handler for RTS commands * * PARAMETERS: * * rrq - a pointer to the rts request buffer * lp - a pointer to the device's private adapter structure * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wvlan_rts( struct rtsreq *rrq, __u32 io_base ) { int ioctl_ret = 0; /*------------------------------------------------------------------------*/ DBG_FUNC( "wvlan_rts" ); DBG_ENTER( DbgInfo ); DBG_PRINT( "io_base: 0x%08x\n", io_base ); switch( rrq->typ ) { case WL_IOCTL_RTS_READ: DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_RTS -- WL_IOCTL_RTS_READ\n"); rrq->data[0] = IN_PORT_WORD( io_base + rrq->reg ); DBG_TRACE( DbgInfo, " reg 0x%04x ==> 0x%04x\n", rrq->reg, CNV_LITTLE_TO_SHORT( rrq->data[0] ) ); break; case WL_IOCTL_RTS_WRITE: DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_RTS -- WL_IOCTL_RTS_WRITE\n"); OUT_PORT_WORD( io_base + rrq->reg, rrq->data[0] ); DBG_TRACE( DbgInfo, " reg 0x%04x <== 0x%04x\n", rrq->reg, CNV_LITTLE_TO_SHORT( rrq->data[0] ) ); break; case WL_IOCTL_RTS_BATCH_READ: DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_RTS -- WL_IOCTL_RTS_BATCH_READ\n"); IN_PORT_STRING_16( io_base + rrq->reg, rrq->data, rrq->len ); DBG_TRACE( DbgInfo, " reg 0x%04x ==> %d bytes\n", rrq->reg, rrq->len * sizeof (__u16 ) ); break; case WL_IOCTL_RTS_BATCH_WRITE: DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_RTS -- WL_IOCTL_RTS_BATCH_WRITE\n"); OUT_PORT_STRING_16( io_base + rrq->reg, rrq->data, rrq->len ); DBG_TRACE( DbgInfo, " reg 0x%04x <== %d bytes\n", rrq->reg, rrq->len * sizeof (__u16) ); break; default: DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_RTS -- UNSUPPORTED RTS CODE: 0x%X", rrq->typ ); ioctl_ret = -EOPNOTSUPP; break; } DBG_LEAVE( DbgInfo ); return ioctl_ret; } // wvlan_rts 
/*============================================================================*/ #endif /* USE_RTS */
gpl-2.0
davidmueller13/flo_kernel
arch/arm/mach-sa1100/leds-lart.c
4786
2001
/* * linux/arch/arm/mach-sa1100/leds-lart.c * * (C) Erik Mouw (J.A.K.Mouw@its.tudelft.nl), April 21, 2000 * * LART uses the LED as follows: * - GPIO23 is the LED, on if system is not idle * You can use both CONFIG_LEDS_CPU and CONFIG_LEDS_TIMER at the same * time, but in that case the timer events will still dictate the * pace of the LED. */ #include <linux/init.h> #include <mach/hardware.h> #include <asm/leds.h> #include "leds.h" #define LED_STATE_ENABLED 1 #define LED_STATE_CLAIMED 2 static unsigned int led_state; static unsigned int hw_led_state; #define LED_23 GPIO_GPIO23 #define LED_MASK (LED_23) void lart_leds_event(led_event_t evt) { unsigned long flags; local_irq_save(flags); switch(evt) { case led_start: /* pin 23 is output pin */ GPDR |= LED_23; hw_led_state = LED_MASK; led_state = LED_STATE_ENABLED; break; case led_stop: led_state &= ~LED_STATE_ENABLED; break; case led_claim: led_state |= LED_STATE_CLAIMED; hw_led_state = LED_MASK; break; case led_release: led_state &= ~LED_STATE_CLAIMED; hw_led_state = LED_MASK; break; #ifdef CONFIG_LEDS_TIMER case led_timer: if (!(led_state & LED_STATE_CLAIMED)) hw_led_state ^= LED_23; break; #endif #ifdef CONFIG_LEDS_CPU case led_idle_start: /* The LART people like the LED to be off when the system is idle... */ if (!(led_state & LED_STATE_CLAIMED)) hw_led_state &= ~LED_23; break; case led_idle_end: /* ... and on if the system is not idle */ if (!(led_state & LED_STATE_CLAIMED)) hw_led_state |= LED_23; break; #endif case led_red_on: if (led_state & LED_STATE_CLAIMED) hw_led_state &= ~LED_23; break; case led_red_off: if (led_state & LED_STATE_CLAIMED) hw_led_state |= LED_23; break; default: break; } /* Now set the GPIO state, or nothing will happen at all */ if (led_state & LED_STATE_ENABLED) { GPSR = hw_led_state; GPCR = hw_led_state ^ LED_MASK; } local_irq_restore(flags); }
gpl-2.0
TaichiN/android_kernel_samsung_tuna
drivers/usb/host/ohci-pnx8550.c
4786
5864
/* * OHCI HCD (Host Controller Driver) for USB. * * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at> * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net> * (C) Copyright 2002 Hewlett-Packard Company * (C) Copyright 2005 Embedded Alley Solutions, Inc. * * Bus Glue for PNX8550 * * Written by Christopher Hoover <ch@hpl.hp.com> * Based on fragments of previous driver by Russell King et al. * * Modified for LH7A404 from ohci-sa1111.c * by Durgesh Pattamatta <pattamattad@sharpsec.com> * * Modified for PNX8550 from ohci-sa1111.c and sa-omap.c * by Vitaly Wool <vitalywool@gmail.com> * * This file is licenced under the GPL. */ #include <linux/device.h> #include <linux/platform_device.h> #include <asm/mach-pnx8550/usb.h> #include <asm/mach-pnx8550/int.h> #include <asm/mach-pnx8550/pci.h> #ifndef CONFIG_PNX8550 #error "This file is PNX8550 bus glue. CONFIG_PNX8550 must be defined." #endif extern int usb_disabled(void); /*-------------------------------------------------------------------------*/ static void pnx8550_start_hc(struct platform_device *dev) { /* * Set register CLK48CTL to enable and 48MHz */ outl(0x00000003, PCI_BASE | 0x0004770c); /* * Set register CLK12CTL to enable and 48MHz */ outl(0x00000003, PCI_BASE | 0x00047710); udelay(100); } static void pnx8550_stop_hc(struct platform_device *dev) { udelay(10); } /*-------------------------------------------------------------------------*/ /* configure so an HC device and id are always provided */ /* always called with process context; sleeping is OK */ /** * usb_hcd_pnx8550_probe - initialize pnx8550-based HCDs * Context: !in_interrupt() * * Allocates basic resources for this USB host controller, and * then invokes the start() method for the HCD associated with it * through the hotplug entry's driver_data. 
* */ int usb_hcd_pnx8550_probe (const struct hc_driver *driver, struct platform_device *dev) { int retval; struct usb_hcd *hcd; if (dev->resource[0].flags != IORESOURCE_MEM || dev->resource[1].flags != IORESOURCE_IRQ) { dev_err (&dev->dev,"invalid resource type\n"); return -ENOMEM; } hcd = usb_create_hcd (driver, &dev->dev, "pnx8550"); if (!hcd) return -ENOMEM; hcd->rsrc_start = dev->resource[0].start; hcd->rsrc_len = dev->resource[0].end - dev->resource[0].start + 1; if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { dev_err(&dev->dev, "request_mem_region [0x%08llx, 0x%08llx] " "failed\n", hcd->rsrc_start, hcd->rsrc_len); retval = -EBUSY; goto err1; } hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { dev_err(&dev->dev, "ioremap [[0x%08llx, 0x%08llx] failed\n", hcd->rsrc_start, hcd->rsrc_len); retval = -ENOMEM; goto err2; } pnx8550_start_hc(dev); ohci_hcd_init(hcd_to_ohci(hcd)); retval = usb_add_hcd(hcd, dev->resource[1].start, IRQF_DISABLED); if (retval == 0) return retval; pnx8550_stop_hc(dev); iounmap(hcd->regs); err2: release_mem_region(hcd->rsrc_start, hcd->rsrc_len); err1: usb_put_hcd(hcd); return retval; } /* may be called without controller electrically present */ /* may be called with controller, bus, and devices active */ /** * usb_hcd_pnx8550_remove - shutdown processing for pnx8550-based HCDs * @dev: USB Host Controller being removed * Context: !in_interrupt() * * Reverses the effect of usb_hcd_pnx8550_probe(), first invoking * the HCD's stop() method. It is always called from a thread * context, normally "rmmod", "apmd", or something similar. 
* */ void usb_hcd_pnx8550_remove (struct usb_hcd *hcd, struct platform_device *dev) { usb_remove_hcd(hcd); pnx8550_stop_hc(dev); iounmap(hcd->regs); release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); } /*-------------------------------------------------------------------------*/ static int __devinit ohci_pnx8550_start (struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci (hcd); int ret; ohci_dbg (ohci, "ohci_pnx8550_start, ohci:%p", ohci); if ((ret = ohci_init(ohci)) < 0) return ret; if ((ret = ohci_run (ohci)) < 0) { err ("can't start %s", hcd->self.bus_name); ohci_stop (hcd); return ret; } return 0; } /*-------------------------------------------------------------------------*/ static const struct hc_driver ohci_pnx8550_hc_driver = { .description = hcd_name, .product_desc = "PNX8550 OHCI", .hcd_priv_size = sizeof(struct ohci_hcd), /* * generic hardware linkage */ .irq = ohci_irq, .flags = HCD_USB11 | HCD_MEMORY, /* * basic lifecycle operations */ .start = ohci_pnx8550_start, .stop = ohci_stop, /* * managing i/o requests and associated device resources */ .urb_enqueue = ohci_urb_enqueue, .urb_dequeue = ohci_urb_dequeue, .endpoint_disable = ohci_endpoint_disable, /* * scheduling support */ .get_frame_number = ohci_get_frame, /* * root hub support */ .hub_status_data = ohci_hub_status_data, .hub_control = ohci_hub_control, #ifdef CONFIG_PM .bus_suspend = ohci_bus_suspend, .bus_resume = ohci_bus_resume, #endif .start_port_reset = ohci_start_port_reset, }; /*-------------------------------------------------------------------------*/ static int ohci_hcd_pnx8550_drv_probe(struct platform_device *pdev) { int ret; if (usb_disabled()) return -ENODEV; ret = usb_hcd_pnx8550_probe(&ohci_pnx8550_hc_driver, pdev); return ret; } static int ohci_hcd_pnx8550_drv_remove(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); usb_hcd_pnx8550_remove(hcd, pdev); return 0; } MODULE_ALIAS("platform:pnx8550-ohci"); static struct 
platform_driver ohci_hcd_pnx8550_driver = { .driver = { .name = "pnx8550-ohci", .owner = THIS_MODULE, }, .probe = ohci_hcd_pnx8550_drv_probe, .remove = ohci_hcd_pnx8550_drv_remove, };
gpl-2.0
Megatron007/Megabyte_kernel_victara
tools/perf/util/annotate.c
4786
14754
/* * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> * * Parts came from builtin-annotate.c, see those files for further * copyright notes. * * Released under the GPL v2. (and only v2, not any later version) */ #include "util.h" #include "build-id.h" #include "color.h" #include "cache.h" #include "symbol.h" #include "debug.h" #include "annotate.h" #include <pthread.h> const char *disassembler_style; int symbol__annotate_init(struct map *map __used, struct symbol *sym) { struct annotation *notes = symbol__annotation(sym); pthread_mutex_init(&notes->lock, NULL); return 0; } int symbol__alloc_hist(struct symbol *sym) { struct annotation *notes = symbol__annotation(sym); const size_t size = sym->end - sym->start + 1; size_t sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64)); notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist); if (notes->src == NULL) return -1; notes->src->sizeof_sym_hist = sizeof_sym_hist; notes->src->nr_histograms = symbol_conf.nr_events; INIT_LIST_HEAD(&notes->src->source); return 0; } void symbol__annotate_zero_histograms(struct symbol *sym) { struct annotation *notes = symbol__annotation(sym); pthread_mutex_lock(&notes->lock); if (notes->src != NULL) memset(notes->src->histograms, 0, notes->src->nr_histograms * notes->src->sizeof_sym_hist); pthread_mutex_unlock(&notes->lock); } int symbol__inc_addr_samples(struct symbol *sym, struct map *map, int evidx, u64 addr) { unsigned offset; struct annotation *notes; struct sym_hist *h; notes = symbol__annotation(sym); if (notes->src == NULL) return -ENOMEM; pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr)); if (addr < sym->start || addr > sym->end) return -ERANGE; offset = addr - sym->start; h = annotation__histogram(notes, evidx); h->sum++; h->addr[offset]++; pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64 ", evidx=%d] => %" PRIu64 "\n", sym->start, sym->name, addr, addr - sym->start, 
evidx, h->addr[offset]); return 0; } static struct objdump_line *objdump_line__new(s64 offset, char *line, size_t privsize) { struct objdump_line *self = malloc(sizeof(*self) + privsize); if (self != NULL) { self->offset = offset; self->line = line; } return self; } void objdump_line__free(struct objdump_line *self) { free(self->line); free(self); } static void objdump__add_line(struct list_head *head, struct objdump_line *line) { list_add_tail(&line->node, head); } struct objdump_line *objdump__get_next_ip_line(struct list_head *head, struct objdump_line *pos) { list_for_each_entry_continue(pos, head, node) if (pos->offset >= 0) return pos; return NULL; } static int objdump_line__print(struct objdump_line *oline, struct symbol *sym, int evidx, u64 len, int min_pcnt, int printed, int max_lines, struct objdump_line *queue) { static const char *prev_line; static const char *prev_color; if (oline->offset != -1) { const char *path = NULL; unsigned int hits = 0; double percent = 0.0; const char *color; struct annotation *notes = symbol__annotation(sym); struct source_line *src_line = notes->src->lines; struct sym_hist *h = annotation__histogram(notes, evidx); s64 offset = oline->offset; struct objdump_line *next; next = objdump__get_next_ip_line(&notes->src->source, oline); while (offset < (s64)len && (next == NULL || offset < next->offset)) { if (src_line) { if (path == NULL) path = src_line[offset].path; percent += src_line[offset].percent; } else hits += h->addr[offset]; ++offset; } if (src_line == NULL && h->sum) percent = 100.0 * hits / h->sum; if (percent < min_pcnt) return -1; if (max_lines && printed >= max_lines) return 1; if (queue != NULL) { list_for_each_entry_from(queue, &notes->src->source, node) { if (queue == oline) break; objdump_line__print(queue, sym, evidx, len, 0, 0, 1, NULL); } } color = get_percent_color(percent); /* * Also color the filename and line if needed, with * the same color than the percentage. 
Don't print it * twice for close colored addr with the same filename:line */ if (path) { if (!prev_line || strcmp(prev_line, path) || color != prev_color) { color_fprintf(stdout, color, " %s", path); prev_line = path; prev_color = color; } } color_fprintf(stdout, color, " %7.2f", percent); printf(" : "); color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", oline->line); } else if (max_lines && printed >= max_lines) return 1; else { if (queue) return -1; if (!*oline->line) printf(" :\n"); else printf(" : %s\n", oline->line); } return 0; } static int symbol__parse_objdump_line(struct symbol *sym, struct map *map, FILE *file, size_t privsize) { struct annotation *notes = symbol__annotation(sym); struct objdump_line *objdump_line; char *line = NULL, *tmp, *tmp2, *c; size_t line_len; s64 line_ip, offset = -1; if (getline(&line, &line_len, file) < 0) return -1; if (!line) return -1; while (line_len != 0 && isspace(line[line_len - 1])) line[--line_len] = '\0'; c = strchr(line, '\n'); if (c) *c = 0; line_ip = -1; /* * Strip leading spaces: */ tmp = line; while (*tmp) { if (*tmp != ' ') break; tmp++; } if (*tmp) { /* * Parse hexa addresses followed by ':' */ line_ip = strtoull(tmp, &tmp2, 16); if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0') line_ip = -1; } if (line_ip != -1) { u64 start = map__rip_2objdump(map, sym->start), end = map__rip_2objdump(map, sym->end); offset = line_ip - start; if (offset < 0 || (u64)line_ip > end) offset = -1; } objdump_line = objdump_line__new(offset, line, privsize); if (objdump_line == NULL) { free(line); return -1; } objdump__add_line(&notes->src->source, objdump_line); return 0; } int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize) { struct dso *dso = map->dso; char *filename = dso__build_id_filename(dso, NULL, 0); bool free_filename = true; char command[PATH_MAX * 2]; FILE *file; int err = 0; char symfs_filename[PATH_MAX]; if (filename) { snprintf(symfs_filename, sizeof(symfs_filename), "%s%s", symbol_conf.symfs, 
filename); } if (filename == NULL) { if (dso->has_build_id) { pr_err("Can't annotate %s: not enough memory\n", sym->name); return -ENOMEM; } goto fallback; } else if (readlink(symfs_filename, command, sizeof(command)) < 0 || strstr(command, "[kernel.kallsyms]") || access(symfs_filename, R_OK)) { free(filename); fallback: /* * If we don't have build-ids or the build-id file isn't in the * cache, or is just a kallsyms file, well, lets hope that this * DSO is the same as when 'perf record' ran. */ filename = dso->long_name; snprintf(symfs_filename, sizeof(symfs_filename), "%s%s", symbol_conf.symfs, filename); free_filename = false; } if (dso->symtab_type == SYMTAB__KALLSYMS) { char bf[BUILD_ID_SIZE * 2 + 16] = " with build id "; char *build_id_msg = NULL; if (dso->annotate_warned) goto out_free_filename; if (dso->has_build_id) { build_id__sprintf(dso->build_id, sizeof(dso->build_id), bf + 15); build_id_msg = bf; } err = -ENOENT; dso->annotate_warned = 1; pr_err("Can't annotate %s:\n\n" "No vmlinux file%s\nwas found in the path.\n\n" "Please use:\n\n" " perf buildid-cache -av vmlinux\n\n" "or:\n\n" " --vmlinux vmlinux\n", sym->name, build_id_msg ?: ""); goto out_free_filename; } pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__, filename, sym->name, map->unmap_ip(map, sym->start), map->unmap_ip(map, sym->end)); pr_debug("annotating [%p] %30s : [%p] %30s\n", dso, dso->long_name, sym, sym->name); snprintf(command, sizeof(command), "objdump %s%s --start-address=0x%016" PRIx64 " --stop-address=0x%016" PRIx64 " -d %s %s -C %s|grep -v %s|expand", disassembler_style ? "-M " : "", disassembler_style ? disassembler_style : "", map__rip_2objdump(map, sym->start), map__rip_2objdump(map, sym->end+1), symbol_conf.annotate_asm_raw ? "" : "--no-show-raw", symbol_conf.annotate_src ? 
"-S" : "", symfs_filename, filename); pr_debug("Executing: %s\n", command); file = popen(command, "r"); if (!file) goto out_free_filename; while (!feof(file)) if (symbol__parse_objdump_line(sym, map, file, privsize) < 0) break; pclose(file); out_free_filename: if (free_filename) free(filename); return err; } static void insert_source_line(struct rb_root *root, struct source_line *src_line) { struct source_line *iter; struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; while (*p != NULL) { parent = *p; iter = rb_entry(parent, struct source_line, node); if (src_line->percent > iter->percent) p = &(*p)->rb_left; else p = &(*p)->rb_right; } rb_link_node(&src_line->node, parent, p); rb_insert_color(&src_line->node, root); } static void symbol__free_source_line(struct symbol *sym, int len) { struct annotation *notes = symbol__annotation(sym); struct source_line *src_line = notes->src->lines; int i; for (i = 0; i < len; i++) free(src_line[i].path); free(src_line); notes->src->lines = NULL; } /* Get the filename:line for the colored entries */ static int symbol__get_source_line(struct symbol *sym, struct map *map, int evidx, struct rb_root *root, int len, const char *filename) { u64 start; int i; char cmd[PATH_MAX * 2]; struct source_line *src_line; struct annotation *notes = symbol__annotation(sym); struct sym_hist *h = annotation__histogram(notes, evidx); if (!h->sum) return 0; src_line = notes->src->lines = calloc(len, sizeof(struct source_line)); if (!notes->src->lines) return -1; start = map__rip_2objdump(map, sym->start); for (i = 0; i < len; i++) { char *path = NULL; size_t line_len; u64 offset; FILE *fp; src_line[i].percent = 100.0 * h->addr[i] / h->sum; if (src_line[i].percent <= 0.5) continue; offset = start + i; sprintf(cmd, "addr2line -e %s %016" PRIx64, filename, offset); fp = popen(cmd, "r"); if (!fp) continue; if (getline(&path, &line_len, fp) < 0 || !line_len) goto next; src_line[i].path = malloc(sizeof(char) * line_len + 1); if 
(!src_line[i].path) goto next; strcpy(src_line[i].path, path); insert_source_line(root, &src_line[i]); next: pclose(fp); } return 0; } static void print_summary(struct rb_root *root, const char *filename) { struct source_line *src_line; struct rb_node *node; printf("\nSorted summary for file %s\n", filename); printf("----------------------------------------------\n\n"); if (RB_EMPTY_ROOT(root)) { printf(" Nothing higher than %1.1f%%\n", MIN_GREEN); return; } node = rb_first(root); while (node) { double percent; const char *color; char *path; src_line = rb_entry(node, struct source_line, node); percent = src_line->percent; color = get_percent_color(percent); path = src_line->path; color_fprintf(stdout, color, " %7.2f %s", percent, path); node = rb_next(node); } } static void symbol__annotate_hits(struct symbol *sym, int evidx) { struct annotation *notes = symbol__annotation(sym); struct sym_hist *h = annotation__histogram(notes, evidx); u64 len = sym->end - sym->start, offset; for (offset = 0; offset < len; ++offset) if (h->addr[offset] != 0) printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2, sym->start + offset, h->addr[offset]); printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum); } int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, bool full_paths, int min_pcnt, int max_lines, int context) { struct dso *dso = map->dso; const char *filename = dso->long_name, *d_filename; struct annotation *notes = symbol__annotation(sym); struct objdump_line *pos, *queue = NULL; int printed = 2, queue_len = 0; int more = 0; u64 len; if (full_paths) d_filename = filename; else d_filename = basename(filename); len = sym->end - sym->start; printf(" Percent | Source code & Disassembly of %s\n", d_filename); printf("------------------------------------------------\n"); if (verbose) symbol__annotate_hits(sym, evidx); list_for_each_entry(pos, &notes->src->source, node) { if (context && queue == NULL) { queue = pos; queue_len = 0; } switch 
(objdump_line__print(pos, sym, evidx, len, min_pcnt, printed, max_lines, queue)) { case 0: ++printed; if (context) { printed += queue_len; queue = NULL; queue_len = 0; } break; case 1: /* filtered by max_lines */ ++more; break; case -1: default: /* * Filtered by min_pcnt or non IP lines when * context != 0 */ if (!context) break; if (queue_len == context) queue = list_entry(queue->node.next, typeof(*queue), node); else ++queue_len; break; } } return more; } void symbol__annotate_zero_histogram(struct symbol *sym, int evidx) { struct annotation *notes = symbol__annotation(sym); struct sym_hist *h = annotation__histogram(notes, evidx); memset(h, 0, notes->src->sizeof_sym_hist); } void symbol__annotate_decay_histogram(struct symbol *sym, int evidx) { struct annotation *notes = symbol__annotation(sym); struct sym_hist *h = annotation__histogram(notes, evidx); int len = sym->end - sym->start, offset; h->sum = 0; for (offset = 0; offset < len; ++offset) { h->addr[offset] = h->addr[offset] * 7 / 8; h->sum += h->addr[offset]; } } void objdump_line_list__purge(struct list_head *head) { struct objdump_line *pos, *n; list_for_each_entry_safe(pos, n, head, node) { list_del(&pos->node); objdump_line__free(pos); } } int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx, bool print_lines, bool full_paths, int min_pcnt, int max_lines) { struct dso *dso = map->dso; const char *filename = dso->long_name; struct rb_root source_line = RB_ROOT; u64 len; if (symbol__annotate(sym, map, 0) < 0) return -1; len = sym->end - sym->start; if (print_lines) { symbol__get_source_line(sym, map, evidx, &source_line, len, filename); print_summary(&source_line, filename); } symbol__annotate_printf(sym, map, evidx, full_paths, min_pcnt, max_lines, 0); if (print_lines) symbol__free_source_line(sym, len); objdump_line_list__purge(&symbol__annotation(sym)->src->source); return 0; }
gpl-2.0
Validus-Kernel/android_kernel_oneplus_msm8974
drivers/net/wireless/brcm80211/brcmsmac/main.c
4786
229377
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/pci_ids.h> #include <linux/if_ether.h> #include <net/mac80211.h> #include <brcm_hw_ids.h> #include <aiutils.h> #include <chipcommon.h> #include "rate.h" #include "scb.h" #include "phy/phy_hal.h" #include "channel.h" #include "antsel.h" #include "stf.h" #include "ampdu.h" #include "mac80211_if.h" #include "ucode_loader.h" #include "main.h" #include "soc.h" /* * Indication for txflowcontrol that all priority bits in * TXQ_STOP_FOR_PRIOFC_MASK are to be considered. 
*/
/* priority value meaning "all priorities" */
#define ALLPRIO		-1

/* watchdog timer, in unit of ms */
#define TIMER_INTERVAL_WATCHDOG	1000
/* radio monitor timer, in unit of ms */
#define TIMER_INTERVAL_RADIOCHK	800

/* beacon interval, in unit of 1024TU */
#define BEACON_INTERVAL_DEFAULT	100

/* n-mode support capability */
/* 2x2 includes both 1x1 & 2x2 devices
 * reserved #define 2 for future when we want to separate 1x1 & 2x2 and
 * control it independently
 */
#define WL_11N_2x2	1
#define WL_11N_3x3	3
#define WL_11N_4x4	4

/* EDCF (802.11e/WMM) AC parameter field masks and shifts */
#define EDCF_ACI_MASK	0x60
#define EDCF_ACI_SHIFT	5
#define EDCF_ECWMIN_MASK	0x0f
#define EDCF_ECWMAX_SHIFT	4
#define EDCF_AIFSN_MASK	0x0f
#define EDCF_AIFSN_MAX	15
#define EDCF_ECWMAX_MASK	0xf0

/* default EDCF AC parameters for STA operation */
#define EDCF_AC_BE_TXOP_STA	0x0000
#define EDCF_AC_BK_TXOP_STA	0x0000
#define EDCF_AC_VO_ACI_STA	0x62
#define EDCF_AC_VO_ECW_STA	0x32
#define EDCF_AC_VI_ACI_STA	0x42
#define EDCF_AC_VI_ECW_STA	0x43
#define EDCF_AC_BK_ECW_STA	0xA4
#define EDCF_AC_VI_TXOP_STA	0x005e
#define EDCF_AC_VO_TXOP_STA	0x002f
#define EDCF_AC_BE_ACI_STA	0x03
#define EDCF_AC_BE_ECW_STA	0xA4
#define EDCF_AC_BK_ACI_STA	0x27
#define EDCF_AC_VO_TXOP_AP	0x002f

/* TXOP field is in units of 32 usec; ECW field encodes CW as 2^exp - 1 */
#define EDCF_TXOP2USEC(txop)	((txop) << 5)
#define EDCF_ECW2CW(exp)	((1 << (exp)) - 1)

/* OFDM (a/g PHY) timing parameters, in usec / bits */
#define APHY_SYMBOL_TIME	4
#define APHY_PREAMBLE_TIME	16
#define APHY_SIGNAL_TIME	4
#define APHY_SIFS_TIME	16
#define APHY_SERVICE_NBITS	16
#define APHY_TAIL_NBITS	6

/* DSSS/CCK (b PHY) timing parameters, in usec */
#define BPHY_SIFS_TIME	10
#define BPHY_PLCP_SHORT_TIME	96

/* 11n preamble durations, in usec */
#define PREN_PREAMBLE	24
#define PREN_MM_EXT	12
#define PREN_PREAMBLE_EXT	4

/* 802.11 frame/field lengths and limits */
#define DOT11_MAC_HDR_LEN	24
#define DOT11_ACK_LEN	10
#define DOT11_BA_LEN	4
#define DOT11_OFDM_SIGNAL_EXTENSION	6
#define DOT11_MIN_FRAG_LEN	256
#define DOT11_RTS_LEN	16
#define DOT11_CTS_LEN	10
#define DOT11_BA_BITMAP_LEN	128
#define DOT11_MIN_BEACON_PERIOD	1
#define DOT11_MAX_BEACON_PERIOD	0xFFFF
#define DOT11_MAXNUMFRAGS	16
#define DOT11_MAX_FRAG_LEN	2346

#define BPHY_PLCP_TIME	192
#define RIFS_11N_TIME	2

/* length of the BCN template area */
#define BCN_TMPL_LEN	512

/* brcms_bss_info flag bit values */
#define BRCMS_BSS_HT	0x0020	/* BSS is HT (MIMO) capable */

/* chip rx buffer offset */
#define BRCMS_HWRXOFF	38

/* rfdisable delay timer 500 ms, runs off ALP clock */
#define RFDISABLE_DEFAULT	10000000

#define BRCMS_TEMPSENSE_PERIOD	10	/* 10 second timeout */

/* precedences numbers for wlc queues. These are twice as many levels as
 * 802.11D priorities.
 * Odd numbers are used for HI priority traffic at same precedence levels
 * These constants are used ONLY by wlc_prio2prec_map.  Do not use them
 * elsewhere.
 */
#define	_BRCMS_PREC_NONE	0	/* None = - */
#define	_BRCMS_PREC_BK		2	/* BK - Background */
#define	_BRCMS_PREC_BE		4	/* BE - Best-effort */
#define	_BRCMS_PREC_EE		6	/* EE - Excellent-effort */
#define	_BRCMS_PREC_CL		8	/* CL - Controlled Load */
#define	_BRCMS_PREC_VI		10	/* Vi - Video */
#define	_BRCMS_PREC_VO		12	/* Vo - Voice */
#define	_BRCMS_PREC_NC		14	/* NC - Network Control */

/* synthpu_dly times in us */
#define SYNTHPU_DLY_APHY_US	3700
#define SYNTHPU_DLY_BPHY_US	1050
#define SYNTHPU_DLY_NPHY_US	2048
#define SYNTHPU_DLY_LPPHY_US	300

#define	ANTCNT			10	/* vanilla M_MAX_ANTCNT val */

/* Per-AC retry limit register definitions; uses defs.h bitfield macros */
#define EDCF_SHORT_S		0
#define EDCF_SFB_S		4
#define EDCF_LONG_S		8
#define EDCF_LFB_S		12
#define EDCF_SHORT_M		BITFIELD_MASK(4)
#define EDCF_SFB_M		BITFIELD_MASK(4)
#define EDCF_LONG_M		BITFIELD_MASK(4)
#define EDCF_LFB_M		BITFIELD_MASK(4)

#define RETRY_SHORT_DEF		7	/* Default Short retry Limit */
#define RETRY_SHORT_MAX		255	/* Maximum Short retry Limit */
#define RETRY_LONG_DEF		4	/* Default Long retry count */
#define RETRY_SHORT_FB		3	/* Short count for fb rate */
#define RETRY_LONG_FB		2	/* Long count for fb rate */

#define	APHY_CWMIN		15
#define PHY_CWMAX		1023

#define EDCF_AIFSN_MIN		1

#define FRAGNUM_MASK		0xF

#define APHY_SLOT_TIME		9
#define BPHY_SLOT_TIME		20

#define	WL_SPURAVOID_OFF	0
#define	WL_SPURAVOID_ON1	1
#define	WL_SPURAVOID_ON2	2

/* invalid core flags, use the saved coreflags */
#define BRCMS_USE_COREFLAGS	0xffffffff

/* values for PLCPHdr_override */
#define BRCMS_PLCP_AUTO	-1
#define BRCMS_PLCP_SHORT	0
#define BRCMS_PLCP_LONG	1

/* values for g_protection_override and n_protection_override */
#define BRCMS_PROTECTION_AUTO		-1
#define BRCMS_PROTECTION_OFF		0
#define BRCMS_PROTECTION_ON		1
#define BRCMS_PROTECTION_MMHDR_ONLY	2
#define BRCMS_PROTECTION_CTS_ONLY	3

/* values for g_protection_control and n_protection_control */
#define BRCMS_PROTECTION_CTL_OFF	0
#define BRCMS_PROTECTION_CTL_LOCAL	1
#define BRCMS_PROTECTION_CTL_OVERLAP	2

/* values for n_protection */
#define BRCMS_N_PROTECTION_OFF		0
#define BRCMS_N_PROTECTION_OPTIONAL	1
#define BRCMS_N_PROTECTION_20IN40	2
#define BRCMS_N_PROTECTION_MIXEDMODE	3

/* values for band specific 40MHz capabilities */
#define BRCMS_N_BW_20ALL		0
#define BRCMS_N_BW_40ALL		1
#define BRCMS_N_BW_20IN2G_40IN5G	2

/* bitflags for SGI support (sgi_rx iovar) */
#define BRCMS_N_SGI_20		0x01
#define BRCMS_N_SGI_40		0x02

/* defines used by the nrate iovar */
/* MSC in use,indicates b0-6 holds an mcs */
#define NRATE_MCS_INUSE			0x00000080
/* rate/mcs value */
#define NRATE_RATE_MASK			0x0000007f
/* stf mode mask: siso, cdd, stbc, sdm */
#define NRATE_STF_MASK			0x0000ff00
/* stf mode shift */
#define NRATE_STF_SHIFT			8
/* bit indicate to override mcs only */
#define NRATE_OVERRIDE_MCS_ONLY		0x40000000
#define NRATE_SGI_MASK			0x00800000	/* sgi mode */
#define NRATE_SGI_SHIFT			23	/* sgi mode */
#define NRATE_LDPC_CODING		0x00400000	/* adv coding in use */
#define NRATE_LDPC_SHIFT		22	/* ldpc shift */

#define NRATE_STF_SISO			0	/* stf mode SISO */
#define NRATE_STF_CDD			1	/* stf mode CDD */
#define NRATE_STF_STBC			2	/* stf mode STBC */
#define NRATE_STF_SDM			3	/* stf mode SDM */

#define MAX_DMA_SEGS			4

/* Max # of entries in Tx FIFO based on 4kb page size */
#define NTXD				256
/* Max # of entries in Rx FIFO based on 4kb page size */
#define NRXD				256

/* try to keep this # rbufs posted to the chip */
#define NRXBUFPOST			32

/* data msg txq hiwat mark */
#define BRCMS_DATAHIWAT 50 /* max # frames to process in brcms_c_recv() */ #define RXBND 8 /* max # tx status to process in wlc_txstatus() */ #define TXSBND 8 /* brcmu_format_flags() bit description structure */ struct brcms_c_bit_desc { u32 bit; const char *name; }; /* * The following table lists the buffer memory allocated to xmt fifos in HW. * the size is in units of 256bytes(one block), total size is HW dependent * ucode has default fifo partition, sw can overwrite if necessary * * This is documented in twiki under the topic UcodeTxFifo. Please ensure * the twiki is updated before making changes. */ /* Starting corerev for the fifo size table */ #define XMTFIFOTBL_STARTREV 20 struct d11init { __le16 addr; __le16 size; __le32 value; }; struct edcf_acparam { u8 ACI; u8 ECW; u16 TXOP; } __packed; const u8 prio2fifo[NUMPRIO] = { TX_AC_BE_FIFO, /* 0 BE AC_BE Best Effort */ TX_AC_BK_FIFO, /* 1 BK AC_BK Background */ TX_AC_BK_FIFO, /* 2 -- AC_BK Background */ TX_AC_BE_FIFO, /* 3 EE AC_BE Best Effort */ TX_AC_VI_FIFO, /* 4 CL AC_VI Video */ TX_AC_VI_FIFO, /* 5 VI AC_VI Video */ TX_AC_VO_FIFO, /* 6 VO AC_VO Voice */ TX_AC_VO_FIFO /* 7 NC AC_VO Voice */ }; /* debug/trace */ uint brcm_msg_level = #if defined(DEBUG) LOG_ERROR_VAL; #else 0; #endif /* DEBUG */ /* TX FIFO number to WME/802.1E Access Category */ static const u8 wme_fifo2ac[] = { IEEE80211_AC_BK, IEEE80211_AC_BE, IEEE80211_AC_VI, IEEE80211_AC_VO, IEEE80211_AC_BE, IEEE80211_AC_BE }; /* ieee80211 Access Category to TX FIFO number */ static const u8 wme_ac2fifo[] = { TX_AC_VO_FIFO, TX_AC_VI_FIFO, TX_AC_BE_FIFO, TX_AC_BK_FIFO }; /* 802.1D Priority to precedence queue mapping */ const u8 wlc_prio2prec_map[] = { _BRCMS_PREC_BE, /* 0 BE - Best-effort */ _BRCMS_PREC_BK, /* 1 BK - Background */ _BRCMS_PREC_NONE, /* 2 None = - */ _BRCMS_PREC_EE, /* 3 EE - Excellent-effort */ _BRCMS_PREC_CL, /* 4 CL - Controlled Load */ _BRCMS_PREC_VI, /* 5 Vi - Video */ _BRCMS_PREC_VO, /* 6 Vo - Voice */ _BRCMS_PREC_NC, /* 7 NC - Network 
Control */ }; static const u16 xmtfifo_sz[][NFIFO] = { /* corerev 20: 5120, 49152, 49152, 5376, 4352, 1280 */ {20, 192, 192, 21, 17, 5}, /* corerev 21: 2304, 14848, 5632, 3584, 3584, 1280 */ {9, 58, 22, 14, 14, 5}, /* corerev 22: 5120, 49152, 49152, 5376, 4352, 1280 */ {20, 192, 192, 21, 17, 5}, /* corerev 23: 5120, 49152, 49152, 5376, 4352, 1280 */ {20, 192, 192, 21, 17, 5}, /* corerev 24: 2304, 14848, 5632, 3584, 3584, 1280 */ {9, 58, 22, 14, 14, 5}, }; #ifdef DEBUG static const char * const fifo_names[] = { "AC_BK", "AC_BE", "AC_VI", "AC_VO", "BCMC", "ATIM" }; #else static const char fifo_names[6][0]; #endif #ifdef DEBUG /* pointer to most recently allocated wl/wlc */ static struct brcms_c_info *wlc_info_dbg = (struct brcms_c_info *) (NULL); #endif /* Find basic rate for a given rate */ static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec) { if (is_mcs_rate(rspec)) return wlc->band->basic_rate[mcs_table[rspec & RSPEC_RATE_MASK] .leg_ofdm]; return wlc->band->basic_rate[rspec & RSPEC_RATE_MASK]; } static u16 frametype(u32 rspec, u8 mimoframe) { if (is_mcs_rate(rspec)) return mimoframe; return is_cck_rate(rspec) ? FT_CCK : FT_OFDM; } /* currently the best mechanism for determining SIFS is the band in use */ static u16 get_sifs(struct brcms_band *band) { return band->bandtype == BRCM_BAND_5G ? APHY_SIFS_TIME : BPHY_SIFS_TIME; } /* * Detect Card removed. * Even checking an sbconfig register read will not false trigger when the core * is in reset it breaks CF address mechanism. Accessing gphy phyversion will * cause SB error if aphy is in reset on 4306B0-DB. Need a simple accessible * reg with fixed 0/1 pattern (some platforms return all 0). * If clocks are present, call the sb routine which will figure out if the * device is removed. 
*/ static bool brcms_deviceremoved(struct brcms_c_info *wlc) { u32 macctrl; if (!wlc->hw->clk) return ai_deviceremoved(wlc->hw->sih); macctrl = bcma_read32(wlc->hw->d11core, D11REGOFFS(maccontrol)); return (macctrl & (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN; } /* sum the individual fifo tx pending packet counts */ static s16 brcms_txpktpendtot(struct brcms_c_info *wlc) { return wlc->core->txpktpend[0] + wlc->core->txpktpend[1] + wlc->core->txpktpend[2] + wlc->core->txpktpend[3]; } static bool brcms_is_mband_unlocked(struct brcms_c_info *wlc) { return wlc->pub->_nbands > 1 && !wlc->bandlocked; } static int brcms_chspec_bw(u16 chanspec) { if (CHSPEC_IS40(chanspec)) return BRCMS_40_MHZ; if (CHSPEC_IS20(chanspec)) return BRCMS_20_MHZ; return BRCMS_10_MHZ; } static void brcms_c_bsscfg_mfree(struct brcms_bss_cfg *cfg) { if (cfg == NULL) return; kfree(cfg->current_bss); kfree(cfg); } static void brcms_c_detach_mfree(struct brcms_c_info *wlc) { if (wlc == NULL) return; brcms_c_bsscfg_mfree(wlc->bsscfg); kfree(wlc->pub); kfree(wlc->modulecb); kfree(wlc->default_bss); kfree(wlc->protection); kfree(wlc->stf); kfree(wlc->bandstate[0]); kfree(wlc->corestate->macstat_snapshot); kfree(wlc->corestate); kfree(wlc->hw->bandstate[0]); kfree(wlc->hw); /* free the wlc */ kfree(wlc); wlc = NULL; } static struct brcms_bss_cfg *brcms_c_bsscfg_malloc(uint unit) { struct brcms_bss_cfg *cfg; cfg = kzalloc(sizeof(struct brcms_bss_cfg), GFP_ATOMIC); if (cfg == NULL) goto fail; cfg->current_bss = kzalloc(sizeof(struct brcms_bss_info), GFP_ATOMIC); if (cfg->current_bss == NULL) goto fail; return cfg; fail: brcms_c_bsscfg_mfree(cfg); return NULL; } static struct brcms_c_info * brcms_c_attach_malloc(uint unit, uint *err, uint devid) { struct brcms_c_info *wlc; wlc = kzalloc(sizeof(struct brcms_c_info), GFP_ATOMIC); if (wlc == NULL) { *err = 1002; goto fail; } /* allocate struct brcms_c_pub state structure */ wlc->pub = kzalloc(sizeof(struct brcms_pub), GFP_ATOMIC); if (wlc->pub == NULL) { 
*err = 1003; goto fail; } wlc->pub->wlc = wlc; /* allocate struct brcms_hardware state structure */ wlc->hw = kzalloc(sizeof(struct brcms_hardware), GFP_ATOMIC); if (wlc->hw == NULL) { *err = 1005; goto fail; } wlc->hw->wlc = wlc; wlc->hw->bandstate[0] = kzalloc(sizeof(struct brcms_hw_band) * MAXBANDS, GFP_ATOMIC); if (wlc->hw->bandstate[0] == NULL) { *err = 1006; goto fail; } else { int i; for (i = 1; i < MAXBANDS; i++) wlc->hw->bandstate[i] = (struct brcms_hw_band *) ((unsigned long)wlc->hw->bandstate[0] + (sizeof(struct brcms_hw_band) * i)); } wlc->modulecb = kzalloc(sizeof(struct modulecb) * BRCMS_MAXMODULES, GFP_ATOMIC); if (wlc->modulecb == NULL) { *err = 1009; goto fail; } wlc->default_bss = kzalloc(sizeof(struct brcms_bss_info), GFP_ATOMIC); if (wlc->default_bss == NULL) { *err = 1010; goto fail; } wlc->bsscfg = brcms_c_bsscfg_malloc(unit); if (wlc->bsscfg == NULL) { *err = 1011; goto fail; } wlc->protection = kzalloc(sizeof(struct brcms_protection), GFP_ATOMIC); if (wlc->protection == NULL) { *err = 1016; goto fail; } wlc->stf = kzalloc(sizeof(struct brcms_stf), GFP_ATOMIC); if (wlc->stf == NULL) { *err = 1017; goto fail; } wlc->bandstate[0] = kzalloc(sizeof(struct brcms_band)*MAXBANDS, GFP_ATOMIC); if (wlc->bandstate[0] == NULL) { *err = 1025; goto fail; } else { int i; for (i = 1; i < MAXBANDS; i++) wlc->bandstate[i] = (struct brcms_band *) ((unsigned long)wlc->bandstate[0] + (sizeof(struct brcms_band)*i)); } wlc->corestate = kzalloc(sizeof(struct brcms_core), GFP_ATOMIC); if (wlc->corestate == NULL) { *err = 1026; goto fail; } wlc->corestate->macstat_snapshot = kzalloc(sizeof(struct macstat), GFP_ATOMIC); if (wlc->corestate->macstat_snapshot == NULL) { *err = 1027; goto fail; } return wlc; fail: brcms_c_detach_mfree(wlc); return NULL; } /* * Update the slot timing for standard 11b/g (20us slots) * or shortslot 11g (9us slots) * The PSM needs to be suspended for this call. 
*/ static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw, bool shortslot) { struct bcma_device *core = wlc_hw->d11core; if (shortslot) { /* 11g short slot: 11a timing */ bcma_write16(core, D11REGOFFS(ifs_slot), 0x0207); brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, APHY_SLOT_TIME); } else { /* 11g long slot: 11b timing */ bcma_write16(core, D11REGOFFS(ifs_slot), 0x0212); brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, BPHY_SLOT_TIME); } } /* * calculate frame duration of a given rate and length, return * time in usec unit */ static uint brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec, u8 preamble_type, uint mac_len) { uint nsyms, dur = 0, Ndps, kNdps; uint rate = rspec2rate(ratespec); if (rate == 0) { wiphy_err(wlc->wiphy, "wl%d: WAR: using rate of 1 mbps\n", wlc->pub->unit); rate = BRCM_RATE_1M; } BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, preamble_type %d, len%d\n", wlc->pub->unit, ratespec, preamble_type, mac_len); if (is_mcs_rate(ratespec)) { uint mcs = ratespec & RSPEC_RATE_MASK; int tot_streams = mcs_2_txstreams(mcs) + rspec_stc(ratespec); dur = PREN_PREAMBLE + (tot_streams * PREN_PREAMBLE_EXT); if (preamble_type == BRCMS_MM_PREAMBLE) dur += PREN_MM_EXT; /* 1000Ndbps = kbps * 4 */ kNdps = mcs_2_rate(mcs, rspec_is40mhz(ratespec), rspec_issgi(ratespec)) * 4; if (rspec_stc(ratespec) == 0) nsyms = CEIL((APHY_SERVICE_NBITS + 8 * mac_len + APHY_TAIL_NBITS) * 1000, kNdps); else /* STBC needs to have even number of symbols */ nsyms = 2 * CEIL((APHY_SERVICE_NBITS + 8 * mac_len + APHY_TAIL_NBITS) * 1000, 2 * kNdps); dur += APHY_SYMBOL_TIME * nsyms; if (wlc->band->bandtype == BRCM_BAND_2G) dur += DOT11_OFDM_SIGNAL_EXTENSION; } else if (is_ofdm_rate(rate)) { dur = APHY_PREAMBLE_TIME; dur += APHY_SIGNAL_TIME; /* Ndbps = Mbps * 4 = rate(500Kbps) * 2 */ Ndps = rate * 2; /* NSyms = CEILING((SERVICE + 8*NBytes + TAIL) / Ndbps) */ nsyms = CEIL((APHY_SERVICE_NBITS + 8 * mac_len + APHY_TAIL_NBITS), Ndps); dur += APHY_SYMBOL_TIME * nsyms; if (wlc->band->bandtype == 
BRCM_BAND_2G) dur += DOT11_OFDM_SIGNAL_EXTENSION; } else { /* * calc # bits * 2 so factor of 2 in rate (1/2 mbps) * will divide out */ mac_len = mac_len * 8 * 2; /* calc ceiling of bits/rate = microseconds of air time */ dur = (mac_len + rate - 1) / rate; if (preamble_type & BRCMS_SHORT_PREAMBLE) dur += BPHY_PLCP_SHORT_TIME; else dur += BPHY_PLCP_TIME; } return dur; } static void brcms_c_write_inits(struct brcms_hardware *wlc_hw, const struct d11init *inits) { struct bcma_device *core = wlc_hw->d11core; int i; uint offset; u16 size; u32 value; BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); for (i = 0; inits[i].addr != cpu_to_le16(0xffff); i++) { size = le16_to_cpu(inits[i].size); offset = le16_to_cpu(inits[i].addr); value = le32_to_cpu(inits[i].value); if (size == 2) bcma_write16(core, offset, value); else if (size == 4) bcma_write32(core, offset, value); else break; } } static void brcms_c_write_mhf(struct brcms_hardware *wlc_hw, u16 *mhfs) { u8 idx; u16 addr[] = { M_HOST_FLAGS1, M_HOST_FLAGS2, M_HOST_FLAGS3, M_HOST_FLAGS4, M_HOST_FLAGS5 }; for (idx = 0; idx < MHFMAX; idx++) brcms_b_write_shm(wlc_hw, addr[idx], mhfs[idx]); } static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw) { struct wiphy *wiphy = wlc_hw->wlc->wiphy; struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode; /* init microcode host flags */ brcms_c_write_mhf(wlc_hw, wlc_hw->band->mhfs); /* do band-specific ucode IHR, SHM, and SCR inits */ if (D11REV_IS(wlc_hw->corerev, 23)) { if (BRCMS_ISNPHY(wlc_hw->band)) brcms_c_write_inits(wlc_hw, ucode->d11n0bsinitvals16); else wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev" " %d\n", __func__, wlc_hw->unit, wlc_hw->corerev); } else { if (D11REV_IS(wlc_hw->corerev, 24)) { if (BRCMS_ISLCNPHY(wlc_hw->band)) brcms_c_write_inits(wlc_hw, ucode->d11lcn0bsinitvals24); else wiphy_err(wiphy, "%s: wl%d: unsupported phy in" " core rev %d\n", __func__, wlc_hw->unit, wlc_hw->corerev); } else { wiphy_err(wiphy, "%s: wl%d: unsupported corerev %d\n", 
__func__, wlc_hw->unit, wlc_hw->corerev); } } } static void brcms_b_core_ioctl(struct brcms_hardware *wlc_hw, u32 m, u32 v) { struct bcma_device *core = wlc_hw->d11core; u32 ioctl = bcma_aread32(core, BCMA_IOCTL) & ~m; bcma_awrite32(core, BCMA_IOCTL, ioctl | v); } static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk) { BCMMSG(wlc_hw->wlc->wiphy, "wl%d: clk %d\n", wlc_hw->unit, clk); wlc_hw->phyclk = clk; if (OFF == clk) { /* clear gmode bit, put phy into reset */ brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC | SICF_GMODE), (SICF_PRST | SICF_FGC)); udelay(1); brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_PRST); udelay(1); } else { /* take phy out of reset */ brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_FGC); udelay(1); brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0); udelay(1); } } /* low-level band switch utility routine */ static void brcms_c_setxband(struct brcms_hardware *wlc_hw, uint bandunit) { BCMMSG(wlc_hw->wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit, bandunit); wlc_hw->band = wlc_hw->bandstate[bandunit]; /* * BMAC_NOTE: * until we eliminate need for wlc->band refs in low level code */ wlc_hw->wlc->band = wlc_hw->wlc->bandstate[bandunit]; /* set gmode core flag */ if (wlc_hw->sbclk && !wlc_hw->noreset) { u32 gmode = 0; if (bandunit == 0) gmode = SICF_GMODE; brcms_b_core_ioctl(wlc_hw, SICF_GMODE, gmode); } } /* switch to new band but leave it inactive */ static u32 brcms_c_setband_inact(struct brcms_c_info *wlc, uint bandunit) { struct brcms_hardware *wlc_hw = wlc->hw; u32 macintmask; u32 macctrl; BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit); macctrl = bcma_read32(wlc_hw->d11core, D11REGOFFS(maccontrol)); WARN_ON((macctrl & MCTL_EN_MAC) != 0); /* disable interrupts */ macintmask = brcms_intrsoff(wlc->wl); /* radio off */ wlc_phy_switch_radio(wlc_hw->band->pi, OFF); brcms_b_core_phy_clk(wlc_hw, OFF); brcms_c_setxband(wlc_hw, bandunit); return macintmask; } /* process an individual struct tx_status */ static bool 
brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs) { struct sk_buff *p; uint queue; struct d11txh *txh; struct scb *scb = NULL; bool free_pdu; int tx_rts, tx_frame_count, tx_rts_count; uint totlen, supr_status; bool lastframe; struct ieee80211_hdr *h; u16 mcl; struct ieee80211_tx_info *tx_info; struct ieee80211_tx_rate *txrate; int i; /* discard intermediate indications for ucode with one legitimate case: * e.g. if "useRTS" is set. ucode did a successful rts/cts exchange, * but the subsequent tx of DATA failed. so it will start rts/cts * from the beginning (resetting the rts transmission count) */ if (!(txs->status & TX_STATUS_AMPDU) && (txs->status & TX_STATUS_INTERMEDIATE)) { BCMMSG(wlc->wiphy, "INTERMEDIATE but not AMPDU\n"); return false; } queue = txs->frameid & TXFID_QUEUE_MASK; if (queue >= NFIFO) { p = NULL; goto fatal; } p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED); if (p == NULL) goto fatal; txh = (struct d11txh *) (p->data); mcl = le16_to_cpu(txh->MacTxControlLow); if (txs->phyerr) { if (brcm_msg_level & LOG_ERROR_VAL) { wiphy_err(wlc->wiphy, "phyerr 0x%x, rate 0x%x\n", txs->phyerr, txh->MainRates); brcms_c_print_txdesc(txh); } brcms_c_print_txstatus(txs); } if (txs->frameid != le16_to_cpu(txh->TxFrameID)) goto fatal; tx_info = IEEE80211_SKB_CB(p); h = (struct ieee80211_hdr *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN); if (tx_info->control.sta) scb = &wlc->pri_scb; if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { brcms_c_ampdu_dotxstatus(wlc->ampdu, scb, p, txs); return false; } supr_status = txs->status & TX_STATUS_SUPR_MASK; if (supr_status == TX_STATUS_SUPR_BADCH) BCMMSG(wlc->wiphy, "%s: Pkt tx suppressed, possibly channel %d\n", __func__, CHSPEC_CHANNEL(wlc->default_bss->chanspec)); tx_rts = le16_to_cpu(txh->MacTxControlLow) & TXC_SENDRTS; tx_frame_count = (txs->status & TX_STATUS_FRM_RTX_MASK) >> TX_STATUS_FRM_RTX_SHIFT; tx_rts_count = (txs->status & TX_STATUS_RTS_RTX_MASK) >> TX_STATUS_RTS_RTX_SHIFT; lastframe = 
!ieee80211_has_morefrags(h->frame_control); if (!lastframe) { wiphy_err(wlc->wiphy, "Not last frame!\n"); } else { /* * Set information to be consumed by Minstrel ht. * * The "fallback limit" is the number of tx attempts a given * MPDU is sent at the "primary" rate. Tx attempts beyond that * limit are sent at the "secondary" rate. * A 'short frame' does not exceed RTS treshold. */ u16 sfbl, /* Short Frame Rate Fallback Limit */ lfbl, /* Long Frame Rate Fallback Limit */ fbl; if (queue < IEEE80211_NUM_ACS) { sfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]], EDCF_SFB); lfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]], EDCF_LFB); } else { sfbl = wlc->SFBL; lfbl = wlc->LFBL; } txrate = tx_info->status.rates; if (txrate[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) fbl = lfbl; else fbl = sfbl; ieee80211_tx_info_clear_status(tx_info); if ((tx_frame_count > fbl) && (txrate[1].idx >= 0)) { /* * rate selection requested a fallback rate * and we used it */ txrate[0].count = fbl; txrate[1].count = tx_frame_count - fbl; } else { /* * rate selection did not request fallback rate, or * we didn't need it */ txrate[0].count = tx_frame_count; /* * rc80211_minstrel.c:minstrel_tx_status() expects * unused rates to be marked with idx = -1 */ txrate[1].idx = -1; txrate[1].count = 0; } /* clear the rest of the rates */ for (i = 2; i < IEEE80211_TX_MAX_RATES; i++) { txrate[i].idx = -1; txrate[i].count = 0; } if (txs->status & TX_STATUS_ACK_RCV) tx_info->flags |= IEEE80211_TX_STAT_ACK; } totlen = p->len; free_pdu = true; brcms_c_txfifo_complete(wlc, queue, 1); if (lastframe) { /* remove PLCP & Broadcom tx descriptor header */ skb_pull(p, D11_PHY_HDR_LEN); skb_pull(p, D11_TXH_LEN); ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw, p); } else { wiphy_err(wlc->wiphy, "%s: Not last frame => not calling " "tx_status\n", __func__); } return false; fatal: if (p) brcmu_pkt_buf_free_skb(p); return true; } /* process tx completion events in BMAC * Return true if more tx status need to be processed. 
false otherwise. */ static bool brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal) { bool morepending = false; struct brcms_c_info *wlc = wlc_hw->wlc; struct bcma_device *core; struct tx_status txstatus, *txs; u32 s1, s2; uint n = 0; /* * Param 'max_tx_num' indicates max. # tx status to process before * break out. */ uint max_tx_num = bound ? TXSBND : -1; BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit); txs = &txstatus; core = wlc_hw->d11core; *fatal = false; s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); while (!(*fatal) && (s1 & TXS_V)) { if (s1 == 0xffffffff) { wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit, __func__); return morepending; } s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2)); txs->status = s1 & TXS_STATUS_MASK; txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT; txs->sequence = s2 & TXS_SEQ_MASK; txs->phyerr = (s2 & TXS_PTX_MASK) >> TXS_PTX_SHIFT; txs->lasttxtime = 0; *fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs); /* !give others some time to run! 
*/ if (++n >= max_tx_num) break; s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); } if (*fatal) return 0; if (n >= max_tx_num) morepending = true; if (!pktq_empty(&wlc->pkt_queue->q)) brcms_c_send_q(wlc); return morepending; } static void brcms_c_tbtt(struct brcms_c_info *wlc) { if (!wlc->bsscfg->BSS) /* * DirFrmQ is now valid...defer setting until end * of ATIM window */ wlc->qvalid |= MCMD_DIRFRMQVAL; } /* set initial host flags value */ static void brcms_c_mhfdef(struct brcms_c_info *wlc, u16 *mhfs, u16 mhf2_init) { struct brcms_hardware *wlc_hw = wlc->hw; memset(mhfs, 0, MHFMAX * sizeof(u16)); mhfs[MHF2] |= mhf2_init; /* prohibit use of slowclock on multifunction boards */ if (wlc_hw->boardflags & BFL_NOPLLDOWN) mhfs[MHF1] |= MHF1_FORCEFASTCLK; if (BRCMS_ISNPHY(wlc_hw->band) && NREV_LT(wlc_hw->band->phyrev, 2)) { mhfs[MHF2] |= MHF2_NPHY40MHZ_WAR; mhfs[MHF1] |= MHF1_IQSWAP_WAR; } } static uint dmareg(uint direction, uint fifonum) { if (direction == DMA_TX) return offsetof(struct d11regs, fifo64regs[fifonum].dmaxmt); return offsetof(struct d11regs, fifo64regs[fifonum].dmarcv); } static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme) { uint i; char name[8]; /* * ucode host flag 2 needed for pio mode, independent of band and fifo */ u16 pio_mhf2 = 0; struct brcms_hardware *wlc_hw = wlc->hw; uint unit = wlc_hw->unit; struct wiphy *wiphy = wlc->wiphy; /* name and offsets for dma_attach */ snprintf(name, sizeof(name), "wl%d", unit); if (wlc_hw->di[0] == NULL) { /* Init FIFOs */ int dma_attach_err = 0; /* * FIFO 0 * TX: TX_AC_BK_FIFO (TX AC Background data packets) * RX: RX_FIFO (RX data packets) */ wlc_hw->di[0] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core, (wme ? dmareg(DMA_TX, 0) : 0), dmareg(DMA_RX, 0), (wme ? 
NTXD : 0), NRXD, RXBUFSZ, -1, NRXBUFPOST, BRCMS_HWRXOFF, &brcm_msg_level); dma_attach_err |= (NULL == wlc_hw->di[0]); /* * FIFO 1 * TX: TX_AC_BE_FIFO (TX AC Best-Effort data packets) * (legacy) TX_DATA_FIFO (TX data packets) * RX: UNUSED */ wlc_hw->di[1] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core, dmareg(DMA_TX, 1), 0, NTXD, 0, 0, -1, 0, 0, &brcm_msg_level); dma_attach_err |= (NULL == wlc_hw->di[1]); /* * FIFO 2 * TX: TX_AC_VI_FIFO (TX AC Video data packets) * RX: UNUSED */ wlc_hw->di[2] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core, dmareg(DMA_TX, 2), 0, NTXD, 0, 0, -1, 0, 0, &brcm_msg_level); dma_attach_err |= (NULL == wlc_hw->di[2]); /* * FIFO 3 * TX: TX_AC_VO_FIFO (TX AC Voice data packets) * (legacy) TX_CTL_FIFO (TX control & mgmt packets) */ wlc_hw->di[3] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core, dmareg(DMA_TX, 3), 0, NTXD, 0, 0, -1, 0, 0, &brcm_msg_level); dma_attach_err |= (NULL == wlc_hw->di[3]); /* Cleaner to leave this as if with AP defined */ if (dma_attach_err) { wiphy_err(wiphy, "wl%d: wlc_attach: dma_attach failed" "\n", unit); return false; } /* get pointer to dma engine tx flow control variable */ for (i = 0; i < NFIFO; i++) if (wlc_hw->di[i]) wlc_hw->txavail[i] = (uint *) dma_getvar(wlc_hw->di[i], "&txavail"); } /* initial ucode host flags */ brcms_c_mhfdef(wlc, wlc_hw->band->mhfs, pio_mhf2); return true; } static void brcms_b_detach_dmapio(struct brcms_hardware *wlc_hw) { uint j; for (j = 0; j < NFIFO; j++) { if (wlc_hw->di[j]) { dma_detach(wlc_hw->di[j]); wlc_hw->di[j] = NULL; } } } /* * Initialize brcms_c_info default values ... 
* may get overrides later in this function * BMAC_NOTES, move low out and resolve the dangling ones */ static void brcms_b_info_init(struct brcms_hardware *wlc_hw) { struct brcms_c_info *wlc = wlc_hw->wlc; /* set default sw macintmask value */ wlc->defmacintmask = DEF_MACINTMASK; /* various 802.11g modes */ wlc_hw->shortslot = false; wlc_hw->SFBL = RETRY_SHORT_FB; wlc_hw->LFBL = RETRY_LONG_FB; /* default mac retry limits */ wlc_hw->SRL = RETRY_SHORT_DEF; wlc_hw->LRL = RETRY_LONG_DEF; wlc_hw->chanspec = ch20mhz_chspec(1); } static void brcms_b_wait_for_wake(struct brcms_hardware *wlc_hw) { /* delay before first read of ucode state */ udelay(40); /* wait until ucode is no longer asleep */ SPINWAIT((brcms_b_read_shm(wlc_hw, M_UCODE_DBGST) == DBGST_ASLEEP), wlc_hw->wlc->fastpwrup_dly); } /* control chip clock to save power, enable dynamic clock or force fast clock */ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode) { if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU) { /* new chips with PMU, CCS_FORCEHT will distribute the HT clock * on backplane, but mac core will still run on ALP(not HT) when * it enters powersave mode, which means the FCA bit may not be * set. Should wakeup mac if driver wants it to run on HT. 
*/ if (wlc_hw->clk) { if (mode == CLK_FAST) { bcma_set32(wlc_hw->d11core, D11REGOFFS(clk_ctl_st), CCS_FORCEHT); udelay(64); SPINWAIT( ((bcma_read32(wlc_hw->d11core, D11REGOFFS(clk_ctl_st)) & CCS_HTAVAIL) == 0), PMU_MAX_TRANSITION_DLY); WARN_ON(!(bcma_read32(wlc_hw->d11core, D11REGOFFS(clk_ctl_st)) & CCS_HTAVAIL)); } else { if ((ai_get_pmurev(wlc_hw->sih) == 0) && (bcma_read32(wlc_hw->d11core, D11REGOFFS(clk_ctl_st)) & (CCS_FORCEHT | CCS_HTAREQ))) SPINWAIT( ((bcma_read32(wlc_hw->d11core, offsetof(struct d11regs, clk_ctl_st)) & CCS_HTAVAIL) == 0), PMU_MAX_TRANSITION_DLY); bcma_mask32(wlc_hw->d11core, D11REGOFFS(clk_ctl_st), ~CCS_FORCEHT); } } wlc_hw->forcefastclk = (mode == CLK_FAST); } else { /* old chips w/o PMU, force HT through cc, * then use FCA to verify mac is running fast clock */ wlc_hw->forcefastclk = ai_clkctl_cc(wlc_hw->sih, mode); /* check fast clock is available (if core is not in reset) */ if (wlc_hw->forcefastclk && wlc_hw->clk) WARN_ON(!(bcma_aread32(wlc_hw->d11core, BCMA_IOST) & SISF_FCLKA)); /* * keep the ucode wake bit on if forcefastclk is on since we * do not want ucode to put us back to slow clock when it dozes * for PM mode. Code below matches the wake override bit with * current forcefastclk state. Only setting bit in wake_override * instead of waking ucode immediately since old code had this * behavior. Older code set wlc->forcefastclk but only had the * wake happen if the wakup_ucode work (protected by an up * check) was executed just below. 
*/ if (wlc_hw->forcefastclk) mboolset(wlc_hw->wake_override, BRCMS_WAKE_OVERRIDE_FORCEFAST); else mboolclr(wlc_hw->wake_override, BRCMS_WAKE_OVERRIDE_FORCEFAST); } } /* set or clear ucode host flag bits * it has an optimization for no-change write * it only writes through shared memory when the core has clock; * pre-CLK changes should use wlc_write_mhf to get around the optimization * * * bands values are: BRCM_BAND_AUTO <--- Current band only * BRCM_BAND_5G <--- 5G band only * BRCM_BAND_2G <--- 2G band only * BRCM_BAND_ALL <--- All bands */ void brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask, u16 val, int bands) { u16 save; u16 addr[MHFMAX] = { M_HOST_FLAGS1, M_HOST_FLAGS2, M_HOST_FLAGS3, M_HOST_FLAGS4, M_HOST_FLAGS5 }; struct brcms_hw_band *band; if ((val & ~mask) || idx >= MHFMAX) return; /* error condition */ switch (bands) { /* Current band only or all bands, * then set the band to current band */ case BRCM_BAND_AUTO: case BRCM_BAND_ALL: band = wlc_hw->band; break; case BRCM_BAND_5G: band = wlc_hw->bandstate[BAND_5G_INDEX]; break; case BRCM_BAND_2G: band = wlc_hw->bandstate[BAND_2G_INDEX]; break; default: band = NULL; /* error condition */ } if (band) { save = band->mhfs[idx]; band->mhfs[idx] = (band->mhfs[idx] & ~mask) | val; /* optimization: only write through if changed, and * changed band is the current band */ if (wlc_hw->clk && (band->mhfs[idx] != save) && (band == wlc_hw->band)) brcms_b_write_shm(wlc_hw, addr[idx], (u16) band->mhfs[idx]); } if (bands == BRCM_BAND_ALL) { wlc_hw->bandstate[0]->mhfs[idx] = (wlc_hw->bandstate[0]->mhfs[idx] & ~mask) | val; wlc_hw->bandstate[1]->mhfs[idx] = (wlc_hw->bandstate[1]->mhfs[idx] & ~mask) | val; } } /* set the maccontrol register to desired reset state and * initialize the sw cache of the register */ static void brcms_c_mctrl_reset(struct brcms_hardware *wlc_hw) { /* IHR accesses are always enabled, PSM disabled, HPS off and WAKE on */ wlc_hw->maccontrol = 0; wlc_hw->suspended_fifos = 0; 
wlc_hw->wake_override = 0; wlc_hw->mute_override = 0; brcms_b_mctrl(wlc_hw, ~0, MCTL_IHR_EN | MCTL_WAKE); } /* * write the software state of maccontrol and * overrides to the maccontrol register */ static void brcms_c_mctrl_write(struct brcms_hardware *wlc_hw) { u32 maccontrol = wlc_hw->maccontrol; /* OR in the wake bit if overridden */ if (wlc_hw->wake_override) maccontrol |= MCTL_WAKE; /* set AP and INFRA bits for mute if needed */ if (wlc_hw->mute_override) { maccontrol &= ~(MCTL_AP); maccontrol |= MCTL_INFRA; } bcma_write32(wlc_hw->d11core, D11REGOFFS(maccontrol), maccontrol); } /* set or clear maccontrol bits */ void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val) { u32 maccontrol; u32 new_maccontrol; if (val & ~mask) return; /* error condition */ maccontrol = wlc_hw->maccontrol; new_maccontrol = (maccontrol & ~mask) | val; /* if the new maccontrol value is the same as the old, nothing to do */ if (new_maccontrol == maccontrol) return; /* something changed, cache the new value */ wlc_hw->maccontrol = new_maccontrol; /* write the new values with overrides applied */ brcms_c_mctrl_write(wlc_hw); } void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw, u32 override_bit) { if (wlc_hw->wake_override || (wlc_hw->maccontrol & MCTL_WAKE)) { mboolset(wlc_hw->wake_override, override_bit); return; } mboolset(wlc_hw->wake_override, override_bit); brcms_c_mctrl_write(wlc_hw); brcms_b_wait_for_wake(wlc_hw); } void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw, u32 override_bit) { mboolclr(wlc_hw->wake_override, override_bit); if (wlc_hw->wake_override || (wlc_hw->maccontrol & MCTL_WAKE)) return; brcms_c_mctrl_write(wlc_hw); } /* When driver needs ucode to stop beaconing, it has to make sure that * MCTL_AP is clear and MCTL_INFRA is set * Mode MCTL_AP MCTL_INFRA * AP 1 1 * STA 0 1 <--- This will ensure no beacons * IBSS 0 0 */ static void brcms_c_ucode_mute_override_set(struct brcms_hardware *wlc_hw) { wlc_hw->mute_override = 1; 
	/* if maccontrol already has AP == 0 and INFRA == 1 without this
	 * override, then there is no change to write */
	if ((wlc_hw->maccontrol & (MCTL_AP | MCTL_INFRA)) == MCTL_INFRA)
		return;

	brcms_c_mctrl_write(wlc_hw);
}

/* Clear the override on AP and INFRA bits */
static void brcms_c_ucode_mute_override_clear(struct brcms_hardware *wlc_hw)
{
	if (wlc_hw->mute_override == 0)
		return;

	wlc_hw->mute_override = 0;

	/* if maccontrol already has AP == 0 and INFRA == 1 without this
	 * override, then there is no change to write */
	if ((wlc_hw->maccontrol & (MCTL_AP | MCTL_INFRA)) == MCTL_INFRA)
		return;

	brcms_c_mctrl_write(wlc_hw);
}

/*
 * Write a MAC address to the given match reg offset in the RXE match engine.
 */
static void
brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset,
		      const u8 *addr)
{
	struct bcma_device *core = wlc_hw->d11core;
	u16 mac_l;
	u16 mac_m;
	u16 mac_h;

	BCMMSG(wlc_hw->wlc->wiphy, "wl%d: brcms_b_set_addrmatch\n",
		 wlc_hw->unit);

	/* pack the 6-byte address into three little-endian 16-bit words */
	mac_l = addr[0] | (addr[1] << 8);
	mac_m = addr[2] | (addr[3] << 8);
	mac_h = addr[4] | (addr[5] << 8);

	/* enter the MAC addr into the RXE match registers */
	bcma_write16(core, D11REGOFFS(rcm_ctl),
		     RCM_INC_DATA | match_reg_offset);
	bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_l);
	bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_m);
	bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_h);
}

/*
 * Copy len bytes from buf into the d11 template RAM starting at offset,
 * honoring the chip's byte-swap mode (MCTL_BIGEND).
 * NOTE(review): len is assumed to be a multiple of 4 — the loop copies
 * whole u32 words; confirm against callers.
 */
void
brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len,
			   void *buf)
{
	struct bcma_device *core = wlc_hw->d11core;
	u32 word;
	__le32 word_le;
	__be32 word_be;
	bool be_bit;
	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);

	bcma_write32(core, D11REGOFFS(tplatewrptr), offset);

	/* if MCTL_BIGEND bit set in mac control register,
	 * the chip swaps data in fifo, as well as data in
	 * template ram
	 */
	be_bit = (bcma_read32(core, D11REGOFFS(maccontrol)) & MCTL_BIGEND) != 0;

	while (len > 0) {
		memcpy(&word, buf, sizeof(u32));

		if (be_bit) {
			word_be = cpu_to_be32(word);
			word = *(u32 *)&word_be;
		} else {
			word_le =
			cpu_to_le32(word);
			word = *(u32 *)&word_le;
		}

		bcma_write32(core, D11REGOFFS(tplatewrdata), word);

		buf = (u8 *) buf + sizeof(u32);
		len -= sizeof(u32);
	}
}

/* push a new CWmin to ucode scratch memory and cache it per-band */
static void brcms_b_set_cwmin(struct brcms_hardware *wlc_hw, u16 newmin)
{
	wlc_hw->band->CWmin = newmin;

	bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
		     OBJADDR_SCR_SEL | S_DOT11_CWMIN);
	/* read back to make sure the address write has landed */
	(void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
	bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmin);
}

/* push a new CWmax to ucode scratch memory and cache it per-band */
static void brcms_b_set_cwmax(struct brcms_hardware *wlc_hw, u16 newmax)
{
	wlc_hw->band->CWmax = newmax;

	bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
		     OBJADDR_SCR_SEL | S_DOT11_CWMAX);
	(void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
	bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmax);
}

/* change PHY bandwidth: reset and re-init the PHY under a fast clock */
void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw)
{
	bool fastclk;

	/* request FAST clock if not on */
	fastclk = wlc_hw->forcefastclk;
	if (!fastclk)
		brcms_b_clkctl_clk(wlc_hw, CLK_FAST);

	wlc_phy_bw_state_set(wlc_hw->band->pi, bw);

	brcms_b_phy_reset(wlc_hw);
	wlc_phy_init(wlc_hw->band->pi, wlc_phy_chanspec_get(wlc_hw->band->pi));

	/* restore the clk */
	if (!fastclk)
		brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
}

/* write the PHY-type-dependent synthesizer power-up delay to shared mem */
static void brcms_b_upd_synthpu(struct brcms_hardware *wlc_hw)
{
	u16 v;
	struct brcms_c_info *wlc = wlc_hw->wlc;
	/* update SYNTHPU_DLY */

	if (BRCMS_ISLCNPHY(wlc->band))
		v = SYNTHPU_DLY_LPPHY_US;
	else if (BRCMS_ISNPHY(wlc->band) && (NREV_GE(wlc->band->phyrev, 3)))
		v = SYNTHPU_DLY_NPHY_US;
	else
		v = SYNTHPU_DLY_BPHY_US;

	brcms_b_write_shm(wlc_hw, M_SYNTHPU_DLY, v);
}

/* propagate the cached tx antenna selection into the ucode phy control words */
static void brcms_c_ucode_txant_set(struct brcms_hardware *wlc_hw)
{
	u16 phyctl;
	u16 phytxant = wlc_hw->bmac_phytxant;
	u16 mask = PHY_TXC_ANT_MASK;

	/* set the Probe Response frame phy control word */
	phyctl = brcms_b_read_shm(wlc_hw, M_CTXPRS_BLK + C_CTX_PCTLWD_POS);
	phyctl = (phyctl & ~mask) | phytxant;
	brcms_b_write_shm(wlc_hw, M_CTXPRS_BLK + C_CTX_PCTLWD_POS, phyctl);

	/* set the Response (ACK/CTS) frame phy control word */
	phyctl =
	    brcms_b_read_shm(wlc_hw, M_RSP_PCTLWD);
	phyctl = (phyctl & ~mask) | phytxant;
	brcms_b_write_shm(wlc_hw, M_RSP_PCTLWD, phyctl);
}

/*
 * Map an OFDM legacy rate to the SHM byte offset of its rate table
 * entry, via the ucode direct-map table.
 */
static u16 brcms_b_ofdm_ratetable_offset(struct brcms_hardware *wlc_hw,
					 u8 rate)
{
	uint i;
	u8 plcp_rate = 0;
	struct plcp_signal_rate_lookup {
		u8 rate;
		u8 signal_rate;
	};
	/* OFDM RATE sub-field of PLCP SIGNAL field, per 802.11 sec 17.3.4.1 */
	const struct plcp_signal_rate_lookup rate_lookup[] = {
		{BRCM_RATE_6M, 0xB},
		{BRCM_RATE_9M, 0xF},
		{BRCM_RATE_12M, 0xA},
		{BRCM_RATE_18M, 0xE},
		{BRCM_RATE_24M, 0x9},
		{BRCM_RATE_36M, 0xD},
		{BRCM_RATE_48M, 0x8},
		{BRCM_RATE_54M, 0xC}
	};

	for (i = 0; i < ARRAY_SIZE(rate_lookup); i++) {
		if (rate == rate_lookup[i].rate) {
			plcp_rate = rate_lookup[i].signal_rate;
			break;
		}
	}

	/* Find the SHM pointer to the rate table entry by looking in the
	 * Direct-map Table */
	return 2 * brcms_b_read_shm(wlc_hw, M_RT_DIRMAP_A + (plcp_rate * 2));
}

/*
 * Rewrite the OFDM PCTL1 (spatial mode) field of every legacy-rate SHM
 * rate table entry from the cached hw_stf_ss_opmode. 11n-capable PHYs only.
 */
static void brcms_upd_ofdm_pctl1_table(struct brcms_hardware *wlc_hw)
{
	u8 rate;
	u8 rates[8] = {
		BRCM_RATE_6M, BRCM_RATE_9M, BRCM_RATE_12M, BRCM_RATE_18M,
		BRCM_RATE_24M, BRCM_RATE_36M, BRCM_RATE_48M, BRCM_RATE_54M
	};
	u16 entry_ptr;
	u16 pctl1;
	uint i;

	if (!BRCMS_PHY_11N_CAP(wlc_hw->band))
		return;

	/* walk the phy rate table and update the entries */
	for (i = 0; i < ARRAY_SIZE(rates); i++) {
		rate = rates[i];

		entry_ptr = brcms_b_ofdm_ratetable_offset(wlc_hw, rate);

		/* read the SHM Rate Table entry OFDM PCTL1 values */
		pctl1 = brcms_b_read_shm(wlc_hw,
					 entry_ptr + M_RT_OFDM_PCTL1_POS);

		/* modify the value */
		pctl1 &= ~PHY_TXC1_MODE_MASK;
		pctl1 |= (wlc_hw->hw_stf_ss_opmode << PHY_TXC1_MODE_SHIFT);

		/* Update the SHM Rate Table entry OFDM PCTL1 values */
		brcms_b_write_shm(wlc_hw, entry_ptr + M_RT_OFDM_PCTL1_POS,
				  pctl1);
	}
}

/* band-specific init */
static void brcms_b_bsinit(struct brcms_c_info *wlc, u16 chanspec)
{
	struct brcms_hardware *wlc_hw = wlc->hw;

	BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
		wlc_hw->band->bandunit);

	brcms_c_ucode_bsinit(wlc_hw);

	wlc_phy_init(wlc_hw->band->pi,
		     chanspec);

	brcms_c_ucode_txant_set(wlc_hw);

	/*
	 * cwmin is band-specific, update hardware
	 * with value for current band
	 */
	brcms_b_set_cwmin(wlc_hw, wlc_hw->band->CWmin);
	brcms_b_set_cwmax(wlc_hw, wlc_hw->band->CWmax);

	/* 5 GHz always uses short slot; 2.4 GHz follows the cached setting */
	brcms_b_update_slot_timing(wlc_hw,
				   wlc_hw->band->bandtype == BRCM_BAND_5G ?
				   true : wlc_hw->shortslot);

	/* write phytype and phyvers */
	brcms_b_write_shm(wlc_hw, M_PHYTYPE, (u16) wlc_hw->band->phytype);
	brcms_b_write_shm(wlc_hw, M_PHYVER, (u16) wlc_hw->band->phyrev);

	/*
	 * initialize the txphyctl1 rate table since
	 * shmem is shared between bands
	 */
	brcms_upd_ofdm_pctl1_table(wlc_hw);

	brcms_b_upd_synthpu(wlc_hw);
}

/* Perform a soft reset of the PHY PLL */
void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw)
{
	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);

	/* pulse chipcontrol bit 2 low-high-low via chipcommon */
	ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_addr),
		  ~0, 0);
	udelay(1);
	ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
		  0x4, 0);
	udelay(1);
	ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
		  0x4, 4);
	udelay(1);
	ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
		  0x4, 0);
	udelay(1);
}

/* light way to turn on phy clock without reset for NPHY only
 *  refer to brcms_b_core_phy_clk for full version
 */
void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk)
{
	/* support(necessary for NPHY and HYPHY) only */
	if (!BRCMS_ISNPHY(wlc_hw->band))
		return;

	if (ON == clk)
		brcms_b_core_ioctl(wlc_hw, SICF_FGC, SICF_FGC);
	else
		brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0);
}

/* gate/ungate the MAC-PHY clock via the core ioctl register */
void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk)
{
	if (ON == clk)
		brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, SICF_MPCLKE);
	else
		brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, 0);
}

void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
{
	struct brcms_phy_pub *pih = wlc_hw->band->pi;
	u32 phy_bw_clkbits;
	bool phy_in_reset = false;

	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);

	if (pih == NULL)
		return;

	phy_bw_clkbits =
	    wlc_phy_clk_bwbits(wlc_hw->band->pi);

	/* Specific reset sequence required for NPHY rev 3 and 4 */
	if (BRCMS_ISNPHY(wlc_hw->band) && NREV_GE(wlc_hw->band->phyrev, 3) &&
	    NREV_LE(wlc_hw->band->phyrev, 4)) {
		/* Set the PHY bandwidth */
		brcms_b_core_ioctl(wlc_hw, SICF_BWMASK, phy_bw_clkbits);

		udelay(1);

		/* Perform a soft reset of the PHY PLL */
		brcms_b_core_phypll_reset(wlc_hw);

		/* reset the PHY */
		brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_PCLKE),
				   (SICF_PRST | SICF_PCLKE));
		phy_in_reset = true;
	} else {
		brcms_b_core_ioctl(wlc_hw,
				   (SICF_PRST | SICF_PCLKE | SICF_BWMASK),
				   (SICF_PRST | SICF_PCLKE | phy_bw_clkbits));
	}

	udelay(2);
	brcms_b_core_phy_clk(wlc_hw, ON);

	if (pih)
		wlc_phy_anacore(pih, ON);
}

/* switch to and initialize new band */
static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit,
			    u16 chanspec) {
	struct brcms_c_info *wlc = wlc_hw->wlc;
	u32 macintmask;

	/* Enable the d11 core before accessing it */
	if (!bcma_core_is_enabled(wlc_hw->d11core)) {
		bcma_core_enable(wlc_hw->d11core, 0);
		brcms_c_mctrl_reset(wlc_hw);
	}

	/* quiesce the MAC; returns the interrupt mask to restore later */
	macintmask = brcms_c_setband_inact(wlc, bandunit);

	if (!wlc_hw->up)
		return;

	brcms_b_core_phy_clk(wlc_hw, ON);

	/* band-specific initializations */
	brcms_b_bsinit(wlc, chanspec);

	/*
	 * If there are any pending software interrupt bits,
	 * then replace these with a harmless nonzero value
	 * so brcms_c_dpc() will re-enable interrupts when done.
	 */
	if (wlc->macintstatus)
		wlc->macintstatus = MI_DMAINT;

	/* restore macintmask */
	brcms_intrsrestore(wlc->wl, macintmask);

	/* ucode should still be suspended..
 */
	WARN_ON((bcma_read32(wlc_hw->d11core, D11REGOFFS(maccontrol)) &
		 MCTL_EN_MAC) != 0);
}

/* reject d11 core revisions this driver was not built to support */
static bool brcms_c_isgoodchip(struct brcms_hardware *wlc_hw)
{
	/* reject unsupported corerev */
	if (!CONF_HAS(D11CONF, wlc_hw->corerev)) {
		wiphy_err(wlc_hw->wlc->wiphy, "unsupported core rev %d\n",
			  wlc_hw->corerev);
		return false;
	}

	return true;
}

/* Validate some board info parameters */
static bool brcms_c_validboardtype(struct brcms_hardware *wlc_hw)
{
	uint boardrev = wlc_hw->boardrev;

	/* 4 bits each for board type, major, minor, and tiny version */
	uint brt = (boardrev & 0xf000) >> 12;
	uint b0 = (boardrev & 0xf00) >> 8;
	uint b1 = (boardrev & 0xf0) >> 4;
	uint b2 = boardrev & 0xf;

	/* boards from other vendors are always considered valid */
	if (ai_get_boardvendor(wlc_hw->sih) != PCI_VENDOR_ID_BROADCOM)
		return true;

	/* do some boardrev sanity checks when boardvendor is Broadcom */
	if (boardrev == 0)
		return false;

	/* legacy one-byte boardrevs are accepted as-is */
	if (boardrev <= 0xff)
		return true;

	if ((brt > 2) || (brt == 0) || (b0 > 9) || (b0 == 0) || (b1 > 9) ||
	    (b2 > 9))
		return false;

	return true;
}

/*
 * Look up the device MAC address in SROM/NVRAM, falling back to the
 * per-interface variable for the single/multi-band cases.
 * Returns NULL (after logging) if no address variable exists.
 */
static char *brcms_c_get_macaddr(struct brcms_hardware *wlc_hw)
{
	enum brcms_srom_id var_id = BRCMS_SROM_MACADDR;
	char *macaddr;

	/* If macaddr exists, use it (Sromrev4, CIS, ...).
 */
	macaddr = getvar(wlc_hw->sih, var_id);
	if (macaddr != NULL)
		return macaddr;

	if (wlc_hw->_nbands > 1)
		var_id = BRCMS_SROM_ET1MACADDR;
	else
		var_id = BRCMS_SROM_IL0MACADDR;
	macaddr = getvar(wlc_hw->sih, var_id);
	if (macaddr == NULL)
		wiphy_err(wlc_hw->wlc->wiphy, "wl%d: wlc_get_macaddr: macaddr "
			  "getvar(%d) not found\n", wlc_hw->unit, var_id);

	return macaddr;
}

/* power both the pll and external oscillator on/off */
static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want)
{
	BCMMSG(wlc_hw->wlc->wiphy, "wl%d: want %d\n", wlc_hw->unit, want);

	/*
	 * dont power down if plldown is false or
	 * we must poll hw radio disable
	 */
	if (!want && wlc_hw->pllreq)
		return;

	if (wlc_hw->sih)
		ai_clkctl_xtal(wlc_hw->sih, XTAL | PLL, want);

	wlc_hw->sbclk = want;
	if (!wlc_hw->sbclk) {
		/* backplane clock is gone; mark core clock invalid too */
		wlc_hw->clk = false;
		if (wlc_hw->band && wlc_hw->band->pi)
			wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false);
	}
}

/*
 * Return true if radio is disabled, otherwise false.
 * hw radio disable signal is an external pin, users activate it asynchronously
 * this function could be called when driver is down and w/o clock
 * it operates on different registers depending on corerev and boardflag.
 */
static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
{
	bool v, clk, xtal;
	u32 flags = 0;

	xtal = wlc_hw->sbclk;
	if (!xtal)
		brcms_b_xtal(wlc_hw, ON);

	/* may need to take core out of reset first */
	clk = wlc_hw->clk;
	if (!clk) {
		/*
		 * mac no longer enables phyclk automatically when driver
		 * accesses phyreg throughput mac.
This can be skipped since
		 * only mac reg is accessed below
		 */
		flags |= SICF_PCLKE;

		/*
		 * TODO: test suspend/resume
		 *
		 * AI chip doesn't restore bar0win2 on
		 * hibernation/resume, need sw fixup
		 */

		bcma_core_enable(wlc_hw->d11core, flags);
		brcms_c_mctrl_reset(wlc_hw);
	}

	v = ((bcma_read32(wlc_hw->d11core,
			  D11REGOFFS(phydebug)) & PDBG_RFD) != 0);

	/* put core back into reset */
	if (!clk)
		bcma_core_disable(wlc_hw->d11core, 0);

	if (!xtal)
		brcms_b_xtal(wlc_hw, OFF);

	return v;
}

/* reset the rx dma engine for the given fifo; returns dma_rxreset result */
static bool wlc_dma_rxreset(struct brcms_hardware *wlc_hw, uint fifo)
{
	struct dma_pub *di = wlc_hw->di[fifo];
	return dma_rxreset(di);
}

/* d11 core reset
 *   ensure fast clock during reset
 *   reset dma
 *   reset d11(out of reset)
 *   reset phy(out of reset)
 *   clear software macintstatus for fresh new start
 * one testing hack wlc_hw->noreset will bypass the d11/phy reset
 */
void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
{
	uint i;
	bool fastclk;

	if (flags == BRCMS_USE_COREFLAGS)
		flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0);

	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);

	/* request FAST clock if not on */
	fastclk = wlc_hw->forcefastclk;
	if (!fastclk)
		brcms_b_clkctl_clk(wlc_hw, CLK_FAST);

	/* reset the dma engines except first time thru */
	if (bcma_core_is_enabled(wlc_hw->d11core)) {
		for (i = 0; i < NFIFO; i++)
			if ((wlc_hw->di[i]) && (!dma_txreset(wlc_hw->di[i])))
				wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: "
					  "dma_txreset[%d]: cannot stop dma\n",
					  wlc_hw->unit, __func__, i);
		if ((wlc_hw->di[RX_FIFO])
		    && (!wlc_dma_rxreset(wlc_hw, RX_FIFO)))
			wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: dma_rxreset"
				  "[%d]: cannot stop dma\n",
				  wlc_hw->unit, __func__, RX_FIFO);
	}
	/* if noreset, just stop the psm and return */
	if (wlc_hw->noreset) {
		wlc_hw->wlc->macintstatus = 0;	/* skip wl_dpc after down */
		brcms_b_mctrl(wlc_hw, MCTL_PSM_RUN | MCTL_EN_MAC, 0);
		return;
	}

	/*
	 * mac no longer enables phyclk automatically when driver accesses
	 * phyreg throughput mac, AND phy_reset is skipped at early stage when
	 *
band->pi is invalid. need to enable PHY CLK
	 */
	flags |= SICF_PCLKE;

	/*
	 * reset the core
	 * In chips with PMU, the fastclk request goes through d11 core
	 * reg 0x1e0, which is cleared by the core_reset. have to re-request it.
	 *
	 * This adds some delay and we can optimize it by also requesting
	 * fastclk through chipcommon during this period if necessary. But
	 * that has to work coordinate with other driver like mips/arm since
	 * they may touch chipcommon as well.
	 */
	wlc_hw->clk = false;
	bcma_core_enable(wlc_hw->d11core, flags);
	wlc_hw->clk = true;
	if (wlc_hw->band && wlc_hw->band->pi)
		wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, true);

	brcms_c_mctrl_reset(wlc_hw);

	/* re-request fast clock (cleared by the core reset on PMU chips) */
	if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU)
		brcms_b_clkctl_clk(wlc_hw, CLK_FAST);

	brcms_b_phy_reset(wlc_hw);

	/* turn on PHY_PLL */
	brcms_b_core_phypll_ctl(wlc_hw, true);

	/* clear sw intstatus */
	wlc_hw->wlc->macintstatus = 0;

	/* restore the clk setting */
	if (!fastclk)
		brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
}

/* txfifo sizes needs to be modified(increased) since the newer cores
 * have more memory.
 */
static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
{
	struct bcma_device *core = wlc_hw->d11core;
	u16 fifo_nu;
	u16 txfifo_startblk = TXFIFO_START_BLK, txfifo_endblk;
	u16 txfifo_def, txfifo_def1;
	u16 txfifo_cmd;

	/* tx fifos start at TXFIFO_START_BLK from the Base address */
	txfifo_startblk = TXFIFO_START_BLK;

	/* sequence of operations:  reset fifo, set fifo size, reset fifo */
	for (fifo_nu = 0; fifo_nu < NFIFO; fifo_nu++) {

		/* lay each fifo out contiguously; end block is inclusive */
		txfifo_endblk = txfifo_startblk + wlc_hw->xmtfifo_sz[fifo_nu];
		txfifo_def = (txfifo_startblk & 0xff) |
			     (((txfifo_endblk - 1) & 0xff) <<
			      TXFIFO_FIFOTOP_SHIFT);
		txfifo_def1 = ((txfifo_startblk >> 8) & 0x1) |
			      ((((txfifo_endblk - 1) >> 8) & 0x1) <<
			       TXFIFO_FIFOTOP_SHIFT);
		txfifo_cmd = TXFIFOCMD_RESET_MASK |
			     (fifo_nu << TXFIFOCMD_FIFOSEL_SHIFT);

		bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd);
		bcma_write16(core, D11REGOFFS(xmtfifodef), txfifo_def);
		bcma_write16(core, D11REGOFFS(xmtfifodef1), txfifo_def1);

		bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd);

		txfifo_startblk += wlc_hw->xmtfifo_sz[fifo_nu];
	}
	/*
	 * need to propagate to shm location to be in sync since ucode/hw won't
	 * do this
	 */
	brcms_b_write_shm(wlc_hw, M_FIFOSIZE0,
			  wlc_hw->xmtfifo_sz[TX_AC_BE_FIFO]);
	brcms_b_write_shm(wlc_hw, M_FIFOSIZE1,
			  wlc_hw->xmtfifo_sz[TX_AC_VI_FIFO]);
	brcms_b_write_shm(wlc_hw, M_FIFOSIZE2,
			  ((wlc_hw->xmtfifo_sz[TX_AC_VO_FIFO] << 8) |
			   wlc_hw->xmtfifo_sz[TX_AC_BK_FIFO]));
	brcms_b_write_shm(wlc_hw, M_FIFOSIZE3,
			  ((wlc_hw->xmtfifo_sz[TX_ATIM_FIFO] << 8) |
			   wlc_hw->xmtfifo_sz[TX_BCMC_FIFO]));
}

/* This function is used for changing the tsf frac register
 * If spur avoidance mode is off, the mac freq will be 80/120/160Mhz
 * If spur avoidance mode is on1, the mac freq will be 82/123/164Mhz
 * If spur avoidance mode is on2, the mac freq will be 84/126/168Mhz
 * HTPHY Formula is 2^26/freq(MHz) e.g.
 * For spuron2 - 126MHz -> 2^26/126 = 532610.0
 *  - 532610 = 0x82082 => tsf_clk_frac_h = 0x8, tsf_clk_frac_l = 0x2082
 * For spuron: 123MHz -> 2^26/123 = 545600.5
 *  - 545601 = 0x85341 => tsf_clk_frac_h = 0x8, tsf_clk_frac_l = 0x5341
 * For spur off: 120MHz -> 2^26/120 = 559240.5
 *  - 559241 = 0x88889 => tsf_clk_frac_h = 0x8, tsf_clk_frac_l = 0x8889
 */
void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
{
	struct bcma_device *core = wlc_hw->d11core;

	if ((ai_get_chip_id(wlc_hw->sih) == BCM43224_CHIP_ID) ||
	    (ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID)) {
		if (spurmode == WL_SPURAVOID_ON2) {	/* 126Mhz */
			bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x2082);
			bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
		} else if (spurmode == WL_SPURAVOID_ON1) {	/* 123Mhz */
			bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x5341);
			bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
		} else {	/* 120Mhz */
			bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x8889);
			bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
		}
	} else if (BRCMS_ISLCNPHY(wlc_hw->band)) {
		if (spurmode == WL_SPURAVOID_ON1) {	/* 82Mhz */
			bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x7CE0);
			bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC);
		} else {	/* 80Mhz */
			bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0xCCCD);
			bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC);
		}
	}
}

/* Initialize GPIOs that are controlled by D11 core */
static void brcms_c_gpio_init(struct brcms_c_info *wlc)
{
	struct brcms_hardware *wlc_hw = wlc->hw;
	u32 gc, gm;

	/* use GPIO select 0 to get all gpio signals from the gpio out reg */
	brcms_b_mctrl(wlc_hw, MCTL_GPOUT_SEL_MASK, 0);

	/*
	 * Common GPIO setup:
	 *      G0 = LED 0 = WLAN Activity
	 *      G1 = LED 1 = WLAN 2.4 GHz Radio State
	 *      G2 = LED 2 = WLAN 5 GHz Radio State
	 *      G4 = radio disable input (HI enabled, LO disabled)
	 */

	gc = gm = 0;

	/* Allocate GPIOs for mimo antenna diversity feature */
	if (wlc_hw->antsel_type == ANTSEL_2x3) {
		/* Enable antenna diversity, use 2x3 mode */
		brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN,
			    MHF3_ANTSEL_EN, BRCM_BAND_ALL);
		brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_MODE,
			    MHF3_ANTSEL_MODE, BRCM_BAND_ALL);

		/* init superswitch control */
		wlc_phy_antsel_init(wlc_hw->band->pi, false);

	} else if (wlc_hw->antsel_type == ANTSEL_2x4) {
		gm |= gc |= (BOARD_GPIO_12 | BOARD_GPIO_13);
		/*
		 * The board itself is powered by these GPIOs
		 * (when not sending pattern) so set them high
		 */
		bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_oe),
			   (BOARD_GPIO_12 | BOARD_GPIO_13));
		bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_out),
			   (BOARD_GPIO_12 | BOARD_GPIO_13));

		/* Enable antenna diversity, use 2x4 mode */
		brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN,
			    MHF3_ANTSEL_EN, BRCM_BAND_ALL);
		brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_MODE, 0,
			    BRCM_BAND_ALL);

		/* Configure the desired clock to be 4Mhz */
		brcms_b_write_shm(wlc_hw, M_ANTSEL_CLKDIV,
				  ANTSEL_CLKDIV_4MHZ);
	}

	/*
	 * gpio 9 controls the PA. ucode is responsible
	 * for wiggling out and oe
	 */
	if (wlc_hw->boardflags & BFL_PACTRL)
		gm |= gc |= BOARD_GPIO_PACTRL;

	/* apply to gpiocontrol register */
	ai_gpiocontrol(wlc_hw->sih, gm, gc, GPIO_DRV_PRIORITY);
}

/* stream nbytes of little-endian ucode words into ucode memory */
static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
			      const __le32 ucode[], const size_t nbytes)
{
	struct bcma_device *core = wlc_hw->d11core;
	uint i;
	uint count;

	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);

	count = (nbytes / sizeof(u32));

	/* auto-increment object addressing lets us stream the whole image */
	bcma_write32(core, D11REGOFFS(objaddr),
		     OBJADDR_AUTO_INC | OBJADDR_UCM_SEL);
	(void)bcma_read32(core, D11REGOFFS(objaddr));
	for (i = 0; i < count; i++)
		bcma_write32(core, D11REGOFFS(objdata), le32_to_cpu(ucode[i]));
}

/* load the ucode image matching this core revision and PHY, once */
static void brcms_ucode_download(struct brcms_hardware *wlc_hw)
{
	struct brcms_c_info *wlc;
	struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;

	wlc = wlc_hw->wlc;

	if (wlc_hw->ucode_loaded)
		return;

	if (D11REV_IS(wlc_hw->corerev, 23)) {
		if (BRCMS_ISNPHY(wlc_hw->band)) {
			brcms_ucode_write(wlc_hw, ucode->bcm43xx_16_mimo,
					  ucode->bcm43xx_16_mimosz);
			wlc_hw->ucode_loaded = true;
		} else
wiphy_err(wlc->wiphy, "%s: wl%d: unsupported phy in " "corerev %d\n", __func__, wlc_hw->unit, wlc_hw->corerev); } else if (D11REV_IS(wlc_hw->corerev, 24)) { if (BRCMS_ISLCNPHY(wlc_hw->band)) { brcms_ucode_write(wlc_hw, ucode->bcm43xx_24_lcn, ucode->bcm43xx_24_lcnsz); wlc_hw->ucode_loaded = true; } else { wiphy_err(wlc->wiphy, "%s: wl%d: unsupported phy in " "corerev %d\n", __func__, wlc_hw->unit, wlc_hw->corerev); } } } void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant) { /* update sw state */ wlc_hw->bmac_phytxant = phytxant; /* push to ucode if up */ if (!wlc_hw->up) return; brcms_c_ucode_txant_set(wlc_hw); } u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw) { return (u16) wlc_hw->wlc->stf->txant; } void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw, u8 antsel_type) { wlc_hw->antsel_type = antsel_type; /* Update the antsel type for phy module to use */ wlc_phy_antsel_type_set(wlc_hw->band->pi, antsel_type); } static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw) { bool fatal = false; uint unit; uint intstatus, idx; struct bcma_device *core = wlc_hw->d11core; struct wiphy *wiphy = wlc_hw->wlc->wiphy; unit = wlc_hw->unit; for (idx = 0; idx < NFIFO; idx++) { /* read intstatus register and ignore any non-error bits */ intstatus = bcma_read32(core, D11REGOFFS(intctrlregs[idx].intstatus)) & I_ERRORS; if (!intstatus) continue; BCMMSG(wlc_hw->wlc->wiphy, "wl%d: intstatus%d 0x%x\n", unit, idx, intstatus); if (intstatus & I_RO) { wiphy_err(wiphy, "wl%d: fifo %d: receive fifo " "overflow\n", unit, idx); fatal = true; } if (intstatus & I_PC) { wiphy_err(wiphy, "wl%d: fifo %d: descriptor error\n", unit, idx); fatal = true; } if (intstatus & I_PD) { wiphy_err(wiphy, "wl%d: fifo %d: data error\n", unit, idx); fatal = true; } if (intstatus & I_DE) { wiphy_err(wiphy, "wl%d: fifo %d: descriptor protocol " "error\n", unit, idx); fatal = true; } if (intstatus & I_RU) wiphy_err(wiphy, "wl%d: fifo %d: receive descriptor " "underflow\n", idx, unit); 
if (intstatus & I_XU) { wiphy_err(wiphy, "wl%d: fifo %d: transmit fifo " "underflow\n", idx, unit); fatal = true; } if (fatal) { brcms_fatal_error(wlc_hw->wlc->wl); /* big hammer */ break; } else bcma_write32(core, D11REGOFFS(intctrlregs[idx].intstatus), intstatus); } } void brcms_c_intrson(struct brcms_c_info *wlc) { struct brcms_hardware *wlc_hw = wlc->hw; wlc->macintmask = wlc->defmacintmask; bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask); } u32 brcms_c_intrsoff(struct brcms_c_info *wlc) { struct brcms_hardware *wlc_hw = wlc->hw; u32 macintmask; if (!wlc_hw->clk) return 0; macintmask = wlc->macintmask; /* isr can still happen */ bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), 0); (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(macintmask)); udelay(1); /* ensure int line is no longer driven */ wlc->macintmask = 0; /* return previous macintmask; resolve race between us and our isr */ return wlc->macintstatus ? 0 : macintmask; } void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask) { struct brcms_hardware *wlc_hw = wlc->hw; if (!wlc_hw->clk) return; wlc->macintmask = macintmask; bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask); } /* assumes that the d11 MAC is enabled */ static void brcms_b_tx_fifo_suspend(struct brcms_hardware *wlc_hw, uint tx_fifo) { u8 fifo = 1 << tx_fifo; /* Two clients of this code, 11h Quiet period and scanning. 
 */
	/* only suspend if not already suspended */
	if ((wlc_hw->suspended_fifos & fifo) == fifo)
		return;

	/* force the core awake only if not already */
	if (wlc_hw->suspended_fifos == 0)
		brcms_c_ucode_wake_override_set(wlc_hw,
						BRCMS_WAKE_OVERRIDE_TXFIFO);

	wlc_hw->suspended_fifos |= fifo;

	if (wlc_hw->di[tx_fifo]) {
		/*
		 * Suspending AMPDU transmissions in the middle can cause
		 * underflow which may result in mismatch between ucode and
		 * driver so suspend the mac before suspending the FIFO
		 */
		if (BRCMS_PHY_11N_CAP(wlc_hw->band))
			brcms_c_suspend_mac_and_wait(wlc_hw->wlc);

		dma_txsuspend(wlc_hw->di[tx_fifo]);

		if (BRCMS_PHY_11N_CAP(wlc_hw->band))
			brcms_c_enable_mac(wlc_hw->wlc);
	}
}

/* resume a tx fifo previously suspended by brcms_b_tx_fifo_suspend() */
static void brcms_b_tx_fifo_resume(struct brcms_hardware *wlc_hw,
				   uint tx_fifo)
{
	/* BMAC_NOTE: BRCMS_TX_FIFO_ENAB is done in brcms_c_dpc() for DMA case
	 * but need to be done here for PIO otherwise the watchdog will catch
	 * the inconsistency and fire
	 */
	/* Two clients of this code, 11h Quiet period and scanning. */
	if (wlc_hw->di[tx_fifo])
		dma_txresume(wlc_hw->di[tx_fifo]);

	/* allow core to sleep again */
	if (wlc_hw->suspended_fifos == 0)
		return;
	else {
		wlc_hw->suspended_fifos &= ~(1 << tx_fifo);
		/* drop the wake override once the last fifo is resumed */
		if (wlc_hw->suspended_fifos == 0)
			brcms_c_ucode_wake_override_clear(wlc_hw,
						BRCMS_WAKE_OVERRIDE_TXFIFO);
	}
}

/* precondition: requires the mac core to be enabled */
static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool mute_tx)
{
	static const u8 null_ether_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	if (mute_tx) {
		/* suspend tx fifos */
		brcms_b_tx_fifo_suspend(wlc_hw, TX_DATA_FIFO);
		brcms_b_tx_fifo_suspend(wlc_hw, TX_CTL_FIFO);
		brcms_b_tx_fifo_suspend(wlc_hw, TX_AC_BK_FIFO);
		brcms_b_tx_fifo_suspend(wlc_hw, TX_AC_VI_FIFO);

		/* zero the address match register so we do not send ACKs */
		brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET, null_ether_addr);
	} else {
		/* resume tx fifos */
		brcms_b_tx_fifo_resume(wlc_hw, TX_DATA_FIFO);
		brcms_b_tx_fifo_resume(wlc_hw, TX_CTL_FIFO);
		brcms_b_tx_fifo_resume(wlc_hw,
				       TX_AC_BK_FIFO);
		brcms_b_tx_fifo_resume(wlc_hw, TX_AC_VI_FIFO);

		/* Restore address */
		brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET,
				      wlc_hw->etheraddr);
	}

	wlc_phy_mute_upd(wlc_hw->band->pi, mute_tx, 0);

	if (mute_tx)
		brcms_c_ucode_mute_override_set(wlc_hw);
	else
		brcms_c_ucode_mute_override_clear(wlc_hw);
}

/* public wrapper: mute/unmute all transmissions */
void
brcms_c_mute(struct brcms_c_info *wlc, bool mute_tx)
{
	brcms_b_mute(wlc->hw, mute_tx);
}

/*
 * Read and clear macintmask and macintstatus and intstatus registers.
 * This routine should be called with interrupts off
 * Return:
 *   -1 if brcms_deviceremoved(wlc) evaluates to true;
 *   0 if the interrupt is not for us, or we are in some special cases;
 *   device interrupt status bits otherwise.
 */
static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
{
	struct brcms_hardware *wlc_hw = wlc->hw;
	struct bcma_device *core = wlc_hw->d11core;
	u32 macintstatus;

	/* macintstatus includes a DMA interrupt summary bit */
	macintstatus = bcma_read32(core, D11REGOFFS(macintstatus));

	BCMMSG(wlc->wiphy, "wl%d: macintstatus: 0x%x\n", wlc_hw->unit,
	       macintstatus);

	/* detect cardbus removed, in power down(suspend) and in reset */
	if (brcms_deviceremoved(wlc))
		return -1;

	/* brcms_deviceremoved() succeeds even when the core is still resetting,
	 * handle that case here.
	 */
	if (macintstatus == 0xffffffff)
		return 0;

	/* defer unsolicited interrupts */
	macintstatus &= (in_isr ? wlc->macintmask : wlc->defmacintmask);

	/* if not for us */
	if (macintstatus == 0)
		return 0;

	/* interrupts are already turned off for CFE build
	 * Caution: For CFE Turning off the interrupts again has some undesired
	 * consequences
	 */
	/* turn off the interrupts */
	bcma_write32(core, D11REGOFFS(macintmask), 0);
	(void)bcma_read32(core, D11REGOFFS(macintmask));
	wlc->macintmask = 0;

	/* clear device interrupts */
	bcma_write32(core, D11REGOFFS(macintstatus), macintstatus);

	/* MI_DMAINT is indication of non-zero intstatus */
	if (macintstatus & MI_DMAINT)
		/*
		 * only fifo interrupt enabled is I_RI in
		 * RX_FIFO.
If MI_DMAINT is set, assume it
		 * is set and clear the interrupt.
		 */
		bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intstatus),
			     DEF_RXINTMASK);

	return macintstatus;
}

/* Update wlc->macintstatus and wlc->intstatus[]. */
/* Return true if they are updated successfully. false otherwise */
bool brcms_c_intrsupd(struct brcms_c_info *wlc)
{
	u32 macintstatus;

	/* read and clear macintstatus and intstatus registers */
	macintstatus = wlc_intstatus(wlc, false);

	/* device is removed */
	if (macintstatus == 0xffffffff)
		return false;

	/* update interrupt status in software */
	wlc->macintstatus |= macintstatus;

	return true;
}

/*
 * First-level interrupt processing.
 * Return true if this was our interrupt, false otherwise.
 * *wantdpc will be set to true if further brcms_c_dpc() processing is required,
 * false otherwise.
 */
bool brcms_c_isr(struct brcms_c_info *wlc, bool *wantdpc)
{
	struct brcms_hardware *wlc_hw = wlc->hw;
	u32 macintstatus;

	*wantdpc = false;

	if (!wlc_hw->up || !wlc->macintmask)
		return false;

	/* read and clear macintstatus and intstatus registers */
	macintstatus = wlc_intstatus(wlc, true);

	if (macintstatus == 0xffffffff)
		wiphy_err(wlc->wiphy, "DEVICEREMOVED detected in the ISR code"
			  " path\n");

	/* it is not for us */
	if (macintstatus == 0)
		return false;

	*wantdpc = true;

	/* save interrupt status bits */
	wlc->macintstatus = macintstatus;

	return true;
}

void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
{
	struct brcms_hardware *wlc_hw = wlc->hw;
	struct bcma_device *core = wlc_hw->d11core;
	u32 mc, mi;
	struct wiphy *wiphy = wlc->wiphy;

	BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
	       wlc_hw->band->bandunit);

	/*
	 * Track overlapping suspend requests
	 */
	wlc_hw->mac_suspend_depth++;
	if (wlc_hw->mac_suspend_depth > 1)
		return;

	/* force the core awake */
	brcms_c_ucode_wake_override_set(wlc_hw, BRCMS_WAKE_OVERRIDE_MACSUSPEND);

	mc = bcma_read32(core, D11REGOFFS(maccontrol));

	/* 0xffffffff means the register read failed: treat as a dead chip */
	if (mc == 0xffffffff) {
		wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
			  __func__);
		brcms_down(wlc->wl);
		return;
	}
	WARN_ON(mc & MCTL_PSM_JMP_0);
	WARN_ON(!(mc & MCTL_PSM_RUN));
	WARN_ON(!(mc & MCTL_EN_MAC));

	mi = bcma_read32(core, D11REGOFFS(macintstatus));
	if (mi == 0xffffffff) {
		wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
			  __func__);
		brcms_down(wlc->wl);
		return;
	}
	WARN_ON(mi & MI_MACSSPNDD);

	brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, 0);

	/* spin until ucode acknowledges the suspend, or time out */
	SPINWAIT(!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD),
		 BRCMS_MAX_MAC_SUSPEND);

	if (!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD)) {
		wiphy_err(wiphy, "wl%d: wlc_suspend_mac_and_wait: waited %d uS"
			  " and MI_MACSSPNDD is still not on.\n",
			  wlc_hw->unit, BRCMS_MAX_MAC_SUSPEND);
		wiphy_err(wiphy, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, "
			  "psm_brc 0x%04x\n", wlc_hw->unit,
			  bcma_read32(core, D11REGOFFS(psmdebug)),
			  bcma_read32(core, D11REGOFFS(phydebug)),
			  bcma_read16(core, D11REGOFFS(psm_brc)));
	}

	mc = bcma_read32(core, D11REGOFFS(maccontrol));
	if (mc == 0xffffffff) {
		wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
			  __func__);
		brcms_down(wlc->wl);
		return;
	}
	WARN_ON(mc & MCTL_PSM_JMP_0);
	WARN_ON(!(mc & MCTL_PSM_RUN));
	WARN_ON(mc & MCTL_EN_MAC);
}

void brcms_c_enable_mac(struct brcms_c_info *wlc)
{
	struct brcms_hardware *wlc_hw = wlc->hw;
	struct bcma_device *core = wlc_hw->d11core;
	u32 mc, mi;

	BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
	       wlc->band->bandunit);

	/*
	 * Track overlapping suspend requests
	 */
	wlc_hw->mac_suspend_depth--;
	if (wlc_hw->mac_suspend_depth > 0)
		return;

	mc = bcma_read32(core, D11REGOFFS(maccontrol));
	WARN_ON(mc & MCTL_PSM_JMP_0);
	WARN_ON(mc & MCTL_EN_MAC);
	WARN_ON(!(mc & MCTL_PSM_RUN));

	brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, MCTL_EN_MAC);

	/* acknowledge the stale suspend interrupt status */
	bcma_write32(core, D11REGOFFS(macintstatus), MI_MACSSPNDD);

	mc = bcma_read32(core, D11REGOFFS(maccontrol));
	WARN_ON(mc & MCTL_PSM_JMP_0);
	WARN_ON(!(mc & MCTL_EN_MAC));
	WARN_ON(!(mc & MCTL_PSM_RUN));

	mi = bcma_read32(core, D11REGOFFS(macintstatus));
	WARN_ON(mi & MI_MACSSPNDD);
brcms_c_ucode_wake_override_clear(wlc_hw, BRCMS_WAKE_OVERRIDE_MACSUSPEND); } void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode) { wlc_hw->hw_stf_ss_opmode = stf_mode; if (wlc_hw->clk) brcms_upd_ofdm_pctl1_table(wlc_hw); } static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw) { struct bcma_device *core = wlc_hw->d11core; u32 w, val; struct wiphy *wiphy = wlc_hw->wlc->wiphy; BCMMSG(wiphy, "wl%d\n", wlc_hw->unit); /* Validate dchip register access */ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0); (void)bcma_read32(core, D11REGOFFS(objaddr)); w = bcma_read32(core, D11REGOFFS(objdata)); /* Can we write and read back a 32bit register? */ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0); (void)bcma_read32(core, D11REGOFFS(objaddr)); bcma_write32(core, D11REGOFFS(objdata), (u32) 0xaa5555aa); bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0); (void)bcma_read32(core, D11REGOFFS(objaddr)); val = bcma_read32(core, D11REGOFFS(objdata)); if (val != (u32) 0xaa5555aa) { wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, " "expected 0xaa5555aa\n", wlc_hw->unit, val); return false; } bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0); (void)bcma_read32(core, D11REGOFFS(objaddr)); bcma_write32(core, D11REGOFFS(objdata), (u32) 0x55aaaa55); bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0); (void)bcma_read32(core, D11REGOFFS(objaddr)); val = bcma_read32(core, D11REGOFFS(objdata)); if (val != (u32) 0x55aaaa55) { wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, " "expected 0x55aaaa55\n", wlc_hw->unit, val); return false; } bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0); (void)bcma_read32(core, D11REGOFFS(objaddr)); bcma_write32(core, D11REGOFFS(objdata), w); /* clear CFPStart */ bcma_write32(core, D11REGOFFS(tsf_cfpstart), 0); w = bcma_read32(core, D11REGOFFS(maccontrol)); if ((w != (MCTL_IHR_EN | MCTL_WAKE)) && (w != (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE))) { 
wiphy_err(wiphy, "wl%d: validate_chip_access: maccontrol = " "0x%x, expected 0x%x or 0x%x\n", wlc_hw->unit, w, (MCTL_IHR_EN | MCTL_WAKE), (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE)); return false; } return true; } #define PHYPLL_WAIT_US 100000 void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on) { struct bcma_device *core = wlc_hw->d11core; u32 tmp; BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); tmp = 0; if (on) { if ((ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) { bcma_set32(core, D11REGOFFS(clk_ctl_st), CCS_ERSRC_REQ_HT | CCS_ERSRC_REQ_D11PLL | CCS_ERSRC_REQ_PHYPLL); SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) & CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT, PHYPLL_WAIT_US); tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st)); if ((tmp & CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT) wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on PHY" " PLL failed\n", __func__); } else { bcma_set32(core, D11REGOFFS(clk_ctl_st), tmp | CCS_ERSRC_REQ_D11PLL | CCS_ERSRC_REQ_PHYPLL); SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) & (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL)) != (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL), PHYPLL_WAIT_US); tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st)); if ((tmp & (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL)) != (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL)) wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on " "PHY PLL failed\n", __func__); } } else { /* * Since the PLL may be shared, other cores can still * be requesting it; so we'll deassert the request but * not wait for status to comply. 
*/ bcma_mask32(core, D11REGOFFS(clk_ctl_st), ~CCS_ERSRC_REQ_PHYPLL); (void)bcma_read32(core, D11REGOFFS(clk_ctl_st)); } } static void brcms_c_coredisable(struct brcms_hardware *wlc_hw) { bool dev_gone; BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); dev_gone = brcms_deviceremoved(wlc_hw->wlc); if (dev_gone) return; if (wlc_hw->noreset) return; /* radio off */ wlc_phy_switch_radio(wlc_hw->band->pi, OFF); /* turn off analog core */ wlc_phy_anacore(wlc_hw->band->pi, OFF); /* turn off PHYPLL to save power */ brcms_b_core_phypll_ctl(wlc_hw, false); wlc_hw->clk = false; bcma_core_disable(wlc_hw->d11core, 0); wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false); } static void brcms_c_flushqueues(struct brcms_c_info *wlc) { struct brcms_hardware *wlc_hw = wlc->hw; uint i; /* free any posted tx packets */ for (i = 0; i < NFIFO; i++) if (wlc_hw->di[i]) { dma_txreclaim(wlc_hw->di[i], DMA_RANGE_ALL); wlc->core->txpktpend[i] = 0; BCMMSG(wlc->wiphy, "pktpend fifo %d clrd\n", i); } /* free any posted rx packets */ dma_rxreclaim(wlc_hw->di[RX_FIFO]); } static u16 brcms_b_read_objmem(struct brcms_hardware *wlc_hw, uint offset, u32 sel) { struct bcma_device *core = wlc_hw->d11core; u16 objoff = D11REGOFFS(objdata); bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2)); (void)bcma_read32(core, D11REGOFFS(objaddr)); if (offset & 2) objoff += 2; return bcma_read16(core, objoff); } static void brcms_b_write_objmem(struct brcms_hardware *wlc_hw, uint offset, u16 v, u32 sel) { struct bcma_device *core = wlc_hw->d11core; u16 objoff = D11REGOFFS(objdata); bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2)); (void)bcma_read32(core, D11REGOFFS(objaddr)); if (offset & 2) objoff += 2; bcma_write16(core, objoff, v); } /* * Read a single u16 from shared memory. * SHM 'offset' needs to be an even address */ u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset) { return brcms_b_read_objmem(wlc_hw, offset, OBJADDR_SHM_SEL); } /* * Write a single u16 to shared memory. 
 * SHM 'offset' needs to be an even address
 */
void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset, u16 v)
{
	brcms_b_write_objmem(wlc_hw, offset, v, OBJADDR_SHM_SEL);
}

/*
 * Copy a buffer to shared memory of specified type.
 * SHM 'offset' needs to be an even address and
 * Buffer length 'len' must be an even number of bytes
 * 'sel' selects the type of memory
 */
void brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw, uint offset,
			   const void *buf, int len, u32 sel)
{
	u16 v;
	const u8 *p = (const u8 *)buf;
	int i;

	/* silently reject odd offsets/lengths and empty buffers */
	if (len <= 0 || (offset & 1) || (len & 1))
		return;

	for (i = 0; i < len; i += 2) {
		/* assemble a little-endian u16 from two bytes */
		v = p[i] | (p[i + 1] << 8);
		brcms_b_write_objmem(wlc_hw, offset + i, v, sel);
	}
}

/*
 * Copy a piece of shared memory of specified type to a buffer.
 * SHM 'offset' needs to be an even address and
 * Buffer length 'len' must be an even number of bytes
 * 'sel' selects the type of memory
 */
void brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset,
			     void *buf, int len, u32 sel)
{
	u16 v;
	u8 *p = (u8 *) buf;
	int i;

	if (len <= 0 || (offset & 1) || (len & 1))
		return;

	for (i = 0; i < len; i += 2) {
		v = brcms_b_read_objmem(wlc_hw, offset + i, sel);
		p[i] = v & 0xFF;
		p[i + 1] = (v >> 8) & 0xFF;
	}
}

/* Copy a buffer to shared memory.
 * SHM 'offset' needs to be an even address and
 * Buffer length 'len' must be an even number of bytes
 */
static void brcms_c_copyto_shm(struct brcms_c_info *wlc, uint offset,
			       const void *buf, int len)
{
	brcms_b_copyto_objmem(wlc->hw, offset, buf, len, OBJADDR_SHM_SEL);
}

/* Cache the short/long retry limits and, when the core is up, push them
 * into the PSM scratch registers (SCR) via objaddr/objdata.
 */
static void brcms_b_retrylimit_upd(struct brcms_hardware *wlc_hw,
				   u16 SRL, u16 LRL)
{
	wlc_hw->SRL = SRL;
	wlc_hw->LRL = LRL;

	/* write retry limit to SCR, shouldn't need to suspend */
	if (wlc_hw->up) {
		bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
			     OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
		(void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
		bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata),
			     wlc_hw->SRL);
		bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
			     OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
		(void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
		bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata),
			     wlc_hw->LRL);
	}
}

/*
 * Set or clear one PLL request bit in the pllreq mbool; toggle the xtal
 * on/off when the request state crosses the BRCMS_PLLREQ_FLIP condition
 * and the backplane clock state disagrees.
 */
static void brcms_b_pllreq(struct brcms_hardware *wlc_hw, bool set, u32 req_bit)
{
	if (set) {
		if (mboolisset(wlc_hw->pllreq, req_bit))
			return;

		mboolset(wlc_hw->pllreq, req_bit);

		if (mboolisset(wlc_hw->pllreq, BRCMS_PLLREQ_FLIP)) {
			if (!wlc_hw->sbclk)
				brcms_b_xtal(wlc_hw, ON);
		}
	} else {
		if (!mboolisset(wlc_hw->pllreq, req_bit))
			return;

		mboolclr(wlc_hw->pllreq, req_bit);

		if (mboolisset(wlc_hw->pllreq, BRCMS_PLLREQ_FLIP)) {
			if (wlc_hw->sbclk)
				brcms_b_xtal(wlc_hw, OFF);
		}
	}
}

/* Record which antenna-selection options the hardware provides. */
static void brcms_b_antsel_set(struct brcms_hardware *wlc_hw, u32 antsel_avail)
{
	wlc_hw->antsel_avail = antsel_avail;
}

/*
 * conditions under which the PM bit should be set in outgoing frames
 * and STAY_AWAKE is meaningful
 */
static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
{
	struct brcms_bss_cfg *cfg = wlc->bsscfg;

	/* disallow PS when one of the following global conditions meets */
	if (!wlc->pub->associated)
		return false;

	/* disallow PS when one of these meets when not scanning */
	if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
		return false;

	if (cfg->associated) {
		/*
		 * disallow PS when one of the following
		 * bsscfg specific conditions meets
		 *
		 * NOTE(review): the unconditional "return false" below means
		 * PS is always denied while associated; this looks deliberate
		 * (power save unsupported) — confirm against driver history
		 * before changing.
		 */
		if (!cfg->BSS)
			return false;

		return false;
	}

	return true;
}

/*
 * Pull the ucode MAC statistics out of shared memory and reset the DMA
 * engine counters.
 *
 * NOTE(review): the freshly read 'macstats' is never copied into
 * macstat_snapshot here, so the DEBUG delta checks below compare the
 * snapshot against itself (always zero) — confirm whether the snapshot
 * is updated elsewhere in the full file.
 */
static void brcms_c_statsupd(struct brcms_c_info *wlc)
{
	int i;
	struct macstat macstats;
#ifdef DEBUG
	u16 delta;
	u16 rxf0ovfl;
	u16 txfunfl[NFIFO];
#endif				/* DEBUG */

	/* if driver down, make no sense to update stats */
	if (!wlc->pub->up)
		return;

#ifdef DEBUG
	/* save last rx fifo 0 overflow count */
	rxf0ovfl = wlc->core->macstat_snapshot->rxf0ovfl;

	/* save last tx fifo underflow count */
	for (i = 0; i < NFIFO; i++)
		txfunfl[i] = wlc->core->macstat_snapshot->txfunfl[i];
#endif				/* DEBUG */

	/* Read mac stats from contiguous shared memory */
	brcms_b_copyfrom_objmem(wlc->hw, M_UCODE_MACSTAT, &macstats,
				sizeof(struct macstat), OBJADDR_SHM_SEL);

#ifdef DEBUG
	/* check for rx fifo 0 overflow */
	delta = (u16) (wlc->core->macstat_snapshot->rxf0ovfl - rxf0ovfl);
	if (delta)
		wiphy_err(wlc->wiphy, "wl%d: %u rx fifo 0 overflows!\n",
			  wlc->pub->unit, delta);

	/* check for tx fifo underflows */
	for (i = 0; i < NFIFO; i++) {
		delta = (u16) (wlc->core->macstat_snapshot->txfunfl[i] -
			       txfunfl[i]);
		if (delta)
			wiphy_err(wlc->wiphy, "wl%d: %u tx fifo %d underflows!"
				  "\n", wlc->pub->unit, delta, i);
	}
#endif				/* DEBUG */

	/* merge counters from dma module */
	for (i = 0; i < NFIFO; i++) {
		if (wlc->hw->di[i])
			dma_counterreset(wlc->hw->di[i]);
	}
}

/* Reset the d11 core (unless the device is gone) and purge the DMA rings. */
static void brcms_b_reset(struct brcms_hardware *wlc_hw)
{
	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);

	/* reset the core */
	if (!brcms_deviceremoved(wlc_hw->wlc))
		brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);

	/* purge the dma rings */
	brcms_c_flushqueues(wlc_hw->wlc);
}

void brcms_c_reset(struct brcms_c_info *wlc)
{
	BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);

	/* slurp up hw mac counters before core reset */
	brcms_c_statsupd(wlc);

	/* reset our snapshot of macstat counters */
	memset((char *)wlc->core->macstat_snapshot, 0,
	       sizeof(struct macstat));

	brcms_b_reset(wlc->hw);
}

/* Return the channel the driver should initialize during brcms_c_init.
 * the channel may have to be changed from the currently configured channel
 * if other configurations are in conflict (bandlocked, 11n mode disabled,
 * invalid channel for current country, etc.)
 */
static u16 brcms_c_init_chanspec(struct brcms_c_info *wlc)
{
	/* hard-coded default: 2.4 GHz channel 1, 20 MHz, no sideband */
	u16 chanspec =
	    1 | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE |
	    WL_CHANSPEC_BAND_2G;

	return chanspec;
}

/* Initialize a station control block: zero it, mark WME/HT capable, and
 * reset per-priority sequence bookkeeping.
 */
void brcms_c_init_scb(struct scb *scb)
{
	int i;

	memset(scb, 0, sizeof(struct scb));
	scb->flags = SCB_WMECAP | SCB_HTCAP;
	for (i = 0; i < NUMPRIO; i++) {
		scb->seqnum[i] = 0;
		scb->seqctl[i] = 0xFFFF;
	}

	scb->seqctl_nonqos = 0xFFFF;
	scb->magic = SCB_MAGIC;
}

/* d11 core init
 *   reset PSM
 *   download ucode/PCM
 *   let ucode run to suspended
 *   download ucode inits
 *   config other core registers
 *   init dma
 */
static void brcms_b_coreinit(struct brcms_c_info *wlc)
{
	struct brcms_hardware *wlc_hw = wlc->hw;
	struct bcma_device *core = wlc_hw->d11core;
	u32 sflags;
	u32 bcnint_us;
	uint i = 0;
	bool fifosz_fixup = false;
	int err = 0;
	u16 buf[NFIFO];
	struct wiphy *wiphy = wlc->wiphy;
	struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;

	BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);

	/* reset PSM */
	brcms_b_mctrl(wlc_hw, ~0, (MCTL_IHR_EN | MCTL_PSM_JMP_0 | MCTL_WAKE));

	brcms_ucode_download(wlc_hw);
	/*
	 * FIFOSZ fixup. driver wants to controls the fifo allocation.
	 */
	fifosz_fixup = true;

	/* let the PSM run to the suspended state, set mode to BSS STA */
	bcma_write32(core, D11REGOFFS(macintstatus), -1);
	brcms_b_mctrl(wlc_hw, ~0,
		      (MCTL_IHR_EN | MCTL_INFRA | MCTL_PSM_RUN | MCTL_WAKE));

	/* wait for ucode to self-suspend after auto-init */
	SPINWAIT(((bcma_read32(core, D11REGOFFS(macintstatus)) &
		   MI_MACSSPNDD) == 0), 1000 * 1000);
	if ((bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD) == 0)
		wiphy_err(wiphy, "wl%d: wlc_coreinit: ucode did not self-"
			  "suspend!\n", wlc_hw->unit);

	brcms_c_gpio_init(wlc);

	sflags = bcma_aread32(core, BCMA_IOST);

	/* pick the initvals matching the core revision and PHY type */
	if (D11REV_IS(wlc_hw->corerev, 23)) {
		if (BRCMS_ISNPHY(wlc_hw->band))
			brcms_c_write_inits(wlc_hw, ucode->d11n0initvals16);
		else
			wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev"
				  " %d\n", __func__, wlc_hw->unit,
				  wlc_hw->corerev);
	} else if (D11REV_IS(wlc_hw->corerev, 24)) {
		if (BRCMS_ISLCNPHY(wlc_hw->band))
			brcms_c_write_inits(wlc_hw, ucode->d11lcn0initvals24);
		else
			wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev"
				  " %d\n", __func__, wlc_hw->unit,
				  wlc_hw->corerev);
	} else {
		wiphy_err(wiphy, "%s: wl%d: unsupported corerev %d\n",
			  __func__, wlc_hw->unit, wlc_hw->corerev);
	}

	/* For old ucode, txfifo sizes needs to be modified(increased) */
	if (fifosz_fixup)
		brcms_b_corerev_fifofixup(wlc_hw);

	/* check txfifo allocations match between ucode and driver */
	buf[TX_AC_BE_FIFO] = brcms_b_read_shm(wlc_hw, M_FIFOSIZE0);
	if (buf[TX_AC_BE_FIFO] != wlc_hw->xmtfifo_sz[TX_AC_BE_FIFO]) {
		i = TX_AC_BE_FIFO;
		err = -1;
	}
	buf[TX_AC_VI_FIFO] = brcms_b_read_shm(wlc_hw, M_FIFOSIZE1);
	if (buf[TX_AC_VI_FIFO] != wlc_hw->xmtfifo_sz[TX_AC_VI_FIFO]) {
		i = TX_AC_VI_FIFO;
		err = -1;
	}
	/* M_FIFOSIZE2 packs BK (low byte) and VO (high byte) */
	buf[TX_AC_BK_FIFO] = brcms_b_read_shm(wlc_hw, M_FIFOSIZE2);
	buf[TX_AC_VO_FIFO] = (buf[TX_AC_BK_FIFO] >> 8) & 0xff;
	buf[TX_AC_BK_FIFO] &= 0xff;
	if (buf[TX_AC_BK_FIFO] != wlc_hw->xmtfifo_sz[TX_AC_BK_FIFO]) {
		i = TX_AC_BK_FIFO;
		err = -1;
	}
	if (buf[TX_AC_VO_FIFO] != wlc_hw->xmtfifo_sz[TX_AC_VO_FIFO]) {
		i = TX_AC_VO_FIFO;
		err = -1;
	}
	/* M_FIFOSIZE3 packs BCMC (low byte) and ATIM (high byte) */
	buf[TX_BCMC_FIFO] = brcms_b_read_shm(wlc_hw, M_FIFOSIZE3);
	buf[TX_ATIM_FIFO] = (buf[TX_BCMC_FIFO] >> 8) & 0xff;
	buf[TX_BCMC_FIFO] &= 0xff;
	if (buf[TX_BCMC_FIFO] != wlc_hw->xmtfifo_sz[TX_BCMC_FIFO]) {
		i = TX_BCMC_FIFO;
		err = -1;
	}
	if (buf[TX_ATIM_FIFO] != wlc_hw->xmtfifo_sz[TX_ATIM_FIFO]) {
		i = TX_ATIM_FIFO;
		err = -1;
	}
	if (err != 0)
		wiphy_err(wiphy, "wlc_coreinit: txfifo mismatch: ucode size %d"
			  " driver size %d index %d\n", buf[i],
			  wlc_hw->xmtfifo_sz[i], i);

	/* make sure we can still talk to the mac */
	WARN_ON(bcma_read32(core, D11REGOFFS(maccontrol)) == 0xffffffff);

	/* band-specific inits done by wlc_bsinit() */

	/* Set up frame burst size and antenna swap threshold init values */
	brcms_b_write_shm(wlc_hw, M_MBURST_SIZE, MAXTXFRAMEBURST);
	brcms_b_write_shm(wlc_hw, M_MAX_ANTCNT, ANTCNT);

	/* enable one rx interrupt per received frame */
	bcma_write32(core, D11REGOFFS(intrcvlazy[0]), (1 << IRL_FC_SHIFT));

	/* set the station mode (BSS STA) */
	brcms_b_mctrl(wlc_hw,
		      (MCTL_INFRA | MCTL_DISCARD_PMQ | MCTL_AP),
		      (MCTL_INFRA | MCTL_DISCARD_PMQ));

	/* set up Beacon interval */
	bcnint_us = 0x8000 << 10;
	bcma_write32(core, D11REGOFFS(tsf_cfprep),
		     (bcnint_us << CFPREP_CBI_SHIFT));
	bcma_write32(core, D11REGOFFS(tsf_cfpstart), bcnint_us);
	bcma_write32(core, D11REGOFFS(macintstatus), MI_GP1);

	/* write interrupt mask */
	bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intmask),
		     DEF_RXINTMASK);

	/* allow the MAC to control the PHY clock (dynamic on/off) */
	brcms_b_macphyclk_set(wlc_hw, ON);

	/* program dynamic clock control fast powerup delay register */
	wlc->fastpwrup_dly = ai_clkctl_fast_pwrup_delay(wlc_hw->sih);
	bcma_write16(core, D11REGOFFS(scc_fastpwrup_dly), wlc->fastpwrup_dly);

	/* tell the ucode the corerev */
	brcms_b_write_shm(wlc_hw, M_MACHW_VER, (u16) wlc_hw->corerev);

	/* tell the ucode MAC capabilities */
	brcms_b_write_shm(wlc_hw, M_MACHW_CAP_L,
			  (u16) (wlc_hw->machwcap & 0xffff));
	brcms_b_write_shm(wlc_hw, M_MACHW_CAP_H,
			  (u16) ((wlc_hw->machwcap >> 16) & 0xffff));

	/* write retry limits to SCR, this done after PSM init */
	bcma_write32(core, D11REGOFFS(objaddr),
		     OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
	(void)bcma_read32(core, D11REGOFFS(objaddr));
	bcma_write32(core, D11REGOFFS(objdata), wlc_hw->SRL);
	bcma_write32(core, D11REGOFFS(objaddr),
		     OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
	(void)bcma_read32(core, D11REGOFFS(objaddr));
	bcma_write32(core, D11REGOFFS(objdata), wlc_hw->LRL);

	/* write rate fallback retry limits */
	brcms_b_write_shm(wlc_hw, M_SFRMTXCNTFBRTHSD, wlc_hw->SFBL);
	brcms_b_write_shm(wlc_hw, M_LFRMTXCNTFBRTHSD, wlc_hw->LFBL);

	bcma_mask16(core, D11REGOFFS(ifs_ctl), 0x0FFF);
	bcma_write16(core, D11REGOFFS(ifs_aifsn), EDCF_AIFSN_MIN);

	/* init the tx dma engines */
	for (i = 0; i < NFIFO; i++) {
		if (wlc_hw->di[i])
			dma_txinit(wlc_hw->di[i]);
	}

	/* init the rx dma engine(s) and post receive buffers */
	dma_rxinit(wlc_hw->di[RX_FIFO]);
	dma_rxfill(wlc_hw->di[RX_FIFO]);
}

/* NOTE(review): "void static" is legal C (storage class after type) but
 * unconventional ordering; kept byte-identical here.
 */
void
static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec)
{
	u32 macintmask;
	bool fastclk;
	struct brcms_c_info *wlc = wlc_hw->wlc;

	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);

	/* request FAST clock if not on */
	fastclk = wlc_hw->forcefastclk;
	if (!fastclk)
		brcms_b_clkctl_clk(wlc_hw, CLK_FAST);

	/* disable interrupts */
	macintmask = brcms_intrsoff(wlc->wl);

	/* set up the specified band and chanspec */
	brcms_c_setxband(wlc_hw, chspec_bandunit(chanspec));
	wlc_phy_chanspec_radio_set(wlc_hw->band->pi, chanspec);

	/* do one-time phy inits and calibration */
	wlc_phy_cal_init(wlc_hw->band->pi);

	/* core-specific initialization */
	brcms_b_coreinit(wlc);

	/* band-specific inits */
	brcms_b_bsinit(wlc, chanspec);

	/* restore macintmask */
	brcms_intrsrestore(wlc->wl, macintmask);

	/* seed wake_override with BRCMS_WAKE_OVERRIDE_MACSUSPEND since the mac
	 * is suspended and brcms_c_enable_mac() will clear this override bit.
	 */
	mboolset(wlc_hw->wake_override, BRCMS_WAKE_OVERRIDE_MACSUSPEND);

	/*
	 * initialize mac_suspend_depth to 1 to match ucode
	 * initial suspended state
	 */
	wlc_hw->mac_suspend_depth = 1;

	/* restore the clk */
	if (!fastclk)
		brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
}

/* Record the new chanspec, program channel/power limits for the locale,
 * and refresh the spatial-stream selection for the band.
 */
static void brcms_c_set_phy_chanspec(struct brcms_c_info *wlc,
				     u16 chanspec)
{
	/* Save our copy of the chanspec */
	wlc->chanspec = chanspec;

	/* Set the chanspec and power limits for this locale */
	brcms_c_channel_set_chanspec(wlc->cmi, chanspec, BRCMS_TXPWR_MAX);

	if (wlc->stf->ss_algosel_auto)
		brcms_c_stf_ss_algo_channel_get(wlc, &wlc->stf->ss_algo_channel,
						chanspec);

	brcms_c_stf_ss_update(wlc, wlc->band);
}

/* Build the default rateset for the current band/PHY configuration. */
static void
brcms_default_rateset(struct brcms_c_info *wlc, struct brcms_c_rateset *rs)
{
	brcms_c_rateset_default(rs, NULL, wlc->band->phytype,
				wlc->band->bandtype, false,
				BRCMS_RATE_MASK_FULL,
				(bool) (wlc->pub->_n_enab & SUPPORT_11N),
				brcms_chspec_bw(wlc->default_bss->chanspec),
				wlc->stf->txstreams);
}

/* derive wlc->band->basic_rate[] table from 'rateset' */
static void brcms_c_rate_lookup_init(struct brcms_c_info *wlc,
				     struct brcms_c_rateset *rateset)
{
	u8 rate;
	u8 mandatory;
	u8 cck_basic = 0;
	u8 ofdm_basic = 0;
	u8 *br = wlc->band->basic_rate;
	uint i;

	/* incoming rates are in 500kbps units as in 802.11 Supported Rates */
	memset(br, 0, BRCM_MAXRATE + 1);

	/* For each basic rate in the rates list, make an entry in the
	 * best basic lookup.
	 */
	for (i = 0; i < rateset->count; i++) {
		/* only make an entry for a basic rate */
		if (!(rateset->rates[i] & BRCMS_RATE_FLAG))
			continue;

		/* mask off basic bit */
		rate = (rateset->rates[i] & BRCMS_RATE_MASK);

		if (rate > BRCM_MAXRATE) {
			wiphy_err(wlc->wiphy, "brcms_c_rate_lookup_init: "
				  "invalid rate 0x%X in rate set\n",
				  rateset->rates[i]);
			continue;
		}

		br[rate] = rate;
	}

	/* The rate lookup table now has non-zero entries for each
	 * basic rate, equal to the basic rate: br[basicN] = basicN
	 *
	 * To look up the best basic rate corresponding to any
	 * particular rate, code can use the basic_rate table
	 * like this
	 *
	 * basic_rate = wlc->band->basic_rate[tx_rate]
	 *
	 * Make sure there is a best basic rate entry for
	 * every rate by walking up the table from low rates
	 * to high, filling in holes in the lookup table
	 */
	for (i = 0; i < wlc->band->hw_rateset.count; i++) {
		rate = wlc->band->hw_rateset.rates[i];

		if (br[rate] != 0) {
			/* This rate is a basic rate.
			 * Keep track of the best basic rate so far by
			 * modulation type.
			 */
			if (is_ofdm_rate(rate))
				ofdm_basic = rate;
			else
				cck_basic = rate;

			continue;
		}

		/* This rate is not a basic rate so figure out the
		 * best basic rate less than this rate and fill in
		 * the hole in the table
		 */
		br[rate] = is_ofdm_rate(rate) ? ofdm_basic : cck_basic;

		if (br[rate] != 0)
			continue;

		/* no lower basic rate of the same modulation exists:
		 * fall back to the 802.11 mandatory rate
		 */
		if (is_ofdm_rate(rate)) {
			/*
			 * In 11g and 11a, the OFDM mandatory rates
			 * are 6, 12, and 24 Mbps
			 */
			if (rate >= BRCM_RATE_24M)
				mandatory = BRCM_RATE_24M;
			else if (rate >= BRCM_RATE_12M)
				mandatory = BRCM_RATE_12M;
			else
				mandatory = BRCM_RATE_6M;
		} else {
			/* In 11b, all CCK rates are mandatory 1 - 11 Mbps */
			mandatory = rate;
		}

		br[rate] = mandatory;
	}
}

/* Initialize software state for every band, leaving the "park" band's
 * state active last, then sync the phy/radio chanspec.
 */
static void brcms_c_bandinit_ordered(struct brcms_c_info *wlc,
				     u16 chanspec)
{
	struct brcms_c_rateset default_rateset;
	uint parkband;
	uint i, band_order[2];

	BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
	/*
	 * We might have been bandlocked during down and the chip
	 * power-cycled (hibernate). Figure out the right band to park on
	 */
	if (wlc->bandlocked || wlc->pub->_nbands == 1) {
		/* updated in brcms_c_bandlock() */
		parkband = wlc->band->bandunit;
		band_order[0] = band_order[1] = parkband;
	} else {
		/* park on the band of the specified chanspec */
		parkband = chspec_bandunit(chanspec);

		/* order so that parkband initialize last */
		band_order[0] = parkband ^ 1;
		band_order[1] = parkband;
	}

	/* make each band operational, software state init */
	for (i = 0; i < wlc->pub->_nbands; i++) {
		uint j = band_order[i];

		wlc->band = wlc->bandstate[j];

		brcms_default_rateset(wlc, &default_rateset);

		/* fill in hw_rate */
		brcms_c_rateset_filter(&default_rateset, &wlc->band->hw_rateset,
				       false, BRCMS_RATES_CCK_OFDM,
				       BRCMS_RATE_MASK,
				       (bool) (wlc->pub->_n_enab &
					       SUPPORT_11N));

		/* init basic rate lookup */
		brcms_c_rate_lookup_init(wlc, &default_rateset);
	}

	/* sync up phy/radio chanspec */
	brcms_c_set_phy_chanspec(wlc, chanspec);
}

/*
 * Set or clear filtering related maccontrol bits based on
 * specified filter flags
 */
void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags)
{
	u32 promisc_bits = 0;

	wlc->filter_flags = filter_flags;

	if (filter_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
		promisc_bits |= MCTL_PROMISC;

	if (filter_flags & FIF_BCN_PRBRESP_PROMISC)
		promisc_bits |= MCTL_BCNS_PROMISC;

	if (filter_flags & FIF_FCSFAIL)
		promisc_bits |= MCTL_KEEPBADFCS;

	if (filter_flags & (FIF_CONTROL | FIF_PSPOLL))
		promisc_bits |= MCTL_KEEPCONTROL;

	brcms_b_mctrl(wlc->hw,
		      MCTL_PROMISC | MCTL_BCNS_PROMISC |
		      MCTL_KEEPCONTROL | MCTL_KEEPBADFCS,
		      promisc_bits);
}

/*
 * ucode, hwmac update
 * Channel dependent updates for ucode and hw
 */
static void brcms_c_ucode_mac_upd(struct brcms_c_info *wlc)
{
	/* enable or disable any active IBSSs depending on whether or not
	 * we are on the home channel
	 */
	if (wlc->home_chanspec == wlc_phy_chanspec_get(wlc->band->pi)) {
		if (wlc->pub->associated) {
			/*
			 * BMAC_NOTE: This is something that should be fixed
			 * in ucode inits.
			 * I think that the ucode inits set
			 * up the bcn templates and shm values with a bogus
			 * beacon. This should not be done in the inits. If
			 * ucode needs to set up a beacon for testing, the
			 * test routines should write it down, not expect the
			 * inits to populate a bogus beacon.
			 */
			if (BRCMS_PHY_11N_CAP(wlc->band))
				brcms_b_write_shm(wlc->hw,
						  M_BCN_TXTSF_OFFSET, 0);
		}
	} else {
		/* disable an active IBSS if we are not on the home channel */
	}
}

/*
 * Point the SHM BSS-basic-rate-set mapping entry for 'rate' at the
 * direct-map table entry of its responding 'basic_rate', so the ucode
 * answers frames sent at 'rate' with the right ACK/CTS rate.
 */
static void brcms_c_write_rate_shm(struct brcms_c_info *wlc, u8 rate,
				   u8 basic_rate)
{
	u8 phy_rate, index;
	u8 basic_phy_rate, basic_index;
	u16 dir_table, basic_table;
	u16 basic_ptr;

	/* Shared memory address for the table we are reading */
	dir_table = is_ofdm_rate(basic_rate) ? M_RT_DIRMAP_A : M_RT_DIRMAP_B;

	/* Shared memory address for the table we are writing */
	basic_table = is_ofdm_rate(rate) ? M_RT_BBRSMAP_A : M_RT_BBRSMAP_B;

	/*
	 * for a given rate, the LS-nibble of the PLCP SIGNAL field is
	 * the index into the rate table.
	 */
	phy_rate = rate_info[rate] & BRCMS_RATE_MASK;
	basic_phy_rate = rate_info[basic_rate] & BRCMS_RATE_MASK;
	index = phy_rate & 0xf;
	basic_index = basic_phy_rate & 0xf;

	/* Find the SHM pointer to the ACK rate entry by looking in the
	 * Direct-map Table
	 */
	basic_ptr = brcms_b_read_shm(wlc->hw, (dir_table + basic_index * 2));

	/* Update the SHM BSS-basic-rate-set mapping table with the pointer
	 * to the correct basic rate for the given incoming rate
	 */
	brcms_b_write_shm(wlc->hw, (basic_table + index * 2), basic_ptr);
}

/* Select the hardware rateset matching the band's PHY capabilities. */
static const struct brcms_c_rateset *
brcms_c_rateset_get_hwrs(struct brcms_c_info *wlc)
{
	const struct brcms_c_rateset *rs_dflt;

	if (BRCMS_PHY_11N_CAP(wlc->band)) {
		if (wlc->band->bandtype == BRCM_BAND_5G)
			rs_dflt = &ofdm_mimo_rates;
		else
			rs_dflt = &cck_ofdm_mimo_rates;
	} else if (wlc->band->gmode)
		rs_dflt = &cck_ofdm_rates;
	else
		rs_dflt = &cck_rates;

	return rs_dflt;
}

/* Walk the PHY rate table and program the SHM ACK/CTS basic-rate lookup. */
static void brcms_c_set_ratetable(struct brcms_c_info *wlc)
{
	const struct brcms_c_rateset *rs_dflt;
	struct brcms_c_rateset rs;
	u8 rate, basic_rate;
	uint i;

	rs_dflt = brcms_c_rateset_get_hwrs(wlc);

	brcms_c_rateset_copy(rs_dflt, &rs);
	brcms_c_rateset_mcs_upd(&rs, wlc->stf->txstreams);

	/* walk the phy rate table and update SHM basic rate lookup table */
	for (i = 0; i < rs.count; i++) {
		rate = rs.rates[i] & BRCMS_RATE_MASK;

		/* for a given rate brcms_basic_rate returns the rate at
		 * which a response ACK/CTS should be sent.
		 */
		basic_rate = brcms_basic_rate(wlc, rate);
		if (basic_rate == 0)
			/* This should only happen if we are using a
			 * restricted rateset.
			 */
			basic_rate = rs.rates[0] & BRCMS_RATE_MASK;

		brcms_c_write_rate_shm(wlc, rate, basic_rate);
	}
}

/* band-specific init */
static void brcms_c_bsinit(struct brcms_c_info *wlc)
{
	BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n",
	       wlc->pub->unit, wlc->band->bandunit);

	/* write ucode ACK/CTS rate table */
	brcms_c_set_ratetable(wlc);

	/* update some band specific mac configuration */
	brcms_c_ucode_mac_upd(wlc);

	/* init antenna selection */
	brcms_c_antsel_init(wlc->asi);
}

/* formula: IDLE_BUSY_RATIO_X_16 = (100-duty_cycle)/duty_cycle*16 */
static int brcms_c_duty_cycle_set(struct brcms_c_info *wlc, int duty_cycle,
				  bool isOFDM, bool writeToShm)
{
	int idle_busy_ratio_x_16 = 0;
	uint offset = isOFDM ?
	    M_TX_IDLE_BUSY_RATIO_X_16_OFDM : M_TX_IDLE_BUSY_RATIO_X_16_CCK;

	/* duty cycle is a percentage; reject out-of-range requests */
	if (duty_cycle > 100 || duty_cycle < 0) {
		wiphy_err(wlc->wiphy, "wl%d: duty cycle value off limit\n",
			  wlc->pub->unit);
		return -EINVAL;
	}
	if (duty_cycle)
		idle_busy_ratio_x_16 = (100 - duty_cycle) * 16 / duty_cycle;
	/* Only write to shared memory when wl is up */
	if (writeToShm)
		brcms_b_write_shm(wlc->hw, offset, (u16) idle_busy_ratio_x_16);

	if (isOFDM)
		wlc->tx_duty_cycle_ofdm = (u16) duty_cycle;
	else
		wlc->tx_duty_cycle_cck = (u16) duty_cycle;

	return 0;
}

/*
 * Initialize the base precedence map for dequeueing
 * from txq based on WME settings
 */
static void brcms_c_tx_prec_map_init(struct brcms_c_info *wlc)
{
	wlc->tx_prec_map = BRCMS_PREC_BMP_ALL;
	memset(wlc->fifo2prec_map, 0, NFIFO * sizeof(u16));

	wlc->fifo2prec_map[TX_AC_BK_FIFO] = BRCMS_PREC_BMP_AC_BK;
	wlc->fifo2prec_map[TX_AC_BE_FIFO] = BRCMS_PREC_BMP_AC_BE;
	wlc->fifo2prec_map[TX_AC_VI_FIFO] = BRCMS_PREC_BMP_AC_VI;
	wlc->fifo2prec_map[TX_AC_VO_FIFO] = BRCMS_PREC_BMP_AC_VO;
}

/* Intentional stub: transmit flow control is not yet implemented. */
static void
brcms_c_txflowcontrol_signal(struct brcms_c_info *wlc,
			     struct brcms_txq_info *qi, bool on, int prio)
{
	/* transmit flowcontrol is not yet implemented */
}

/* Release every stopped tx queue and clear its stopped flags. */
static void brcms_c_txflowcontrol_reset(struct brcms_c_info *wlc)
{
	struct brcms_txq_info *qi;

	for (qi = wlc->tx_queues; qi != NULL; qi = qi->next) {
		if (qi->stopped) {
			brcms_c_txflowcontrol_signal(wlc, qi, OFF, ALLPRIO);
			qi->stopped = 0;
		}
	}
}

/* push sw hps and wake state through hardware */
static void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc)
{
	u32 v1, v2;
	bool hps;
	bool awake_before;

	hps = brcms_c_ps_allowed(wlc);

	BCMMSG(wlc->wiphy, "wl%d: hps %d\n", wlc->pub->unit, hps);

	v1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(maccontrol));
	v2 = MCTL_WAKE;
	if (hps)
		v2 |= MCTL_HPS;

	brcms_b_mctrl(wlc->hw, MCTL_WAKE | MCTL_HPS, v2);

	/* awake iff WAKE was set or hardware power save was off */
	awake_before = ((v1 & MCTL_WAKE) || ((v1 & MCTL_HPS) == 0));

	if (!awake_before)
		brcms_b_wait_for_wake(wlc->hw);
}

/*
 * Write this BSS config's MAC address to core.
 * Updates RXE match engine.
 */
static int brcms_c_set_mac(struct brcms_bss_cfg *bsscfg)
{
	int err = 0;
	struct brcms_c_info *wlc = bsscfg->wlc;

	/* enter the MAC addr into the RXE match registers */
	brcms_c_set_addrmatch(wlc, RCM_MAC_OFFSET, bsscfg->cur_etheraddr);

	brcms_c_ampdu_macaddr_upd(wlc);

	return err;
}

/* Write the BSS config's BSSID address to core (set_bssid in d11procs.tcl).
 * Updates RXE match engine.
 */
static void brcms_c_set_bssid(struct brcms_bss_cfg *bsscfg)
{
	/* we need to update BSSID in RXE match registers */
	brcms_c_set_addrmatch(bsscfg->wlc, RCM_BSSID_OFFSET, bsscfg->BSSID);
}

/* Latch the new slot setting; on an active 2.4 GHz core, suspend the MAC
 * around the slot-timing update.
 */
static void brcms_b_set_shortslot(struct brcms_hardware *wlc_hw, bool shortslot)
{
	wlc_hw->shortslot = shortslot;

	if (wlc_hw->band->bandtype == BRCM_BAND_2G && wlc_hw->up) {
		brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
		brcms_b_update_slot_timing(wlc_hw, shortslot);
		brcms_c_enable_mac(wlc_hw->wlc);
	}
}

/*
 * Suspend the MAC and update the slot timing
 * for standard 11b/g (20us slots) or shortslot 11g (9us slots).
 */
static void brcms_c_switch_shortslot(struct brcms_c_info *wlc, bool shortslot)
{
	/* use the override if it is set */
	if (wlc->shortslot_override != BRCMS_SHORTSLOT_AUTO)
		shortslot = (wlc->shortslot_override == BRCMS_SHORTSLOT_ON);

	if (wlc->shortslot == shortslot)
		return;

	wlc->shortslot = shortslot;

	brcms_b_set_shortslot(wlc->hw, shortslot);
}

/* Track the home chanspec; propagate it to the current BSS when associated. */
static void brcms_c_set_home_chanspec(struct brcms_c_info *wlc, u16 chanspec)
{
	if (wlc->home_chanspec != chanspec) {
		wlc->home_chanspec = chanspec;

		if (wlc->bsscfg->associated)
			wlc->bsscfg->current_bss->chanspec = chanspec;
	}
}

void
brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
		     bool mute_tx, struct txpwr_limits *txpwr)
{
	uint bandunit;

	BCMMSG(wlc_hw->wlc->wiphy, "wl%d: 0x%x\n", wlc_hw->unit, chanspec);

	wlc_hw->chanspec = chanspec;

	/* Switch bands if necessary */
	if (wlc_hw->_nbands > 1) {
		bandunit = chspec_bandunit(chanspec);
		if (wlc_hw->band->bandunit != bandunit) {
			/* brcms_b_setband disables other bandunit,
			 * use light band
switch if not up yet */ if (wlc_hw->up) { wlc_phy_chanspec_radio_set(wlc_hw-> bandstate[bandunit]-> pi, chanspec); brcms_b_setband(wlc_hw, bandunit, chanspec); } else { brcms_c_setxband(wlc_hw, bandunit); } } } wlc_phy_initcal_enable(wlc_hw->band->pi, !mute_tx); if (!wlc_hw->up) { if (wlc_hw->clk) wlc_phy_txpower_limit_set(wlc_hw->band->pi, txpwr, chanspec); wlc_phy_chanspec_radio_set(wlc_hw->band->pi, chanspec); } else { wlc_phy_chanspec_set(wlc_hw->band->pi, chanspec); wlc_phy_txpower_limit_set(wlc_hw->band->pi, txpwr, chanspec); /* Update muting of the channel */ brcms_b_mute(wlc_hw, mute_tx); } } /* switch to and initialize new band */ static void brcms_c_setband(struct brcms_c_info *wlc, uint bandunit) { wlc->band = wlc->bandstate[bandunit]; if (!wlc->pub->up) return; /* wait for at least one beacon before entering sleeping state */ brcms_c_set_ps_ctrl(wlc); /* band-specific initializations */ brcms_c_bsinit(wlc); } static void brcms_c_set_chanspec(struct brcms_c_info *wlc, u16 chanspec) { uint bandunit; bool switchband = false; u16 old_chanspec = wlc->chanspec; if (!brcms_c_valid_chanspec_db(wlc->cmi, chanspec)) { wiphy_err(wlc->wiphy, "wl%d: %s: Bad channel %d\n", wlc->pub->unit, __func__, CHSPEC_CHANNEL(chanspec)); return; } /* Switch bands if necessary */ if (wlc->pub->_nbands > 1) { bandunit = chspec_bandunit(chanspec); if (wlc->band->bandunit != bandunit || wlc->bandinit_pending) { switchband = true; if (wlc->bandlocked) { wiphy_err(wlc->wiphy, "wl%d: %s: chspec %d " "band is locked!\n", wlc->pub->unit, __func__, CHSPEC_CHANNEL(chanspec)); return; } /* * should the setband call come after the * brcms_b_chanspec() ? if the setband updates * (brcms_c_bsinit) use low level calls to inspect and * set state, the state inspected may be from the wrong * band, or the following brcms_b_set_chanspec() may * undo the work. 
			 */
			brcms_c_setband(wlc, bandunit);
		}
	}

	/* NOTE(review): switchband is set above but never read afterwards;
	 * looks like dead state — confirm before removing.
	 */

	/* sync up phy/radio chanspec */
	brcms_c_set_phy_chanspec(wlc, chanspec);

	/* init antenna selection */
	if (brcms_chspec_bw(old_chanspec) != brcms_chspec_bw(chanspec)) {
		brcms_c_antsel_init(wlc->asi);

		/* Fix the hardware rateset based on bw.
		 * Mainly add MCS32 for 40Mhz, remove MCS 32 for 20Mhz
		 */
		brcms_c_rateset_bw_mcs_filter(&wlc->band->hw_rateset,
			wlc->band->mimo_cap_40 ?
			brcms_chspec_bw(chanspec) : 0);
	}

	/* update some mac configuration since chanspec changed */
	brcms_c_ucode_mac_upd(wlc);
}

/*
 * This function changes the phytxctl for beacon based on current
 * beacon ratespec AND txant setting as per this table:
 *  ratespec CCK  -> ant = wlc->stf->txant
 *  ratespec OFDM -> ant = 3
 */
void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc,
				       u32 bcn_rspec)
{
	u16 phyctl;
	u16 phytxant = wlc->stf->phytxant;
	u16 mask = PHY_TXC_ANT_MASK;

	/* for non-siso rates or default setting, use the available chains */
	if (BRCMS_PHY_11N_CAP(wlc->band))
		phytxant = brcms_c_stf_phytxchain_sel(wlc, bcn_rspec);

	/* read-modify-write the beacon phy control word in shared memory */
	phyctl = brcms_b_read_shm(wlc->hw, M_BCN_PCTLWD);
	phyctl = (phyctl & ~mask) | phytxant;
	brcms_b_write_shm(wlc->hw, M_BCN_PCTLWD, phyctl);
}

/*
 * centralized protection config change function to simplify debugging, no
 * consistency checking this should be called only on changes to avoid overhead
 * in periodic function
 */
void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx, int val)
{
	BCMMSG(wlc->wiphy, "idx %d, val %d\n", idx, val);

	switch (idx) {
	case BRCMS_PROT_G_SPEC:
		wlc->protection->_g = (bool) val;
		break;
	case BRCMS_PROT_G_OVR:
		wlc->protection->g_override = (s8) val;
		break;
	case BRCMS_PROT_G_USER:
		wlc->protection->gmode_user = (u8) val;
		break;
	case BRCMS_PROT_OVERLAP:
		wlc->protection->overlap = (s8) val;
		break;
	case BRCMS_PROT_N_USER:
		wlc->protection->nmode_user = (s8) val;
		break;
	case BRCMS_PROT_N_CFG:
		wlc->protection->n_cfg = (s8) val;
		break;
	case BRCMS_PROT_N_CFG_OVR:
		wlc->protection->n_cfg_override = (s8) val;
		break;
	case BRCMS_PROT_N_NONGF:
		wlc->protection->nongf = (bool) val;
		break;
	case BRCMS_PROT_N_NONGF_OVR:
		wlc->protection->nongf_override = (s8) val;
		break;
	case BRCMS_PROT_N_PAM_OVR:
		wlc->protection->n_pam_override = (s8) val;
		break;
	case BRCMS_PROT_N_OBSS:
		wlc->protection->n_obss = (bool) val;
		break;

	default:
		break;
	}
}

/* Refresh beacon/probe response templates after an SGI-RX change.
 * NOTE(review): the val parameter is accepted but never used here —
 * confirm whether per-value handling was intended.
 */
static void brcms_c_ht_update_sgi_rx(struct brcms_c_info *wlc, int val)
{
	if (wlc->pub->up) {
		brcms_c_update_beacon(wlc);
		brcms_c_update_probe_resp(wlc, true);
	}
}

/* Update the LDPC setting; when up, refresh templates and push the
 * override down to the phy.
 */
static void brcms_c_ht_update_ldpc(struct brcms_c_info *wlc, s8 val)
{
	wlc->stf->ldpc = val;

	if (wlc->pub->up) {
		brcms_c_update_beacon(wlc);
		brcms_c_update_probe_resp(wlc, true);
		wlc_phy_ldpc_override_set(wlc->band->pi, (val ? true : false));
	}
}

/* Program the EDCF (WME) parameters for one access category into the
 * ucode's shared-memory AC parameter table. Requires the core clock.
 */
void
brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
		      const struct ieee80211_tx_queue_params *params,
		      bool suspend)
{
	int i;
	struct shm_acparams acp_shm;
	u16 *shm_entry;

	/* Only apply params if the core is out of reset and has clocks */
	if (!wlc->clk) {
		wiphy_err(wlc->wiphy, "wl%d: %s : no-clock\n", wlc->pub->unit,
			  __func__);
		return;
	}

	memset((char *)&acp_shm, 0, sizeof(struct shm_acparams));
	/* fill in shm ac params struct */
	acp_shm.txop = params->txop;
	/* convert from units of 32us to us for ucode */
	wlc->edcf_txop[aci & 0x3] = acp_shm.txop =
	    EDCF_TXOP2USEC(acp_shm.txop);
	acp_shm.aifs = (params->aifs & EDCF_AIFSN_MASK);

	if (aci == IEEE80211_AC_VI && acp_shm.txop == 0
	    && acp_shm.aifs < EDCF_AIFSN_MAX)
		acp_shm.aifs++;

	if (acp_shm.aifs < EDCF_AIFSN_MIN || acp_shm.aifs > EDCF_AIFSN_MAX) {
		wiphy_err(wlc->wiphy, "wl%d: edcf_setparams: bad "
			  "aifs %d\n", wlc->pub->unit, acp_shm.aifs);
	} else {
		acp_shm.cwmin = params->cw_min;
		acp_shm.cwmax = params->cw_max;
		acp_shm.cwcur = acp_shm.cwmin;
		/* random backoff slots seeded from the TSF random register */
		acp_shm.bslots =
			bcma_read16(wlc->hw->d11core, D11REGOFFS(tsf_random)) &
			acp_shm.cwcur;
		acp_shm.reggap = acp_shm.bslots + acp_shm.aifs;
		/* Indicate the new params to the ucode */
		acp_shm.status = brcms_b_read_shm(wlc->hw,
						  (M_EDCF_QINFO +
						   wme_ac2fifo[aci] *
						   M_EDCF_QLEN +
						   M_EDCF_STATUS_OFF));
		acp_shm.status |= WME_STATUS_NEWAC;

		/* Fill in shm acparam table */
		shm_entry = (u16 *) &acp_shm;
		for (i = 0; i < (int)sizeof(struct shm_acparams); i += 2)
			brcms_b_write_shm(wlc->hw,
					  M_EDCF_QINFO +
					  wme_ac2fifo[aci] * M_EDCF_QLEN + i,
					  *shm_entry++);
	}

	if (suspend) {
		brcms_c_suspend_mac_and_wait(wlc);
		brcms_c_enable_mac(wlc);
	}
}

/* Load the default EDCF parameters for all access categories into the
 * ucode via brcms_c_wme_setparams().
 */
static void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
{
	u16 aci;
	int i_ac;
	struct ieee80211_tx_queue_params txq_pars;
	static const struct edcf_acparam default_edcf_acparams[] = {
		{EDCF_AC_BE_ACI_STA, EDCF_AC_BE_ECW_STA, EDCF_AC_BE_TXOP_STA},
		{EDCF_AC_BK_ACI_STA, EDCF_AC_BK_ECW_STA, EDCF_AC_BK_TXOP_STA},
		{EDCF_AC_VI_ACI_STA, EDCF_AC_VI_ECW_STA, EDCF_AC_VI_TXOP_STA},
		{EDCF_AC_VO_ACI_STA, EDCF_AC_VO_ECW_STA, EDCF_AC_VO_TXOP_STA}
	}; /* ucode needs these parameters during its initialization */
	const struct edcf_acparam *edcf_acp = &default_edcf_acparams[0];

	for (i_ac = 0; i_ac < IEEE80211_NUM_ACS; i_ac++, edcf_acp++) {
		/* find out which ac this set of params applies to */
		aci = (edcf_acp->ACI & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT;

		/* fill in shm ac params struct */
		txq_pars.txop = edcf_acp->TXOP;
		txq_pars.aifs = edcf_acp->ACI;

		/* CWmin = 2^(ECWmin) - 1 */
		txq_pars.cw_min = EDCF_ECW2CW(edcf_acp->ECW & EDCF_ECWMIN_MASK);
		/* CWmax = 2^(ECWmax) - 1 */
		txq_pars.cw_max = EDCF_ECW2CW((edcf_acp->ECW & EDCF_ECWMAX_MASK)
					      >> EDCF_ECWMAX_SHIFT);
		brcms_c_wme_setparams(wlc, aci, &txq_pars, suspend);
	}

	if (suspend) {
		brcms_c_suspend_mac_and_wait(wlc);
		brcms_c_enable_mac(wlc);
	}
}

/* Arm the periodic radio-button polling timer (idempotent). */
static void brcms_c_radio_monitor_start(struct brcms_c_info *wlc)
{
	/* Don't start the timer if HWRADIO feature is disabled */
	if (wlc->radio_monitor)
		return;

	wlc->radio_monitor = true;
	brcms_b_pllreq(wlc->hw, true, BRCMS_PLLREQ_RADIO_MON);
	brcms_add_timer(wlc->radio_timer, TIMER_INTERVAL_RADIOCHK, true);
}

/* Disarm the radio monitor timer; returns the brcms_del_timer() result
 * (true when the timer was successfully cancelled or was not running).
 */
static bool brcms_c_radio_monitor_stop(struct brcms_c_info *wlc)
{
	if (!wlc->radio_monitor)
		return true;

	wlc->radio_monitor = false;
	brcms_b_pllreq(wlc->hw, false, BRCMS_PLLREQ_RADIO_MON);
	return brcms_del_timer(wlc->radio_timer);
}

/* read hwdisable state and propagate to wlc flag */
static void brcms_c_radio_hwdisable_upd(struct brcms_c_info *wlc)
{
	if (wlc->pub->hw_off)
		return;

	if (brcms_b_radio_read_hwdisabled(wlc->hw))
		mboolset(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE);
	else
		mboolclr(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE);
}

/* update hwradio status and return it */
bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc)
{
	brcms_c_radio_hwdisable_upd(wlc);

	return mboolisset(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE) ?
			true : false;
}

/* periodical query hw radio button while driver is "down" */
static void brcms_c_radio_timer(void *arg)
{
	struct brcms_c_info *wlc = (struct brcms_c_info *) arg;

	if (brcms_deviceremoved(wlc)) {
		wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", wlc->pub->unit,
			  __func__);
		brcms_down(wlc->wl);
		return;
	}

	brcms_c_radio_hwdisable_upd(wlc);
}

/* common low-level watchdog code */
static void brcms_b_watchdog(void *arg)
{
	struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
	struct brcms_hardware *wlc_hw = wlc->hw;

	BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);

	if (!wlc_hw->up)
		return;

	/* increment second count */
	wlc_hw->now++;

	/* Check for FIFO error interrupts */
	brcms_b_fifoerrors(wlc_hw);

	/* make sure RX dma has buffers */
	dma_rxfill(wlc->hw->di[RX_FIFO]);

	wlc_phy_watchdog(wlc_hw->band->pi);
}

/* common watchdog code */
static void brcms_c_watchdog(void *arg)
{
	struct brcms_c_info *wlc = (struct brcms_c_info *) arg;

	BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);

	if (!wlc->pub->up)
		return;

	if (brcms_deviceremoved(wlc)) {
		wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", wlc->pub->unit,
			  __func__);
		brcms_down(wlc->wl);
		return;
	}

	/* increment second count */
	wlc->pub->now++;

	brcms_c_radio_hwdisable_upd(wlc);
	/* if radio is disable, driver may be down, quit here */
	if (wlc->pub->radio_disabled)
		return;

	brcms_b_watchdog(wlc);

	/*
	 * occasionally sample mac
	 * stat counters to
	 * detect 16-bit counter wrap
	 */
	if ((wlc->pub->now % SW_TIMER_MAC_STAT_UPD) == 0)
		brcms_c_statsupd(wlc);

	/* periodic temperature-based tx power compensation (NPHY only) */
	if (BRCMS_ISNPHY(wlc->band) &&
	    ((wlc->pub->now - wlc->tempsense_lasttime) >=
	     BRCMS_TEMPSENSE_PERIOD)) {
		wlc->tempsense_lasttime = wlc->pub->now;
		brcms_c_tempsense_upd(wlc);
	}
}

/* Timer-callback shim around brcms_c_watchdog(). */
static void brcms_c_watchdog_by_timer(void *arg)
{
	brcms_c_watchdog(arg);
}

/* Allocate the watchdog and radio timers; returns false when either
 * allocation fails (no cleanup of a partially created timer here —
 * caller is expected to tear down on failure).
 */
static bool brcms_c_timers_init(struct brcms_c_info *wlc, int unit)
{
	wlc->wdtimer = brcms_init_timer(wlc->wl, brcms_c_watchdog_by_timer,
					wlc, "watchdog");
	if (!wlc->wdtimer) {
		wiphy_err(wlc->wiphy, "wl%d:  wl_init_timer for wdtimer "
			  "failed\n", unit);
		goto fail;
	}

	wlc->radio_timer = brcms_init_timer(wlc->wl, brcms_c_radio_timer,
					    wlc, "radio");
	if (!wlc->radio_timer) {
		wiphy_err(wlc->wiphy, "wl%d:  wl_init_timer for radio_timer "
			  "failed\n", unit);
		goto fail;
	}

	return true;

 fail:
	return false;
}

/*
 * Initialize brcms_c_info default values ...
 * may get overrides later in this function
 */
static void brcms_c_info_init(struct brcms_c_info *wlc, int unit)
{
	int i;

	/* Save our copy of the chanspec */
	wlc->chanspec = ch20mhz_chspec(1);

	/* various 802.11g modes */
	wlc->shortslot = false;
	wlc->shortslot_override = BRCMS_SHORTSLOT_AUTO;

	brcms_c_protection_upd(wlc, BRCMS_PROT_G_OVR, BRCMS_PROTECTION_AUTO);
	brcms_c_protection_upd(wlc, BRCMS_PROT_G_SPEC, false);

	brcms_c_protection_upd(wlc, BRCMS_PROT_N_CFG_OVR,
			       BRCMS_PROTECTION_AUTO);
	brcms_c_protection_upd(wlc, BRCMS_PROT_N_CFG, BRCMS_N_PROTECTION_OFF);
	brcms_c_protection_upd(wlc, BRCMS_PROT_N_NONGF_OVR,
			       BRCMS_PROTECTION_AUTO);
	brcms_c_protection_upd(wlc, BRCMS_PROT_N_NONGF, false);
	brcms_c_protection_upd(wlc, BRCMS_PROT_N_PAM_OVR, AUTO);

	brcms_c_protection_upd(wlc, BRCMS_PROT_OVERLAP,
			       BRCMS_PROTECTION_CTL_OVERLAP);

	/* 802.11g draft 4.0 NonERP elt advertisement */
	wlc->include_legacy_erp = true;

	wlc->stf->ant_rx_ovr = ANT_RX_DIV_DEF;
	wlc->stf->txant = ANT_TX_DEF;

	wlc->prb_resp_timeout = BRCMS_PRB_RESP_TIMEOUT;

	/* fragmentation and RTS thresholds */
	wlc->usr_fragthresh = DOT11_DEFAULT_FRAG_LEN;
	for (i = 0; i < NFIFO; i++)
		wlc->fragthresh[i] = DOT11_DEFAULT_FRAG_LEN;
	wlc->RTSThresh = DOT11_DEFAULT_RTS_LEN;

	/* default rate fallback retry limits */
	wlc->SFBL = RETRY_SHORT_FB;
	wlc->LFBL = RETRY_LONG_FB;

	/* default mac retry limits */
	wlc->SRL = RETRY_SHORT_DEF;
	wlc->LRL = RETRY_LONG_DEF;

	/* WME QoS mode is Auto by default */
	wlc->pub->_ampdu = AMPDU_AGG_HOST;
	wlc->pub->bcmerror = 0;
}

/* Attach the antsel, ampdu and stf sub-modules; returns 0 on success or a
 * module-specific nonzero error code on the first failure.
 */
static uint brcms_c_attach_module(struct brcms_c_info *wlc)
{
	uint err = 0;
	uint unit;
	unit = wlc->pub->unit;

	wlc->asi = brcms_c_antsel_attach(wlc);
	if (wlc->asi == NULL) {
		wiphy_err(wlc->wiphy, "wl%d: attach: antsel_attach "
			  "failed\n", unit);
		err = 44;
		goto fail;
	}

	wlc->ampdu = brcms_c_ampdu_attach(wlc);
	if (wlc->ampdu == NULL) {
		wiphy_err(wlc->wiphy, "wl%d: attach: ampdu_attach "
			  "failed\n", unit);
		err = 50;
		goto fail;
	}

	if ((brcms_c_stf_attach(wlc) != 0)) {
		wiphy_err(wlc->wiphy, "wl%d: attach: stf_attach "
			  "failed\n", unit);
		err = 68;
		goto fail;
	}
 fail:
	return err;
}

/* Accessor for the public state of a wlc instance. */
struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc)
{
	return wlc->pub;
}

/* low level attach
 *    run backplane attach, init nvram
 *    run phy attach
 *    initialize software state for each core and band
 *    put the whole chip in reset(driver down state), no clock
 */
static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
			  uint unit, bool piomode)
{
	struct brcms_hardware *wlc_hw;
	char *macaddr = NULL;
	uint err = 0;
	uint j;
	bool wme = false;
	struct shared_phy_params sha_params;
	struct wiphy *wiphy = wlc->wiphy;
	struct pci_dev *pcidev = core->bus->host_pci;

	BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit,
	       pcidev->vendor, pcidev->device);

	wme = true;

	wlc_hw = wlc->hw;
	wlc_hw->wlc = wlc;
	wlc_hw->unit = unit;
	wlc_hw->band = wlc_hw->bandstate[0];
	wlc_hw->_piomode = piomode;

	/* populate struct brcms_hardware with default values  */
	brcms_b_info_init(wlc_hw);

	/*
	 * Do the hardware portion of the attach. Also initialize software
	 * state that depends on the particular hardware we are running.
	 */
	wlc_hw->sih = ai_attach(core->bus);
	if (wlc_hw->sih == NULL) {
		wiphy_err(wiphy, "wl%d: brcms_b_attach: si_attach failed\n",
			  unit);
		err = 11;
		goto fail;
	}

	/* verify again the device is supported */
	if (!brcms_c_chipmatch(pcidev->vendor, pcidev->device)) {
		wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported "
			  "vendor/device (0x%x/0x%x)\n",
			  unit, pcidev->vendor, pcidev->device);
		err = 12;
		goto fail;
	}

	wlc_hw->vendorid = pcidev->vendor;
	wlc_hw->deviceid = pcidev->device;

	wlc_hw->d11core = core;
	wlc_hw->corerev = core->id.rev;

	/* validate chip, chiprev and corerev */
	if (!brcms_c_isgoodchip(wlc_hw)) {
		err = 13;
		goto fail;
	}

	/* initialize power control registers */
	ai_clkctl_init(wlc_hw->sih);

	/* request fastclock and force fastclock for the rest of attach
	 * bring the d11 core out of reset.
	 *   For PMU chips, the first wlc_clkctl_clk is no-op since core-clk
	 *   is still false; But it will be called again inside wlc_corereset,
	 *   after d11 is out of reset.
	 */
	brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
	brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);

	if (!brcms_b_validate_chip_access(wlc_hw)) {
		wiphy_err(wiphy, "wl%d: brcms_b_attach: validate_chip_access "
			  "failed\n", unit);
		err = 14;
		goto fail;
	}

	/* get the board rev, used just below */
	j = getintvar(wlc_hw->sih, BRCMS_SROM_BOARDREV);
	/* promote srom boardrev of 0xFF to 1 */
	if (j == BOARDREV_PROMOTABLE)
		j = BOARDREV_PROMOTED;
	wlc_hw->boardrev = (u16) j;
	if (!brcms_c_validboardtype(wlc_hw)) {
		wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported Broadcom "
			  "board type (0x%x)" " or revision level (0x%x)\n",
			  unit, ai_get_boardtype(wlc_hw->sih),
			  wlc_hw->boardrev);
		err = 15;
		goto fail;
	}
	wlc_hw->sromrev = (u8) getintvar(wlc_hw->sih, BRCMS_SROM_REV);
	wlc_hw->boardflags = (u32) getintvar(wlc_hw->sih,
					     BRCMS_SROM_BOARDFLAGS);
	wlc_hw->boardflags2 = (u32) getintvar(wlc_hw->sih,
					      BRCMS_SROM_BOARDFLAGS2);

	if (wlc_hw->boardflags & BFL_NOPLLDOWN)
		brcms_b_pllreq(wlc_hw, true, BRCMS_PLLREQ_SHARED);

	/* check device id(srom, nvram etc.) to set bands */
	if (wlc_hw->deviceid == BCM43224_D11N_ID ||
	    wlc_hw->deviceid == BCM43224_D11N_ID_VEN1)
		/* Dualband boards */
		wlc_hw->_nbands = 2;
	else
		wlc_hw->_nbands = 1;

	if ((ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID))
		wlc_hw->_nbands = 1;

	/* BMAC_NOTE: remove init of pub values when brcms_c_attach()
	 * unconditionally does the init of these values
	 */
	wlc->vendorid = wlc_hw->vendorid;
	wlc->deviceid = wlc_hw->deviceid;
	wlc->pub->sih = wlc_hw->sih;
	wlc->pub->corerev = wlc_hw->corerev;
	wlc->pub->sromrev = wlc_hw->sromrev;
	wlc->pub->boardrev = wlc_hw->boardrev;
	wlc->pub->boardflags = wlc_hw->boardflags;
	wlc->pub->boardflags2 = wlc_hw->boardflags2;
	wlc->pub->_nbands = wlc_hw->_nbands;

	wlc_hw->physhim = wlc_phy_shim_attach(wlc_hw, wlc->wl, wlc);
	if (wlc_hw->physhim == NULL) {
		wiphy_err(wiphy, "wl%d: brcms_b_attach: wlc_phy_shim_attach "
			  "failed\n", unit);
		err = 25;
		goto fail;
	}

	/* pass all the parameters to wlc_phy_shared_attach in one struct */
	sha_params.sih = wlc_hw->sih;
	sha_params.physhim = wlc_hw->physhim;
	sha_params.unit = unit;
	sha_params.corerev = wlc_hw->corerev;
	sha_params.vid = wlc_hw->vendorid;
	sha_params.did = wlc_hw->deviceid;
	sha_params.chip = ai_get_chip_id(wlc_hw->sih);
	sha_params.chiprev = ai_get_chiprev(wlc_hw->sih);
	sha_params.chippkg = ai_get_chippkg(wlc_hw->sih);
	sha_params.sromrev = wlc_hw->sromrev;
	sha_params.boardtype = ai_get_boardtype(wlc_hw->sih);
	sha_params.boardrev = wlc_hw->boardrev;
	sha_params.boardflags = wlc_hw->boardflags;
	sha_params.boardflags2 = wlc_hw->boardflags2;

	/* alloc and save pointer to shared phy state area */
	wlc_hw->phy_sh = wlc_phy_shared_attach(&sha_params);
	if (!wlc_hw->phy_sh) {
		err = 16;
		goto fail;
	}

	/* initialize software state for each core and band */
	for (j = 0; j < wlc_hw->_nbands; j++) {
		/*
		 * band0 is always 2.4Ghz
		 * band1, if present, is 5Ghz
		 */

		brcms_c_setxband(wlc_hw, j);

		wlc_hw->band->bandunit = j;
		wlc_hw->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
		wlc->band->bandunit = j;
		wlc->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
		wlc->core->coreidx = core->core_index;

		wlc_hw->machwcap = bcma_read32(core, D11REGOFFS(machwcap));
		wlc_hw->machwcap_backup = wlc_hw->machwcap;

		/* init tx fifo size */
		wlc_hw->xmtfifo_sz =
		    xmtfifo_sz[(wlc_hw->corerev - XMTFIFOTBL_STARTREV)];

		/* Get a phy for this band */
		wlc_hw->band->pi =
			wlc_phy_attach(wlc_hw->phy_sh, core,
				       wlc_hw->band->bandtype,
				       wlc->wiphy);
		if (wlc_hw->band->pi == NULL) {
			wiphy_err(wiphy, "wl%d: brcms_b_attach: wlc_phy_"
				  "attach failed\n", unit);
			err = 17;
			goto fail;
		}

		wlc_phy_machwcap_set(wlc_hw->band->pi, wlc_hw->machwcap);

		wlc_phy_get_phyversion(wlc_hw->band->pi,
				       &wlc_hw->band->phytype,
				       &wlc_hw->band->phyrev,
				       &wlc_hw->band->radioid,
				       &wlc_hw->band->radiorev);
		wlc_hw->band->abgphy_encore =
		    wlc_phy_get_encore(wlc_hw->band->pi);
		wlc->band->abgphy_encore = wlc_phy_get_encore(wlc_hw->band->pi);
		wlc_hw->band->core_flags =
		    wlc_phy_get_coreflags(wlc_hw->band->pi);

		/* verify good phy_type & supported phy revision */
		if (BRCMS_ISNPHY(wlc_hw->band)) {
			if (NCONF_HAS(wlc_hw->band->phyrev))
				goto good_phy;
			else
				goto bad_phy;
		} else if (BRCMS_ISLCNPHY(wlc_hw->band)) {
			if (LCNCONF_HAS(wlc_hw->band->phyrev))
				goto good_phy;
			else
				goto bad_phy;
		} else {
 bad_phy:
			wiphy_err(wiphy, "wl%d: brcms_b_attach: unsupported "
				  "phy type/rev (%d/%d)\n", unit,
				  wlc_hw->band->phytype, wlc_hw->band->phyrev);
			err = 18;
			goto fail;
		}

 good_phy:
		/*
		 * BMAC_NOTE: wlc->band->pi should not be set below and should
		 * be done in the high level attach. However we can not make
		 * that change until all low level access is changed to
		 * wlc_hw->band->pi. Instead do the wlc->band->pi init below,
		 * keeping wlc_hw->band->pi as well for incremental update of
		 * low level fns, and cut over low only init when all fns
		 * updated.
		 */
		wlc->band->pi = wlc_hw->band->pi;
		wlc->band->phytype = wlc_hw->band->phytype;
		wlc->band->phyrev = wlc_hw->band->phyrev;
		wlc->band->radioid = wlc_hw->band->radioid;
		wlc->band->radiorev = wlc_hw->band->radiorev;

		/* default contention windows size limits */
		wlc_hw->band->CWmin = APHY_CWMIN;
		wlc_hw->band->CWmax = PHY_CWMAX;

		if (!brcms_b_attach_dmapio(wlc, j, wme)) {
			err = 19;
			goto fail;
		}
	}

	/* disable core to match driver "down" state */
	brcms_c_coredisable(wlc_hw);

	/* Match driver "down" state */
	ai_pci_down(wlc_hw->sih);

	/* turn off pll and xtal to match driver "down" state */
	brcms_b_xtal(wlc_hw, OFF);

	/* *******************************************************************
	 * The hardware is in the DOWN state at this point. D11 core
	 * or cores are in reset with clocks off, and the board PLLs
	 * are off if possible.
	 *
	 * Beyond this point, wlc->sbclk == false and chip registers
	 * should not be touched.
	 *********************************************************************
	 */

	/* init etheraddr state variables */
	macaddr = brcms_c_get_macaddr(wlc_hw);
	if (macaddr == NULL) {
		wiphy_err(wiphy, "wl%d: brcms_b_attach: macaddr not found\n",
			  unit);
		err = 21;
		goto fail;
	}
	if (!mac_pton(macaddr, wlc_hw->etheraddr) ||
	    is_broadcast_ether_addr(wlc_hw->etheraddr) ||
	    is_zero_ether_addr(wlc_hw->etheraddr)) {
		wiphy_err(wiphy, "wl%d: brcms_b_attach: bad macaddr %s\n",
			  unit, macaddr);
		err = 22;
		goto fail;
	}

	BCMMSG(wlc->wiphy, "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
	       wlc_hw->deviceid, wlc_hw->_nbands, ai_get_boardtype(wlc_hw->sih),
	       macaddr);

	return err;

 fail:
	wiphy_err(wiphy, "wl%d: brcms_b_attach: failed with err %d\n", unit,
		  err);
	return err;
}

/* Decode the SROM antenna-gain value for the current band into quarter-dBm
 * units, falling back to 2 dBm (8 qdbm) when the SROM has no valid value.
 */
static void brcms_c_attach_antgain_init(struct brcms_c_info *wlc)
{
	uint unit;
	unit = wlc->pub->unit;

	if ((wlc->band->antgain == -1) && (wlc->pub->sromrev == 1)) {
		/* default antenna gain for srom rev 1 is 2 dBm (8 qdbm) */
		wlc->band->antgain = 8;
	} else if (wlc->band->antgain == -1) {
		wiphy_err(wlc->wiphy, "wl%d: %s: Invalid antennas available in"
			  " srom, using 2dB\n", unit, __func__);
		wlc->band->antgain = 8;
	} else {
		s8 gain, fract;
		/* Older sroms specified gain in whole dbm only.  In order
		 * be able to specify qdbm granularity and remain backward
		 * compatible the whole dbms are now encoded in only
		 * low 6 bits and remaining qdbms are encoded in the hi 2 bits.
		 * 6 bit signed number ranges from -32 - 31.
		 *
		 * Examples:
		 * 0x1 = 1 db,
		 * 0xc1 = 1.75 db (1 + 3 quarters),
		 * 0x3f = -1 (-1 + 0 quarters),
		 * 0x7f = -.75 (-1 + 1 quarters) = -3 qdbm.
		 * 0xbf = -.50 (-1 + 2 quarters) = -2 qdbm.
		 */
		gain = wlc->band->antgain & 0x3f;
		gain <<= 2;	/* Sign extend */
		gain >>= 2;
		fract = (wlc->band->antgain & 0xc0) >> 6;
		wlc->band->antgain = 4 * gain + fract;
	}
}

/* Read antenna availability from the SROM for the current band, force
 * single-antenna defaults when only one antenna is present, then compute
 * the antenna gain. Always returns true.
 */
static bool brcms_c_attach_stf_ant_init(struct brcms_c_info *wlc)
{
	int aa;
	uint unit;
	int bandtype;
	struct si_pub *sih = wlc->hw->sih;

	unit = wlc->pub->unit;
	bandtype = wlc->band->bandtype;

	/* get antennas available */
	if (bandtype == BRCM_BAND_5G)
		aa = (s8) getintvar(sih, BRCMS_SROM_AA5G);
	else
		aa = (s8) getintvar(sih, BRCMS_SROM_AA2G);

	if ((aa < 1) || (aa > 15)) {
		wiphy_err(wlc->wiphy, "wl%d: %s: Invalid antennas available in"
			  " srom (0x%x), using 3\n", unit, __func__, aa);
		aa = 3;
	}

	/* reset the defaults if we have a single antenna */
	if (aa == 1) {
		wlc->stf->ant_rx_ovr = ANT_RX_DIV_FORCE_0;
		wlc->stf->txant = ANT_TX_FORCE_0;
	} else if (aa == 2) {
		wlc->stf->ant_rx_ovr = ANT_RX_DIV_FORCE_1;
		wlc->stf->txant = ANT_TX_FORCE_1;
	} else {
		/* aa >= 3: keep the defaults (intentionally empty) */
	}

	/* Compute Antenna Gain */
	if (bandtype == BRCM_BAND_5G)
		wlc->band->antgain = (s8) getintvar(sih, BRCMS_SROM_AG1);
	else
		wlc->band->antgain = (s8) getintvar(sih, BRCMS_SROM_AG0);

	brcms_c_attach_antgain_init(wlc);

	return true;
}

/* Fill wlc->default_bss with sane initial values: default beacon period,
 * first 2G channel, and the band-specific default rate set.
 */
static void brcms_c_bss_default_init(struct brcms_c_info *wlc)
{
	u16 chanspec;
	struct brcms_band *band;
	struct brcms_bss_info *bi = wlc->default_bss;

	/* init default and target BSS with some sane initial values */
	memset((char *)(bi), 0, sizeof(struct brcms_bss_info));
	bi->beacon_period = BEACON_INTERVAL_DEFAULT;

	/* fill the default channel as the first valid channel
	 * starting from the 2G channels
	 */
	chanspec = ch20mhz_chspec(1);
	wlc->home_chanspec = bi->chanspec = chanspec;

	/* find the band of our default channel */
	band = wlc->band;
	if (wlc->pub->_nbands > 1 &&
	    band->bandunit != chspec_bandunit(chanspec))
		band = wlc->bandstate[OTHERBANDUNIT(wlc)];

	/* init bss rates to the band specific default rate set */
	brcms_c_rateset_default(&bi->rateset, NULL, band->phytype,
		band->bandtype, false, BRCMS_RATE_MASK_FULL,
		(bool) (wlc->pub->_n_enab & SUPPORT_11N),
		brcms_chspec_bw(chanspec), wlc->stf->txstreams);

	if (wlc->pub->_n_enab & SUPPORT_11N)
		bi->flags |= BRCMS_BSS_HT;
}

/* Allocate a tx queue and append it to the wlc's global queue list.
 * Returns NULL on allocation failure (GFP_ATOMIC).
 */
static struct brcms_txq_info *brcms_c_txq_alloc(struct brcms_c_info *wlc)
{
	struct brcms_txq_info *qi, *p;

	qi = kzalloc(sizeof(struct brcms_txq_info), GFP_ATOMIC);
	if (qi != NULL) {
		/*
		 * Have enough room for control packets along with HI watermark
		 * Also, add room to txq for total psq packets if all the SCBs
		 * leave PS mode.
		 * The watermark for flowcontrol to OS packets
		 * will remain the same
		 */
		brcmu_pktq_init(&qi->q, BRCMS_PREC_COUNT,
				2 * BRCMS_DATAHIWAT + PKTQ_LEN_DEFAULT);

		/* add this queue to the the global list */
		p = wlc->tx_queues;
		if (p == NULL) {
			wlc->tx_queues = qi;
		} else {
			while (p->next != NULL)
				p = p->next;
			p->next = qi;
		}
	}
	return qi;
}

/* Unlink a tx queue from the wlc's global queue list and free it.
 * A NULL qi is a no-op.
 */
static void brcms_c_txq_free(struct brcms_c_info *wlc,
			     struct brcms_txq_info *qi)
{
	struct brcms_txq_info *p;

	if (qi == NULL)
		return;

	/* remove the queue from the linked list */
	p = wlc->tx_queues;
	if (p == qi)
		wlc->tx_queues = p->next;
	else {
		while (p != NULL && p->next != qi)
			p = p->next;
		if (p != NULL)
			p->next = p->next->next;
	}

	kfree(qi);
}

/* Recompute each band's 40MHz MIMO capability from the bandwidth-cap
 * setting (5G honors both 40ALL and 20IN2G_40IN5G; 2G only 40ALL).
 */
static void brcms_c_update_mimo_band_bwcap(struct brcms_c_info *wlc, u8 bwcap)
{
	uint i;
	struct brcms_band *band;

	for (i = 0; i < wlc->pub->_nbands; i++) {
		band = wlc->bandstate[i];
		if (band->bandtype == BRCM_BAND_5G) {
			if ((bwcap == BRCMS_N_BW_40ALL)
			    || (bwcap == BRCMS_N_BW_20IN2G_40IN5G))
				band->mimo_cap_40 = true;
			else
				band->mimo_cap_40 = false;
		} else {
			if (bwcap == BRCMS_N_BW_40ALL)
				band->mimo_cap_40 = true;
			else
				band->mimo_cap_40 = false;
		}
	}
}

/* Free the watchdog and radio timers (safe to call with either unset). */
static void brcms_c_timers_deinit(struct brcms_c_info *wlc)
{
	/* free timer state */
	if (wlc->wdtimer) {
		brcms_free_timer(wlc->wdtimer);
		wlc->wdtimer = NULL;
	}
	if (wlc->radio_timer) {
		brcms_free_timer(wlc->radio_timer);
		wlc->radio_timer = NULL;
	}
}

/* Detach the antsel, ampdu and stf sub-modules (reverse of
 * brcms_c_attach_module()).
 */
static void brcms_c_detach_module(struct brcms_c_info *wlc)
{
	if (wlc->asi) {
		brcms_c_antsel_detach(wlc->asi);
		wlc->asi = NULL;
	}

	if (wlc->ampdu) {
		brcms_c_ampdu_detach(wlc->ampdu);
		wlc->ampdu = NULL;
	}

	brcms_c_stf_detach(wlc);
}

/*
 * low level detach
 */
static int brcms_b_detach(struct brcms_c_info *wlc)
{
	uint i;
	struct brcms_hw_band *band;
	struct brcms_hardware *wlc_hw = wlc->hw;
	int callbacks;

	callbacks = 0;

	if (wlc_hw->sih) {
		/*
		 * detach interrupt sync mechanism since interrupt is disabled
		 * and per-port interrupt object may has been freed. this must
		 * be done before sb core switch
		 */
		ai_pci_sleep(wlc_hw->sih);
	}

	brcms_b_detach_dmapio(wlc_hw);

	/* detach each band's phy; the OTHERBANDUNIT step only alternates
	 * between two bands, consistent with _nbands <= 2 elsewhere
	 */
	band = wlc_hw->band;
	for (i = 0; i < wlc_hw->_nbands; i++) {
		if (band->pi) {
			/* Detach this band's phy */
			wlc_phy_detach(band->pi);
			band->pi = NULL;
		}
		band = wlc_hw->bandstate[OTHERBANDUNIT(wlc)];
	}

	/* Free shared phy state */
	kfree(wlc_hw->phy_sh);

	wlc_phy_shim_detach(wlc_hw->physhim);

	if (wlc_hw->sih) {
		ai_detach(wlc_hw->sih);
		wlc_hw->sih = NULL;
	}

	return callbacks;

}

/*
 * Return a count of the number of driver callbacks still pending.
 *
 * General policy is that brcms_c_detach can only dealloc/free software states.
 * It can NOT touch hardware registers since the d11core may be in reset and
 * clock may not be available.
 * One exception is sb register access, which is possible if crystal is turned
 * on after "down" state, driver should avoid software timer with the exception
 * of radio_monitor.
 */
uint brcms_c_detach(struct brcms_c_info *wlc)
{
	uint callbacks = 0;

	if (wlc == NULL)
		return 0;

	BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);

	callbacks += brcms_b_detach(wlc);

	/* delete software timers */
	if (!brcms_c_radio_monitor_stop(wlc))
		callbacks++;

	brcms_c_channel_mgr_detach(wlc->cmi);

	brcms_c_timers_deinit(wlc);

	brcms_c_detach_module(wlc);

	while (wlc->tx_queues != NULL)
		brcms_c_txq_free(wlc, wlc->tx_queues);

	brcms_c_detach_mfree(wlc);
	return callbacks;
}

/* update state that depends on the current value of "ap" */
static void brcms_c_ap_upd(struct brcms_c_info *wlc)
{
	/* STA-BSS; short capable */
	wlc->PLCPHdr_override = BRCMS_PLCP_SHORT;
}

/* Initialize just the hardware when coming out of POR or S3/S5 system states */
static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
{
	if (wlc_hw->wlc->pub->hw_up)
		return;

	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);

	/*
	 * Enable pll and xtal, initialize the power control registers,
	 * and force fastclock for the remainder of brcms_c_up().
	 */
	brcms_b_xtal(wlc_hw, ON);
	ai_clkctl_init(wlc_hw->sih);
	brcms_b_clkctl_clk(wlc_hw, CLK_FAST);

	ai_pci_fixcfg(wlc_hw->sih);

	/*
	 * TODO: test suspend/resume
	 *
	 * AI chip doesn't restore bar0win2 on
	 * hibernation/resume, need sw fixup
	 */

	/*
	 * Inform phy that a POR reset has occurred so
	 * it does a complete phy init
	 */
	wlc_phy_por_inform(wlc_hw->band->pi);

	wlc_hw->ucode_loaded = false;
	wlc_hw->wlc->pub->hw_up = true;

	/* 4313 external PA workaround, unless the board has a BT FEM */
	if ((wlc_hw->boardflags & BFL_FEM)
	    && (ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
		if (!
		    (wlc_hw->boardrev >= 0x1250
		     && (wlc_hw->boardflags & BFL_FEM_BT)))
			ai_epa_4313war(wlc_hw->sih);
	}
}

/* Prepare the hardware for "up": enable clocks, configure pci, and bail
 * out with -ENOMEDIUM when the hw radio switch is disabled.
 */
static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
{
	uint coremask;

	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);

	/*
	 * Enable pll and xtal, initialize the power control registers,
	 * and force fastclock for the remainder of brcms_c_up().
	 */
	brcms_b_xtal(wlc_hw, ON);
	ai_clkctl_init(wlc_hw->sih);
	brcms_b_clkctl_clk(wlc_hw, CLK_FAST);

	/*
	 * Configure pci/pcmcia here instead of in brcms_c_attach()
	 * to allow mfg hotswap:  down, hotswap (chip power cycle), up.
	 */
	coremask = (1 << wlc_hw->wlc->core->coreidx);

	ai_pci_setup(wlc_hw->sih, coremask);

	/*
	 * Need to read the hwradio status here to cover the case where the
	 * system is loaded with the hw radio disabled. We do not want to
	 * bring the driver up in this case.
	 */
	if (brcms_b_radio_read_hwdisabled(wlc_hw)) {
		/* put SB PCI in down state again */
		ai_pci_down(wlc_hw->sih);
		brcms_b_xtal(wlc_hw, OFF);
		return -ENOMEDIUM;
	}

	ai_pci_up(wlc_hw->sih);

	/* reset the d11 core */
	brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);

	return 0;
}

/* Final stage of bringing hardware up: mark up, enable dynamic clocking
 * and turn interrupts on. Always returns 0.
 */
static int brcms_b_up_finish(struct brcms_hardware *wlc_hw)
{
	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);

	wlc_hw->up = true;
	wlc_phy_hw_state_upd(wlc_hw->band->pi, true);

	/* FULLY enable dynamic power control and d11 core interrupt */
	brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
	brcms_intrson(wlc_hw->wlc->wl);
	return 0;
}

/*
 * Write WME tunable parameters for retransmit/max rate
 * from wlc struct to ucode
 */
static void brcms_c_wme_retries_write(struct brcms_c_info *wlc)
{
	int ac;

	/* Need clock to do this */
	if (!wlc->clk)
		return;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
		brcms_b_write_shm(wlc->hw, M_AC_TXLMT_ADDR(ac),
				  wlc->wme_retries[ac]);
}

/* make interface operational */
int brcms_c_up(struct brcms_c_info *wlc)
{
	BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);

	/* HW is turned off so don't try to access it */
	if (wlc->pub->hw_off || brcms_deviceremoved(wlc))
		return -ENOMEDIUM;

	if (!wlc->pub->hw_up) {
		brcms_b_hw_up(wlc->hw);
		/* NOTE(review): brcms_b_hw_up() already sets pub->hw_up;
		 * this assignment looks redundant — confirm.
		 */
		wlc->pub->hw_up = true;
	}

	/* 4313 FEM host flags: GPIO control with BT FEM, else external PA */
	if ((wlc->pub->boardflags & BFL_FEM)
	    && (ai_get_chip_id(wlc->hw->sih) == BCM4313_CHIP_ID)) {
		if (wlc->pub->boardrev >= 0x1250
		    && (wlc->pub->boardflags & BFL_FEM_BT))
			brcms_b_mhf(wlc->hw, MHF5, MHF5_4313_GPIOCTRL,
				    MHF5_4313_GPIOCTRL, BRCM_BAND_ALL);
		else
			brcms_b_mhf(wlc->hw, MHF4, MHF4_EXTPA_ENABLE,
				    MHF4_EXTPA_ENABLE, BRCM_BAND_ALL);
	}

	/*
	 * Need to read the hwradio status here to cover the case where the
	 * system is loaded with the hw radio disabled. We do not want to bring
	 * the driver up in this case. If radio is disabled, abort up, lower
	 * power, start radio timer and return 0(for NDIS) don't call
	 * radio_update to avoid looping brcms_c_up.
	 *
	 * brcms_b_up_prep() returns either 0 or -BCME_RADIOOFF only
	 */
	if (!wlc->pub->radio_disabled) {
		int status = brcms_b_up_prep(wlc->hw);
		if (status == -ENOMEDIUM) {
			if (!mboolisset
			    (wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE)) {
				struct brcms_bss_cfg *bsscfg = wlc->bsscfg;
				mboolset(wlc->pub->radio_disabled,
					 WL_RADIO_HW_DISABLE);
				if (bsscfg->enable && bsscfg->BSS)
					wiphy_err(wlc->wiphy, "wl%d: up"
						  ": rfdisable -> "
						  "bsscfg_disable()\n",
						   wlc->pub->unit);
			}
		}
	}

	if (wlc->pub->radio_disabled) {
		brcms_c_radio_monitor_start(wlc);
		return 0;
	}

	/* brcms_b_up_prep has done brcms_c_corereset(). so clk is on, set it */
	wlc->clk = true;

	brcms_c_radio_monitor_stop(wlc);

	/* Set EDCF hostflags */
	brcms_b_mhf(wlc->hw, MHF1, MHF1_EDCF, MHF1_EDCF, BRCM_BAND_ALL);

	brcms_init(wlc->wl);
	wlc->pub->up = true;

	if (wlc->bandinit_pending) {
		brcms_c_suspend_mac_and_wait(wlc);
		brcms_c_set_chanspec(wlc, wlc->default_bss->chanspec);
		wlc->bandinit_pending = false;
		brcms_c_enable_mac(wlc);
	}

	brcms_b_up_finish(wlc->hw);

	/* Program the TX wme params with the current settings */
	brcms_c_wme_retries_write(wlc);

	/* start one second watchdog timer */
	brcms_add_timer(wlc->wdtimer, TIMER_INTERVAL_WATCHDOG, true);
	wlc->WDarmed = true;

	/* ensure antenna config is up to date */
	brcms_c_stf_phy_txant_upd(wlc);
	/* ensure LDPC config is in sync */
	brcms_c_ht_update_ldpc(wlc, wlc->stf->ldpc);

	return 0;
}

/* Placeholder: no extra timers to delete on down; always returns 0. */
static uint brcms_c_down_del_timer(struct brcms_c_info *wlc)
{
	uint callbacks = 0;

	return callbacks;
}

/* First stage of taking the MAC down: disable interrupts (or just clear
 * the mask if the device is gone) and shut the phy down. Returns the
 * number of pending driver callbacks contributed by the phy.
 */
static int brcms_b_bmac_down_prep(struct brcms_hardware *wlc_hw)
{
	bool dev_gone;
	uint callbacks = 0;

	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);

	if (!wlc_hw->up)
		return callbacks;

	dev_gone = brcms_deviceremoved(wlc_hw->wlc);

	/* disable interrupts */
	if (dev_gone)
		wlc_hw->wlc->macintmask = 0;
	else {
		/* now disable interrupts */
		brcms_intrsoff(wlc_hw->wlc->wl);

		/* ensure we're running on the pll clock again */
		brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
	}
	/* down phy at the last of this stage */
	callbacks += wlc_phy_down(wlc_hw->band->pi);

	return callbacks;
}

/* Final stage of taking the MAC down: mark hw down, flush or reset the
 * core depending on device presence, then power down xtal/pll. Returns
 * the number of pending driver callbacks.
 */
static int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
{
	uint callbacks = 0;
	bool dev_gone;

	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);

	if (!wlc_hw->up)
		return callbacks;

	wlc_hw->up = false;
	wlc_phy_hw_state_upd(wlc_hw->band->pi, false);

	dev_gone = brcms_deviceremoved(wlc_hw->wlc);

	if (dev_gone) {
		wlc_hw->sbclk = false;
		wlc_hw->clk = false;
		wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false);

		/* reclaim any posted packets */
		brcms_c_flushqueues(wlc_hw->wlc);
	} else {

		/* Reset and disable the core */
		if (bcma_core_is_enabled(wlc_hw->d11core)) {
			if (bcma_read32(wlc_hw->d11core,
					D11REGOFFS(maccontrol)) & MCTL_EN_MAC)
				brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
			callbacks += brcms_reset(wlc_hw->wlc->wl);
			brcms_c_coredisable(wlc_hw);
		}

		/* turn off primary xtal and pll */
		if (!wlc_hw->noreset) {
			ai_pci_down(wlc_hw->sih);
			brcms_b_xtal(wlc_hw, OFF);
		}
	}

	return callbacks;
}

/*
 * Mark the interface nonoperational, stop the software mechanisms,
 * disable the hardware, free any transient buffer state.
 * Return a count of the number of driver callbacks still pending.
*/ uint brcms_c_down(struct brcms_c_info *wlc) { uint callbacks = 0; int i; bool dev_gone = false; struct brcms_txq_info *qi; BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit); /* check if we are already in the going down path */ if (wlc->going_down) { wiphy_err(wlc->wiphy, "wl%d: %s: Driver going down so return" "\n", wlc->pub->unit, __func__); return 0; } if (!wlc->pub->up) return callbacks; wlc->going_down = true; callbacks += brcms_b_bmac_down_prep(wlc->hw); dev_gone = brcms_deviceremoved(wlc); /* Call any registered down handlers */ for (i = 0; i < BRCMS_MAXMODULES; i++) { if (wlc->modulecb[i].down_fn) callbacks += wlc->modulecb[i].down_fn(wlc->modulecb[i].hdl); } /* cancel the watchdog timer */ if (wlc->WDarmed) { if (!brcms_del_timer(wlc->wdtimer)) callbacks++; wlc->WDarmed = false; } /* cancel all other timers */ callbacks += brcms_c_down_del_timer(wlc); wlc->pub->up = false; wlc_phy_mute_upd(wlc->band->pi, false, PHY_MUTE_ALL); /* clear txq flow control */ brcms_c_txflowcontrol_reset(wlc); /* flush tx queues */ for (qi = wlc->tx_queues; qi != NULL; qi = qi->next) brcmu_pktq_flush(&qi->q, true, NULL, NULL); callbacks += brcms_b_down_finish(wlc->hw); /* brcms_b_down_finish has done brcms_c_coredisable(). 
so clk is off */ wlc->clk = false; wlc->going_down = false; return callbacks; } /* Set the current gmode configuration */ int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config) { int ret = 0; uint i; struct brcms_c_rateset rs; /* Default to 54g Auto */ /* Advertise and use shortslot (-1/0/1 Auto/Off/On) */ s8 shortslot = BRCMS_SHORTSLOT_AUTO; bool shortslot_restrict = false; /* Restrict association to stations * that support shortslot */ bool ofdm_basic = false; /* Make 6, 12, and 24 basic rates */ /* Advertise and use short preambles (-1/0/1 Auto/Off/On) */ int preamble = BRCMS_PLCP_LONG; bool preamble_restrict = false; /* Restrict association to stations * that support short preambles */ struct brcms_band *band; /* if N-support is enabled, allow Gmode set as long as requested * Gmode is not GMODE_LEGACY_B */ if ((wlc->pub->_n_enab & SUPPORT_11N) && gmode == GMODE_LEGACY_B) return -ENOTSUPP; /* verify that we are dealing with 2G band and grab the band pointer */ if (wlc->band->bandtype == BRCM_BAND_2G) band = wlc->band; else if ((wlc->pub->_nbands > 1) && (wlc->bandstate[OTHERBANDUNIT(wlc)]->bandtype == BRCM_BAND_2G)) band = wlc->bandstate[OTHERBANDUNIT(wlc)]; else return -EINVAL; /* Legacy or bust when no OFDM is supported by regulatory */ if ((brcms_c_channel_locale_flags_in_band(wlc->cmi, band->bandunit) & BRCMS_NO_OFDM) && (gmode != GMODE_LEGACY_B)) return -EINVAL; /* update configuration value */ if (config) brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER, gmode); /* Clear rateset override */ memset(&rs, 0, sizeof(struct brcms_c_rateset)); switch (gmode) { case GMODE_LEGACY_B: shortslot = BRCMS_SHORTSLOT_OFF; brcms_c_rateset_copy(&gphy_legacy_rates, &rs); break; case GMODE_LRS: break; case GMODE_AUTO: /* Accept defaults */ break; case GMODE_ONLY: ofdm_basic = true; preamble = BRCMS_PLCP_SHORT; preamble_restrict = true; break; case GMODE_PERFORMANCE: shortslot = BRCMS_SHORTSLOT_ON; shortslot_restrict = true; ofdm_basic = true; preamble = 
BRCMS_PLCP_SHORT; preamble_restrict = true; break; default: /* Error */ wiphy_err(wlc->wiphy, "wl%d: %s: invalid gmode %d\n", wlc->pub->unit, __func__, gmode); return -ENOTSUPP; } band->gmode = gmode; wlc->shortslot_override = shortslot; /* Use the default 11g rateset */ if (!rs.count) brcms_c_rateset_copy(&cck_ofdm_rates, &rs); if (ofdm_basic) { for (i = 0; i < rs.count; i++) { if (rs.rates[i] == BRCM_RATE_6M || rs.rates[i] == BRCM_RATE_12M || rs.rates[i] == BRCM_RATE_24M) rs.rates[i] |= BRCMS_RATE_FLAG; } } /* Set default bss rateset */ wlc->default_bss->rateset.count = rs.count; memcpy(wlc->default_bss->rateset.rates, rs.rates, sizeof(wlc->default_bss->rateset.rates)); return ret; } int brcms_c_set_nmode(struct brcms_c_info *wlc) { uint i; s32 nmode = AUTO; if (wlc->stf->txstreams == WL_11N_3x3) nmode = WL_11N_3x3; else nmode = WL_11N_2x2; /* force GMODE_AUTO if NMODE is ON */ brcms_c_set_gmode(wlc, GMODE_AUTO, true); if (nmode == WL_11N_3x3) wlc->pub->_n_enab = SUPPORT_HT; else wlc->pub->_n_enab = SUPPORT_11N; wlc->default_bss->flags |= BRCMS_BSS_HT; /* add the mcs rates to the default and hw ratesets */ brcms_c_rateset_mcs_build(&wlc->default_bss->rateset, wlc->stf->txstreams); for (i = 0; i < wlc->pub->_nbands; i++) memcpy(wlc->bandstate[i]->hw_rateset.mcs, wlc->default_bss->rateset.mcs, MCSSET_LEN); return 0; } static int brcms_c_set_internal_rateset(struct brcms_c_info *wlc, struct brcms_c_rateset *rs_arg) { struct brcms_c_rateset rs, new; uint bandunit; memcpy(&rs, rs_arg, sizeof(struct brcms_c_rateset)); /* check for bad count value */ if ((rs.count == 0) || (rs.count > BRCMS_NUMRATES)) return -EINVAL; /* try the current band */ bandunit = wlc->band->bandunit; memcpy(&new, &rs, sizeof(struct brcms_c_rateset)); if (brcms_c_rate_hwrs_filter_sort_validate (&new, &wlc->bandstate[bandunit]->hw_rateset, true, wlc->stf->txstreams)) goto good; /* try the other band */ if (brcms_is_mband_unlocked(wlc)) { bandunit = OTHERBANDUNIT(wlc); memcpy(&new, &rs, 
sizeof(struct brcms_c_rateset)); if (brcms_c_rate_hwrs_filter_sort_validate(&new, &wlc-> bandstate[bandunit]-> hw_rateset, true, wlc->stf->txstreams)) goto good; } return -EBADE; good: /* apply new rateset */ memcpy(&wlc->default_bss->rateset, &new, sizeof(struct brcms_c_rateset)); memcpy(&wlc->bandstate[bandunit]->defrateset, &new, sizeof(struct brcms_c_rateset)); return 0; } static void brcms_c_ofdm_rateset_war(struct brcms_c_info *wlc) { u8 r; bool war = false; if (wlc->bsscfg->associated) r = wlc->bsscfg->current_bss->rateset.rates[0]; else r = wlc->default_bss->rateset.rates[0]; wlc_phy_ofdm_rateset_war(wlc->band->pi, war); } int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel) { u16 chspec = ch20mhz_chspec(channel); if (channel < 0 || channel > MAXCHANNEL) return -EINVAL; if (!brcms_c_valid_chanspec_db(wlc->cmi, chspec)) return -EINVAL; if (!wlc->pub->up && brcms_is_mband_unlocked(wlc)) { if (wlc->band->bandunit != chspec_bandunit(chspec)) wlc->bandinit_pending = true; else wlc->bandinit_pending = false; } wlc->default_bss->chanspec = chspec; /* brcms_c_BSSinit() will sanitize the rateset before * using it.. 
*/ if (wlc->pub->up && (wlc_phy_chanspec_get(wlc->band->pi) != chspec)) { brcms_c_set_home_chanspec(wlc, chspec); brcms_c_suspend_mac_and_wait(wlc); brcms_c_set_chanspec(wlc, chspec); brcms_c_enable_mac(wlc); } return 0; } int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl) { int ac; if (srl < 1 || srl > RETRY_SHORT_MAX || lrl < 1 || lrl > RETRY_SHORT_MAX) return -EINVAL; wlc->SRL = srl; wlc->LRL = lrl; brcms_b_retrylimit_upd(wlc->hw, wlc->SRL, wlc->LRL); for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac], EDCF_SHORT, wlc->SRL); wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac], EDCF_LONG, wlc->LRL); } brcms_c_wme_retries_write(wlc); return 0; } void brcms_c_get_current_rateset(struct brcms_c_info *wlc, struct brcm_rateset *currs) { struct brcms_c_rateset *rs; if (wlc->pub->associated) rs = &wlc->bsscfg->current_bss->rateset; else rs = &wlc->default_bss->rateset; /* Copy only legacy rateset section */ currs->count = rs->count; memcpy(&currs->rates, &rs->rates, rs->count); } int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs) { struct brcms_c_rateset internal_rs; int bcmerror; if (rs->count > BRCMS_NUMRATES) return -ENOBUFS; memset(&internal_rs, 0, sizeof(struct brcms_c_rateset)); /* Copy only legacy rateset section */ internal_rs.count = rs->count; memcpy(&internal_rs.rates, &rs->rates, internal_rs.count); /* merge rateset coming in with the current mcsset */ if (wlc->pub->_n_enab & SUPPORT_11N) { struct brcms_bss_info *mcsset_bss; if (wlc->bsscfg->associated) mcsset_bss = wlc->bsscfg->current_bss; else mcsset_bss = wlc->default_bss; memcpy(internal_rs.mcs, &mcsset_bss->rateset.mcs[0], MCSSET_LEN); } bcmerror = brcms_c_set_internal_rateset(wlc, &internal_rs); if (!bcmerror) brcms_c_ofdm_rateset_war(wlc); return bcmerror; } int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period) { if (period < DOT11_MIN_BEACON_PERIOD || period > DOT11_MAX_BEACON_PERIOD) return 
-EINVAL; wlc->default_bss->beacon_period = period; return 0; } u16 brcms_c_get_phy_type(struct brcms_c_info *wlc, int phyidx) { return wlc->band->phytype; } void brcms_c_set_shortslot_override(struct brcms_c_info *wlc, s8 sslot_override) { wlc->shortslot_override = sslot_override; /* * shortslot is an 11g feature, so no more work if we are * currently on the 5G band */ if (wlc->band->bandtype == BRCM_BAND_5G) return; if (wlc->pub->up && wlc->pub->associated) { /* let watchdog or beacon processing update shortslot */ } else if (wlc->pub->up) { /* unassociated shortslot is off */ brcms_c_switch_shortslot(wlc, false); } else { /* driver is down, so just update the brcms_c_info * value */ if (wlc->shortslot_override == BRCMS_SHORTSLOT_AUTO) wlc->shortslot = false; else wlc->shortslot = (wlc->shortslot_override == BRCMS_SHORTSLOT_ON); } } /* * register watchdog and down handlers. */ int brcms_c_module_register(struct brcms_pub *pub, const char *name, struct brcms_info *hdl, int (*d_fn)(void *handle)) { struct brcms_c_info *wlc = (struct brcms_c_info *) pub->wlc; int i; /* find an empty entry and just add, no duplication check! */ for (i = 0; i < BRCMS_MAXMODULES; i++) { if (wlc->modulecb[i].name[0] == '\0') { strncpy(wlc->modulecb[i].name, name, sizeof(wlc->modulecb[i].name) - 1); wlc->modulecb[i].hdl = hdl; wlc->modulecb[i].down_fn = d_fn; return 0; } } return -ENOSR; } /* unregister module callbacks */ int brcms_c_module_unregister(struct brcms_pub *pub, const char *name, struct brcms_info *hdl) { struct brcms_c_info *wlc = (struct brcms_c_info *) pub->wlc; int i; if (wlc == NULL) return -ENODATA; for (i = 0; i < BRCMS_MAXMODULES; i++) { if (!strcmp(wlc->modulecb[i].name, name) && (wlc->modulecb[i].hdl == hdl)) { memset(&wlc->modulecb[i], 0, sizeof(struct modulecb)); return 0; } } /* table not found! 
*/ return -ENODATA; } void brcms_c_print_txstatus(struct tx_status *txs) { pr_debug("\ntxpkt (MPDU) Complete\n"); pr_debug("FrameID: %04x TxStatus: %04x\n", txs->frameid, txs->status); pr_debug("[15:12] %d frame attempts\n", (txs->status & TX_STATUS_FRM_RTX_MASK) >> TX_STATUS_FRM_RTX_SHIFT); pr_debug(" [11:8] %d rts attempts\n", (txs->status & TX_STATUS_RTS_RTX_MASK) >> TX_STATUS_RTS_RTX_SHIFT); pr_debug(" [7] %d PM mode indicated\n", txs->status & TX_STATUS_PMINDCTD ? 1 : 0); pr_debug(" [6] %d intermediate status\n", txs->status & TX_STATUS_INTERMEDIATE ? 1 : 0); pr_debug(" [5] %d AMPDU\n", txs->status & TX_STATUS_AMPDU ? 1 : 0); pr_debug(" [4:2] %d Frame Suppressed Reason (%s)\n", (txs->status & TX_STATUS_SUPR_MASK) >> TX_STATUS_SUPR_SHIFT, (const char *[]) { "None", "PMQ Entry", "Flush request", "Previous frag failure", "Channel mismatch", "Lifetime Expiry", "Underflow" } [(txs->status & TX_STATUS_SUPR_MASK) >> TX_STATUS_SUPR_SHIFT]); pr_debug(" [1] %d acked\n", txs->status & TX_STATUS_ACK_RCV ? 
1 : 0); pr_debug("LastTxTime: %04x Seq: %04x PHYTxStatus: %04x RxAckRSSI: %04x RxAckSQ: %04x\n", txs->lasttxtime, txs->sequence, txs->phyerr, (txs->ackphyrxsh & PRXS1_JSSI_MASK) >> PRXS1_JSSI_SHIFT, (txs->ackphyrxsh & PRXS1_SQ_MASK) >> PRXS1_SQ_SHIFT); } bool brcms_c_chipmatch(u16 vendor, u16 device) { if (vendor != PCI_VENDOR_ID_BROADCOM) { pr_err("unknown vendor id %04x\n", vendor); return false; } if (device == BCM43224_D11N_ID_VEN1) return true; if ((device == BCM43224_D11N_ID) || (device == BCM43225_D11N2G_ID)) return true; if (device == BCM4313_D11N2G_ID) return true; if ((device == BCM43236_D11N_ID) || (device == BCM43236_D11N2G_ID)) return true; pr_err("unknown device id %04x\n", device); return false; } #if defined(DEBUG) void brcms_c_print_txdesc(struct d11txh *txh) { u16 mtcl = le16_to_cpu(txh->MacTxControlLow); u16 mtch = le16_to_cpu(txh->MacTxControlHigh); u16 mfc = le16_to_cpu(txh->MacFrameControl); u16 tfest = le16_to_cpu(txh->TxFesTimeNormal); u16 ptcw = le16_to_cpu(txh->PhyTxControlWord); u16 ptcw_1 = le16_to_cpu(txh->PhyTxControlWord_1); u16 ptcw_1_Fbr = le16_to_cpu(txh->PhyTxControlWord_1_Fbr); u16 ptcw_1_Rts = le16_to_cpu(txh->PhyTxControlWord_1_Rts); u16 ptcw_1_FbrRts = le16_to_cpu(txh->PhyTxControlWord_1_FbrRts); u16 mainrates = le16_to_cpu(txh->MainRates); u16 xtraft = le16_to_cpu(txh->XtraFrameTypes); u8 *iv = txh->IV; u8 *ra = txh->TxFrameRA; u16 tfestfb = le16_to_cpu(txh->TxFesTimeFallback); u8 *rtspfb = txh->RTSPLCPFallback; u16 rtsdfb = le16_to_cpu(txh->RTSDurFallback); u8 *fragpfb = txh->FragPLCPFallback; u16 fragdfb = le16_to_cpu(txh->FragDurFallback); u16 mmodelen = le16_to_cpu(txh->MModeLen); u16 mmodefbrlen = le16_to_cpu(txh->MModeFbrLen); u16 tfid = le16_to_cpu(txh->TxFrameID); u16 txs = le16_to_cpu(txh->TxStatus); u16 mnmpdu = le16_to_cpu(txh->MaxNMpdus); u16 mabyte = le16_to_cpu(txh->MaxABytes_MRT); u16 mabyte_f = le16_to_cpu(txh->MaxABytes_FBR); u16 mmbyte = le16_to_cpu(txh->MinMBytes); u8 *rtsph = txh->RTSPhyHeader; struct 
ieee80211_rts rts = txh->rts_frame; /* add plcp header along with txh descriptor */ brcmu_dbg_hex_dump(txh, sizeof(struct d11txh) + 48, "Raw TxDesc + plcp header:\n"); pr_debug("TxCtlLow: %04x ", mtcl); pr_debug("TxCtlHigh: %04x ", mtch); pr_debug("FC: %04x ", mfc); pr_debug("FES Time: %04x\n", tfest); pr_debug("PhyCtl: %04x%s ", ptcw, (ptcw & PHY_TXC_SHORT_HDR) ? " short" : ""); pr_debug("PhyCtl_1: %04x ", ptcw_1); pr_debug("PhyCtl_1_Fbr: %04x\n", ptcw_1_Fbr); pr_debug("PhyCtl_1_Rts: %04x ", ptcw_1_Rts); pr_debug("PhyCtl_1_Fbr_Rts: %04x\n", ptcw_1_FbrRts); pr_debug("MainRates: %04x ", mainrates); pr_debug("XtraFrameTypes: %04x ", xtraft); pr_debug("\n"); print_hex_dump_bytes("SecIV:", DUMP_PREFIX_OFFSET, iv, sizeof(txh->IV)); print_hex_dump_bytes("RA:", DUMP_PREFIX_OFFSET, ra, sizeof(txh->TxFrameRA)); pr_debug("Fb FES Time: %04x ", tfestfb); print_hex_dump_bytes("Fb RTS PLCP:", DUMP_PREFIX_OFFSET, rtspfb, sizeof(txh->RTSPLCPFallback)); pr_debug("RTS DUR: %04x ", rtsdfb); print_hex_dump_bytes("PLCP:", DUMP_PREFIX_OFFSET, fragpfb, sizeof(txh->FragPLCPFallback)); pr_debug("DUR: %04x", fragdfb); pr_debug("\n"); pr_debug("MModeLen: %04x ", mmodelen); pr_debug("MModeFbrLen: %04x\n", mmodefbrlen); pr_debug("FrameID: %04x\n", tfid); pr_debug("TxStatus: %04x\n", txs); pr_debug("MaxNumMpdu: %04x\n", mnmpdu); pr_debug("MaxAggbyte: %04x\n", mabyte); pr_debug("MaxAggbyte_fb: %04x\n", mabyte_f); pr_debug("MinByte: %04x\n", mmbyte); print_hex_dump_bytes("RTS PLCP:", DUMP_PREFIX_OFFSET, rtsph, sizeof(txh->RTSPhyHeader)); print_hex_dump_bytes("RTS Frame:", DUMP_PREFIX_OFFSET, (u8 *)&rts, sizeof(txh->rts_frame)); pr_debug("\n"); } #endif /* defined(DEBUG) */ #if defined(DEBUG) static int brcms_c_format_flags(const struct brcms_c_bit_desc *bd, u32 flags, char *buf, int len) { int i; char *p = buf; char hexstr[16]; int slen = 0, nlen = 0; u32 bit; const char *name; if (len < 2 || !buf) return 0; buf[0] = '\0'; for (i = 0; flags != 0; i++) { bit = bd[i].bit; name = bd[i].name; if (bit 
== 0 && flags != 0) { /* print any unnamed bits */ snprintf(hexstr, 16, "0x%X", flags); name = hexstr; flags = 0; /* exit loop */ } else if ((flags & bit) == 0) continue; flags &= ~bit; nlen = strlen(name); slen += nlen; /* count btwn flag space */ if (flags != 0) slen += 1; /* need NULL char as well */ if (len <= slen) break; /* copy NULL char but don't count it */ strncpy(p, name, nlen + 1); p += nlen; /* copy btwn flag space and NULL char */ if (flags != 0) p += snprintf(p, 2, " "); len -= slen; } /* indicate the str was too short */ if (flags != 0) { if (len < 2) p -= 2 - len; /* overwrite last char */ p += snprintf(p, 2, ">"); } return (int)(p - buf); } #endif /* defined(DEBUG) */ #if defined(DEBUG) void brcms_c_print_rxh(struct d11rxhdr *rxh) { u16 len = rxh->RxFrameSize; u16 phystatus_0 = rxh->PhyRxStatus_0; u16 phystatus_1 = rxh->PhyRxStatus_1; u16 phystatus_2 = rxh->PhyRxStatus_2; u16 phystatus_3 = rxh->PhyRxStatus_3; u16 macstatus1 = rxh->RxStatus1; u16 macstatus2 = rxh->RxStatus2; char flagstr[64]; char lenbuf[20]; static const struct brcms_c_bit_desc macstat_flags[] = { {RXS_FCSERR, "FCSErr"}, {RXS_RESPFRAMETX, "Reply"}, {RXS_PBPRES, "PADDING"}, {RXS_DECATMPT, "DeCr"}, {RXS_DECERR, "DeCrErr"}, {RXS_BCNSENT, "Bcn"}, {0, NULL} }; brcmu_dbg_hex_dump(rxh, sizeof(struct d11rxhdr), "Raw RxDesc:\n"); brcms_c_format_flags(macstat_flags, macstatus1, flagstr, 64); snprintf(lenbuf, sizeof(lenbuf), "0x%x", len); pr_debug("RxFrameSize: %6s (%d)%s\n", lenbuf, len, (rxh->PhyRxStatus_0 & PRXS0_SHORTH) ? 
" short preamble" : ""); pr_debug("RxPHYStatus: %04x %04x %04x %04x\n", phystatus_0, phystatus_1, phystatus_2, phystatus_3); pr_debug("RxMACStatus: %x %s\n", macstatus1, flagstr); pr_debug("RXMACaggtype: %x\n", (macstatus2 & RXS_AGGTYPE_MASK)); pr_debug("RxTSFTime: %04x\n", rxh->RxTSFTime); } #endif /* defined(DEBUG) */ u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate) { u16 table_ptr; u8 phy_rate, index; /* get the phy specific rate encoding for the PLCP SIGNAL field */ if (is_ofdm_rate(rate)) table_ptr = M_RT_DIRMAP_A; else table_ptr = M_RT_DIRMAP_B; /* for a given rate, the LS-nibble of the PLCP SIGNAL field is * the index into the rate table. */ phy_rate = rate_info[rate] & BRCMS_RATE_MASK; index = phy_rate & 0xf; /* Find the SHM pointer to the rate table entry by looking in the * Direct-map Table */ return 2 * brcms_b_read_shm(wlc_hw, table_ptr + (index * 2)); } static bool brcms_c_prec_enq_head(struct brcms_c_info *wlc, struct pktq *q, struct sk_buff *pkt, int prec, bool head) { struct sk_buff *p; int eprec = -1; /* precedence to evict from */ /* Determine precedence from which to evict packet, if any */ if (pktq_pfull(q, prec)) eprec = prec; else if (pktq_full(q)) { p = brcmu_pktq_peek_tail(q, &eprec); if (eprec > prec) { wiphy_err(wlc->wiphy, "%s: Failing: eprec %d > prec %d" "\n", __func__, eprec, prec); return false; } } /* Evict if needed */ if (eprec >= 0) { bool discard_oldest; discard_oldest = ac_bitmap_tst(0, eprec); /* Refuse newer packet unless configured to discard oldest */ if (eprec == prec && !discard_oldest) { wiphy_err(wlc->wiphy, "%s: No where to go, prec == %d" "\n", __func__, prec); return false; } /* Evict packet according to discard policy */ p = discard_oldest ? 
brcmu_pktq_pdeq(q, eprec) :
		    brcmu_pktq_pdeq_tail(q, eprec);
		brcmu_pkt_buf_free_skb(p);
	}

	/* Enqueue at the head or tail of the precedence queue */
	if (head)
		p = brcmu_pktq_penq_head(q, prec, pkt);
	else
		p = brcmu_pktq_penq(q, prec, pkt);

	return true;
}

/*
 * Attempts to queue a packet onto a multiple-precedence queue,
 * if necessary evicting a lower precedence packet from the queue.
 *
 * 'prec' is the precedence number that has already been mapped
 * from the packet priority.
 *
 * Returns true if packet consumed (queued), false if not.
 */
static bool brcms_c_prec_enq(struct brcms_c_info *wlc, struct pktq *q,
			     struct sk_buff *pkt, int prec)
{
	/* plain tail enqueue; eviction policy is handled by the helper */
	return brcms_c_prec_enq_head(wlc, q, pkt, prec, false);
}

/*
 * Enqueue an sdu on the common tx packet queue at precedence 'prec';
 * if the queue refuses it (e.g. flooding from the mac80211 stack),
 * the packet is freed rather than transmitted.
 */
void brcms_c_txq_enq(struct brcms_c_info *wlc, struct scb *scb,
		     struct sk_buff *sdu, uint prec)
{
	struct brcms_txq_info *qi = wlc->pkt_queue;	/* Check me */
	struct pktq *q = &qi->q;
	int prio;	/* NOTE(review): assigned but never read */

	prio = sdu->priority;

	if (!brcms_c_prec_enq(wlc, q, sdu, prec)) {
		/*
		 * we might hit this condition in case of
		 * packet flooding from the mac80211 stack
		 */
		brcmu_pkt_buf_free_skb(sdu);
	}
}

/*
 * bcmc_fid_generate:
 * Generate frame ID for a BCMC packet. The frag field is not used
 * for MC frames so is used as part of the sequence number.
 */
static inline u16
bcmc_fid_generate(struct brcms_c_info *wlc, struct brcms_bss_cfg *bsscfg,
		  struct d11txh *txh)
{
	/* NOTE(review): bsscfg is currently unused */
	u16 frameid;

	/* keep the existing id but clear the sequence and queue fields */
	frameid = le16_to_cpu(txh->TxFrameID) & ~(TXFID_SEQ_MASK |
						  TXFID_QUEUE_MASK);
	/* per-device multicast counter supplies the sequence; queue = BCMC */
	frameid |=
	    (((wlc->
	       mc_fid_counter++) << TXFID_SEQ_SHIFT) & TXFID_SEQ_MASK) |
	    TX_BCMC_FIFO;

	return frameid;
}

/*
 * Return the airtime (usec) of the ACK that follows a frame sent at
 * 'rspec' with the given preamble type.
 */
static uint
brcms_c_calc_ack_time(struct brcms_c_info *wlc, u32 rspec,
		      u8 preamble_type)
{
	uint dur = 0;

	BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, preamble_type %d\n",
	       wlc->pub->unit, rspec, preamble_type);
	/*
	 * Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that
	 * is less than or equal to the rate of the immediately previous
	 * frame in the FES
	 */
	rspec = brcms_basic_rate(wlc, rspec);
	/* ACK frame len == 14 == 2(fc) + 2(dur) + 6(ra) + 4(fcs) */
	dur =
	    brcms_c_calc_frame_time(wlc, rspec, preamble_type,
				    (DOT11_ACK_LEN + FCS_LEN));
	return dur;
}

/* CTS airtime: identical in length/timing to an ACK */
static uint
brcms_c_calc_cts_time(struct brcms_c_info *wlc, u32 rspec,
		      u8 preamble_type)
{
	BCMMSG(wlc->wiphy, "wl%d: ratespec 0x%x, preamble_type %d\n",
	       wlc->pub->unit, rspec, preamble_type);
	return brcms_c_calc_ack_time(wlc, rspec, preamble_type);
}

/*
 * Return the airtime (usec) of the BlockAck that follows a frame sent
 * at 'rspec' with the given preamble type.
 */
static uint
brcms_c_calc_ba_time(struct brcms_c_info *wlc, u32 rspec,
		     u8 preamble_type)
{
	BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, "
	       "preamble_type %d\n", wlc->pub->unit, rspec, preamble_type);
	/*
	 * Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that
	 * is less than or equal to the rate of the immediately previous
	 * frame in the FES
	 */
	rspec = brcms_basic_rate(wlc, rspec);
	/* BA len == 32 == 16(ctl hdr) + 4(ba len) + 8(bitmap) + 4(fcs) */
	return brcms_c_calc_frame_time(wlc, rspec, preamble_type,
				       (DOT11_BA_LEN + DOT11_BA_BITMAP_LEN +
					FCS_LEN));
}

/* brcms_c_compute_frame_dur()
 *
 * Calculate the 802.11 MAC header DUR field for MPDU
 * DUR for a single frame = 1 SIFS + 1 ACK
 * DUR for a frame with following frags = 3 SIFS + 2 ACK + next frag time
 *
 * rate			MPDU rate in unit of 500kbps
 * next_frag_len	next MPDU length in bytes
 * preamble_type	use short/GF or
 long/MM PLCP header */
static u16
brcms_c_compute_frame_dur(struct brcms_c_info *wlc, u32 rate,
			  u8 preamble_type, uint next_frag_len)
{
	u16 dur, sifs;

	sifs = get_sifs(wlc->band);

	/* 1 SIFS + 1 ACK for the final (or only) fragment */
	dur = sifs;
	dur += (u16) brcms_c_calc_ack_time(wlc, rate, preamble_type);

	if (next_frag_len) {
		/* Double the current DUR to get 2 SIFS + 2 ACKs */
		dur *= 2;
		/* add another SIFS and the frag time */
		dur += sifs;
		dur += (u16) brcms_c_calc_frame_time(wlc, rate, preamble_type,
						     next_frag_len);
	}
	return dur;
}

/*
 * The opposite of brcms_c_calc_frame_time: given an airtime budget 'dur'
 * (usec) and a rate, compute the maximum MAC payload length (bytes) that
 * fits, for MCS, OFDM and CCK rates respectively.
 */
static uint
brcms_c_calc_frame_len(struct brcms_c_info *wlc, u32 ratespec,
		       u8 preamble_type, uint dur)
{
	uint nsyms, mac_len, Ndps, kNdps;
	uint rate = rspec2rate(ratespec);

	BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, preamble_type %d, dur %d\n",
	       wlc->pub->unit, ratespec, preamble_type, dur);

	if (is_mcs_rate(ratespec)) {
		uint mcs = ratespec & RSPEC_RATE_MASK;
		int tot_streams = mcs_2_txstreams(mcs) + rspec_stc(ratespec);
		/* strip the 11n preamble (+ per-stream extensions) */
		dur -= PREN_PREAMBLE + (tot_streams * PREN_PREAMBLE_EXT);
		/* payload calculation matches that of regular ofdm */
		if (wlc->band->bandtype == BRCM_BAND_2G)
			dur -= DOT11_OFDM_SIGNAL_EXTENSION;
		/* kNdbps = kbps * 4 */
		kNdps = mcs_2_rate(mcs, rspec_is40mhz(ratespec),
				   rspec_issgi(ratespec)) * 4;
		nsyms = dur / APHY_SYMBOL_TIME;
		mac_len =
		    ((nsyms * kNdps) -
		     ((APHY_SERVICE_NBITS + APHY_TAIL_NBITS) * 1000)) / 8000;
	} else if (is_ofdm_rate(ratespec)) {
		dur -= APHY_PREAMBLE_TIME;
		dur -= APHY_SIGNAL_TIME;
		/* Ndbps = Mbps * 4 = rate(500Kbps) * 2 */
		Ndps = rate * 2;
		nsyms = dur / APHY_SYMBOL_TIME;
		mac_len =
		    ((nsyms * Ndps) -
		     (APHY_SERVICE_NBITS + APHY_TAIL_NBITS)) / 8;
	} else {
		/* CCK/DSSS: strip the long or short PLCP header time */
		if (preamble_type & BRCMS_SHORT_PREAMBLE)
			dur -= BPHY_PLCP_SHORT_TIME;
		else
			dur -= BPHY_PLCP_TIME;
		mac_len = dur * rate;
		/* divide out factor of 2 in rate (1/2 mbps) */
		mac_len = mac_len / 8 / 2;
	}
	return mac_len;
}

/*
 * Return true if the specified rate is supported by the specified band.
 * BRCM_BAND_AUTO indicates the current band.
 */
static bool brcms_c_valid_rate(struct brcms_c_info *wlc, u32 rspec, int band,
			       bool verbose)
{
	struct brcms_c_rateset *hw_rateset;
	uint i;

	/* select the hardware rateset of the requested (or current) band */
	if ((band == BRCM_BAND_AUTO) || (band == wlc->band->bandtype))
		hw_rateset = &wlc->band->hw_rateset;
	else if (wlc->pub->_nbands > 1)
		hw_rateset = &wlc->bandstate[OTHERBANDUNIT(wlc)]->hw_rateset;
	else
		/* other band specified and we are a single band device */
		return false;

	/* check if this is a mimo rate */
	if (is_mcs_rate(rspec)) {
		if ((rspec & RSPEC_RATE_MASK) >= MCS_TABLE_SIZE)
			goto error;

		return isset(hw_rateset->mcs, (rspec & RSPEC_RATE_MASK));
	}

	/* legacy rate: linear search of the band's rate list */
	for (i = 0; i < hw_rateset->count; i++)
		if (hw_rateset->rates[i] == rspec2rate(rspec))
			return true;
 error:
	if (verbose)
		wiphy_err(wlc->wiphy, "wl%d: valid_rate: rate spec 0x%x "
			  "not in hw_rateset\n", wlc->pub->unit, rspec);

	return false;
}

/*
 * Convert an NRATE_* encoded override value ('int_val') into a driver
 * ratespec for 'cur_band', validating the requested stf/mcs/rate
 * combination. On any invalid combination the plain rate code is
 * returned unchanged.
 */
static u32 mac80211_wlc_set_nrate(struct brcms_c_info *wlc,
				  struct brcms_band *cur_band, u32 int_val)
{
	u8 stf = (int_val & NRATE_STF_MASK) >> NRATE_STF_SHIFT;
	u8 rate = int_val & NRATE_RATE_MASK;
	u32 rspec;
	bool ismcs = ((int_val & NRATE_MCS_INUSE) == NRATE_MCS_INUSE);
	bool issgi = ((int_val & NRATE_SGI_MASK) >> NRATE_SGI_SHIFT);
	bool override_mcs_only = ((int_val & NRATE_OVERRIDE_MCS_ONLY)
				  == NRATE_OVERRIDE_MCS_ONLY);
	int bcmerror = 0;	/* NOTE(review): set on error paths, but
				 * every error path returns 'rate'; the
				 * value itself is never used */

	if (!ismcs)
		return (u32) rate;

	/* validate the combination of rate/mcs/stf is allowed */
	if ((wlc->pub->_n_enab & SUPPORT_11N) && ismcs) {
		/* mcs only allowed when nmode */
		if (stf > PHY_TXC1_MODE_SDM) {
			wiphy_err(wlc->wiphy, "wl%d: %s: Invalid stf\n",
				  wlc->pub->unit, __func__);
			bcmerror = -EINVAL;
			goto done;
		}

		/* mcs 32 is a special case, DUP mode 40 only */
		if (rate == 32) {
			if (!CHSPEC_IS40(wlc->home_chanspec) ||
			    ((stf != PHY_TXC1_MODE_SISO) &&
			     (stf != PHY_TXC1_MODE_CDD))) {
				wiphy_err(wlc->wiphy, "wl%d: %s: Invalid mcs "
					  "32\n", wlc->pub->unit, __func__);
				bcmerror = -EINVAL;
				goto done;
			}
			/* mcs > 7 must use stf SDM */
		} else if (rate > HIGHEST_SINGLE_STREAM_MCS) {
			/* mcs > 7 must use stf SDM */
			if (stf != PHY_TXC1_MODE_SDM) {
				BCMMSG(wlc->wiphy, "wl%d: enabling "
				       "SDM mode for mcs %d\n",
				       wlc->pub->unit, rate);
				stf = PHY_TXC1_MODE_SDM;
			}
		} else {
			/*
			 * MCS 0-7 may use SISO, CDD, and for
			 * phy_rev >= 3 STBC
			 */
			if ((stf > PHY_TXC1_MODE_STBC) ||
			    (!BRCMS_STBC_CAP_PHY(wlc)
			     && (stf == PHY_TXC1_MODE_STBC))) {
				wiphy_err(wlc->wiphy, "wl%d: %s: Invalid STBC"
					  "\n", wlc->pub->unit, __func__);
				bcmerror = -EINVAL;
				goto done;
			}
		}
	} else if (is_ofdm_rate(rate)) {
		if ((stf != PHY_TXC1_MODE_CDD) && (stf != PHY_TXC1_MODE_SISO)) {
			wiphy_err(wlc->wiphy, "wl%d: %s: Invalid OFDM\n",
				  wlc->pub->unit, __func__);
			bcmerror = -EINVAL;
			goto done;
		}
	} else if (is_cck_rate(rate)) {
		if ((cur_band->bandtype != BRCM_BAND_2G)
		    || (stf != PHY_TXC1_MODE_SISO)) {
			wiphy_err(wlc->wiphy, "wl%d: %s: Invalid CCK\n",
				  wlc->pub->unit, __func__);
			bcmerror = -EINVAL;
			goto done;
		}
	} else {
		wiphy_err(wlc->wiphy, "wl%d: %s: Unknown rate type\n",
			  wlc->pub->unit, __func__);
		bcmerror = -EINVAL;
		goto done;
	}
	/* make sure multiple antennae are available for non-siso rates */
	if ((stf != PHY_TXC1_MODE_SISO) && (wlc->stf->txstreams == 1)) {
		wiphy_err(wlc->wiphy, "wl%d: %s: SISO antenna but !SISO "
			  "request\n", wlc->pub->unit, __func__);
		bcmerror = -EINVAL;
		goto done;
	}

	rspec = rate;
	if (ismcs) {
		rspec |= RSPEC_MIMORATE;
		/* For STBC populate the STC field of the ratespec */
		if (stf == PHY_TXC1_MODE_STBC) {
			u8 stc;
			stc = 1;	/* Nss for single stream is always 1 */
			rspec |= (stc << RSPEC_STC_SHIFT);
		}
	}

	rspec |= (stf << RSPEC_STF_SHIFT);

	if (override_mcs_only)
		rspec |= RSPEC_OVERRIDE_MCS_ONLY;

	if (issgi)
		rspec |= RSPEC_SHORT_GI;

	/* final sanity check against the band's hardware rateset */
	if ((rate != 0)
	    && !brcms_c_valid_rate(wlc, rspec, cur_band->bandtype, true))
		return rate;

	return rspec;

done:
	return rate;
}

/*
 * Compute PLCP, but only requires actual rate and length of pkt.
 * Rate is given in the driver standard multiple of 500 kbps.
 * le is set for 11 Mbps rate if necessary.
 * Broken out for PRQ.
 */
*/ static void brcms_c_cck_plcp_set(struct brcms_c_info *wlc, int rate_500, uint length, u8 *plcp) { u16 usec = 0; u8 le = 0; switch (rate_500) { case BRCM_RATE_1M: usec = length << 3; break; case BRCM_RATE_2M: usec = length << 2; break; case BRCM_RATE_5M5: usec = (length << 4) / 11; if ((length << 4) - (usec * 11) > 0) usec++; break; case BRCM_RATE_11M: usec = (length << 3) / 11; if ((length << 3) - (usec * 11) > 0) { usec++; if ((usec * 11) - (length << 3) >= 8) le = D11B_PLCP_SIGNAL_LE; } break; default: wiphy_err(wlc->wiphy, "brcms_c_cck_plcp_set: unsupported rate %d\n", rate_500); rate_500 = BRCM_RATE_1M; usec = length << 3; break; } /* PLCP signal byte */ plcp[0] = rate_500 * 5; /* r (500kbps) * 5 == r (100kbps) */ /* PLCP service byte */ plcp[1] = (u8) (le | D11B_PLCP_SIGNAL_LOCKED); /* PLCP length u16, little endian */ plcp[2] = usec & 0xff; plcp[3] = (usec >> 8) & 0xff; /* PLCP CRC16 */ plcp[4] = 0; plcp[5] = 0; } /* Rate: 802.11 rate code, length: PSDU length in octets */ static void brcms_c_compute_mimo_plcp(u32 rspec, uint length, u8 *plcp) { u8 mcs = (u8) (rspec & RSPEC_RATE_MASK); plcp[0] = mcs; if (rspec_is40mhz(rspec) || (mcs == 32)) plcp[0] |= MIMO_PLCP_40MHZ; BRCMS_SET_MIMO_PLCP_LEN(plcp, length); plcp[3] = rspec_mimoplcp3(rspec); /* rspec already holds this byte */ plcp[3] |= 0x7; /* set smoothing, not sounding ppdu & reserved */ plcp[4] = 0; /* number of extension spatial streams bit 0 & 1 */ plcp[5] = 0; } /* Rate: 802.11 rate code, length: PSDU length in octets */ static void brcms_c_compute_ofdm_plcp(u32 rspec, u32 length, u8 *plcp) { u8 rate_signal; u32 tmp = 0; int rate = rspec2rate(rspec); /* * encode rate per 802.11a-1999 sec 17.3.4.1, with lsb * transmitted first */ rate_signal = rate_info[rate] & BRCMS_RATE_MASK; memset(plcp, 0, D11_PHY_HDR_LEN); D11A_PHY_HDR_SRATE((struct ofdm_phy_hdr *) plcp, rate_signal); tmp = (length & 0xfff) << 5; plcp[2] |= (tmp >> 16) & 0xff; plcp[1] |= (tmp >> 8) & 0xff; plcp[0] |= tmp & 0xff; } /* Rate: 802.11 
rate code, length: PSDU length in octets */
static void brcms_c_compute_cck_plcp(struct brcms_c_info *wlc, u32 rspec,
				     uint length, u8 *plcp)
{
	int rate = rspec2rate(rspec);

	brcms_c_cck_plcp_set(wlc, rate, length, plcp);
}

/* Fill in the PLCP header for the modulation class implied by rspec:
 * HT(MCS), OFDM, or CCK.
 */
static void brcms_c_compute_plcp(struct brcms_c_info *wlc, u32 rspec,
				 uint length, u8 *plcp)
{
	if (is_mcs_rate(rspec))
		brcms_c_compute_mimo_plcp(rspec, length, plcp);
	else if (is_ofdm_rate(rspec))
		brcms_c_compute_ofdm_plcp(rspec, length, plcp);
	else
		brcms_c_compute_cck_plcp(wlc, rspec, length, plcp);
}

/* brcms_c_compute_rtscts_dur()
 *
 * Calculate the 802.11 MAC header DUR field for an RTS or CTS frame
 * DUR for normal RTS/CTS w/ frame = 3 SIFS + 1 CTS + next frame time + 1 ACK
 * DUR for CTS-TO-SELF w/ frame    = 2 SIFS         + next frame time + 1 ACK
 *
 * cts			cts-to-self or rts/cts
 * rts_rate		rts or cts rate in unit of 500kbps
 * rate			next MPDU rate in unit of 500kbps
 * frame_len		next MPDU frame length in bytes
 */
u16
brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
			   u32 rts_rate,
			   u32 frame_rate, u8 rts_preamble_type,
			   u8 frame_preamble_type, uint frame_len, bool ba)
{
	u16 dur, sifs;

	sifs = get_sifs(wlc->band);

	if (!cts_only) {
		/* RTS/CTS */
		dur = 3 * sifs;
		dur += (u16) brcms_c_calc_cts_time(wlc, rts_rate,
						   rts_preamble_type);
	} else {
		/* CTS-TO-SELF */
		dur = 2 * sifs;
	}

	dur += (u16) brcms_c_calc_frame_time(wlc, frame_rate,
					     frame_preamble_type, frame_len);
	if (ba)
		/* block-ack response instead of a normal ACK */
		dur += (u16) brcms_c_calc_ba_time(wlc, frame_rate,
						  BRCMS_SHORT_PREAMBLE);
	else
		dur += (u16) brcms_c_calc_ack_time(wlc, frame_rate,
						   frame_preamble_type);
	return dur;
}

/* Build the PhyTxControlWord_1 value (bandwidth, STF mode and, for MCS
 * rates, the per-MCS tx phy control byte) for the given ratespec.
 */
static u16 brcms_c_phytxctl1_calc(struct brcms_c_info *wlc, u32 rspec)
{
	u16 phyctl1 = 0;
	u16 bw;

	if (BRCMS_ISLCNPHY(wlc->band)) {
		bw = PHY_TXC1_BW_20MHZ;
	} else {
		bw = rspec_get_bw(rspec);
		/* 10Mhz is not supported yet */
		if (bw < PHY_TXC1_BW_20MHZ) {
			wiphy_err(wlc->wiphy, "phytxctl1_calc: bw %d is "
				  "not supported yet, set to 20L\n", bw);
			bw = PHY_TXC1_BW_20MHZ;
		}
	}

	if (is_mcs_rate(rspec)) {
		uint mcs = rspec & RSPEC_RATE_MASK;

		/* bw, stf, coding-type is part of rspec_phytxbyte2 returns */
		phyctl1 = rspec_phytxbyte2(rspec);
		/* set the upper byte of phyctl1 */
		phyctl1 |= (mcs_table[mcs].tx_phy_ctl3 << 8);
	} else if (is_cck_rate(rspec) && !BRCMS_ISLCNPHY(wlc->band)
		   && !BRCMS_ISSSLPNPHY(wlc->band)) {
		/*
		 * In CCK mode LPPHY overloads OFDM Modulation bits with CCK
		 * Data Rate. Eventually MIMOPHY would also be converted to
		 * this format
		 */
		/* 0 = 1Mbps; 1 = 2Mbps; 2 = 5.5Mbps; 3 = 11Mbps */
		phyctl1 = (bw | (rspec_stf(rspec) << PHY_TXC1_MODE_SHIFT));
	} else {		/* legacy OFDM/CCK */
		s16 phycfg;
		/* get the phyctl byte from rate phycfg table */
		phycfg = brcms_c_rate_legacy_phyctl(rspec2rate(rspec));
		if (phycfg == -1) {
			wiphy_err(wlc->wiphy, "phytxctl1_calc: wrong "
				  "legacy OFDM/CCK rate\n");
			phycfg = 0;
		}
		/* set the upper byte of phyctl1 */
		phyctl1 =
		    (bw | (phycfg << 8) |
		     (rspec_stf(rspec) << PHY_TXC1_MODE_SHIFT));
	}
	return phyctl1;
}

/*
 * Add struct d11txh, struct cck_phy_hdr.
 *
 * 'p' data must start with 802.11 MAC header
 * 'p' must allow enough bytes of local headers to be "pushed" onto the packet
 *
 * headroom == D11_PHY_HDR_LEN + D11_TXH_LEN (D11_TXH_LEN is now 104 bytes)
 *
 */
static u16
brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
			 struct sk_buff *p, struct scb *scb, uint frag,
			 uint nfrags, uint queue, uint next_frag_len)
{
	struct ieee80211_hdr *h;
	struct d11txh *txh;
	u8 *plcp, plcp_fallback[D11_PHY_HDR_LEN];
	int len, phylen, rts_phylen;
	u16 mch, phyctl, xfts, mainrates;
	u16 seq = 0, mcl = 0, status = 0, frameid = 0;
	u32 rspec[2] = { BRCM_RATE_1M, BRCM_RATE_1M };
	u32 rts_rspec[2] = { BRCM_RATE_1M, BRCM_RATE_1M };
	bool use_rts = false;
	bool use_cts = false;
	bool use_rifs = false;
	bool short_preamble[2] = { false, false };
	u8 preamble_type[2] = { BRCMS_LONG_PREAMBLE, BRCMS_LONG_PREAMBLE };
	u8 rts_preamble_type[2] = { BRCMS_LONG_PREAMBLE, BRCMS_LONG_PREAMBLE };
	u8 *rts_plcp, rts_plcp_fallback[D11_PHY_HDR_LEN];
	struct ieee80211_rts *rts = NULL;
	bool qos;
	uint ac;
	bool hwtkmic = false;
	u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
#define ANTCFG_NONE 0xFF
	u8 antcfg = ANTCFG_NONE;
	u8 fbantcfg = ANTCFG_NONE;
	uint phyctl1_stf = 0;
	u16 durid = 0;
	/* txrate[0] = primary rate, txrate[1] = fallback rate */
	struct ieee80211_tx_rate *txrate[2];
	int k;
	struct ieee80211_tx_info *tx_info;
	bool is_mcs;
	u16 mimo_txbw;
	u8 mimo_preamble_type;

	/* locate 802.11 MAC header */
	h = (struct ieee80211_hdr *)(p->data);
	qos = ieee80211_is_data_qos(h->frame_control);

	/* compute length of frame in bytes for use in PLCP computations */
	len = p->len;
	phylen = len + FCS_LEN;

	/* Get tx_info */
	tx_info = IEEE80211_SKB_CB(p);

	/* add PLCP */
	plcp = skb_push(p, D11_PHY_HDR_LEN);

	/* add Broadcom tx descriptor header */
	txh = (struct d11txh *) skb_push(p, D11_TXH_LEN);
	memset(txh, 0, D11_TXH_LEN);

	/* setup frameid */
	if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		/* non-AP STA should never use BCMC queue */
		if (queue == TX_BCMC_FIFO) {
			wiphy_err(wlc->wiphy, "wl%d: %s: ASSERT queue == "
				  "TX_BCMC!\n", wlc->pub->unit, __func__);
			frameid = bcmc_fid_generate(wlc, NULL, txh);
		} else {
			/* Increment the counter for first fragment */
			if (tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
				scb->seqnum[p->priority]++;

			/* extract fragment number from frame first */
			seq = le16_to_cpu(h->seq_ctrl) & FRAGNUM_MASK;
			seq |= (scb->seqnum[p->priority] << SEQNUM_SHIFT);
			h->seq_ctrl = cpu_to_le16(seq);

			frameid = ((seq << TXFID_SEQ_SHIFT) & TXFID_SEQ_MASK) |
			    (queue & TXFID_QUEUE_MASK);
		}
	}
	/* NOTE(review): redundant when the branch above already or'ed the
	 * queue bits in, but harmless; it also covers the no-ASSIGN_SEQ path.
	 */
	frameid |= queue & TXFID_QUEUE_MASK;

	/* set the ignpmq bit for all pkts tx'd in PS mode and for beacons */
	if (ieee80211_is_beacon(h->frame_control))
		mcl |= TXC_IGNOREPMQ;

	txrate[0] = tx_info->control.rates;
	txrate[1] = txrate[0] + 1;

	/*
	 * if rate control algorithm didn't give us a fallback
	 * rate, use the primary rate
	 */
	if (txrate[1]->idx < 0)
		txrate[1] = txrate[0];

	for (k = 0; k < hw->max_rates; k++) {
		is_mcs = txrate[k]->flags & IEEE80211_TX_RC_MCS ? true : false;
		if (!is_mcs) {
			if ((txrate[k]->idx >= 0)
			    && (txrate[k]->idx <
				hw->wiphy->bands[tx_info->band]->n_bitrates)) {
				rspec[k] =
				    hw->wiphy->bands[tx_info->band]->
				    bitrates[txrate[k]->idx].hw_value;
				short_preamble[k] =
				    txrate[k]->
				    flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ?
				    true : false;
			} else {
				/* out-of-range index: fall back to 1 Mbps */
				rspec[k] = BRCM_RATE_1M;
			}
		} else {
			rspec[k] = mac80211_wlc_set_nrate(wlc, wlc->band,
					NRATE_MCS_INUSE | txrate[k]->idx);
		}

		/*
		 * Currently only support same setting for primay and
		 * fallback rates. Unify flags for each rate into a
		 * single value for the frame
		 */
		use_rts |=
		    txrate[k]->
		    flags & IEEE80211_TX_RC_USE_RTS_CTS ? true : false;
		use_cts |=
		    txrate[k]->
		    flags & IEEE80211_TX_RC_USE_CTS_PROTECT ? true : false;

		/*
		 * (1) RATE:
		 *   determine and validate primary rate
		 *   and fallback rates
		 */
		if (!rspec_active(rspec[k])) {
			rspec[k] = BRCM_RATE_1M;
		} else {
			if (!is_multicast_ether_addr(h->addr1)) {
				/* set tx antenna config */
				brcms_c_antsel_antcfg_get(wlc->asi, false,
					false, 0, 0, &antcfg, &fbantcfg);
			}
		}
	}

	phyctl1_stf = wlc->stf->ss_opmode;

	if (wlc->pub->_n_enab & SUPPORT_11N) {
		for (k = 0; k < hw->max_rates; k++) {
			/*
			 * apply siso/cdd to single stream mcs's or ofdm
			 * if rspec is auto selected
			 */
			if (((is_mcs_rate(rspec[k]) &&
			      is_single_stream(rspec[k] & RSPEC_RATE_MASK)) ||
			     is_ofdm_rate(rspec[k]))
			    && ((rspec[k] & RSPEC_OVERRIDE_MCS_ONLY)
				|| !(rspec[k] & RSPEC_OVERRIDE))) {
				rspec[k] &= ~(RSPEC_STF_MASK | RSPEC_STC_MASK);

				/* For SISO MCS use STBC if possible */
				if (is_mcs_rate(rspec[k])
				    && BRCMS_STF_SS_STBC_TX(wlc, scb)) {
					u8 stc;

					/* Nss for single stream is always 1 */
					stc = 1;
					rspec[k] |= (PHY_TXC1_MODE_STBC <<
							RSPEC_STF_SHIFT) |
						    (stc << RSPEC_STC_SHIFT);
				} else
					rspec[k] |=
					    (phyctl1_stf << RSPEC_STF_SHIFT);
			}

			/*
			 * Is the phy configured to use 40MHZ frames? If
			 * so then pick the desired txbw
			 */
			if (brcms_chspec_bw(wlc->chanspec) == BRCMS_40_MHZ) {
				/* default txbw is 20in40 SB */
				mimo_ctlchbw = mimo_txbw =
				   CHSPEC_SB_UPPER(wlc_phy_chanspec_get(
								 wlc->band->pi))
				   ? PHY_TXC1_BW_20MHZ_UP : PHY_TXC1_BW_20MHZ;

				if (is_mcs_rate(rspec[k])) {
					/* mcs 32 must be 40b/w DUP */
					if ((rspec[k] & RSPEC_RATE_MASK)
					    == 32) {
						mimo_txbw =
						    PHY_TXC1_BW_40MHZ_DUP;
						/* use override */
					} else if (wlc->mimo_40txbw != AUTO)
						mimo_txbw = wlc->mimo_40txbw;
					/* else check if dst is using 40 Mhz */
					else if (scb->flags & SCB_IS40)
						mimo_txbw = PHY_TXC1_BW_40MHZ;
				} else if (is_ofdm_rate(rspec[k])) {
					if (wlc->ofdm_40txbw != AUTO)
						mimo_txbw = wlc->ofdm_40txbw;
				} else if (wlc->cck_40txbw != AUTO) {
					mimo_txbw = wlc->cck_40txbw;
				}
			} else {
				/*
				 * mcs32 is 40 b/w only.
				 * This is possible for probe packets on
				 * a STA during SCAN
				 */
				if ((rspec[k] & RSPEC_RATE_MASK) == 32)
					/* mcs 0 */
					rspec[k] = RSPEC_MIMORATE;

				mimo_txbw = PHY_TXC1_BW_20MHZ;
			}

			/* Set channel width */
			rspec[k] &= ~RSPEC_BW_MASK;
			if ((k == 0) || ((k > 0) && is_mcs_rate(rspec[k])))
				rspec[k] |= (mimo_txbw << RSPEC_BW_SHIFT);
			else
				rspec[k] |= (mimo_ctlchbw << RSPEC_BW_SHIFT);

			/* Disable short GI, not supported yet */
			rspec[k] &= ~RSPEC_SHORT_GI;

			mimo_preamble_type = BRCMS_MM_PREAMBLE;
			if (txrate[k]->flags & IEEE80211_TX_RC_GREEN_FIELD)
				mimo_preamble_type = BRCMS_GF_PREAMBLE;

			if ((txrate[k]->flags & IEEE80211_TX_RC_MCS)
			    && (!is_mcs_rate(rspec[k]))) {
				wiphy_err(wlc->wiphy, "wl%d: %s: IEEE80211_TX_"
					  "RC_MCS != is_mcs_rate(rspec)\n",
					  wlc->pub->unit, __func__);
			}

			if (is_mcs_rate(rspec[k])) {
				preamble_type[k] = mimo_preamble_type;

				/*
				 * if SGI is selected, then forced mm
				 * for single stream
				 * (dead branch while SHORT_GI is cleared
				 * above -- kept for when SGI is enabled)
				 */
				if ((rspec[k] & RSPEC_SHORT_GI)
				    && is_single_stream(rspec[k] &
							RSPEC_RATE_MASK))
					preamble_type[k] = BRCMS_MM_PREAMBLE;
			}

			/* should be better conditionalized */
			if (!is_mcs_rate(rspec[0])
			    && (tx_info->control.rates[0].
				flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE))
				preamble_type[k] = BRCMS_SHORT_PREAMBLE;
		}
	} else {
		for (k = 0; k < hw->max_rates; k++) {
			/* Set ctrlchbw as 20Mhz */
			rspec[k] &= ~RSPEC_BW_MASK;
			rspec[k] |= (PHY_TXC1_BW_20MHZ << RSPEC_BW_SHIFT);

			/* for nphy, stf of ofdm frames must follow policies */
			if (BRCMS_ISNPHY(wlc->band) && is_ofdm_rate(rspec[k])) {
				rspec[k] &= ~RSPEC_STF_MASK;
				rspec[k] |= phyctl1_stf << RSPEC_STF_SHIFT;
			}
		}
	}

	/* Reset these for use with AMPDU's */
	txrate[0]->count = 0;
	txrate[1]->count = 0;

	/* (2) PROTECTION, may change rspec */
	if ((ieee80211_is_data(h->frame_control) ||
	    ieee80211_is_mgmt(h->frame_control)) &&
	    (phylen > wlc->RTSThresh) && !is_multicast_ether_addr(h->addr1))
		use_rts = true;

	/* (3) PLCP: determine PLCP header and MAC duration,
	 * fill struct d11txh */
	brcms_c_compute_plcp(wlc, rspec[0], phylen, plcp);
	brcms_c_compute_plcp(wlc, rspec[1], phylen, plcp_fallback);
	memcpy(&txh->FragPLCPFallback,
	       plcp_fallback, sizeof(txh->FragPLCPFallback));

	/* Length field now put in CCK FBR CRC field */
	if (is_cck_rate(rspec[1])) {
		txh->FragPLCPFallback[4] = phylen & 0xff;
		txh->FragPLCPFallback[5] = (phylen & 0xff00) >> 8;
	}

	/* MIMO-RATE: need validation ?? */
	mainrates = is_ofdm_rate(rspec[0]) ?
			D11A_PHY_HDR_GRATE((struct ofdm_phy_hdr *) plcp) :
			plcp[0];

	/* DUR field for main rate */
	if (!ieee80211_is_pspoll(h->frame_control) &&
	    !is_multicast_ether_addr(h->addr1) && !use_rifs) {
		durid =
		    brcms_c_compute_frame_dur(wlc, rspec[0], preamble_type[0],
					      next_frag_len);
		h->duration_id = cpu_to_le16(durid);
	} else if (use_rifs) {
		/* NAV protect to end of next max packet size */
		durid =
		    (u16) brcms_c_calc_frame_time(wlc, rspec[0],
						 preamble_type[0],
						 DOT11_MAX_FRAG_LEN);
		durid += RIFS_11N_TIME;
		h->duration_id = cpu_to_le16(durid);
	}

	/* DUR field for fallback rate */
	if (ieee80211_is_pspoll(h->frame_control))
		txh->FragDurFallback = h->duration_id;
	else if (is_multicast_ether_addr(h->addr1) || use_rifs)
		txh->FragDurFallback = 0;
	else {
		durid = brcms_c_compute_frame_dur(wlc, rspec[1],
						  preamble_type[1],
						  next_frag_len);
		txh->FragDurFallback = cpu_to_le16(durid);
	}

	/* (4) MAC-HDR: MacTxControlLow */
	if (frag == 0)
		mcl |= TXC_STARTMSDU;

	if (!is_multicast_ether_addr(h->addr1))
		mcl |= TXC_IMMEDACK;

	if (wlc->band->bandtype == BRCM_BAND_5G)
		mcl |= TXC_FREQBAND_5G;

	if (CHSPEC_IS40(wlc_phy_chanspec_get(wlc->band->pi)))
		mcl |= TXC_BW_40;

	/* set AMIC bit if using hardware TKIP MIC */
	if (hwtkmic)
		mcl |= TXC_AMIC;

	txh->MacTxControlLow = cpu_to_le16(mcl);

	/* MacTxControlHigh */
	mch = 0;

	/* Set fallback rate preamble type */
	if ((preamble_type[1] == BRCMS_SHORT_PREAMBLE) ||
	    (preamble_type[1] == BRCMS_GF_PREAMBLE)) {
		if (rspec2rate(rspec[1]) != BRCM_RATE_1M)
			mch |= TXC_PREAMBLE_DATA_FB_SHORT;
	}

	/* MacFrameControl */
	memcpy(&txh->MacFrameControl, &h->frame_control, sizeof(u16));
	txh->TxFesTimeNormal = cpu_to_le16(0);

	txh->TxFesTimeFallback = cpu_to_le16(0);

	/* TxFrameRA */
	memcpy(&txh->TxFrameRA, &h->addr1, ETH_ALEN);

	/* TxFrameID */
	txh->TxFrameID = cpu_to_le16(frameid);

	/*
	 * TxStatus, Note the case of recreating the first frag of a suppressed
	 * frame then we may need to reset the retry cnt's via the status reg
	 */
	txh->TxStatus = cpu_to_le16(status);

	/*
	 * extra fields for ucode AMPDU aggregation, the new fields are added to
	 * the END of previous structure so that it's compatible in driver.
	 */
	txh->MaxNMpdus = cpu_to_le16(0);
	txh->MaxABytes_MRT = cpu_to_le16(0);
	txh->MaxABytes_FBR = cpu_to_le16(0);
	txh->MinMBytes = cpu_to_le16(0);

	/* (5) RTS/CTS: determine RTS/CTS PLCP header and MAC duration,
	 * furnish struct d11txh */
	/* RTS PLCP header and RTS frame */
	if (use_rts || use_cts) {
		if (use_rts && use_cts)
			use_cts = false;

		for (k = 0; k < 2; k++) {
			rts_rspec[k] = brcms_c_rspec_to_rts_rspec(wlc, rspec[k],
							      false,
							      mimo_ctlchbw);
		}

		if (!is_ofdm_rate(rts_rspec[0]) &&
		    !((rspec2rate(rts_rspec[0]) == BRCM_RATE_1M) ||
		      (wlc->PLCPHdr_override == BRCMS_PLCP_LONG))) {
			rts_preamble_type[0] = BRCMS_SHORT_PREAMBLE;
			mch |= TXC_PREAMBLE_RTS_MAIN_SHORT;
		}

		if (!is_ofdm_rate(rts_rspec[1]) &&
		    !((rspec2rate(rts_rspec[1]) == BRCM_RATE_1M) ||
		      (wlc->PLCPHdr_override == BRCMS_PLCP_LONG))) {
			rts_preamble_type[1] = BRCMS_SHORT_PREAMBLE;
			mch |= TXC_PREAMBLE_RTS_FB_SHORT;
		}

		/* RTS/CTS additions to MacTxControlLow */
		if (use_cts) {
			txh->MacTxControlLow |= cpu_to_le16(TXC_SENDCTS);
		} else {
			txh->MacTxControlLow |= cpu_to_le16(TXC_SENDRTS);
			txh->MacTxControlLow |= cpu_to_le16(TXC_LONGFRAME);
		}

		/* RTS PLCP header */
		rts_plcp = txh->RTSPhyHeader;
		if (use_cts)
			rts_phylen = DOT11_CTS_LEN + FCS_LEN;
		else
			rts_phylen = DOT11_RTS_LEN + FCS_LEN;

		brcms_c_compute_plcp(wlc, rts_rspec[0], rts_phylen, rts_plcp);

		/* fallback rate version of RTS PLCP header */
		brcms_c_compute_plcp(wlc, rts_rspec[1], rts_phylen,
				     rts_plcp_fallback);
		memcpy(&txh->RTSPLCPFallback, rts_plcp_fallback,
		       sizeof(txh->RTSPLCPFallback));

		/* RTS frame fields... */
		rts = (struct ieee80211_rts *)&txh->rts_frame;

		durid = brcms_c_compute_rtscts_dur(wlc, use_cts, rts_rspec[0],
					       rspec[0], rts_preamble_type[0],
					       preamble_type[0], phylen, false);
		rts->duration = cpu_to_le16(durid);
		/* fallback rate version of RTS DUR field */
		durid = brcms_c_compute_rtscts_dur(wlc, use_cts,
					       rts_rspec[1], rspec[1],
					       rts_preamble_type[1],
					       preamble_type[1], phylen, false);
		txh->RTSDurFallback = cpu_to_le16(durid);

		if (use_cts) {
			rts->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
							 IEEE80211_STYPE_CTS);

			memcpy(&rts->ra, &h->addr2, ETH_ALEN);
		} else {
			rts->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
							 IEEE80211_STYPE_RTS);

			/* copies both RA (addr1) and TA (addr2) in one go */
			memcpy(&rts->ra, &h->addr1, 2 * ETH_ALEN);
		}

		/* mainrate
		 *    low 8 bits: main frag rate/mcs,
		 *    high 8 bits: rts/cts rate/mcs
		 */
		mainrates |= (is_ofdm_rate(rts_rspec[0]) ?
				D11A_PHY_HDR_GRATE(
					(struct ofdm_phy_hdr *) rts_plcp) :
				rts_plcp[0]) << 8;
	} else {
		memset((char *)txh->RTSPhyHeader, 0, D11_PHY_HDR_LEN);
		memset((char *)&txh->rts_frame, 0,
		       sizeof(struct ieee80211_rts));
		memset((char *)txh->RTSPLCPFallback, 0,
		      sizeof(txh->RTSPLCPFallback));
		txh->RTSDurFallback = 0;
	}

#ifdef SUPPORT_40MHZ
	/* NOTE(review): 'rspec' is a u32[2] here, so is_mcs_rate(rspec) and
	 * the rspec argument below look wrong (won't compile if SUPPORT_40MHZ
	 * is ever defined) -- presumably rspec[0] was intended; verify against
	 * upstream before enabling.
	 */
	/* add null delimiter count */
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && is_mcs_rate(rspec))
		txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM] =
		   brcm_c_ampdu_null_delim_cnt(wlc->ampdu, scb, rspec, phylen);

#endif

	/*
	 * Now that RTS/RTS FB preamble types are updated, write
	 * the final value
	 */
	txh->MacTxControlHigh = cpu_to_le16(mch);

	/*
	 * MainRates (both the rts and frag plcp rates have
	 * been calculated now)
	 */
	txh->MainRates = cpu_to_le16(mainrates);

	/* XtraFrameTypes */
	xfts = frametype(rspec[1], wlc->mimoft);
	xfts |= (frametype(rts_rspec[0], wlc->mimoft) << XFTS_RTS_FT_SHIFT);
	xfts |= (frametype(rts_rspec[1], wlc->mimoft) << XFTS_FBRRTS_FT_SHIFT);
	xfts |= CHSPEC_CHANNEL(wlc_phy_chanspec_get(wlc->band->pi)) <<
							     XFTS_CHANNEL_SHIFT;
	txh->XtraFrameTypes = cpu_to_le16(xfts);

	/* PhyTxControlWord */
	phyctl = frametype(rspec[0], wlc->mimoft);
	if ((preamble_type[0] == BRCMS_SHORT_PREAMBLE) ||
	    (preamble_type[0] == BRCMS_GF_PREAMBLE)) {
		if (rspec2rate(rspec[0]) != BRCM_RATE_1M)
			phyctl |= PHY_TXC_SHORT_HDR;
	}

	/* phytxant is properly bit shifted */
	phyctl |= brcms_c_stf_d11hdrs_phyctl_txant(wlc, rspec[0]);
	txh->PhyTxControlWord = cpu_to_le16(phyctl);

	/* PhyTxControlWord_1 */
	if (BRCMS_PHY_11N_CAP(wlc->band)) {
		u16 phyctl1 = 0;

		phyctl1 = brcms_c_phytxctl1_calc(wlc, rspec[0]);
		txh->PhyTxControlWord_1 = cpu_to_le16(phyctl1);
		phyctl1 = brcms_c_phytxctl1_calc(wlc, rspec[1]);
		txh->PhyTxControlWord_1_Fbr = cpu_to_le16(phyctl1);

		if (use_rts || use_cts) {
			phyctl1 = brcms_c_phytxctl1_calc(wlc, rts_rspec[0]);
			txh->PhyTxControlWord_1_Rts = cpu_to_le16(phyctl1);
			phyctl1 = brcms_c_phytxctl1_calc(wlc, rts_rspec[1]);
			txh->PhyTxControlWord_1_FbrRts = cpu_to_le16(phyctl1);
		}
		/*
		 * For mcs frames, if mixedmode(overloaded with long preamble)
		 * is going to be set, fill in non-zero MModeLen and/or
		 * MModeFbrLen it will be unnecessary if they are separated
		 */
		if (is_mcs_rate(rspec[0]) &&
		    (preamble_type[0] == BRCMS_MM_PREAMBLE)) {
			u16 mmodelen =
			    brcms_c_calc_lsig_len(wlc, rspec[0], phylen);
			txh->MModeLen = cpu_to_le16(mmodelen);
		}

		if (is_mcs_rate(rspec[1]) &&
		    (preamble_type[1] == BRCMS_MM_PREAMBLE)) {
			u16 mmodefbrlen =
			    brcms_c_calc_lsig_len(wlc, rspec[1], phylen);
			txh->MModeFbrLen = cpu_to_le16(mmodefbrlen);
		}
	}

	ac = skb_get_queue_mapping(p);
	if ((scb->flags & SCB_WMECAP) && qos && wlc->edcf_txop[ac]) {
		uint frag_dur, dur, dur_fallback;

		/* WME: Update TXOP threshold */
		if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU) && frag == 0) {
			frag_dur =
			    brcms_c_calc_frame_time(wlc, rspec[0],
					preamble_type[0], phylen);

			if (rts) {
				/* 1 RTS or CTS-to-self frame */
				dur =
				    brcms_c_calc_cts_time(wlc, rts_rspec[0],
						      rts_preamble_type[0]);
				dur_fallback =
				    brcms_c_calc_cts_time(wlc, rts_rspec[1],
						      rts_preamble_type[1]);
				/* (SIFS + CTS) + SIFS + frame + SIFS + ACK */
				dur += le16_to_cpu(rts->duration);
				dur_fallback +=
					le16_to_cpu(txh->RTSDurFallback);
			} else if (use_rifs) {
				dur = frag_dur;
				dur_fallback = 0;
			} else {
				/* frame + SIFS + ACK */
				dur = frag_dur;
				dur +=
				    brcms_c_compute_frame_dur(wlc, rspec[0],
							  preamble_type[0], 0);

				dur_fallback =
				    brcms_c_calc_frame_time(wlc, rspec[1],
							preamble_type[1],
							phylen);
				dur_fallback +=
				    brcms_c_compute_frame_dur(wlc, rspec[1],
							  preamble_type[1], 0);
			}
			/* NEED to set TxFesTimeNormal (hard) */
			txh->TxFesTimeNormal = cpu_to_le16((u16) dur);
			/*
			 * NEED to set fallback rate version of
			 * TxFesTimeNormal (hard)
			 */
			txh->TxFesTimeFallback =
				cpu_to_le16((u16) dur_fallback);

			/*
			 * update txop byte threshold (txop minus intraframe
			 * overhead)
			 */
			if (wlc->edcf_txop[ac] >= (dur - frag_dur)) {
				uint newfragthresh;

				newfragthresh =
				    brcms_c_calc_frame_len(wlc,
					rspec[0], preamble_type[0],
					(wlc->edcf_txop[ac] -
						(dur - frag_dur)));
				/* range bound the fragthreshold */
				if (newfragthresh < DOT11_MIN_FRAG_LEN)
					newfragthresh =
					    DOT11_MIN_FRAG_LEN;
				else if (newfragthresh >
					 wlc->usr_fragthresh)
					newfragthresh =
					    wlc->usr_fragthresh;
				/* update the fragthresh and do txc update */
				if (wlc->fragthresh[queue] !=
				    (u16) newfragthresh)
					wlc->fragthresh[queue] =
					    (u16) newfragthresh;
			} else {
				wiphy_err(wlc->wiphy, "wl%d: %s txop invalid "
					  "for rate %d\n",
					  wlc->pub->unit, fifo_names[queue],
					  rspec2rate(rspec[0]));
			}

			if (dur > wlc->edcf_txop[ac])
				wiphy_err(wlc->wiphy, "wl%d: %s: %s txop "
					  "exceeded phylen %d/%d dur %d/%d\n",
					  wlc->pub->unit, __func__,
					  fifo_names[queue],
					  phylen, wlc->fragthresh[queue],
					  dur, wlc->edcf_txop[ac]);
		}
	}

	return 0;
}

/* Push tx headers onto an sdu handed down from mac80211 and queue it. */
void brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, struct sk_buff *sdu,
			      struct ieee80211_hw *hw)
{
	u8 prio;
	uint fifo;
	struct scb *scb = &wlc->pri_scb;
	struct ieee80211_hdr *d11_header = (struct ieee80211_hdr *)(sdu->data);

	/*
	 * 802.11 standard requires management traffic
	 * to go at highest priority
	 */
	prio = ieee80211_is_data(d11_header->frame_control) ? sdu->priority :
		MAXPRIO;
	fifo = prio2fifo[prio];
	if (brcms_c_d11hdrs_mac80211(wlc, hw, sdu, scb, 0, 1, fifo, 0))
		return;
	brcms_c_txq_enq(wlc, scb, sdu, BRCMS_PRIO_TO_PREC(prio));
	brcms_c_send_q(wlc);
}

/* Drain the software tx queue into the hardware FIFOs. */
void brcms_c_send_q(struct brcms_c_info *wlc)
{
	struct sk_buff *pkt[DOT11_MAXNUMFRAGS];
	int prec;
	u16 prec_map;
	int err = 0, i, count;
	uint fifo;
	struct brcms_txq_info *qi = wlc->pkt_queue;
	struct pktq *q = &qi->q;
	struct ieee80211_tx_info *tx_info;

	prec_map = wlc->tx_prec_map;

	/* Send all the enq'd pkts that we can.
	 * Dequeue packets with precedence with empty HW fifo only
	 */
	while (prec_map && (pkt[0] = brcmu_pktq_mdeq(q, prec_map, &prec))) {
		tx_info = IEEE80211_SKB_CB(pkt[0]);
		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
			err = brcms_c_sendampdu(wlc->ampdu, qi, pkt, prec);
		} else {
			count = 1;
			err = brcms_c_prep_pdu(wlc, pkt[0], &fifo);
			if (!err) {
				for (i = 0; i < count; i++)
					brcms_c_txfifo(wlc, fifo, pkt[i], true,
						       1);
			}
		}

		if (err == -EBUSY) {
			brcmu_pktq_penq_head(q, prec, pkt[0]);
			/*
			 * If send failed due to any other reason than a
			 * change in HW FIFO condition, quit. Otherwise,
			 * read the new prec_map!
			 */
			if (prec_map == wlc->tx_prec_map)
				break;
			prec_map = wlc->tx_prec_map;
		}
	}
}

/* Hand one prepared frame to the DMA engine for the given FIFO. */
void
brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p,
	       bool commit, s8 txpktpend)
{
	u16 frameid = INVALIDFID;
	struct d11txh *txh;

	txh = (struct d11txh *) (p->data);

	/* When a BC/MC frame is being committed to the BCMC fifo
	 * via DMA (NOT PIO), update ucode or BSS info as appropriate.
	 */
	if (fifo == TX_BCMC_FIFO)
		frameid = le16_to_cpu(txh->TxFrameID);

	/*
	 * Bump up pending count for if not using rpc. If rpc is
	 * used, this will be handled in brcms_b_txfifo()
	 */
	if (commit) {
		wlc->core->txpktpend[fifo] += txpktpend;
		BCMMSG(wlc->wiphy, "pktpend inc %d to %d\n",
			 txpktpend, wlc->core->txpktpend[fifo]);
	}

	/* Commit BCMC sequence number in the SHM frame ID location */
	if (frameid != INVALIDFID) {
		/*
		 * To inform the ucode of the last mcast frame posted
		 * so that it can clear moredata bit
		 */
		brcms_b_write_shm(wlc->hw, M_BCMC_FID, frameid);
	}

	if (dma_txfast(wlc->hw->di[fifo], p, commit) < 0)
		wiphy_err(wlc->wiphy, "txfifo: fatal, toss frames !!!\n");
}

/* Derive the basic-rate RTS/CTS ratespec to protect the given frame rspec. */
u32
brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc, u32 rspec,
			   bool use_rspec, u16 mimo_ctlchbw)
{
	u32 rts_rspec = 0;

	if (use_rspec)
		/* use frame rate as rts rate */
		rts_rspec = rspec;
	else if (wlc->band->gmode && wlc->protection->_g && !is_cck_rate(rspec))
		/* Use 11Mbps as the g protection RTS target rate and fallback.
		 * Use the brcms_basic_rate() lookup to find the best basic rate
		 * under the target in case 11 Mbps is not Basic.
		 * 6 and 9 Mbps are not usually selected by rate selection, but
		 * even if the OFDM rate we are protecting is 6 or 9 Mbps, 11
		 * is more robust.
		 */
		rts_rspec = brcms_basic_rate(wlc, BRCM_RATE_11M);
	else
		/* calculate RTS rate and fallback rate based on the frame rate
		 * RTS must be sent at a basic rate since it is a
		 * control frame, sec 9.6 of 802.11 spec
		 */
		rts_rspec = brcms_basic_rate(wlc, rspec);

	if (BRCMS_PHY_11N_CAP(wlc->band)) {
		/* set rts txbw to correct side band */
		rts_rspec &= ~RSPEC_BW_MASK;

		/*
		 * if rspec/rspec_fallback is 40MHz, then send RTS on both
		 * 20MHz channel (DUP), otherwise send RTS on control channel
		 */
		if (rspec_is40mhz(rspec) && !is_cck_rate(rts_rspec))
			rts_rspec |= (PHY_TXC1_BW_40MHZ_DUP << RSPEC_BW_SHIFT);
		else
			rts_rspec |= (mimo_ctlchbw << RSPEC_BW_SHIFT);

		/* pick siso/cdd as default for ofdm */
		if (is_ofdm_rate(rts_rspec)) {
			rts_rspec &= ~RSPEC_STF_MASK;
			rts_rspec |= (wlc->stf->ss_opmode << RSPEC_STF_SHIFT);
		}
	}
	return rts_rspec;
}

/* Credit back completed packets and re-enable the FIFO's precedences. */
void
brcms_c_txfifo_complete(struct brcms_c_info *wlc, uint fifo, s8 txpktpend)
{
	wlc->core->txpktpend[fifo] -= txpktpend;
	BCMMSG(wlc->wiphy, "pktpend dec %d to %d\n", txpktpend,
	       wlc->core->txpktpend[fifo]);

	/* There is more room; mark precedences related to this FIFO sendable */
	wlc->tx_prec_map |= wlc->fifo2prec_map[fifo];

	/* figure out which bsscfg is being worked on... */
}

/* Update beacon listen interval in shared memory */
static void brcms_c_bcn_li_upd(struct brcms_c_info *wlc)
{
	/* wake up every DTIM is the default */
	if (wlc->bcn_li_dtim == 1)
		brcms_b_write_shm(wlc->hw, M_BCN_LI, 0);
	else
		brcms_b_write_shm(wlc->hw, M_BCN_LI,
			      (wlc->bcn_li_dtim << 8) | wlc->bcn_li_bcn);
}

/* Read the 64-bit TSF as two 32-bit halves from the d11 core registers. */
static void
brcms_b_read_tsf(struct brcms_hardware *wlc_hw, u32 *tsf_l_ptr,
		  u32 *tsf_h_ptr)
{
	struct bcma_device *core = wlc_hw->d11core;

	/* read the tsf timer low, then high to get an atomic read */
	*tsf_l_ptr = bcma_read32(core, D11REGOFFS(tsf_timerlow));
	*tsf_h_ptr = bcma_read32(core, D11REGOFFS(tsf_timerhigh));
}

/*
 * recover 64bit TSF value from the 16bit TSF value in the rx header
 * given the assumption that the TSF passed in header is within 65ms
 * of the current tsf.
 *
 * 6       5       4       4       3       2       1
 * 3.......6.......8.......0.......2.......4.......6.......8......0
 * |<---------- tsf_h ----------->||<--- tsf_l -->||<-RxTSFTime ->|
 *
 * The RxTSFTime are the lowest 16 bits and provided by the ucode. The
 * tsf_l is filled in by brcms_b_recv, which is done earlier in the
 * receive call sequence after rx interrupt. Only the higher 16 bits
 * are used. Finally, the tsf_h is read from the tsf register.
 */
static u64 brcms_c_recover_tsf64(struct brcms_c_info *wlc,
				 struct d11rxhdr *rxh)
{
	u32 tsf_h, tsf_l;
	u16 rx_tsf_0_15, rx_tsf_16_31;

	brcms_b_read_tsf(wlc->hw, &tsf_l, &tsf_h);

	rx_tsf_16_31 = (u16)(tsf_l >> 16);
	rx_tsf_0_15 = rxh->RxTSFTime;

	/*
	 * a greater tsf time indicates the low 16 bits of
	 * tsf_l wrapped, so decrement the high 16 bits.
	 */
	if ((u16)tsf_l < rx_tsf_0_15) {
		rx_tsf_16_31 -= 1;
		if (rx_tsf_16_31 == 0xffff)
			tsf_h -= 1;
	}

	return ((u64)tsf_h << 32) | (((u32)rx_tsf_16_31 << 16) + rx_tsf_0_15);
}

/* Translate the d11 rx header into a mac80211 ieee80211_rx_status:
 * timestamp, band/frequency, signal, antenna, rate index and error flags.
 */
static void
prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
		     struct sk_buff *p,
		     struct ieee80211_rx_status *rx_status)
{
	int preamble;
	int channel;
	u32 rspec;
	unsigned char *plcp;

	/* fill in TSF and flag its presence */
	rx_status->mactime = brcms_c_recover_tsf64(wlc, rxh);
	rx_status->flag |= RX_FLAG_MACTIME_MPDU;

	channel = BRCMS_CHAN_CHANNEL(rxh->RxChan);

	if (channel > 14) {
		rx_status->band = IEEE80211_BAND_5GHZ;
		rx_status->freq = ieee80211_ofdm_chan_to_freq(
					WF_CHAN_FACTOR_5_G/2, channel);

	} else {
		rx_status->band = IEEE80211_BAND_2GHZ;
		rx_status->freq = ieee80211_dsss_chan_to_freq(channel);
	}

	rx_status->signal = wlc_phy_rssi_compute(wlc->hw->band->pi, rxh);

	/* noise */
	/* qual */
	rx_status->antenna =
		(rxh->PhyRxStatus_0 & PRXS0_RXANT_UPSUBBAND) ? 1 : 0;

	plcp = p->data;

	rspec = brcms_c_compute_rspec(rxh, plcp);
	if (is_mcs_rate(rspec)) {
		rx_status->rate_idx = rspec & RSPEC_RATE_MASK;
		rx_status->flag |= RX_FLAG_HT;
		if (rspec_is40mhz(rspec))
			rx_status->flag |= RX_FLAG_40MHZ;
	} else {
		/* map legacy hw rate to mac80211 bitrate table index */
		switch (rspec2rate(rspec)) {
		case BRCM_RATE_1M:
			rx_status->rate_idx = 0;
			break;
		case BRCM_RATE_2M:
			rx_status->rate_idx = 1;
			break;
		case BRCM_RATE_5M5:
			rx_status->rate_idx = 2;
			break;
		case BRCM_RATE_11M:
			rx_status->rate_idx = 3;
			break;
		case BRCM_RATE_6M:
			rx_status->rate_idx = 4;
			break;
		case BRCM_RATE_9M:
			rx_status->rate_idx = 5;
			break;
		case BRCM_RATE_12M:
			rx_status->rate_idx = 6;
			break;
		case BRCM_RATE_18M:
			rx_status->rate_idx = 7;
			break;
		case BRCM_RATE_24M:
			rx_status->rate_idx = 8;
			break;
		case BRCM_RATE_36M:
			rx_status->rate_idx = 9;
			break;
		case BRCM_RATE_48M:
			rx_status->rate_idx = 10;
			break;
		case BRCM_RATE_54M:
			rx_status->rate_idx = 11;
			break;
		default:
			/* rate_idx left unset; rspec did not match a
			 * known legacy rate
			 */
			wiphy_err(wlc->wiphy, "%s: Unknown rate\n", __func__);
		}

		/*
		 * For 5GHz, we should decrease the index as it is
		 * a subset of the 2.4G rates. See bitrates field
		 * of brcms_band_5GHz_nphy (in mac80211_if.c).
		 */
		if (rx_status->band == IEEE80211_BAND_5GHZ)
			rx_status->rate_idx -= BRCMS_LEGACY_5G_RATE_OFFSET;

		/* Determine short preamble and rate_idx */
		preamble = 0;
		if (is_cck_rate(rspec)) {
			if (rxh->PhyRxStatus_0 & PRXS0_SHORTH)
				rx_status->flag |= RX_FLAG_SHORTPRE;
		} else if (is_ofdm_rate(rspec)) {
			rx_status->flag |= RX_FLAG_SHORTPRE;
		} else {
			wiphy_err(wlc->wiphy, "%s: Unknown modulation\n",
				  __func__);
		}
	}

	if (plcp3_issgi(plcp[3]))
		rx_status->flag |= RX_FLAG_SHORT_GI;

	if (rxh->RxStatus1 & RXS_DECERR) {
		rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC;
		wiphy_err(wlc->wiphy, "%s:  RX_FLAG_FAILED_PLCP_CRC\n",
			  __func__);
	}
	if (rxh->RxStatus1 & RXS_FCSERR) {
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
		wiphy_err(wlc->wiphy, "%s:  RX_FLAG_FAILED_FCS_CRC\n",
			  __func__);
	}
}

/* Strip driver/PHY headers from a received frame and hand it to mac80211. */
static void
brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
		struct sk_buff *p)
{
	int len_mpdu;
	struct ieee80211_rx_status rx_status;
	struct ieee80211_hdr *hdr;

	memset(&rx_status, 0, sizeof(rx_status));
	prep_mac80211_status(wlc, rxh, p, &rx_status);

	/* mac header+body length, exclude CRC and plcp header */
	len_mpdu = p->len - D11_PHY_HDR_LEN - FCS_LEN;
	skb_pull(p, D11_PHY_HDR_LEN);
	__skb_trim(p, len_mpdu);

	/* unmute transmit */
	if (wlc->hw->suspended_fifos) {
		hdr = (struct ieee80211_hdr *)p->data;
		if (ieee80211_is_beacon(hdr->frame_control))
			brcms_b_mute(wlc->hw, false);
	}

	memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status));
	ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p);
}

/* calculate frame duration for Mixed-mode L-SIG spoofing, return
 * number of bytes goes in the length field
 *
 * Formula given by HT PHY Spec v 1.13
 *   len = 3(nsyms + nstream + 3) - 3
 */
u16
brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec,
		      uint mac_len)
{
	uint nsyms, len = 0, kNdps;

	BCMMSG(wlc->wiphy, "wl%d: rate %d, len%d\n",
		 wlc->pub->unit, rspec2rate(ratespec), mac_len);

	if (is_mcs_rate(ratespec)) {
		uint mcs = ratespec & RSPEC_RATE_MASK;
		int tot_streams = (mcs_2_txstreams(mcs) + 1) +
				  rspec_stc(ratespec);

		/*
		 * the payload duration calculation matches that
		 * of regular ofdm
		 */
		/* 1000Ndbps = kbps * 4 */
		kNdps = mcs_2_rate(mcs, rspec_is40mhz(ratespec),
				   rspec_issgi(ratespec)) * 4;

		if (rspec_stc(ratespec) == 0)
			nsyms =
			    CEIL((APHY_SERVICE_NBITS + 8 * mac_len +
				  APHY_TAIL_NBITS) * 1000, kNdps);
		else
			/* STBC needs to have even number of symbols */
			nsyms =
			    2 *
			    CEIL((APHY_SERVICE_NBITS + 8 * mac_len +
				  APHY_TAIL_NBITS) * 1000, 2 * kNdps);

		/* (+3) account for HT-SIG(2) and HT-STF(1) */
		nsyms += (tot_streams + 3);
		/*
		 * 3 bytes/symbol @ legacy 6Mbps rate
		 * (-3) excluding service bits and tail bits
		 */
		len = (3 * nsyms) - 3;
	}

	return (u16) len;
}

/* Recompute the per-rate Probe Response PLCP and duration entries in the
 * MAC core's shared-memory rate table for the given frame length.
 */
static void
brcms_c_mod_prb_rsp_rate_table(struct brcms_c_info *wlc, uint frame_len)
{
	const struct brcms_c_rateset *rs_dflt;
	struct brcms_c_rateset rs;
	u8 rate;
	u16 entry_ptr;
	u8 plcp[D11_PHY_HDR_LEN];
	u16 dur, sifs;
	uint i;

	sifs = get_sifs(wlc->band);

	rs_dflt = brcms_c_rateset_get_hwrs(wlc);

	brcms_c_rateset_copy(rs_dflt, &rs);
	brcms_c_rateset_mcs_upd(&rs, wlc->stf->txstreams);

	/*
	 * walk the phy rate table and update MAC core SHM
	 * basic rate table entries
	 */
	for (i = 0; i < rs.count; i++) {
		rate = rs.rates[i] & BRCMS_RATE_MASK;

		entry_ptr = brcms_b_rate_shm_offset(wlc->hw, rate);

		/* Calculate the Probe Response PLCP for the given rate */
		brcms_c_compute_plcp(wlc, rate, frame_len, plcp);

		/*
		 * Calculate the duration of the Probe Response
		 * frame plus SIFS for the MAC
		 */
		dur = (u16) brcms_c_calc_frame_time(wlc, rate,
						BRCMS_LONG_PREAMBLE, frame_len);
		dur += sifs;

		/* Update the SHM Rate Table entry Probe Response values */
		brcms_b_write_shm(wlc->hw, entry_ptr + M_RT_PRS_PLCP_POS,
			      (u16) (plcp[0] + (plcp[1] << 8)));
		brcms_b_write_shm(wlc->hw, entry_ptr + M_RT_PRS_PLCP_POS + 2,
			      (u16) (plcp[2] + (plcp[3] << 8)));
		brcms_b_write_shm(wlc->hw, entry_ptr + M_RT_PRS_DUR_POS, dur);
	}
}

/*	Max buffering needed for beacon template/prb resp template is 142 bytes.
 *
 *	PLCP header is 6 bytes.
 *	802.11 A3 header is 24 bytes.
 *	Max beacon frame body template length is 112 bytes.
 *	Max probe resp frame body template length is 110 bytes.
 *
 *      *len on input contains the max length of the packet available.
 *
 *	The *len value is set to the number of bytes in buf used, and starts
 *	with the PLCP and included up to, but not including, the 4 byte FCS.
 */
static void
brcms_c_bcn_prb_template(struct brcms_c_info *wlc, u16 type,
			 u32 bcn_rspec,
			 struct brcms_bss_cfg *cfg, u16 *buf, int *len)
{
	static const u8 ether_bcast[ETH_ALEN] = {255, 255, 255, 255, 255, 255};
	struct cck_phy_hdr *plcp;
	struct ieee80211_mgmt *h;
	int hdr_len, body_len;

	hdr_len = D11_PHY_HDR_LEN + DOT11_MAC_HDR_LEN;

	/* calc buffer size provided for frame body */
	body_len = *len - hdr_len;
	/* return actual size */
	*len = hdr_len + body_len;

	/* format PHY and MAC headers */
	memset((char *)buf, 0, hdr_len);

	plcp = (struct cck_phy_hdr *) buf;

	/*
	 * PLCP for Probe Response frames are filled in from
	 * core's rate table
	 */
	if (type == IEEE80211_STYPE_BEACON)
		/* fill in PLCP */
		brcms_c_compute_plcp(wlc, bcn_rspec,
				 (DOT11_MAC_HDR_LEN + body_len + FCS_LEN),
				 (u8 *) plcp);

	/* "Regular" and 16 MBSS but not for 4 MBSS */
	/* Update the phytxctl for the beacon based on the rspec */
	brcms_c_beacon_phytxctl_txant_upd(wlc, bcn_rspec);

	h = (struct ieee80211_mgmt *)&plcp[1];

	/* fill in 802.11 header */
	h->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | type);

	/* DUR is 0 for multicast bcn, or filled in by MAC for prb resp */
	/* A1 filled in by MAC for prb resp, broadcast for bcn */
	if (type == IEEE80211_STYPE_BEACON)
		memcpy(&h->da, &ether_bcast, ETH_ALEN);

	memcpy(&h->sa, &cfg->cur_etheraddr, ETH_ALEN);
	memcpy(&h->bssid, &cfg->BSSID, ETH_ALEN);

	/* SEQ filled in by MAC */
}

/* Total headroom (tx descriptor + PHY header) the driver needs in an skb. */
int brcms_c_get_header_len(void)
{
	return TXOFF;
}

/*
 * Update all beacons for the system.
 */
void brcms_c_update_beacon(struct brcms_c_info *wlc)
{
	struct brcms_bss_cfg *bsscfg = wlc->bsscfg;

	/* only AP/IBSS-style configs carry locally generated beacons */
	if (bsscfg->up && !bsscfg->BSS)
		/* Clear the soft intmask */
		wlc->defmacintmask &= ~MI_BCNTPL;
}

/* Write ssid into shared memory */
static void
brcms_c_shm_ssid_upd(struct brcms_c_info *wlc, struct brcms_bss_cfg *cfg)
{
	u8 *ssidptr = cfg->SSID;
	u16 base = M_SSID;
	u8 ssidbuf[IEEE80211_MAX_SSID_LEN];

	/* padding the ssid with zero and copy it into shm */
	memset(ssidbuf, 0, IEEE80211_MAX_SSID_LEN);
	memcpy(ssidbuf, ssidptr, cfg->SSID_len);

	brcms_c_copyto_shm(wlc, base, ssidbuf, IEEE80211_MAX_SSID_LEN);
	brcms_b_write_shm(wlc->hw, M_SSIDLEN, (u16) cfg->SSID_len);
}

/* Build the probe response template, push it and the SSID into the MAC's
 * template RAM / shared memory, and refresh the per-rate PLCP+duration
 * table. Optionally suspends the MAC around the hardware writes.
 */
static void
brcms_c_bss_update_probe_resp(struct brcms_c_info *wlc,
			      struct brcms_bss_cfg *cfg,
			      bool suspend)
{
	u16 prb_resp[BCN_TMPL_LEN / 2];
	int len = BCN_TMPL_LEN;

	/*
	 * write the probe response to hardware, or save in
	 * the config structure
	 */

	/* create the probe response template */
	brcms_c_bcn_prb_template(wlc, IEEE80211_STYPE_PROBE_RESP, 0,
				 cfg, prb_resp, &len);

	if (suspend)
		brcms_c_suspend_mac_and_wait(wlc);

	/* write the probe response into the template region */
	brcms_b_write_template_ram(wlc->hw, T_PRS_TPL_BASE,
				    (len + 3) & ~3, prb_resp);

	/* write the length of the probe response frame (+PLCP/-FCS) */
	brcms_b_write_shm(wlc->hw, M_PRB_RESP_FRM_LEN, (u16) len);

	/* write the SSID and SSID length */
	brcms_c_shm_ssid_upd(wlc, cfg);

	/*
	 * Write PLCP headers and durations for probe response frames
	 * at all rates. Use the actual frame length covered by the
	 * PLCP header for the call to brcms_c_mod_prb_rsp_rate_table()
	 * by subtracting the PLCP len and adding the FCS.
	 */
	len += (-D11_PHY_HDR_LEN + FCS_LEN);
	brcms_c_mod_prb_rsp_rate_table(wlc, (u16) len);

	if (suspend)
		brcms_c_enable_mac(wlc);
}

/* Refresh hardware probe-response state for the active AP/IBSS config. */
void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend)
{
	struct brcms_bss_cfg *bsscfg = wlc->bsscfg;

	/* update AP or IBSS probe responses */
	if (bsscfg->up && !bsscfg->BSS)
		brcms_c_bss_update_probe_resp(wlc, bsscfg, suspend);
}

/* prepares pdu for transmission. returns BCM error codes */
int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu,
		     uint *fifop)
{
	uint fifo;
	struct d11txh *txh;
	struct ieee80211_hdr *h;
	struct scb *scb;

	txh = (struct d11txh *) (pdu->data);
	h = (struct ieee80211_hdr *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);

	/* get the pkt queue info. This was put at brcms_c_sendctl or
	 * brcms_c_send for PDU */
	fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;

	scb = NULL;

	*fifop = fifo;

	/* return if insufficient dma resources */
	if (*wlc->core->txavail[fifo] < MAX_DMA_SEGS) {
		/* Mark precedences related to this FIFO, unsendable */
		/* A fifo is full. Clear precedences related to that FIFO */
		wlc->tx_prec_map &= ~(wlc->fifo2prec_map[fifo]);
		return -EBUSY;
	}
	return 0;
}

/* Report the configured size (in blocks) of a transmit FIFO. */
int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
			   uint *blocks)
{
	if (fifo >= NFIFO)
		return -EINVAL;

	*blocks = wlc_hw->xmtfifo_sz[fifo];

	return 0;
}

/* Program a receive address-match register; mirror the BSSID locally when
 * the BSSID match register is the one being written.
 */
void
brcms_c_set_addrmatch(struct brcms_c_info *wlc, int match_reg_offset,
		  const u8 *addr)
{
	brcms_b_set_addrmatch(wlc->hw, match_reg_offset, addr);
	if (match_reg_offset == RCM_BSSID_OFFSET)
		memcpy(wlc->bsscfg->BSSID, addr, ETH_ALEN);
}

/*
 * Flag 'scan in progress' to withhold dynamic phy calibration
 */
void brcms_c_scan_start(struct brcms_c_info *wlc)
{
	wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, true);
}

/* Release the scan hold so dynamic phy calibration may resume. */
void brcms_c_scan_stop(struct brcms_c_info *wlc)
{
	wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, false);
}

/* Record association state in both the public and per-BSS structures. */
void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state)
{
	wlc->pub->associated = state;
	wlc->bsscfg->associated = state;
}

/*
 * When a remote STA/AP is removed by Mac80211, or when it can no longer accept
 * AMPDU traffic, packets pending in hardware have to be invalidated so that
 * when later on hardware releases them, they can be handled appropriately.
*/ void brcms_c_inval_dma_pkts(struct brcms_hardware *hw, struct ieee80211_sta *sta, void (*dma_callback_fn)) { struct dma_pub *dmah; int i; for (i = 0; i < NFIFO; i++) { dmah = hw->di[i]; if (dmah != NULL) dma_walk_packets(dmah, dma_callback_fn, sta); } } int brcms_c_get_curband(struct brcms_c_info *wlc) { return wlc->band->bandunit; } void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop) { int timeout = 20; /* flush packet queue when requested */ if (drop) brcmu_pktq_flush(&wlc->pkt_queue->q, false, NULL, NULL); /* wait for queue and DMA fifos to run dry */ while (!pktq_empty(&wlc->pkt_queue->q) || brcms_txpktpendtot(wlc) > 0) { brcms_msleep(wlc->wl, 1); if (--timeout == 0) break; } WARN_ON_ONCE(timeout == 0); } void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval) { wlc->bcn_li_bcn = interval; if (wlc->pub->up) brcms_c_bcn_li_upd(wlc); } int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr) { uint qdbm; /* Remove override bit and clip to max qdbm value */ qdbm = min_t(uint, txpwr * BRCMS_TXPWR_DB_FACTOR, 0xff); return wlc_phy_txpower_set(wlc->band->pi, qdbm, false); } int brcms_c_get_tx_power(struct brcms_c_info *wlc) { uint qdbm; bool override; wlc_phy_txpower_get(wlc->band->pi, &qdbm, &override); /* Return qdbm units */ return (int)(qdbm / BRCMS_TXPWR_DB_FACTOR); } /* Process received frames */ /* * Return true if more frames need to be processed. false otherwise. * Param 'bound' indicates max. # frames to process before break out. 
*/ static void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p) { struct d11rxhdr *rxh; struct ieee80211_hdr *h; uint len; bool is_amsdu; BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit); /* frame starts with rxhdr */ rxh = (struct d11rxhdr *) (p->data); /* strip off rxhdr */ skb_pull(p, BRCMS_HWRXOFF); /* MAC inserts 2 pad bytes for a4 headers or QoS or A-MSDU subframes */ if (rxh->RxStatus1 & RXS_PBPRES) { if (p->len < 2) { wiphy_err(wlc->wiphy, "wl%d: recv: rcvd runt of " "len %d\n", wlc->pub->unit, p->len); goto toss; } skb_pull(p, 2); } h = (struct ieee80211_hdr *)(p->data + D11_PHY_HDR_LEN); len = p->len; if (rxh->RxStatus1 & RXS_FCSERR) { if (!(wlc->filter_flags & FIF_FCSFAIL)) goto toss; } /* check received pkt has at least frame control field */ if (len < D11_PHY_HDR_LEN + sizeof(h->frame_control)) goto toss; /* not supporting A-MSDU */ is_amsdu = rxh->RxStatus2 & RXS_AMSDU_MASK; if (is_amsdu) goto toss; brcms_c_recvctl(wlc, rxh, p); return; toss: brcmu_pkt_buf_free_skb(p); } /* Process received frames */ /* * Return true if more frames need to be processed. false otherwise. * Param 'bound' indicates max. # frames to process before break out. */ static bool brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound) { struct sk_buff *p; struct sk_buff *next = NULL; struct sk_buff_head recv_frames; uint n = 0; uint bound_limit = bound ? RXBND : -1; BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); skb_queue_head_init(&recv_frames); /* gather received frames */ while (dma_rx(wlc_hw->di[fifo], &recv_frames)) { /* !give others some time to run! 
*/ if (++n >= bound_limit) break; } /* post more rbufs */ dma_rxfill(wlc_hw->di[fifo]); /* process each frame */ skb_queue_walk_safe(&recv_frames, p, next) { struct d11rxhdr_le *rxh_le; struct d11rxhdr *rxh; skb_unlink(p, &recv_frames); rxh_le = (struct d11rxhdr_le *)p->data; rxh = (struct d11rxhdr *)p->data; /* fixup rx header endianness */ rxh->RxFrameSize = le16_to_cpu(rxh_le->RxFrameSize); rxh->PhyRxStatus_0 = le16_to_cpu(rxh_le->PhyRxStatus_0); rxh->PhyRxStatus_1 = le16_to_cpu(rxh_le->PhyRxStatus_1); rxh->PhyRxStatus_2 = le16_to_cpu(rxh_le->PhyRxStatus_2); rxh->PhyRxStatus_3 = le16_to_cpu(rxh_le->PhyRxStatus_3); rxh->PhyRxStatus_4 = le16_to_cpu(rxh_le->PhyRxStatus_4); rxh->PhyRxStatus_5 = le16_to_cpu(rxh_le->PhyRxStatus_5); rxh->RxStatus1 = le16_to_cpu(rxh_le->RxStatus1); rxh->RxStatus2 = le16_to_cpu(rxh_le->RxStatus2); rxh->RxTSFTime = le16_to_cpu(rxh_le->RxTSFTime); rxh->RxChan = le16_to_cpu(rxh_le->RxChan); brcms_c_recv(wlc_hw->wlc, p); } return n >= bound_limit; } /* second-level interrupt processing * Return true if another dpc needs to be re-scheduled. false otherwise. * Param 'bounded' indicates if applicable loops should be bounded. 
*/ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded) { u32 macintstatus; struct brcms_hardware *wlc_hw = wlc->hw; struct bcma_device *core = wlc_hw->d11core; struct wiphy *wiphy = wlc->wiphy; if (brcms_deviceremoved(wlc)) { wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit, __func__); brcms_down(wlc->wl); return false; } /* grab and clear the saved software intstatus bits */ macintstatus = wlc->macintstatus; wlc->macintstatus = 0; BCMMSG(wlc->wiphy, "wl%d: macintstatus 0x%x\n", wlc_hw->unit, macintstatus); WARN_ON(macintstatus & MI_PRQ); /* PRQ Interrupt in non-MBSS */ /* tx status */ if (macintstatus & MI_TFS) { bool fatal; if (brcms_b_txstatus(wlc->hw, bounded, &fatal)) wlc->macintstatus |= MI_TFS; if (fatal) { wiphy_err(wiphy, "MI_TFS: fatal\n"); goto fatal; } } if (macintstatus & (MI_TBTT | MI_DTIM_TBTT)) brcms_c_tbtt(wlc); /* ATIM window end */ if (macintstatus & MI_ATIMWINEND) { BCMMSG(wlc->wiphy, "end of ATIM window\n"); bcma_set32(core, D11REGOFFS(maccommand), wlc->qvalid); wlc->qvalid = 0; } /* * received data or control frame, MI_DMAINT is * indication of RX_FIFO interrupt */ if (macintstatus & MI_DMAINT) if (brcms_b_recv(wlc_hw, RX_FIFO, bounded)) wlc->macintstatus |= MI_DMAINT; /* noise sample collected */ if (macintstatus & MI_BG_NOISE) wlc_phy_noise_sample_intr(wlc_hw->band->pi); if (macintstatus & MI_GP0) { wiphy_err(wiphy, "wl%d: PSM microcode watchdog fired at %d " "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now); printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n", __func__, ai_get_chip_id(wlc_hw->sih), ai_get_chiprev(wlc_hw->sih)); brcms_fatal_error(wlc_hw->wlc->wl); } /* gptimer timeout */ if (macintstatus & MI_TO) bcma_write32(core, D11REGOFFS(gptimer), 0); if (macintstatus & MI_RFDISABLE) { BCMMSG(wlc->wiphy, "wl%d: BMAC Detected a change on the" " RF Disable Input\n", wlc_hw->unit); brcms_rfkill_set_hw_state(wlc->wl); } /* send any enq'd tx packets. 
Just makes sure to jump start tx */ if (!pktq_empty(&wlc->pkt_queue->q)) brcms_c_send_q(wlc); /* it isn't done and needs to be resched if macintstatus is non-zero */ return wlc->macintstatus != 0; fatal: brcms_fatal_error(wlc_hw->wlc->wl); return wlc->macintstatus != 0; } void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx) { struct bcma_device *core = wlc->hw->d11core; u16 chanspec; BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit); /* * This will happen if a big-hammer was executed. In * that case, we want to go back to the channel that * we were on and not new channel */ if (wlc->pub->associated) chanspec = wlc->home_chanspec; else chanspec = brcms_c_init_chanspec(wlc); brcms_b_init(wlc->hw, chanspec); /* update beacon listen interval */ brcms_c_bcn_li_upd(wlc); /* write ethernet address to core */ brcms_c_set_mac(wlc->bsscfg); brcms_c_set_bssid(wlc->bsscfg); /* Update tsf_cfprep if associated and up */ if (wlc->pub->associated && wlc->bsscfg->up) { u32 bi; /* get beacon period and convert to uS */ bi = wlc->bsscfg->current_bss->beacon_period << 10; /* * update since init path would reset * to default value */ bcma_write32(core, D11REGOFFS(tsf_cfprep), bi << CFPREP_CBI_SHIFT); /* Update maccontrol PM related bits */ brcms_c_set_ps_ctrl(wlc); } brcms_c_bandinit_ordered(wlc, chanspec); /* init probe response timeout */ brcms_b_write_shm(wlc->hw, M_PRS_MAXTIME, wlc->prb_resp_timeout); /* init max burst txop (framebursting) */ brcms_b_write_shm(wlc->hw, M_MBURST_TXOP, (wlc-> _rifs ? 
(EDCF_AC_VO_TXOP_AP << 5) : MAXFRAMEBURST_TXOP)); /* initialize maximum allowed duty cycle */ brcms_c_duty_cycle_set(wlc, wlc->tx_duty_cycle_ofdm, true, true); brcms_c_duty_cycle_set(wlc, wlc->tx_duty_cycle_cck, false, true); /* * Update some shared memory locations related to * max AMPDU size allowed to received */ brcms_c_ampdu_shm_upd(wlc->ampdu); /* band-specific inits */ brcms_c_bsinit(wlc); /* Enable EDCF mode (while the MAC is suspended) */ bcma_set16(core, D11REGOFFS(ifs_ctl), IFS_USEEDCF); brcms_c_edcf_setparams(wlc, false); /* Init precedence maps for empty FIFOs */ brcms_c_tx_prec_map_init(wlc); /* read the ucode version if we have not yet done so */ if (wlc->ucode_rev == 0) { wlc->ucode_rev = brcms_b_read_shm(wlc->hw, M_BOM_REV_MAJOR) << NBITS(u16); wlc->ucode_rev |= brcms_b_read_shm(wlc->hw, M_BOM_REV_MINOR); } /* ..now really unleash hell (allow the MAC out of suspend) */ brcms_c_enable_mac(wlc); /* suspend the tx fifos and mute the phy for preism cac time */ if (mute_tx) brcms_b_mute(wlc->hw, true); /* clear tx flow control */ brcms_c_txflowcontrol_reset(wlc); /* enable the RF Disable Delay timer */ bcma_write32(core, D11REGOFFS(rfdisabledly), RFDISABLE_DEFAULT); /* * Initialize WME parameters; if they haven't been set by some other * mechanism (IOVar, etc) then read them from the hardware. */ if (GFIELD(wlc->wme_retries[0], EDCF_SHORT) == 0) { /* Uninitialized; read from HW */ int ac; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) wlc->wme_retries[ac] = brcms_b_read_shm(wlc->hw, M_AC_TXLMT_ADDR(ac)); } } /* * The common driver entry routine. 
Error codes should be unique */ struct brcms_c_info * brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit, bool piomode, uint *perr) { struct brcms_c_info *wlc; uint err = 0; uint i, j; struct brcms_pub *pub; /* allocate struct brcms_c_info state and its substructures */ wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, 0); if (wlc == NULL) goto fail; wlc->wiphy = wl->wiphy; pub = wlc->pub; #if defined(DEBUG) wlc_info_dbg = wlc; #endif wlc->band = wlc->bandstate[0]; wlc->core = wlc->corestate; wlc->wl = wl; pub->unit = unit; pub->_piomode = piomode; wlc->bandinit_pending = false; /* populate struct brcms_c_info with default values */ brcms_c_info_init(wlc, unit); /* update sta/ap related parameters */ brcms_c_ap_upd(wlc); /* * low level attach steps(all hw accesses go * inside, no more in rest of the attach) */ err = brcms_b_attach(wlc, core, unit, piomode); if (err) goto fail; brcms_c_protection_upd(wlc, BRCMS_PROT_N_PAM_OVR, OFF); pub->phy_11ncapable = BRCMS_PHY_11N_CAP(wlc->band); /* disable allowed duty cycle */ wlc->tx_duty_cycle_ofdm = 0; wlc->tx_duty_cycle_cck = 0; brcms_c_stf_phy_chain_calc(wlc); /* txchain 1: txant 0, txchain 2: txant 1 */ if (BRCMS_ISNPHY(wlc->band) && (wlc->stf->txstreams == 1)) wlc->stf->txant = wlc->stf->hw_txchain - 1; /* push to BMAC driver */ wlc_phy_stf_chain_init(wlc->band->pi, wlc->stf->hw_txchain, wlc->stf->hw_rxchain); /* pull up some info resulting from the low attach */ for (i = 0; i < NFIFO; i++) wlc->core->txavail[i] = wlc->hw->txavail[i]; memcpy(&wlc->perm_etheraddr, &wlc->hw->etheraddr, ETH_ALEN); memcpy(&pub->cur_etheraddr, &wlc->hw->etheraddr, ETH_ALEN); for (j = 0; j < wlc->pub->_nbands; j++) { wlc->band = wlc->bandstate[j]; if (!brcms_c_attach_stf_ant_init(wlc)) { err = 24; goto fail; } /* default contention windows size limits */ wlc->band->CWmin = APHY_CWMIN; wlc->band->CWmax = PHY_CWMAX; /* init gmode value */ if (wlc->band->bandtype == BRCM_BAND_2G) { wlc->band->gmode = 
GMODE_AUTO; brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER, wlc->band->gmode); } /* init _n_enab supported mode */ if (BRCMS_PHY_11N_CAP(wlc->band)) { pub->_n_enab = SUPPORT_11N; brcms_c_protection_upd(wlc, BRCMS_PROT_N_USER, ((pub->_n_enab == SUPPORT_11N) ? WL_11N_2x2 : WL_11N_3x3)); } /* init per-band default rateset, depend on band->gmode */ brcms_default_rateset(wlc, &wlc->band->defrateset); /* fill in hw_rateset */ brcms_c_rateset_filter(&wlc->band->defrateset, &wlc->band->hw_rateset, false, BRCMS_RATES_CCK_OFDM, BRCMS_RATE_MASK, (bool) (wlc->pub->_n_enab & SUPPORT_11N)); } /* * update antenna config due to * wlc->stf->txant/txchain/ant_rx_ovr change */ brcms_c_stf_phy_txant_upd(wlc); /* attach each modules */ err = brcms_c_attach_module(wlc); if (err != 0) goto fail; if (!brcms_c_timers_init(wlc, unit)) { wiphy_err(wl->wiphy, "wl%d: %s: init_timer failed\n", unit, __func__); err = 32; goto fail; } /* depend on rateset, gmode */ wlc->cmi = brcms_c_channel_mgr_attach(wlc); if (!wlc->cmi) { wiphy_err(wl->wiphy, "wl%d: %s: channel_mgr_attach failed" "\n", unit, __func__); err = 33; goto fail; } /* init default when all parameters are ready, i.e. ->rateset */ brcms_c_bss_default_init(wlc); /* * Complete the wlc default state initializations.. 
*/ /* allocate our initial queue */ wlc->pkt_queue = brcms_c_txq_alloc(wlc); if (wlc->pkt_queue == NULL) { wiphy_err(wl->wiphy, "wl%d: %s: failed to malloc tx queue\n", unit, __func__); err = 100; goto fail; } wlc->bsscfg->wlc = wlc; wlc->mimoft = FT_HT; wlc->mimo_40txbw = AUTO; wlc->ofdm_40txbw = AUTO; wlc->cck_40txbw = AUTO; brcms_c_update_mimo_band_bwcap(wlc, BRCMS_N_BW_20IN2G_40IN5G); /* Set default values of SGI */ if (BRCMS_SGI_CAP_PHY(wlc)) { brcms_c_ht_update_sgi_rx(wlc, (BRCMS_N_SGI_20 | BRCMS_N_SGI_40)); } else if (BRCMS_ISSSLPNPHY(wlc->band)) { brcms_c_ht_update_sgi_rx(wlc, (BRCMS_N_SGI_20 | BRCMS_N_SGI_40)); } else { brcms_c_ht_update_sgi_rx(wlc, 0); } brcms_b_antsel_set(wlc->hw, wlc->asi->antsel_avail); if (perr) *perr = 0; return wlc; fail: wiphy_err(wl->wiphy, "wl%d: %s: failed with err %d\n", unit, __func__, err); if (wlc) brcms_c_detach(wlc); if (perr) *perr = err; return NULL; }
gpl-2.0
CaptainThrowback/kernel_htc_m8whl_3.30.654.2
drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
5042
4626
/******************************************************************************* This is the driver for the MAC 10/100 on-chip Ethernet controller currently tested on all the ST boards based on STb7109 and stx7200 SoCs. DWC Ether MAC 10/100 Universal version 4.0 has been used for developing this code. This contains the functions to handle the dma. Copyright (C) 2007-2009 STMicroelectronics Ltd This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". 
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ #include <asm/io.h> #include "dwmac100.h" #include "dwmac_dma.h" static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx, u32 dma_rx) { u32 value = readl(ioaddr + DMA_BUS_MODE); int limit; /* DMA SW reset */ value |= DMA_BUS_MODE_SFT_RESET; writel(value, ioaddr + DMA_BUS_MODE); limit = 10; while (limit--) { if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) break; mdelay(10); } if (limit < 0) return -EBUSY; /* Enable Application Access by writing to DMA CSR0 */ writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT), ioaddr + DMA_BUS_MODE); /* Mask interrupts by writing to CSR7 */ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); /* The base address of the RX/TX descriptor lists must be written into * DMA CSR3 and CSR4, respectively. */ writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); return 0; } /* Store and Forward capability is not used at all.. 
* The transmit threshold can be programmed by * setting the TTC bits in the DMA control register.*/ static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode, int rxmode) { u32 csr6 = readl(ioaddr + DMA_CONTROL); if (txmode <= 32) csr6 |= DMA_CONTROL_TTC_32; else if (txmode <= 64) csr6 |= DMA_CONTROL_TTC_64; else csr6 |= DMA_CONTROL_TTC_128; writel(csr6, ioaddr + DMA_CONTROL); } static void dwmac100_dump_dma_regs(void __iomem *ioaddr) { int i; CHIP_DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n"); for (i = 0; i < 9; i++) pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i, (DMA_BUS_MODE + i * 4), readl(ioaddr + DMA_BUS_MODE + i * 4)); CHIP_DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n", DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR)); CHIP_DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n", DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR)); } /* DMA controller has two counters to track the number of * the receive missed frames. */ static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, void __iomem *ioaddr) { struct net_device_stats *stats = (struct net_device_stats *)data; u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR); if (unlikely(csr8)) { if (csr8 & DMA_MISSED_FRAME_OVE) { stats->rx_over_errors += 0x800; x->rx_overflow_cntr += 0x800; } else { unsigned int ove_cntr; ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17); stats->rx_over_errors += ove_cntr; x->rx_overflow_cntr += ove_cntr; } if (csr8 & DMA_MISSED_FRAME_OVE_M) { stats->rx_missed_errors += 0xffff; x->rx_missed_cntr += 0xffff; } else { unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR); stats->rx_missed_errors += miss_f; x->rx_missed_cntr += miss_f; } } } const struct stmmac_dma_ops dwmac100_dma_ops = { .init = dwmac100_dma_init, .dump_regs = dwmac100_dump_dma_regs, .dma_mode = dwmac100_dma_operation_mode, .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr, .enable_dma_transmission = dwmac_enable_dma_transmission, .enable_dma_irq = dwmac_enable_dma_irq, 
.disable_dma_irq = dwmac_disable_dma_irq, .start_tx = dwmac_dma_start_tx, .stop_tx = dwmac_dma_stop_tx, .start_rx = dwmac_dma_start_rx, .stop_rx = dwmac_dma_stop_rx, .dma_interrupt = dwmac_dma_interrupt, };
gpl-2.0
garwynn/android_kernel_samsung_jflte
drivers/input/touchscreen/da9034-ts.c
5042
8860
/* * Touchscreen driver for Dialog Semiconductor DA9034 * * Copyright (C) 2006-2008 Marvell International Ltd. * Fengwei Yin <fengwei.yin@marvell.com> * Bin Yang <bin.yang@marvell.com> * Eric Miao <eric.miao@marvell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/input.h> #include <linux/workqueue.h> #include <linux/mfd/da903x.h> #include <linux/slab.h> #define DA9034_MANUAL_CTRL 0x50 #define DA9034_LDO_ADC_EN (1 << 4) #define DA9034_AUTO_CTRL1 0x51 #define DA9034_AUTO_CTRL2 0x52 #define DA9034_AUTO_TSI_EN (1 << 3) #define DA9034_PEN_DETECT (1 << 4) #define DA9034_TSI_CTRL1 0x53 #define DA9034_TSI_CTRL2 0x54 #define DA9034_TSI_X_MSB 0x6c #define DA9034_TSI_Y_MSB 0x6d #define DA9034_TSI_XY_LSB 0x6e enum { STATE_IDLE, /* wait for pendown */ STATE_BUSY, /* TSI busy sampling */ STATE_STOP, /* sample available */ STATE_WAIT, /* Wait to start next sample */ }; enum { EVENT_PEN_DOWN, EVENT_PEN_UP, EVENT_TSI_READY, EVENT_TIMEDOUT, }; struct da9034_touch { struct device *da9034_dev; struct input_dev *input_dev; struct delayed_work tsi_work; struct notifier_block notifier; int state; int interval_ms; int x_inverted; int y_inverted; int last_x; int last_y; }; static inline int is_pen_down(struct da9034_touch *touch) { return da903x_query_status(touch->da9034_dev, DA9034_STATUS_PEN_DOWN); } static inline int detect_pen_down(struct da9034_touch *touch, int on) { if (on) return da903x_set_bits(touch->da9034_dev, DA9034_AUTO_CTRL2, DA9034_PEN_DETECT); else return da903x_clr_bits(touch->da9034_dev, DA9034_AUTO_CTRL2, DA9034_PEN_DETECT); } static int read_tsi(struct da9034_touch *touch) { uint8_t _x, _y, _v; int ret; ret = da903x_read(touch->da9034_dev, DA9034_TSI_X_MSB, &_x); if (ret) 
return ret; ret = da903x_read(touch->da9034_dev, DA9034_TSI_Y_MSB, &_y); if (ret) return ret; ret = da903x_read(touch->da9034_dev, DA9034_TSI_XY_LSB, &_v); if (ret) return ret; touch->last_x = ((_x << 2) & 0x3fc) | (_v & 0x3); touch->last_y = ((_y << 2) & 0x3fc) | ((_v & 0xc) >> 2); return 0; } static inline int start_tsi(struct da9034_touch *touch) { return da903x_set_bits(touch->da9034_dev, DA9034_AUTO_CTRL2, DA9034_AUTO_TSI_EN); } static inline int stop_tsi(struct da9034_touch *touch) { return da903x_clr_bits(touch->da9034_dev, DA9034_AUTO_CTRL2, DA9034_AUTO_TSI_EN); } static inline void report_pen_down(struct da9034_touch *touch) { int x = touch->last_x; int y = touch->last_y; x &= 0xfff; if (touch->x_inverted) x = 1024 - x; y &= 0xfff; if (touch->y_inverted) y = 1024 - y; input_report_abs(touch->input_dev, ABS_X, x); input_report_abs(touch->input_dev, ABS_Y, y); input_report_key(touch->input_dev, BTN_TOUCH, 1); input_sync(touch->input_dev); } static inline void report_pen_up(struct da9034_touch *touch) { input_report_key(touch->input_dev, BTN_TOUCH, 0); input_sync(touch->input_dev); } static void da9034_event_handler(struct da9034_touch *touch, int event) { int err; switch (touch->state) { case STATE_IDLE: if (event != EVENT_PEN_DOWN) break; /* Enable auto measurement of the TSI, this will * automatically disable pen down detection */ err = start_tsi(touch); if (err) goto err_reset; touch->state = STATE_BUSY; break; case STATE_BUSY: if (event != EVENT_TSI_READY) break; err = read_tsi(touch); if (err) goto err_reset; /* Disable auto measurement of the TSI, so that * pen down status will be available */ err = stop_tsi(touch); if (err) goto err_reset; touch->state = STATE_STOP; /* FIXME: PEN_{UP/DOWN} events are expected to be * available by stopping TSI, but this is found not * always true, delay and simulate such an event * here is more reliable */ mdelay(1); da9034_event_handler(touch, is_pen_down(touch) ? 
EVENT_PEN_DOWN : EVENT_PEN_UP); break; case STATE_STOP: if (event == EVENT_PEN_DOWN) { report_pen_down(touch); schedule_delayed_work(&touch->tsi_work, msecs_to_jiffies(touch->interval_ms)); touch->state = STATE_WAIT; } if (event == EVENT_PEN_UP) { report_pen_up(touch); touch->state = STATE_IDLE; } break; case STATE_WAIT: if (event != EVENT_TIMEDOUT) break; if (is_pen_down(touch)) { start_tsi(touch); touch->state = STATE_BUSY; } else { report_pen_up(touch); touch->state = STATE_IDLE; } break; } return; err_reset: touch->state = STATE_IDLE; stop_tsi(touch); detect_pen_down(touch, 1); } static void da9034_tsi_work(struct work_struct *work) { struct da9034_touch *touch = container_of(work, struct da9034_touch, tsi_work.work); da9034_event_handler(touch, EVENT_TIMEDOUT); } static int da9034_touch_notifier(struct notifier_block *nb, unsigned long event, void *data) { struct da9034_touch *touch = container_of(nb, struct da9034_touch, notifier); if (event & DA9034_EVENT_TSI_READY) da9034_event_handler(touch, EVENT_TSI_READY); if ((event & DA9034_EVENT_PEN_DOWN) && touch->state == STATE_IDLE) da9034_event_handler(touch, EVENT_PEN_DOWN); return 0; } static int da9034_touch_open(struct input_dev *dev) { struct da9034_touch *touch = input_get_drvdata(dev); int ret; ret = da903x_register_notifier(touch->da9034_dev, &touch->notifier, DA9034_EVENT_PEN_DOWN | DA9034_EVENT_TSI_READY); if (ret) return -EBUSY; /* Enable ADC LDO */ ret = da903x_set_bits(touch->da9034_dev, DA9034_MANUAL_CTRL, DA9034_LDO_ADC_EN); if (ret) return ret; /* TSI_DELAY: 3 slots, TSI_SKIP: 3 slots */ ret = da903x_write(touch->da9034_dev, DA9034_TSI_CTRL1, 0x1b); if (ret) return ret; ret = da903x_write(touch->da9034_dev, DA9034_TSI_CTRL2, 0x00); if (ret) return ret; touch->state = STATE_IDLE; detect_pen_down(touch, 1); return 0; } static void da9034_touch_close(struct input_dev *dev) { struct da9034_touch *touch = input_get_drvdata(dev); da903x_unregister_notifier(touch->da9034_dev, &touch->notifier, 
DA9034_EVENT_PEN_DOWN | DA9034_EVENT_TSI_READY); cancel_delayed_work_sync(&touch->tsi_work); touch->state = STATE_IDLE; stop_tsi(touch); detect_pen_down(touch, 0); /* Disable ADC LDO */ da903x_clr_bits(touch->da9034_dev, DA9034_MANUAL_CTRL, DA9034_LDO_ADC_EN); } static int __devinit da9034_touch_probe(struct platform_device *pdev) { struct da9034_touch_pdata *pdata = pdev->dev.platform_data; struct da9034_touch *touch; struct input_dev *input_dev; int ret; touch = kzalloc(sizeof(struct da9034_touch), GFP_KERNEL); if (touch == NULL) { dev_err(&pdev->dev, "failed to allocate driver data\n"); return -ENOMEM; } touch->da9034_dev = pdev->dev.parent; if (pdata) { touch->interval_ms = pdata->interval_ms; touch->x_inverted = pdata->x_inverted; touch->y_inverted = pdata->y_inverted; } else /* fallback into default */ touch->interval_ms = 10; INIT_DELAYED_WORK(&touch->tsi_work, da9034_tsi_work); touch->notifier.notifier_call = da9034_touch_notifier; input_dev = input_allocate_device(); if (!input_dev) { dev_err(&pdev->dev, "failed to allocate input device\n"); ret = -ENOMEM; goto err_free_touch; } input_dev->name = pdev->name; input_dev->open = da9034_touch_open; input_dev->close = da9034_touch_close; input_dev->dev.parent = &pdev->dev; __set_bit(EV_ABS, input_dev->evbit); __set_bit(ABS_X, input_dev->absbit); __set_bit(ABS_Y, input_dev->absbit); input_set_abs_params(input_dev, ABS_X, 0, 1023, 0, 0); input_set_abs_params(input_dev, ABS_Y, 0, 1023, 0, 0); __set_bit(EV_KEY, input_dev->evbit); __set_bit(BTN_TOUCH, input_dev->keybit); touch->input_dev = input_dev; input_set_drvdata(input_dev, touch); ret = input_register_device(input_dev); if (ret) goto err_free_input; platform_set_drvdata(pdev, touch); return 0; err_free_input: input_free_device(input_dev); err_free_touch: kfree(touch); return ret; } static int __devexit da9034_touch_remove(struct platform_device *pdev) { struct da9034_touch *touch = platform_get_drvdata(pdev); input_unregister_device(touch->input_dev); 
kfree(touch); return 0; } static struct platform_driver da9034_touch_driver = { .driver = { .name = "da9034-touch", .owner = THIS_MODULE, }, .probe = da9034_touch_probe, .remove = __devexit_p(da9034_touch_remove), }; module_platform_driver(da9034_touch_driver); MODULE_DESCRIPTION("Touchscreen driver for Dialog Semiconductor DA9034"); MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>, Bin Yang <bin.yang@marvell.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:da9034-touch");
gpl-2.0
zombi-x/that1_kernel_asus_tf700t
arch/mips/pci/ops-titan-ht.c
7858
3429
/* * Copyright 2003 PMC-Sierra * Author: Manish Lachwani (lachwani@pmc-sierra.com) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/delay.h> #include <asm/io.h> #include <asm/titan_dep.h> static int titan_ht_config_read_dword(struct pci_bus *bus, unsigned int devfn, int offset, u32 *val) { volatile uint32_t address; int busno; busno = bus->number; address = (busno << 16) | (devfn << 8) | (offset & 0xfc) | 0x80000000; if (busno != 0) address |= 1; /* * RM9000 HT Errata: Issue back to back HT config * transcations. 
Issue a BIU sync before and * after the HT cycle */ *(volatile int32_t *) 0xfb0000f0 |= 0x2; udelay(30); *(volatile int32_t *) 0xfb0006f8 = address; *(val) = *(volatile int32_t *) 0xfb0006fc; udelay(30); * (volatile int32_t *) 0xfb0000f0 |= 0x2; return PCIBIOS_SUCCESSFUL; } static int titan_ht_config_read(struct pci_bus *bus, unsigned int devfn, int offset, int size, u32 *val) { uint32_t dword; titan_ht_config_read_dword(bus, devfn, offset, &dword); dword >>= ((offset & 3) << 3); dword &= (0xffffffffU >> ((4 - size) << 8)); return PCIBIOS_SUCCESSFUL; } static inline int titan_ht_config_write_dword(struct pci_bus *bus, unsigned int devfn, int offset, u32 val) { volatile uint32_t address; int busno; busno = bus->number; address = (busno << 16) | (devfn << 8) | (offset & 0xfc) | 0x80000000; if (busno != 0) address |= 1; *(volatile int32_t *) 0xfb0000f0 |= 0x2; udelay(30); *(volatile int32_t *) 0xfb0006f8 = address; *(volatile int32_t *) 0xfb0006fc = val; udelay(30); *(volatile int32_t *) 0xfb0000f0 |= 0x2; return PCIBIOS_SUCCESSFUL; } static int titan_ht_config_write(struct pci_bus *bus, unsigned int devfn, int offset, int size, u32 val) { uint32_t val1, val2, mask; titan_ht_config_read_dword(bus, devfn, offset, &val2); val1 = val << ((offset & 3) << 3); mask = ~(0xffffffffU >> ((4 - size) << 8)); val2 &= ~(mask << ((offset & 3) << 8)); titan_ht_config_write_dword(bus, devfn, offset, val1 | val2); return PCIBIOS_SUCCESSFUL; } struct pci_ops titan_ht_pci_ops = { .read = titan_ht_config_read, .write = titan_ht_config_write, };
gpl-2.0
zaclimon/android_kernel_samsung_kylepro
arch/h8300/kernel/gpio.c
9650
3641
/* * linux/arch/h8300/kernel/gpio.c * * Yoshinori Sato <ysato@users.sourceforge.jp> * */ /* * Internal I/O Port Management */ #include <linux/stddef.h> #include <linux/proc_fs.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/fs.h> #include <linux/init.h> #define _(addr) (volatile unsigned char *)(addr) #if defined(CONFIG_H83007) || defined(CONFIG_H83068) #include <asm/regs306x.h> static volatile unsigned char *ddrs[] = { _(P1DDR),_(P2DDR),_(P3DDR),_(P4DDR),_(P5DDR),_(P6DDR), NULL, _(P8DDR),_(P9DDR),_(PADDR),_(PBDDR), }; #define MAX_PORT 11 #endif #if defined(CONFIG_H83002) || defined(CONFIG_H8048) /* Fix me!! */ #include <asm/regs306x.h> static volatile unsigned char *ddrs[] = { _(P1DDR),_(P2DDR),_(P3DDR),_(P4DDR),_(P5DDR),_(P6DDR), NULL, _(P8DDR),_(P9DDR),_(PADDR),_(PBDDR), }; #define MAX_PORT 11 #endif #if defined(CONFIG_H8S2678) #include <asm/regs267x.h> static volatile unsigned char *ddrs[] = { _(P1DDR),_(P2DDR),_(P3DDR),NULL ,_(P5DDR),_(P6DDR), _(P7DDR),_(P8DDR),NULL, _(PADDR),_(PBDDR),_(PCDDR), _(PDDDR),_(PEDDR),_(PFDDR),_(PGDDR),_(PHDDR), _(PADDR),_(PBDDR),_(PCDDR),_(PDDDR),_(PEDDR),_(PFDDR), _(PGDDR),_(PHDDR) }; #define MAX_PORT 17 #endif #undef _ #if !defined(P1DDR) #error Unsuppoted CPU Selection #endif static struct { unsigned char used; unsigned char ddr; } gpio_regs[MAX_PORT]; extern char *_platform_gpio_table(int length); int h8300_reserved_gpio(int port, unsigned int bits) { unsigned char *used; if (port < 0 || port >= MAX_PORT) return -1; used = &(gpio_regs[port].used); if ((*used & bits) != 0) return 0; *used |= bits; return 1; } int h8300_free_gpio(int port, unsigned int bits) { unsigned char *used; if (port < 0 || port >= MAX_PORT) return -1; used = &(gpio_regs[port].used); if ((*used & bits) != bits) return 0; *used &= (~bits); return 1; } int h8300_set_gpio_dir(int port_bit,int dir) { int port = (port_bit >> 8) & 0xff; int bit = port_bit & 0xff; if (ddrs[port] == NULL) return 0; if (gpio_regs[port].used & bit) { if (dir) 
gpio_regs[port].ddr |= bit; else gpio_regs[port].ddr &= ~bit; *ddrs[port] = gpio_regs[port].ddr; return 1; } else return 0; } int h8300_get_gpio_dir(int port_bit) { int port = (port_bit >> 8) & 0xff; int bit = port_bit & 0xff; if (ddrs[port] == NULL) return 0; if (gpio_regs[port].used & bit) { return (gpio_regs[port].ddr & bit) != 0; } else return -1; } #if defined(CONFIG_PROC_FS) static char *port_status(int portno) { static char result[10]; static const char io[2]={'I','O'}; char *rp; int c; unsigned char used,ddr; used = gpio_regs[portno].used; ddr = gpio_regs[portno].ddr; result[8]='\0'; rp = result + 7; for (c = 8; c > 0; c--,rp--,used >>= 1, ddr >>= 1) if (used & 0x01) *rp = io[ ddr & 0x01]; else *rp = '-'; return result; } static int gpio_proc_read(char *buf, char **start, off_t offset, int len, int *unused_i, void *unused_v) { int c,outlen; static const char port_name[]="123456789ABCDEFGH"; outlen = 0; for (c = 0; c < MAX_PORT; c++) { if (ddrs[c] == NULL) continue ; len = sprintf(buf,"P%c: %s\n",port_name[c],port_status(c)); buf += len; outlen += len; } return outlen; } static __init int register_proc(void) { struct proc_dir_entry *proc_gpio; proc_gpio = create_proc_entry("gpio", S_IRUGO, NULL); if (proc_gpio) proc_gpio->read_proc = gpio_proc_read; return proc_gpio != NULL; } __initcall(register_proc); #endif void __init h8300_gpio_init(void) { memcpy(gpio_regs,_platform_gpio_table(sizeof(gpio_regs)),sizeof(gpio_regs)); }
gpl-2.0
mdalexca/marlin
net/x25/x25_out.c
12978
5467
/* * X.25 Packet Layer release 002 * * This is ALPHA test software. This code may break your machine, * randomly fail to work with new releases, misbehave and/or generally * screw up. It might even work. * * This code REQUIRES 2.1.15 or higher * * This module: * This module is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * History * X.25 001 Jonathan Naylor Started coding. * X.25 002 Jonathan Naylor New timer architecture. * 2000-09-04 Henner Eisen Prevented x25_output() skb leakage. * 2000-10-27 Henner Eisen MSG_DONTWAIT for fragment allocation. * 2000-11-10 Henner Eisen x25_send_iframe(): re-queued frames * needed cleaned seq-number fields. */ #include <linux/slab.h> #include <linux/socket.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/x25.h> static int x25_pacsize_to_bytes(unsigned int pacsize) { int bytes = 1; if (!pacsize) return 128; while (pacsize-- > 0) bytes *= 2; return bytes; } /* * This is where all X.25 information frames pass. * * Returns the amount of user data bytes sent on success * or a negative error code on failure. */ int x25_output(struct sock *sk, struct sk_buff *skb) { struct sk_buff *skbn; unsigned char header[X25_EXT_MIN_LEN]; int err, frontlen, len; int sent=0, noblock = X25_SKB_CB(skb)->flags & MSG_DONTWAIT; struct x25_sock *x25 = x25_sk(sk); int header_len = x25->neighbour->extended ? 
X25_EXT_MIN_LEN : X25_STD_MIN_LEN; int max_len = x25_pacsize_to_bytes(x25->facilities.pacsize_out); if (skb->len - header_len > max_len) { /* Save a copy of the Header */ skb_copy_from_linear_data(skb, header, header_len); skb_pull(skb, header_len); frontlen = skb_headroom(skb); while (skb->len > 0) { release_sock(sk); skbn = sock_alloc_send_skb(sk, frontlen + max_len, noblock, &err); lock_sock(sk); if (!skbn) { if (err == -EWOULDBLOCK && noblock){ kfree_skb(skb); return sent; } SOCK_DEBUG(sk, "x25_output: fragment alloc" " failed, err=%d, %d bytes " "sent\n", err, sent); return err; } skb_reserve(skbn, frontlen); len = max_len > skb->len ? skb->len : max_len; /* Copy the user data */ skb_copy_from_linear_data(skb, skb_put(skbn, len), len); skb_pull(skb, len); /* Duplicate the Header */ skb_push(skbn, header_len); skb_copy_to_linear_data(skbn, header, header_len); if (skb->len > 0) { if (x25->neighbour->extended) skbn->data[3] |= X25_EXT_M_BIT; else skbn->data[2] |= X25_STD_M_BIT; } skb_queue_tail(&sk->sk_write_queue, skbn); sent += len; } kfree_skb(skb); } else { skb_queue_tail(&sk->sk_write_queue, skb); sent = skb->len - header_len; } return sent; } /* * This procedure is passed a buffer descriptor for an iframe. It builds * the rest of the control part of the frame and then writes it out. */ static void x25_send_iframe(struct sock *sk, struct sk_buff *skb) { struct x25_sock *x25 = x25_sk(sk); if (!skb) return; if (x25->neighbour->extended) { skb->data[2] = (x25->vs << 1) & 0xFE; skb->data[3] &= X25_EXT_M_BIT; skb->data[3] |= (x25->vr << 1) & 0xFE; } else { skb->data[2] &= X25_STD_M_BIT; skb->data[2] |= (x25->vs << 1) & 0x0E; skb->data[2] |= (x25->vr << 5) & 0xE0; } x25_transmit_link(skb, x25->neighbour); } void x25_kick(struct sock *sk) { struct sk_buff *skb, *skbn; unsigned short start, end; int modulus; struct x25_sock *x25 = x25_sk(sk); if (x25->state != X25_STATE_3) return; /* * Transmit interrupt data. 
*/ if (skb_peek(&x25->interrupt_out_queue) != NULL && !test_and_set_bit(X25_INTERRUPT_FLAG, &x25->flags)) { skb = skb_dequeue(&x25->interrupt_out_queue); x25_transmit_link(skb, x25->neighbour); } if (x25->condition & X25_COND_PEER_RX_BUSY) return; if (!skb_peek(&sk->sk_write_queue)) return; modulus = x25->neighbour->extended ? X25_EMODULUS : X25_SMODULUS; start = skb_peek(&x25->ack_queue) ? x25->vs : x25->va; end = (x25->va + x25->facilities.winsize_out) % modulus; if (start == end) return; x25->vs = start; /* * Transmit data until either we're out of data to send or * the window is full. */ skb = skb_dequeue(&sk->sk_write_queue); do { if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { skb_queue_head(&sk->sk_write_queue, skb); break; } skb_set_owner_w(skbn, sk); /* * Transmit the frame copy. */ x25_send_iframe(sk, skbn); x25->vs = (x25->vs + 1) % modulus; /* * Requeue the original data frame. */ skb_queue_tail(&x25->ack_queue, skb); } while (x25->vs != end && (skb = skb_dequeue(&sk->sk_write_queue)) != NULL); x25->vl = x25->vr; x25->condition &= ~X25_COND_ACK_PENDING; x25_stop_timer(sk); } /* * The following routines are taken from page 170 of the 7th ARRL Computer * Networking Conference paper, as is the whole state machine. */ void x25_enquiry_response(struct sock *sk) { struct x25_sock *x25 = x25_sk(sk); if (x25->condition & X25_COND_OWN_RX_BUSY) x25_write_internal(sk, X25_RNR); else x25_write_internal(sk, X25_RR); x25->vl = x25->vr; x25->condition &= ~X25_COND_ACK_PENDING; x25_stop_timer(sk); }
gpl-2.0
doixanh/semc-kernel-msm7x27
drivers/usb/core/generic.c
179
6922
/* * drivers/usb/generic.c - generic driver for USB devices (not interfaces) * * (C) Copyright 2005 Greg Kroah-Hartman <gregkh@suse.de> * * based on drivers/usb/usb.c which had the following copyrights: * (C) Copyright Linus Torvalds 1999 * (C) Copyright Johannes Erdfelt 1999-2001 * (C) Copyright Andreas Gal 1999 * (C) Copyright Gregory P. Smith 1999 * (C) Copyright Deti Fliegl 1999 (new USB architecture) * (C) Copyright Randy Dunlap 2000 * (C) Copyright David Brownell 2000-2004 * (C) Copyright Yggdrasil Computing, Inc. 2000 * (usb_device_id matching changes by Adam J. Richter) * (C) Copyright Greg Kroah-Hartman 2002-2003 * */ #include <linux/usb.h> #include "usb.h" #include "hcd.h" static inline const char *plural(int n) { return (n == 1 ? "" : "s"); } static int is_rndis(struct usb_interface_descriptor *desc) { return desc->bInterfaceClass == USB_CLASS_COMM && desc->bInterfaceSubClass == 2 && desc->bInterfaceProtocol == 0xff; } static int is_activesync(struct usb_interface_descriptor *desc) { return desc->bInterfaceClass == USB_CLASS_MISC && desc->bInterfaceSubClass == 1 && desc->bInterfaceProtocol == 1; } int usb_choose_configuration(struct usb_device *udev) { int i; int num_configs; int insufficient_power = 0; struct usb_host_config *c, *best; best = NULL; c = udev->config; num_configs = udev->descriptor.bNumConfigurations; for (i = 0; i < num_configs; (i++, c++)) { struct usb_interface_descriptor *desc = NULL; /* It's possible that a config has no interfaces! */ if (c->desc.bNumInterfaces > 0) desc = &c->intf_cache[0]->altsetting->desc; /* * HP's USB bus-powered keyboard has only one configuration * and it claims to be self-powered; other devices may have * similar errors in their descriptors. If the next test * were allowed to execute, such configurations would always * be rejected and the devices would not work as expected. * In the meantime, we run the risk of selecting a config * that requires external power at a time when that power * isn't available. 
It seems to be the lesser of two evils. * * Bugzilla #6448 reports a device that appears to crash * when it receives a GET_DEVICE_STATUS request! We don't * have any other way to tell whether a device is self-powered, * but since we don't use that information anywhere but here, * the call has been removed. * * Maybe the GET_DEVICE_STATUS call and the test below can * be reinstated when device firmwares become more reliable. * Don't hold your breath. */ #if 0 /* Rule out self-powered configs for a bus-powered device */ if (bus_powered && (c->desc.bmAttributes & USB_CONFIG_ATT_SELFPOWER)) continue; #endif /* * The next test may not be as effective as it should be. * Some hubs have errors in their descriptor, claiming * to be self-powered when they are really bus-powered. * We will overestimate the amount of current such hubs * make available for each port. * * This is a fairly benign sort of failure. It won't * cause us to reject configurations that we should have * accepted. */ /* Rule out configs that draw too much bus current */ if (c->desc.bMaxPower * 2 > udev->bus_mA) { insufficient_power++; continue; } /* When the first config's first interface is one of Microsoft's * pet nonstandard Ethernet-over-USB protocols, ignore it unless * this kernel has enabled the necessary host side driver. */ if (i == 0 && desc && (is_rndis(desc) || is_activesync(desc))) { #if !defined(CONFIG_USB_NET_RNDIS_HOST) && !defined(CONFIG_USB_NET_RNDIS_HOST_MODULE) continue; #else best = c; #endif } /* From the remaining configs, choose the first one whose * first interface is for a non-vendor-specific class. * Reason: Linux is more likely to have a class driver * than a vendor-specific driver. */ else if (udev->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC && (!desc || desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC)) { best = c; break; } /* If all the remaining configs are vendor-specific, * choose the first one. 
*/ else if (!best) best = c; } if (insufficient_power > 0) dev_info(&udev->dev, "rejected %d configuration%s " "due to insufficient available bus power\n", insufficient_power, plural(insufficient_power)); if (best) { i = best->desc.bConfigurationValue; dev_info(&udev->dev, "configuration #%d chosen from %d choice%s\n", i, num_configs, plural(num_configs)); } else { i = -1; dev_warn(&udev->dev, "no configuration chosen from %d choice%s\n", num_configs, plural(num_configs)); } return i; } static int generic_probe(struct usb_device *udev) { int err, c; /* Choose and set the configuration. This registers the interfaces * with the driver core and lets interface drivers bind to them. */ if (udev->authorized == 0) dev_err(&udev->dev, "Device is not authorized for usage\n"); else { c = usb_choose_configuration(udev); if (c >= 0) { err = usb_set_configuration(udev, c); if (err) { dev_err(&udev->dev, "can't set config #%d, error %d\n", c, err); /* This need not be fatal. The user can try to * set other configurations. */ } } } /* USB device state == configured ... usable */ usb_notify_add_device(udev); return 0; } static void generic_disconnect(struct usb_device *udev) { usb_notify_remove_device(udev); /* if this is only an unbind, not a physical disconnect, then * unconfigure the device */ if (udev->actconfig) usb_set_configuration(udev, -1); } #ifdef CONFIG_PM static int generic_suspend(struct usb_device *udev, pm_message_t msg) { int rc; /* Normal USB devices suspend through their upstream port. * Root hubs don't have upstream ports to suspend, * so we have to shut down their downstream HC-to-USB * interfaces manually by doing a bus (or "global") suspend. 
*/ if (!udev->parent) rc = hcd_bus_suspend(udev, msg); /* Non-root devices don't need to do anything for FREEZE or PRETHAW */ else if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW) rc = 0; else rc = usb_port_suspend(udev, msg); return rc; } static int generic_resume(struct usb_device *udev, pm_message_t msg) { int rc; /* Normal USB devices resume/reset through their upstream port. * Root hubs don't have upstream ports to resume or reset, * so we have to start up their downstream HC-to-USB * interfaces manually by doing a bus (or "global") resume. */ if (!udev->parent) rc = hcd_bus_resume(udev, msg); else rc = usb_port_resume(udev, msg); return rc; } #endif /* CONFIG_PM */ struct usb_device_driver usb_generic_driver = { .name = "usb", .probe = generic_probe, .disconnect = generic_disconnect, #ifdef CONFIG_PM .suspend = generic_suspend, .resume = generic_resume, #endif .supports_autosuspend = 1, };
gpl-2.0
gauravdatir/linux
arch/arm/mm/init.c
435
18838
/* * linux/arch/arm/mm/init.c * * Copyright (C) 1995-2005 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/swap.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/mman.h> #include <linux/export.h> #include <linux/nodemask.h> #include <linux/initrd.h> #include <linux/of_fdt.h> #include <linux/highmem.h> #include <linux/gfp.h> #include <linux/memblock.h> #include <linux/dma-contiguous.h> #include <linux/sizes.h> #include <asm/cp15.h> #include <asm/mach-types.h> #include <asm/memblock.h> #include <asm/prom.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/system_info.h> #include <asm/tlb.h> #include <asm/fixmap.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include "mm.h" #ifdef CONFIG_CPU_CP15_MMU unsigned long __init __clear_cr(unsigned long mask) { cr_alignment = cr_alignment & ~mask; return cr_alignment; } #endif static phys_addr_t phys_initrd_start __initdata = 0; static unsigned long phys_initrd_size __initdata = 0; static int __init early_initrd(char *p) { phys_addr_t start; unsigned long size; char *endp; start = memparse(p, &endp); if (*endp == ',') { size = memparse(endp + 1, NULL); phys_initrd_start = start; phys_initrd_size = size; } return 0; } early_param("initrd", early_initrd); static int __init parse_tag_initrd(const struct tag *tag) { pr_warn("ATAG_INITRD is deprecated; " "please update your bootloader.\n"); phys_initrd_start = __virt_to_phys(tag->u.initrd.start); phys_initrd_size = tag->u.initrd.size; return 0; } __tagtable(ATAG_INITRD, parse_tag_initrd); static int __init parse_tag_initrd2(const struct tag *tag) { phys_initrd_start = tag->u.initrd.start; phys_initrd_size = tag->u.initrd.size; return 0; } __tagtable(ATAG_INITRD2, parse_tag_initrd2); static void __init find_limits(unsigned 
long *min, unsigned long *max_low, unsigned long *max_high) { *max_low = PFN_DOWN(memblock_get_current_limit()); *min = PFN_UP(memblock_start_of_DRAM()); *max_high = PFN_DOWN(memblock_end_of_DRAM()); } #ifdef CONFIG_ZONE_DMA phys_addr_t arm_dma_zone_size __read_mostly; EXPORT_SYMBOL(arm_dma_zone_size); /* * The DMA mask corresponding to the maximum bus address allocatable * using GFP_DMA. The default here places no restriction on DMA * allocations. This must be the smallest DMA mask in the system, * so a successful GFP_DMA allocation will always satisfy this. */ phys_addr_t arm_dma_limit; unsigned long arm_dma_pfn_limit; static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, unsigned long dma_size) { if (size[0] <= dma_size) return; size[ZONE_NORMAL] = size[0] - dma_size; size[ZONE_DMA] = dma_size; hole[ZONE_NORMAL] = hole[0]; hole[ZONE_DMA] = 0; } #endif void __init setup_dma_zone(const struct machine_desc *mdesc) { #ifdef CONFIG_ZONE_DMA if (mdesc->dma_zone_size) { arm_dma_zone_size = mdesc->dma_zone_size; arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1; } else arm_dma_limit = 0xffffffff; arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT; #endif } static void __init zone_sizes_init(unsigned long min, unsigned long max_low, unsigned long max_high) { unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES]; struct memblock_region *reg; /* * initialise the zones. */ memset(zone_size, 0, sizeof(zone_size)); /* * The memory size has already been determined. If we need * to do anything fancy with the allocation of this memory * to the zones, now is the time to do it. */ zone_size[0] = max_low - min; #ifdef CONFIG_HIGHMEM zone_size[ZONE_HIGHMEM] = max_high - max_low; #endif /* * Calculate the size of the holes. 
* holes = node_size - sum(bank_sizes) */ memcpy(zhole_size, zone_size, sizeof(zhole_size)); for_each_memblock(memory, reg) { unsigned long start = memblock_region_memory_base_pfn(reg); unsigned long end = memblock_region_memory_end_pfn(reg); if (start < max_low) { unsigned long low_end = min(end, max_low); zhole_size[0] -= low_end - start; } #ifdef CONFIG_HIGHMEM if (end > max_low) { unsigned long high_start = max(start, max_low); zhole_size[ZONE_HIGHMEM] -= end - high_start; } #endif } #ifdef CONFIG_ZONE_DMA /* * Adjust the sizes according to any special requirements for * this machine type. */ if (arm_dma_zone_size) arm_adjust_dma_zone(zone_size, zhole_size, arm_dma_zone_size >> PAGE_SHIFT); #endif free_area_init_node(0, zone_size, min, zhole_size); } #ifdef CONFIG_HAVE_ARCH_PFN_VALID int pfn_valid(unsigned long pfn) { return memblock_is_memory(__pfn_to_phys(pfn)); } EXPORT_SYMBOL(pfn_valid); #endif #ifndef CONFIG_SPARSEMEM static void __init arm_memory_present(void) { } #else static void __init arm_memory_present(void) { struct memblock_region *reg; for_each_memblock(memory, reg) memory_present(0, memblock_region_memory_base_pfn(reg), memblock_region_memory_end_pfn(reg)); } #endif static bool arm_memblock_steal_permitted = true; phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align) { phys_addr_t phys; BUG_ON(!arm_memblock_steal_permitted); phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE); memblock_free(phys, size); memblock_remove(phys, size); return phys; } void __init arm_memblock_init(const struct machine_desc *mdesc) { /* Register the kernel text, kernel data and initrd with memblock. 
*/ #ifdef CONFIG_XIP_KERNEL memblock_reserve(__pa(_sdata), _end - _sdata); #else memblock_reserve(__pa(_stext), _end - _stext); #endif #ifdef CONFIG_BLK_DEV_INITRD /* FDT scan will populate initrd_start */ if (initrd_start && !phys_initrd_size) { phys_initrd_start = __virt_to_phys(initrd_start); phys_initrd_size = initrd_end - initrd_start; } initrd_start = initrd_end = 0; if (phys_initrd_size && !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n", (u64)phys_initrd_start, phys_initrd_size); phys_initrd_start = phys_initrd_size = 0; } if (phys_initrd_size && memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n", (u64)phys_initrd_start, phys_initrd_size); phys_initrd_start = phys_initrd_size = 0; } if (phys_initrd_size) { memblock_reserve(phys_initrd_start, phys_initrd_size); /* Now convert initrd to virtual addresses */ initrd_start = __phys_to_virt(phys_initrd_start); initrd_end = initrd_start + phys_initrd_size; } #endif arm_mm_memblock_reserve(); /* reserve any platform specific memblock areas */ if (mdesc->reserve) mdesc->reserve(); early_init_fdt_reserve_self(); early_init_fdt_scan_reserved_mem(); /* reserve memory for DMA contiguous allocations */ dma_contiguous_reserve(arm_dma_limit); arm_memblock_steal_permitted = false; memblock_dump_all(); } void __init bootmem_init(void) { unsigned long min, max_low, max_high; memblock_allow_resize(); max_low = max_high = 0; find_limits(&min, &max_low, &max_high); early_memtest((phys_addr_t)min << PAGE_SHIFT, (phys_addr_t)max_low << PAGE_SHIFT); /* * Sparsemem tries to allocate bootmem in memory_present(), * so must be done after the fixed reservations */ arm_memory_present(); /* * sparse_init() needs the bootmem allocator up and running. 
*/ sparse_init(); /* * Now free the memory - free_area_init_node needs * the sparse mem_map arrays initialized by sparse_init() * for memmap_init_zone(), otherwise all PFNs are invalid. */ zone_sizes_init(min, max_low, max_high); /* * This doesn't seem to be used by the Linux memory manager any * more, but is used by ll_rw_block. If we can get rid of it, we * also get rid of some of the stuff above as well. */ min_low_pfn = min; max_low_pfn = max_low; max_pfn = max_high; } /* * Poison init memory with an undefined instruction (ARM) or a branch to an * undefined instruction (Thumb). */ static inline void poison_init_mem(void *s, size_t count) { u32 *p = (u32 *)s; for (; count != 0; count -= 4) *p++ = 0xe7fddef0; } static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) { struct page *start_pg, *end_pg; phys_addr_t pg, pgend; /* * Convert start_pfn/end_pfn to a struct page pointer. */ start_pg = pfn_to_page(start_pfn - 1) + 1; end_pg = pfn_to_page(end_pfn - 1) + 1; /* * Convert to physical addresses, and * round start upwards and end downwards. */ pg = PAGE_ALIGN(__pa(start_pg)); pgend = __pa(end_pg) & PAGE_MASK; /* * If there are free pages between these, * free the section of the memmap array. */ if (pg < pgend) memblock_free_early(pg, pgend - pg); } /* * The mem_map array can get very big. Free the unused area of the memory map. */ static void __init free_unused_memmap(void) { unsigned long start, prev_end = 0; struct memblock_region *reg; /* * This relies on each bank being in address order. * The banks are sorted previously in bootmem_init(). */ for_each_memblock(memory, reg) { start = memblock_region_memory_base_pfn(reg); #ifdef CONFIG_SPARSEMEM /* * Take care not to free memmap entries that don't exist * due to SPARSEMEM sections which aren't present. 
*/ start = min(start, ALIGN(prev_end, PAGES_PER_SECTION)); #else /* * Align down here since the VM subsystem insists that the * memmap entries are valid from the bank start aligned to * MAX_ORDER_NR_PAGES. */ start = round_down(start, MAX_ORDER_NR_PAGES); #endif /* * If we had a previous bank, and there is a space * between the current bank and the previous, free it. */ if (prev_end && prev_end < start) free_memmap(prev_end, start); /* * Align up here since the VM subsystem insists that the * memmap entries are valid from the bank end aligned to * MAX_ORDER_NR_PAGES. */ prev_end = ALIGN(memblock_region_memory_end_pfn(reg), MAX_ORDER_NR_PAGES); } #ifdef CONFIG_SPARSEMEM if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION)); #endif } #ifdef CONFIG_HIGHMEM static inline void free_area_high(unsigned long pfn, unsigned long end) { for (; pfn < end; pfn++) free_highmem_page(pfn_to_page(pfn)); } #endif static void __init free_highpages(void) { #ifdef CONFIG_HIGHMEM unsigned long max_low = max_low_pfn; struct memblock_region *mem, *res; /* set highmem page free */ for_each_memblock(memory, mem) { unsigned long start = memblock_region_memory_base_pfn(mem); unsigned long end = memblock_region_memory_end_pfn(mem); /* Ignore complete lowmem entries */ if (end <= max_low) continue; /* Truncate partial highmem entries */ if (start < max_low) start = max_low; /* Find and exclude any reserved regions */ for_each_memblock(reserved, res) { unsigned long res_start, res_end; res_start = memblock_region_reserved_base_pfn(res); res_end = memblock_region_reserved_end_pfn(res); if (res_end < start) continue; if (res_start < start) res_start = start; if (res_start > end) res_start = end; if (res_end > end) res_end = end; if (res_start != start) free_area_high(start, res_start); start = res_end; if (start == end) break; } /* And now free anything which remains */ if (start < end) free_area_high(start, end); } #endif } /* * mem_init() marks 
the free areas in the mem_map and tells us how much * memory is free. This is done after various parts of the system have * claimed their memory after the kernel image. */ void __init mem_init(void) { #ifdef CONFIG_HAVE_TCM /* These pointers are filled in on TCM detection */ extern u32 dtcm_end; extern u32 itcm_end; #endif set_max_mapnr(pfn_to_page(max_pfn) - mem_map); /* this will put all unused low memory onto the freelists */ free_unused_memmap(); free_all_bootmem(); #ifdef CONFIG_SA1111 /* now that our DMA memory is actually so designated, we can free it */ free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL); #endif free_highpages(); mem_init_print_info(NULL); #define MLK(b, t) b, t, ((t) - (b)) >> 10 #define MLM(b, t) b, t, ((t) - (b)) >> 20 #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K) pr_notice("Virtual kernel memory layout:\n" " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_HAVE_TCM " DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n" " ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n" #endif " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" #ifdef CONFIG_HIGHMEM " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" #endif #ifdef CONFIG_MODULES " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" #endif " .text : 0x%p" " - 0x%p" " (%4td kB)\n" " .init : 0x%p" " - 0x%p" " (%4td kB)\n" " .data : 0x%p" " - 0x%p" " (%4td kB)\n" " .bss : 0x%p" " - 0x%p" " (%4td kB)\n", MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + (PAGE_SIZE)), #ifdef CONFIG_HAVE_TCM MLK(DTCM_OFFSET, (unsigned long) dtcm_end), MLK(ITCM_OFFSET, (unsigned long) itcm_end), #endif MLK(FIXADDR_START, FIXADDR_END), MLM(VMALLOC_START, VMALLOC_END), MLM(PAGE_OFFSET, (unsigned long)high_memory), #ifdef CONFIG_HIGHMEM MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * (PAGE_SIZE)), #endif #ifdef CONFIG_MODULES MLM(MODULES_VADDR, MODULES_END), #endif MLK_ROUNDUP(_text, _etext), MLK_ROUNDUP(__init_begin, __init_end), MLK_ROUNDUP(_sdata, 
_edata), MLK_ROUNDUP(__bss_start, __bss_stop)); #undef MLK #undef MLM #undef MLK_ROUNDUP /* * Check boundaries twice: Some fundamental inconsistencies can * be detected at build time already. */ #ifdef CONFIG_MMU BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); BUG_ON(TASK_SIZE > MODULES_VADDR); #endif #ifdef CONFIG_HIGHMEM BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); #endif if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { extern int sysctl_overcommit_memory; /* * On a machine this small we won't get * anywhere without overcommit, so turn * it on by default. */ sysctl_overcommit_memory = OVERCOMMIT_ALWAYS; } } #ifdef CONFIG_ARM_KERNMEM_PERMS struct section_perm { unsigned long start; unsigned long end; pmdval_t mask; pmdval_t prot; pmdval_t clear; }; static struct section_perm nx_perms[] = { /* Make pages tables, etc before _stext RW (set NX). */ { .start = PAGE_OFFSET, .end = (unsigned long)_stext, .mask = ~PMD_SECT_XN, .prot = PMD_SECT_XN, }, /* Make init RW (set NX). */ { .start = (unsigned long)__init_begin, .end = (unsigned long)_sdata, .mask = ~PMD_SECT_XN, .prot = PMD_SECT_XN, }, #ifdef CONFIG_DEBUG_RODATA /* Make rodata NX (set RO in ro_perms below). */ { .start = (unsigned long)__start_rodata, .end = (unsigned long)__init_begin, .mask = ~PMD_SECT_XN, .prot = PMD_SECT_XN, }, #endif }; #ifdef CONFIG_DEBUG_RODATA static struct section_perm ro_perms[] = { /* Make kernel code and rodata RX (set RO). */ { .start = (unsigned long)_stext, .end = (unsigned long)__init_begin, #ifdef CONFIG_ARM_LPAE .mask = ~L_PMD_SECT_RDONLY, .prot = L_PMD_SECT_RDONLY, #else .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE), .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE, .clear = PMD_SECT_AP_WRITE, #endif }, }; #endif /* * Updates section permissions only for the current mm (sections are * copied into each mm). During startup, this is the init_mm. 
Is only * safe to be called with preemption disabled, as under stop_machine(). */ static inline void section_update(unsigned long addr, pmdval_t mask, pmdval_t prot) { struct mm_struct *mm; pmd_t *pmd; mm = current->active_mm; pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr); #ifdef CONFIG_ARM_LPAE pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot); #else if (addr & SECTION_SIZE) pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot); else pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot); #endif flush_pmd_entry(pmd); local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE); } /* Make sure extended page tables are in use. */ static inline bool arch_has_strict_perms(void) { if (cpu_architecture() < CPU_ARCH_ARMv6) return false; return !!(get_cr() & CR_XP); } #define set_section_perms(perms, field) { \ size_t i; \ unsigned long addr; \ \ if (!arch_has_strict_perms()) \ return; \ \ for (i = 0; i < ARRAY_SIZE(perms); i++) { \ if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || \ !IS_ALIGNED(perms[i].end, SECTION_SIZE)) { \ pr_err("BUG: section %lx-%lx not aligned to %lx\n", \ perms[i].start, perms[i].end, \ SECTION_SIZE); \ continue; \ } \ \ for (addr = perms[i].start; \ addr < perms[i].end; \ addr += SECTION_SIZE) \ section_update(addr, perms[i].mask, \ perms[i].field); \ } \ } static inline void fix_kernmem_perms(void) { set_section_perms(nx_perms, prot); } #ifdef CONFIG_DEBUG_RODATA void mark_rodata_ro(void) { set_section_perms(ro_perms, prot); } void set_kernel_text_rw(void) { set_section_perms(ro_perms, clear); } void set_kernel_text_ro(void) { set_section_perms(ro_perms, prot); } #endif /* CONFIG_DEBUG_RODATA */ #else static inline void fix_kernmem_perms(void) { } #endif /* CONFIG_ARM_KERNMEM_PERMS */ void free_tcmmem(void) { #ifdef CONFIG_HAVE_TCM extern char __tcm_start, __tcm_end; poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link"); #endif } void free_initmem(void) { fix_kernmem_perms(); 
free_tcmmem(); poison_init_mem(__init_begin, __init_end - __init_begin); if (!machine_is_integrator() && !machine_is_cintegrator()) free_initmem_default(-1); } #ifdef CONFIG_BLK_DEV_INITRD static int keep_initrd; void free_initrd_mem(unsigned long start, unsigned long end) { if (!keep_initrd) { if (start == initrd_start) start = round_down(start, PAGE_SIZE); if (end == initrd_end) end = round_up(end, PAGE_SIZE); poison_init_mem((void *)start, PAGE_ALIGN(end) - start); free_reserved_area((void *)start, (void *)end, -1, "initrd"); } } static int __init keepinitrd_setup(char *__unused) { keep_initrd = 1; return 1; } __setup("keepinitrd", keepinitrd_setup); #endif
gpl-2.0
OpenClovis/linux_tipc
fs/exofs/dir.c
691
16992
/* * Copyright (C) 2005, 2006 * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 * Boaz Harrosh <bharrosh@panasas.com> * * Copyrights for code taken from ext2: * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * from * linux/fs/minix/inode.c * Copyright (C) 1991, 1992 Linus Torvalds * * This file is part of exofs. * * exofs is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. Since it is based on ext2, and the only * valid version of GPL for the Linux kernel is version 2, the only valid * version of GPL for exofs is version 2. * * exofs is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with exofs; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "exofs.h" static inline unsigned exofs_chunk_size(struct inode *inode) { return inode->i_sb->s_blocksize; } static inline void exofs_put_page(struct page *page) { kunmap(page); page_cache_release(page); } /* Accesses dir's inode->i_size must be called under inode lock */ static inline unsigned long dir_pages(struct inode *inode) { return (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; } static unsigned exofs_last_byte(struct inode *inode, unsigned long page_nr) { loff_t last_byte = inode->i_size; last_byte -= page_nr << PAGE_CACHE_SHIFT; if (last_byte > PAGE_CACHE_SIZE) last_byte = PAGE_CACHE_SIZE; return last_byte; } static int exofs_commit_chunk(struct page *page, loff_t pos, unsigned len) { struct address_space *mapping = page->mapping; struct inode *dir = 
mapping->host; int err = 0; dir->i_version++; if (!PageUptodate(page)) SetPageUptodate(page); if (pos+len > dir->i_size) { i_size_write(dir, pos+len); mark_inode_dirty(dir); } set_page_dirty(page); if (IS_DIRSYNC(dir)) err = write_one_page(page, 1); else unlock_page(page); return err; } static void exofs_check_page(struct page *page) { struct inode *dir = page->mapping->host; unsigned chunk_size = exofs_chunk_size(dir); char *kaddr = page_address(page); unsigned offs, rec_len; unsigned limit = PAGE_CACHE_SIZE; struct exofs_dir_entry *p; char *error; /* if the page is the last one in the directory */ if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { limit = dir->i_size & ~PAGE_CACHE_MASK; if (limit & (chunk_size - 1)) goto Ebadsize; if (!limit) goto out; } for (offs = 0; offs <= limit - EXOFS_DIR_REC_LEN(1); offs += rec_len) { p = (struct exofs_dir_entry *)(kaddr + offs); rec_len = le16_to_cpu(p->rec_len); if (rec_len < EXOFS_DIR_REC_LEN(1)) goto Eshort; if (rec_len & 3) goto Ealign; if (rec_len < EXOFS_DIR_REC_LEN(p->name_len)) goto Enamelen; if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1)) goto Espan; } if (offs != limit) goto Eend; out: SetPageChecked(page); return; Ebadsize: EXOFS_ERR("ERROR [exofs_check_page]: " "size of directory(0x%lx) is not a multiple of chunk size\n", dir->i_ino ); goto fail; Eshort: error = "rec_len is smaller than minimal"; goto bad_entry; Ealign: error = "unaligned directory entry"; goto bad_entry; Enamelen: error = "rec_len is too small for name_len"; goto bad_entry; Espan: error = "directory entry across blocks"; goto bad_entry; bad_entry: EXOFS_ERR( "ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - " "offset=%lu, inode=0x%llu, rec_len=%d, name_len=%d\n", dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs, _LLU(le64_to_cpu(p->inode_no)), rec_len, p->name_len); goto fail; Eend: p = (struct exofs_dir_entry *)(kaddr + offs); EXOFS_ERR("ERROR [exofs_check_page]: " "entry in directory(0x%lx) spans the page 
boundary" "offset=%lu, inode=0x%llx\n", dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs, _LLU(le64_to_cpu(p->inode_no))); fail: SetPageChecked(page); SetPageError(page); } static struct page *exofs_get_page(struct inode *dir, unsigned long n) { struct address_space *mapping = dir->i_mapping; struct page *page = read_mapping_page(mapping, n, NULL); if (!IS_ERR(page)) { kmap(page); if (!PageChecked(page)) exofs_check_page(page); if (PageError(page)) goto fail; } return page; fail: exofs_put_page(page); return ERR_PTR(-EIO); } static inline int exofs_match(int len, const unsigned char *name, struct exofs_dir_entry *de) { if (len != de->name_len) return 0; if (!de->inode_no) return 0; return !memcmp(name, de->name, len); } static inline struct exofs_dir_entry *exofs_next_entry(struct exofs_dir_entry *p) { return (struct exofs_dir_entry *)((char *)p + le16_to_cpu(p->rec_len)); } static inline unsigned exofs_validate_entry(char *base, unsigned offset, unsigned mask) { struct exofs_dir_entry *de = (struct exofs_dir_entry *)(base + offset); struct exofs_dir_entry *p = (struct exofs_dir_entry *)(base + (offset&mask)); while ((char *)p < (char *)de) { if (p->rec_len == 0) break; p = exofs_next_entry(p); } return (char *)p - base; } static unsigned char exofs_filetype_table[EXOFS_FT_MAX] = { [EXOFS_FT_UNKNOWN] = DT_UNKNOWN, [EXOFS_FT_REG_FILE] = DT_REG, [EXOFS_FT_DIR] = DT_DIR, [EXOFS_FT_CHRDEV] = DT_CHR, [EXOFS_FT_BLKDEV] = DT_BLK, [EXOFS_FT_FIFO] = DT_FIFO, [EXOFS_FT_SOCK] = DT_SOCK, [EXOFS_FT_SYMLINK] = DT_LNK, }; #define S_SHIFT 12 static unsigned char exofs_type_by_mode[S_IFMT >> S_SHIFT] = { [S_IFREG >> S_SHIFT] = EXOFS_FT_REG_FILE, [S_IFDIR >> S_SHIFT] = EXOFS_FT_DIR, [S_IFCHR >> S_SHIFT] = EXOFS_FT_CHRDEV, [S_IFBLK >> S_SHIFT] = EXOFS_FT_BLKDEV, [S_IFIFO >> S_SHIFT] = EXOFS_FT_FIFO, [S_IFSOCK >> S_SHIFT] = EXOFS_FT_SOCK, [S_IFLNK >> S_SHIFT] = EXOFS_FT_SYMLINK, }; static inline void exofs_set_de_type(struct exofs_dir_entry *de, struct inode *inode) { umode_t mode = 
inode->i_mode; de->file_type = exofs_type_by_mode[(mode & S_IFMT) >> S_SHIFT]; } static int exofs_readdir(struct file *file, struct dir_context *ctx) { loff_t pos = ctx->pos; struct inode *inode = file_inode(file); unsigned int offset = pos & ~PAGE_CACHE_MASK; unsigned long n = pos >> PAGE_CACHE_SHIFT; unsigned long npages = dir_pages(inode); unsigned chunk_mask = ~(exofs_chunk_size(inode)-1); int need_revalidate = (file->f_version != inode->i_version); if (pos > inode->i_size - EXOFS_DIR_REC_LEN(1)) return 0; for ( ; n < npages; n++, offset = 0) { char *kaddr, *limit; struct exofs_dir_entry *de; struct page *page = exofs_get_page(inode, n); if (IS_ERR(page)) { EXOFS_ERR("ERROR: bad page in directory(0x%lx)\n", inode->i_ino); ctx->pos += PAGE_CACHE_SIZE - offset; return PTR_ERR(page); } kaddr = page_address(page); if (unlikely(need_revalidate)) { if (offset) { offset = exofs_validate_entry(kaddr, offset, chunk_mask); ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset; } file->f_version = inode->i_version; need_revalidate = 0; } de = (struct exofs_dir_entry *)(kaddr + offset); limit = kaddr + exofs_last_byte(inode, n) - EXOFS_DIR_REC_LEN(1); for (; (char *)de <= limit; de = exofs_next_entry(de)) { if (de->rec_len == 0) { EXOFS_ERR("ERROR: " "zero-length entry in directory(0x%lx)\n", inode->i_ino); exofs_put_page(page); return -EIO; } if (de->inode_no) { unsigned char t; if (de->file_type < EXOFS_FT_MAX) t = exofs_filetype_table[de->file_type]; else t = DT_UNKNOWN; if (!dir_emit(ctx, de->name, de->name_len, le64_to_cpu(de->inode_no), t)) { exofs_put_page(page); return 0; } } ctx->pos += le16_to_cpu(de->rec_len); } exofs_put_page(page); } return 0; } struct exofs_dir_entry *exofs_find_entry(struct inode *dir, struct dentry *dentry, struct page **res_page) { const unsigned char *name = dentry->d_name.name; int namelen = dentry->d_name.len; unsigned reclen = EXOFS_DIR_REC_LEN(namelen); unsigned long start, n; unsigned long npages = dir_pages(dir); struct page *page = NULL; 
struct exofs_i_info *oi = exofs_i(dir); struct exofs_dir_entry *de; if (npages == 0) goto out; *res_page = NULL; start = oi->i_dir_start_lookup; if (start >= npages) start = 0; n = start; do { char *kaddr; page = exofs_get_page(dir, n); if (!IS_ERR(page)) { kaddr = page_address(page); de = (struct exofs_dir_entry *) kaddr; kaddr += exofs_last_byte(dir, n) - reclen; while ((char *) de <= kaddr) { if (de->rec_len == 0) { EXOFS_ERR("ERROR: zero-length entry in " "directory(0x%lx)\n", dir->i_ino); exofs_put_page(page); goto out; } if (exofs_match(namelen, name, de)) goto found; de = exofs_next_entry(de); } exofs_put_page(page); } if (++n >= npages) n = 0; } while (n != start); out: return NULL; found: *res_page = page; oi->i_dir_start_lookup = n; return de; } struct exofs_dir_entry *exofs_dotdot(struct inode *dir, struct page **p) { struct page *page = exofs_get_page(dir, 0); struct exofs_dir_entry *de = NULL; if (!IS_ERR(page)) { de = exofs_next_entry( (struct exofs_dir_entry *)page_address(page)); *p = page; } return de; } ino_t exofs_parent_ino(struct dentry *child) { struct page *page; struct exofs_dir_entry *de; ino_t ino; de = exofs_dotdot(child->d_inode, &page); if (!de) return 0; ino = le64_to_cpu(de->inode_no); exofs_put_page(page); return ino; } ino_t exofs_inode_by_name(struct inode *dir, struct dentry *dentry) { ino_t res = 0; struct exofs_dir_entry *de; struct page *page; de = exofs_find_entry(dir, dentry, &page); if (de) { res = le64_to_cpu(de->inode_no); exofs_put_page(page); } return res; } int exofs_set_link(struct inode *dir, struct exofs_dir_entry *de, struct page *page, struct inode *inode) { loff_t pos = page_offset(page) + (char *) de - (char *) page_address(page); unsigned len = le16_to_cpu(de->rec_len); int err; lock_page(page); err = exofs_write_begin(NULL, page->mapping, pos, len, AOP_FLAG_UNINTERRUPTIBLE, &page, NULL); if (err) EXOFS_ERR("exofs_set_link: exofs_write_begin FAILED => %d\n", err); de->inode_no = cpu_to_le64(inode->i_ino); 
exofs_set_de_type(de, inode); if (likely(!err)) err = exofs_commit_chunk(page, pos, len); exofs_put_page(page); dir->i_mtime = dir->i_ctime = CURRENT_TIME; mark_inode_dirty(dir); return err; } int exofs_add_link(struct dentry *dentry, struct inode *inode) { struct inode *dir = dentry->d_parent->d_inode; const unsigned char *name = dentry->d_name.name; int namelen = dentry->d_name.len; unsigned chunk_size = exofs_chunk_size(dir); unsigned reclen = EXOFS_DIR_REC_LEN(namelen); unsigned short rec_len, name_len; struct page *page = NULL; struct exofs_sb_info *sbi = inode->i_sb->s_fs_info; struct exofs_dir_entry *de; unsigned long npages = dir_pages(dir); unsigned long n; char *kaddr; loff_t pos; int err; for (n = 0; n <= npages; n++) { char *dir_end; page = exofs_get_page(dir, n); err = PTR_ERR(page); if (IS_ERR(page)) goto out; lock_page(page); kaddr = page_address(page); dir_end = kaddr + exofs_last_byte(dir, n); de = (struct exofs_dir_entry *)kaddr; kaddr += PAGE_CACHE_SIZE - reclen; while ((char *)de <= kaddr) { if ((char *)de == dir_end) { name_len = 0; rec_len = chunk_size; de->rec_len = cpu_to_le16(chunk_size); de->inode_no = 0; goto got_it; } if (de->rec_len == 0) { EXOFS_ERR("ERROR: exofs_add_link: " "zero-length entry in directory(0x%lx)\n", inode->i_ino); err = -EIO; goto out_unlock; } err = -EEXIST; if (exofs_match(namelen, name, de)) goto out_unlock; name_len = EXOFS_DIR_REC_LEN(de->name_len); rec_len = le16_to_cpu(de->rec_len); if (!de->inode_no && rec_len >= reclen) goto got_it; if (rec_len >= name_len + reclen) goto got_it; de = (struct exofs_dir_entry *) ((char *) de + rec_len); } unlock_page(page); exofs_put_page(page); } EXOFS_ERR("exofs_add_link: BAD dentry=%p or inode=0x%lx\n", dentry, inode->i_ino); return -EINVAL; got_it: pos = page_offset(page) + (char *)de - (char *)page_address(page); err = exofs_write_begin(NULL, page->mapping, pos, rec_len, 0, &page, NULL); if (err) goto out_unlock; if (de->inode_no) { struct exofs_dir_entry *de1 = (struct 
exofs_dir_entry *)((char *)de + name_len); de1->rec_len = cpu_to_le16(rec_len - name_len); de->rec_len = cpu_to_le16(name_len); de = de1; } de->name_len = namelen; memcpy(de->name, name, namelen); de->inode_no = cpu_to_le64(inode->i_ino); exofs_set_de_type(de, inode); err = exofs_commit_chunk(page, pos, rec_len); dir->i_mtime = dir->i_ctime = CURRENT_TIME; mark_inode_dirty(dir); sbi->s_numfiles++; out_put: exofs_put_page(page); out: return err; out_unlock: unlock_page(page); goto out_put; } int exofs_delete_entry(struct exofs_dir_entry *dir, struct page *page) { struct address_space *mapping = page->mapping; struct inode *inode = mapping->host; struct exofs_sb_info *sbi = inode->i_sb->s_fs_info; char *kaddr = page_address(page); unsigned from = ((char *)dir - kaddr) & ~(exofs_chunk_size(inode)-1); unsigned to = ((char *)dir - kaddr) + le16_to_cpu(dir->rec_len); loff_t pos; struct exofs_dir_entry *pde = NULL; struct exofs_dir_entry *de = (struct exofs_dir_entry *) (kaddr + from); int err; while (de < dir) { if (de->rec_len == 0) { EXOFS_ERR("ERROR: exofs_delete_entry:" "zero-length entry in directory(0x%lx)\n", inode->i_ino); err = -EIO; goto out; } pde = de; de = exofs_next_entry(de); } if (pde) from = (char *)pde - (char *)page_address(page); pos = page_offset(page) + from; lock_page(page); err = exofs_write_begin(NULL, page->mapping, pos, to - from, 0, &page, NULL); if (err) EXOFS_ERR("exofs_delete_entry: exofs_write_begin FAILED => %d\n", err); if (pde) pde->rec_len = cpu_to_le16(to - from); dir->inode_no = 0; if (likely(!err)) err = exofs_commit_chunk(page, pos, to - from); inode->i_ctime = inode->i_mtime = CURRENT_TIME; mark_inode_dirty(inode); sbi->s_numfiles--; out: exofs_put_page(page); return err; } /* kept aligned on 4 bytes */ #define THIS_DIR ".\0\0" #define PARENT_DIR "..\0" int exofs_make_empty(struct inode *inode, struct inode *parent) { struct address_space *mapping = inode->i_mapping; struct page *page = grab_cache_page(mapping, 0); unsigned 
chunk_size = exofs_chunk_size(inode); struct exofs_dir_entry *de; int err; void *kaddr; if (!page) return -ENOMEM; err = exofs_write_begin(NULL, page->mapping, 0, chunk_size, 0, &page, NULL); if (err) { unlock_page(page); goto fail; } kaddr = kmap_atomic(page); de = (struct exofs_dir_entry *)kaddr; de->name_len = 1; de->rec_len = cpu_to_le16(EXOFS_DIR_REC_LEN(1)); memcpy(de->name, THIS_DIR, sizeof(THIS_DIR)); de->inode_no = cpu_to_le64(inode->i_ino); exofs_set_de_type(de, inode); de = (struct exofs_dir_entry *)(kaddr + EXOFS_DIR_REC_LEN(1)); de->name_len = 2; de->rec_len = cpu_to_le16(chunk_size - EXOFS_DIR_REC_LEN(1)); de->inode_no = cpu_to_le64(parent->i_ino); memcpy(de->name, PARENT_DIR, sizeof(PARENT_DIR)); exofs_set_de_type(de, inode); kunmap_atomic(kaddr); err = exofs_commit_chunk(page, 0, chunk_size); fail: page_cache_release(page); return err; } int exofs_empty_dir(struct inode *inode) { struct page *page = NULL; unsigned long i, npages = dir_pages(inode); for (i = 0; i < npages; i++) { char *kaddr; struct exofs_dir_entry *de; page = exofs_get_page(inode, i); if (IS_ERR(page)) continue; kaddr = page_address(page); de = (struct exofs_dir_entry *)kaddr; kaddr += exofs_last_byte(inode, i) - EXOFS_DIR_REC_LEN(1); while ((char *)de <= kaddr) { if (de->rec_len == 0) { EXOFS_ERR("ERROR: exofs_empty_dir: " "zero-length directory entry" "kaddr=%p, de=%p\n", kaddr, de); goto not_empty; } if (de->inode_no != 0) { /* check for . and .. */ if (de->name[0] != '.') goto not_empty; if (de->name_len > 2) goto not_empty; if (de->name_len < 2) { if (le64_to_cpu(de->inode_no) != inode->i_ino) goto not_empty; } else if (de->name[1] != '.') goto not_empty; } de = exofs_next_entry(de); } exofs_put_page(page); } return 1; not_empty: exofs_put_page(page); return 0; } const struct file_operations exofs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .iterate = exofs_readdir, };
gpl-2.0
Shkerzy/alcatelOT990-kernel-msm7x27
drivers/net/usb/smsc75xx.c
1203
35421
/*************************************************************************** * * Copyright (C) 2007-2010 SMSC * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * *****************************************************************************/ #include <linux/module.h> #include <linux/kmod.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/crc32.h> #include <linux/usb/usbnet.h> #include <linux/slab.h> #include "smsc75xx.h" #define SMSC_CHIPNAME "smsc75xx" #define SMSC_DRIVER_VERSION "1.0.0" #define HS_USB_PKT_SIZE (512) #define FS_USB_PKT_SIZE (64) #define DEFAULT_HS_BURST_CAP_SIZE (16 * 1024 + 5 * HS_USB_PKT_SIZE) #define DEFAULT_FS_BURST_CAP_SIZE (6 * 1024 + 33 * FS_USB_PKT_SIZE) #define DEFAULT_BULK_IN_DELAY (0x00002000) #define MAX_SINGLE_PACKET_SIZE (9000) #define LAN75XX_EEPROM_MAGIC (0x7500) #define EEPROM_MAC_OFFSET (0x01) #define DEFAULT_TX_CSUM_ENABLE (true) #define DEFAULT_RX_CSUM_ENABLE (true) #define DEFAULT_TSO_ENABLE (true) #define SMSC75XX_INTERNAL_PHY_ID (1) #define SMSC75XX_TX_OVERHEAD (8) #define MAX_RX_FIFO_SIZE (20 * 1024) #define MAX_TX_FIFO_SIZE (12 * 1024) #define USB_VENDOR_ID_SMSC (0x0424) #define USB_PRODUCT_ID_LAN7500 (0x7500) #define USB_PRODUCT_ID_LAN7505 (0x7505) #define 
check_warn(ret, fmt, args...) \ ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); }) #define check_warn_return(ret, fmt, args...) \ ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); return ret; } }) #define check_warn_goto_done(ret, fmt, args...) \ ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); goto done; } }) struct smsc75xx_priv { struct usbnet *dev; u32 rfe_ctl; u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN]; bool use_rx_csum; struct mutex dataport_mutex; spinlock_t rfe_ctl_lock; struct work_struct set_multicast; }; struct usb_context { struct usb_ctrlrequest req; struct usbnet *dev; }; static int turbo_mode = true; module_param(turbo_mode, bool, 0644); MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index, u32 *data) { u32 *buf = kmalloc(4, GFP_KERNEL); int ret; BUG_ON(!dev); if (!buf) return -ENOMEM; ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 00, index, buf, 4, USB_CTRL_GET_TIMEOUT); if (unlikely(ret < 0)) netdev_warn(dev->net, "Failed to read register index 0x%08x", index); le32_to_cpus(buf); *data = *buf; kfree(buf); return ret; } static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index, u32 data) { u32 *buf = kmalloc(4, GFP_KERNEL); int ret; BUG_ON(!dev); if (!buf) return -ENOMEM; *buf = data; cpu_to_le32s(buf); ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 00, index, buf, 4, USB_CTRL_SET_TIMEOUT); if (unlikely(ret < 0)) netdev_warn(dev->net, "Failed to write register index 0x%08x", index); kfree(buf); return ret; } /* Loop until the read is completed with timeout * called with phy_mutex held */ static int smsc75xx_phy_wait_not_busy(struct usbnet *dev) { unsigned long start_time = jiffies; u32 val; int ret; do { ret = 
smsc75xx_read_reg(dev, MII_ACCESS, &val); check_warn_return(ret, "Error reading MII_ACCESS"); if (!(val & MII_ACCESS_BUSY)) return 0; } while (!time_after(jiffies, start_time + HZ)); return -EIO; } static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx) { struct usbnet *dev = netdev_priv(netdev); u32 val, addr; int ret; mutex_lock(&dev->phy_mutex); /* confirm MII not busy */ ret = smsc75xx_phy_wait_not_busy(dev); check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_read"); /* set the address, index & direction (read from PHY) */ phy_id &= dev->mii.phy_id_mask; idx &= dev->mii.reg_num_mask; addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR) | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) | MII_ACCESS_READ; ret = smsc75xx_write_reg(dev, MII_ACCESS, addr); check_warn_goto_done(ret, "Error writing MII_ACCESS"); ret = smsc75xx_phy_wait_not_busy(dev); check_warn_goto_done(ret, "Timed out reading MII reg %02X", idx); ret = smsc75xx_read_reg(dev, MII_DATA, &val); check_warn_goto_done(ret, "Error reading MII_DATA"); ret = (u16)(val & 0xFFFF); done: mutex_unlock(&dev->phy_mutex); return ret; } static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx, int regval) { struct usbnet *dev = netdev_priv(netdev); u32 val, addr; int ret; mutex_lock(&dev->phy_mutex); /* confirm MII not busy */ ret = smsc75xx_phy_wait_not_busy(dev); check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_write"); val = regval; ret = smsc75xx_write_reg(dev, MII_DATA, val); check_warn_goto_done(ret, "Error writing MII_DATA"); /* set the address, index & direction (write to PHY) */ phy_id &= dev->mii.phy_id_mask; idx &= dev->mii.reg_num_mask; addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR) | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) | MII_ACCESS_WRITE; ret = smsc75xx_write_reg(dev, MII_ACCESS, addr); check_warn_goto_done(ret, "Error writing MII_ACCESS"); ret = 
smsc75xx_phy_wait_not_busy(dev); check_warn_goto_done(ret, "Timed out writing MII reg %02X", idx); done: mutex_unlock(&dev->phy_mutex); } static int smsc75xx_wait_eeprom(struct usbnet *dev) { unsigned long start_time = jiffies; u32 val; int ret; do { ret = smsc75xx_read_reg(dev, E2P_CMD, &val); check_warn_return(ret, "Error reading E2P_CMD"); if (!(val & E2P_CMD_BUSY) || (val & E2P_CMD_TIMEOUT)) break; udelay(40); } while (!time_after(jiffies, start_time + HZ)); if (val & (E2P_CMD_TIMEOUT | E2P_CMD_BUSY)) { netdev_warn(dev->net, "EEPROM read operation timeout"); return -EIO; } return 0; } static int smsc75xx_eeprom_confirm_not_busy(struct usbnet *dev) { unsigned long start_time = jiffies; u32 val; int ret; do { ret = smsc75xx_read_reg(dev, E2P_CMD, &val); check_warn_return(ret, "Error reading E2P_CMD"); if (!(val & E2P_CMD_BUSY)) return 0; udelay(40); } while (!time_after(jiffies, start_time + HZ)); netdev_warn(dev->net, "EEPROM is busy"); return -EIO; } static int smsc75xx_read_eeprom(struct usbnet *dev, u32 offset, u32 length, u8 *data) { u32 val; int i, ret; BUG_ON(!dev); BUG_ON(!data); ret = smsc75xx_eeprom_confirm_not_busy(dev); if (ret) return ret; for (i = 0; i < length; i++) { val = E2P_CMD_BUSY | E2P_CMD_READ | (offset & E2P_CMD_ADDR); ret = smsc75xx_write_reg(dev, E2P_CMD, val); check_warn_return(ret, "Error writing E2P_CMD"); ret = smsc75xx_wait_eeprom(dev); if (ret < 0) return ret; ret = smsc75xx_read_reg(dev, E2P_DATA, &val); check_warn_return(ret, "Error reading E2P_DATA"); data[i] = val & 0xFF; offset++; } return 0; } static int smsc75xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length, u8 *data) { u32 val; int i, ret; BUG_ON(!dev); BUG_ON(!data); ret = smsc75xx_eeprom_confirm_not_busy(dev); if (ret) return ret; /* Issue write/erase enable command */ val = E2P_CMD_BUSY | E2P_CMD_EWEN; ret = smsc75xx_write_reg(dev, E2P_CMD, val); check_warn_return(ret, "Error writing E2P_CMD"); ret = smsc75xx_wait_eeprom(dev); if (ret < 0) return ret; for (i = 
0; i < length; i++) { /* Fill data register */ val = data[i]; ret = smsc75xx_write_reg(dev, E2P_DATA, val); check_warn_return(ret, "Error writing E2P_DATA"); /* Send "write" command */ val = E2P_CMD_BUSY | E2P_CMD_WRITE | (offset & E2P_CMD_ADDR); ret = smsc75xx_write_reg(dev, E2P_CMD, val); check_warn_return(ret, "Error writing E2P_CMD"); ret = smsc75xx_wait_eeprom(dev); if (ret < 0) return ret; offset++; } return 0; } static int smsc75xx_dataport_wait_not_busy(struct usbnet *dev) { int i, ret; for (i = 0; i < 100; i++) { u32 dp_sel; ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel); check_warn_return(ret, "Error reading DP_SEL"); if (dp_sel & DP_SEL_DPRDY) return 0; udelay(40); } netdev_warn(dev->net, "smsc75xx_dataport_wait_not_busy timed out"); return -EIO; } static int smsc75xx_dataport_write(struct usbnet *dev, u32 ram_select, u32 addr, u32 length, u32 *buf) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 dp_sel; int i, ret; mutex_lock(&pdata->dataport_mutex); ret = smsc75xx_dataport_wait_not_busy(dev); check_warn_goto_done(ret, "smsc75xx_dataport_write busy on entry"); ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel); check_warn_goto_done(ret, "Error reading DP_SEL"); dp_sel &= ~DP_SEL_RSEL; dp_sel |= ram_select; ret = smsc75xx_write_reg(dev, DP_SEL, dp_sel); check_warn_goto_done(ret, "Error writing DP_SEL"); for (i = 0; i < length; i++) { ret = smsc75xx_write_reg(dev, DP_ADDR, addr + i); check_warn_goto_done(ret, "Error writing DP_ADDR"); ret = smsc75xx_write_reg(dev, DP_DATA, buf[i]); check_warn_goto_done(ret, "Error writing DP_DATA"); ret = smsc75xx_write_reg(dev, DP_CMD, DP_CMD_WRITE); check_warn_goto_done(ret, "Error writing DP_CMD"); ret = smsc75xx_dataport_wait_not_busy(dev); check_warn_goto_done(ret, "smsc75xx_dataport_write timeout"); } done: mutex_unlock(&pdata->dataport_mutex); return ret; } /* returns hash bit number for given MAC address */ static u32 smsc75xx_hash(char addr[ETH_ALEN]) { return (ether_crc(ETH_ALEN, addr) >> 
23) & 0x1ff; } static void smsc75xx_deferred_multicast_write(struct work_struct *param) { struct smsc75xx_priv *pdata = container_of(param, struct smsc75xx_priv, set_multicast); struct usbnet *dev = pdata->dev; int ret; netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x", pdata->rfe_ctl); smsc75xx_dataport_write(dev, DP_SEL_VHF, DP_SEL_VHF_VLAN_LEN, DP_SEL_VHF_HASH_LEN, pdata->multicast_hash_table); ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); check_warn(ret, "Error writing RFE_CRL"); } static void smsc75xx_set_multicast(struct net_device *netdev) { struct usbnet *dev = netdev_priv(netdev); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); unsigned long flags; int i; spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); pdata->rfe_ctl &= ~(RFE_CTL_AU | RFE_CTL_AM | RFE_CTL_DPF | RFE_CTL_MHF); pdata->rfe_ctl |= RFE_CTL_AB; for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++) pdata->multicast_hash_table[i] = 0; if (dev->net->flags & IFF_PROMISC) { netif_dbg(dev, drv, dev->net, "promiscuous mode enabled"); pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_AU; } else if (dev->net->flags & IFF_ALLMULTI) { netif_dbg(dev, drv, dev->net, "receive all multicast enabled"); pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF; } else if (!netdev_mc_empty(dev->net)) { struct netdev_hw_addr *ha; netif_dbg(dev, drv, dev->net, "receive multicast hash filter"); pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF; netdev_for_each_mc_addr(ha, netdev) { u32 bitnum = smsc75xx_hash(ha->addr); pdata->multicast_hash_table[bitnum / 32] |= (1 << (bitnum % 32)); } } else { netif_dbg(dev, drv, dev->net, "receive own packets only"); pdata->rfe_ctl |= RFE_CTL_DPF; } spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); /* defer register writes to a sleepable context */ schedule_work(&pdata->set_multicast); } static int smsc75xx_update_flowcontrol(struct usbnet *dev, u8 duplex, u16 lcladv, u16 rmtadv) { u32 flow = 0, fct_flow = 0; int ret; if (duplex == DUPLEX_FULL) { u8 cap = 
mii_resolve_flowctrl_fdx(lcladv, rmtadv); if (cap & FLOW_CTRL_TX) { flow = (FLOW_TX_FCEN | 0xFFFF); /* set fct_flow thresholds to 20% and 80% */ fct_flow = (8 << 8) | 32; } if (cap & FLOW_CTRL_RX) flow |= FLOW_RX_FCEN; netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s", (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), (cap & FLOW_CTRL_TX ? "enabled" : "disabled")); } else { netif_dbg(dev, link, dev->net, "half duplex"); } ret = smsc75xx_write_reg(dev, FLOW, flow); check_warn_return(ret, "Error writing FLOW"); ret = smsc75xx_write_reg(dev, FCT_FLOW, fct_flow); check_warn_return(ret, "Error writing FCT_FLOW"); return 0; } static int smsc75xx_link_reset(struct usbnet *dev) { struct mii_if_info *mii = &dev->mii; struct ethtool_cmd ecmd; u16 lcladv, rmtadv; int ret; /* clear interrupt status */ ret = smsc75xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC); check_warn_return(ret, "Error reading PHY_INT_SRC"); ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL); check_warn_return(ret, "Error writing INT_STS"); mii_check_media(mii, 1, 1); mii_ethtool_gset(&dev->mii, &ecmd); lcladv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE); rmtadv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_LPA); netif_dbg(dev, link, dev->net, "speed: %d duplex: %d lcladv: %04x" " rmtadv: %04x", ecmd.speed, ecmd.duplex, lcladv, rmtadv); return smsc75xx_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv); } static void smsc75xx_status(struct usbnet *dev, struct urb *urb) { u32 intdata; if (urb->actual_length != 4) { netdev_warn(dev->net, "unexpected urb length %d", urb->actual_length); return; } memcpy(&intdata, urb->transfer_buffer, 4); le32_to_cpus(&intdata); netif_dbg(dev, link, dev->net, "intdata: 0x%08X", intdata); if (intdata & INT_ENP_PHY_INT) usbnet_defer_kevent(dev, EVENT_LINK_RESET); else netdev_warn(dev->net, "unexpected interrupt, intdata=0x%08X", intdata); } /* Enable or disable Rx checksum offload engine */ static int smsc75xx_set_rx_csum_offload(struct usbnet 
*dev) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); unsigned long flags; int ret; spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); if (pdata->use_rx_csum) pdata->rfe_ctl |= RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM; else pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM); spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); check_warn_return(ret, "Error writing RFE_CTL"); return 0; } static int smsc75xx_ethtool_get_eeprom_len(struct net_device *net) { return MAX_EEPROM_SIZE; } static int smsc75xx_ethtool_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct usbnet *dev = netdev_priv(netdev); ee->magic = LAN75XX_EEPROM_MAGIC; return smsc75xx_read_eeprom(dev, ee->offset, ee->len, data); } static int smsc75xx_ethtool_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct usbnet *dev = netdev_priv(netdev); if (ee->magic != LAN75XX_EEPROM_MAGIC) { netdev_warn(dev->net, "EEPROM: magic value mismatch: 0x%x", ee->magic); return -EINVAL; } return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data); } static u32 smsc75xx_ethtool_get_rx_csum(struct net_device *netdev) { struct usbnet *dev = netdev_priv(netdev); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); return pdata->use_rx_csum; } static int smsc75xx_ethtool_set_rx_csum(struct net_device *netdev, u32 val) { struct usbnet *dev = netdev_priv(netdev); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); pdata->use_rx_csum = !!val; return smsc75xx_set_rx_csum_offload(dev); } static int smsc75xx_ethtool_set_tso(struct net_device *netdev, u32 data) { if (data) netdev->features |= NETIF_F_TSO | NETIF_F_TSO6; else netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); return 0; } static const struct ethtool_ops smsc75xx_ethtool_ops = { .get_link = usbnet_get_link, .nway_reset = usbnet_nway_reset, .get_drvinfo = usbnet_get_drvinfo, .get_msglevel = 
usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, .get_settings = usbnet_get_settings, .set_settings = usbnet_set_settings, .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len, .get_eeprom = smsc75xx_ethtool_get_eeprom, .set_eeprom = smsc75xx_ethtool_set_eeprom, .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_hw_csum, .get_rx_csum = smsc75xx_ethtool_get_rx_csum, .set_rx_csum = smsc75xx_ethtool_set_rx_csum, .get_tso = ethtool_op_get_tso, .set_tso = smsc75xx_ethtool_set_tso, }; static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) { struct usbnet *dev = netdev_priv(netdev); if (!netif_running(netdev)) return -EINVAL; return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL); } static void smsc75xx_init_mac_address(struct usbnet *dev) { /* try reading mac address from EEPROM */ if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, dev->net->dev_addr) == 0) { if (is_valid_ether_addr(dev->net->dev_addr)) { /* eeprom values are valid so use them */ netif_dbg(dev, ifup, dev->net, "MAC address read from EEPROM"); return; } } /* no eeprom, or eeprom values are invalid. 
generate random MAC */ random_ether_addr(dev->net->dev_addr); netif_dbg(dev, ifup, dev->net, "MAC address set to random_ether_addr"); } static int smsc75xx_set_mac_address(struct usbnet *dev) { u32 addr_lo = dev->net->dev_addr[0] | dev->net->dev_addr[1] << 8 | dev->net->dev_addr[2] << 16 | dev->net->dev_addr[3] << 24; u32 addr_hi = dev->net->dev_addr[4] | dev->net->dev_addr[5] << 8; int ret = smsc75xx_write_reg(dev, RX_ADDRH, addr_hi); check_warn_return(ret, "Failed to write RX_ADDRH: %d", ret); ret = smsc75xx_write_reg(dev, RX_ADDRL, addr_lo); check_warn_return(ret, "Failed to write RX_ADDRL: %d", ret); addr_hi |= ADDR_FILTX_FB_VALID; ret = smsc75xx_write_reg(dev, ADDR_FILTX, addr_hi); check_warn_return(ret, "Failed to write ADDR_FILTX: %d", ret); ret = smsc75xx_write_reg(dev, ADDR_FILTX + 4, addr_lo); check_warn_return(ret, "Failed to write ADDR_FILTX+4: %d", ret); return 0; } static int smsc75xx_phy_initialize(struct usbnet *dev) { int bmcr, timeout = 0; /* Initialize MII structure */ dev->mii.dev = dev->net; dev->mii.mdio_read = smsc75xx_mdio_read; dev->mii.mdio_write = smsc75xx_mdio_write; dev->mii.phy_id_mask = 0x1f; dev->mii.reg_num_mask = 0x1f; dev->mii.phy_id = SMSC75XX_INTERNAL_PHY_ID; /* reset phy and wait for reset to complete */ smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET); do { msleep(10); bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR); check_warn_return(bmcr, "Error reading MII_BMCR"); timeout++; } while ((bmcr & MII_BMCR) && (timeout < 100)); if (timeout >= 100) { netdev_warn(dev->net, "timeout on PHY Reset"); return -EIO; } smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE, ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); /* read to clear */ smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC); check_warn_return(bmcr, "Error reading PHY_INT_SRC"); smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK, PHY_INT_MASK_DEFAULT); mii_nway_restart(&dev->mii); 
netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); return 0; } static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size) { int ret = 0; u32 buf; bool rxenabled; ret = smsc75xx_read_reg(dev, MAC_RX, &buf); check_warn_return(ret, "Failed to read MAC_RX: %d", ret); rxenabled = ((buf & MAC_RX_RXEN) != 0); if (rxenabled) { buf &= ~MAC_RX_RXEN; ret = smsc75xx_write_reg(dev, MAC_RX, buf); check_warn_return(ret, "Failed to write MAC_RX: %d", ret); } /* add 4 to size for FCS */ buf &= ~MAC_RX_MAX_SIZE; buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT) & MAC_RX_MAX_SIZE); ret = smsc75xx_write_reg(dev, MAC_RX, buf); check_warn_return(ret, "Failed to write MAC_RX: %d", ret); if (rxenabled) { buf |= MAC_RX_RXEN; ret = smsc75xx_write_reg(dev, MAC_RX, buf); check_warn_return(ret, "Failed to write MAC_RX: %d", ret); } return 0; } static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu) { struct usbnet *dev = netdev_priv(netdev); int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu); check_warn_return(ret, "Failed to set mac rx frame length"); return usbnet_change_mtu(netdev, new_mtu); } static int smsc75xx_reset(struct usbnet *dev) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 buf; int ret = 0, timeout; netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset"); ret = smsc75xx_read_reg(dev, HW_CFG, &buf); check_warn_return(ret, "Failed to read HW_CFG: %d", ret); buf |= HW_CFG_LRST; ret = smsc75xx_write_reg(dev, HW_CFG, buf); check_warn_return(ret, "Failed to write HW_CFG: %d", ret); timeout = 0; do { msleep(10); ret = smsc75xx_read_reg(dev, HW_CFG, &buf); check_warn_return(ret, "Failed to read HW_CFG: %d", ret); timeout++; } while ((buf & HW_CFG_LRST) && (timeout < 100)); if (timeout >= 100) { netdev_warn(dev->net, "timeout on completion of Lite Reset"); return -EIO; } netif_dbg(dev, ifup, dev->net, "Lite reset complete, resetting PHY"); ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); check_warn_return(ret, 
"Failed to read PMT_CTL: %d", ret); buf |= PMT_CTL_PHY_RST; ret = smsc75xx_write_reg(dev, PMT_CTL, buf); check_warn_return(ret, "Failed to write PMT_CTL: %d", ret); timeout = 0; do { msleep(10); ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); check_warn_return(ret, "Failed to read PMT_CTL: %d", ret); timeout++; } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100)); if (timeout >= 100) { netdev_warn(dev->net, "timeout waiting for PHY Reset"); return -EIO; } netif_dbg(dev, ifup, dev->net, "PHY reset complete"); smsc75xx_init_mac_address(dev); ret = smsc75xx_set_mac_address(dev); check_warn_return(ret, "Failed to set mac address"); netif_dbg(dev, ifup, dev->net, "MAC Address: %pM", dev->net->dev_addr); ret = smsc75xx_read_reg(dev, HW_CFG, &buf); check_warn_return(ret, "Failed to read HW_CFG: %d", ret); netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x", buf); buf |= HW_CFG_BIR; ret = smsc75xx_write_reg(dev, HW_CFG, buf); check_warn_return(ret, "Failed to write HW_CFG: %d", ret); ret = smsc75xx_read_reg(dev, HW_CFG, &buf); check_warn_return(ret, "Failed to read HW_CFG: %d", ret); netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG after " "writing HW_CFG_BIR: 0x%08x", buf); if (!turbo_mode) { buf = 0; dev->rx_urb_size = MAX_SINGLE_PACKET_SIZE; } else if (dev->udev->speed == USB_SPEED_HIGH) { buf = DEFAULT_HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE; dev->rx_urb_size = DEFAULT_HS_BURST_CAP_SIZE; } else { buf = DEFAULT_FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE; dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE; } netif_dbg(dev, ifup, dev->net, "rx_urb_size=%ld", (ulong)dev->rx_urb_size); ret = smsc75xx_write_reg(dev, BURST_CAP, buf); check_warn_return(ret, "Failed to write BURST_CAP: %d", ret); ret = smsc75xx_read_reg(dev, BURST_CAP, &buf); check_warn_return(ret, "Failed to read BURST_CAP: %d", ret); netif_dbg(dev, ifup, dev->net, "Read Value from BURST_CAP after writing: 0x%08x", buf); ret = smsc75xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY); check_warn_return(ret, 
"Failed to write BULK_IN_DLY: %d", ret); ret = smsc75xx_read_reg(dev, BULK_IN_DLY, &buf); check_warn_return(ret, "Failed to read BULK_IN_DLY: %d", ret); netif_dbg(dev, ifup, dev->net, "Read Value from BULK_IN_DLY after writing: 0x%08x", buf); if (turbo_mode) { ret = smsc75xx_read_reg(dev, HW_CFG, &buf); check_warn_return(ret, "Failed to read HW_CFG: %d", ret); netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x", buf); buf |= (HW_CFG_MEF | HW_CFG_BCE); ret = smsc75xx_write_reg(dev, HW_CFG, buf); check_warn_return(ret, "Failed to write HW_CFG: %d", ret); ret = smsc75xx_read_reg(dev, HW_CFG, &buf); check_warn_return(ret, "Failed to read HW_CFG: %d", ret); netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x", buf); } /* set FIFO sizes */ buf = (MAX_RX_FIFO_SIZE - 512) / 512; ret = smsc75xx_write_reg(dev, FCT_RX_FIFO_END, buf); check_warn_return(ret, "Failed to write FCT_RX_FIFO_END: %d", ret); netif_dbg(dev, ifup, dev->net, "FCT_RX_FIFO_END set to 0x%08x", buf); buf = (MAX_TX_FIFO_SIZE - 512) / 512; ret = smsc75xx_write_reg(dev, FCT_TX_FIFO_END, buf); check_warn_return(ret, "Failed to write FCT_TX_FIFO_END: %d", ret); netif_dbg(dev, ifup, dev->net, "FCT_TX_FIFO_END set to 0x%08x", buf); ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL); check_warn_return(ret, "Failed to write INT_STS: %d", ret); ret = smsc75xx_read_reg(dev, ID_REV, &buf); check_warn_return(ret, "Failed to read ID_REV: %d", ret); netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x", buf); /* Configure GPIO pins as LED outputs */ ret = smsc75xx_read_reg(dev, LED_GPIO_CFG, &buf); check_warn_return(ret, "Failed to read LED_GPIO_CFG: %d", ret); buf &= ~(LED_GPIO_CFG_LED2_FUN_SEL | LED_GPIO_CFG_LED10_FUN_SEL); buf |= LED_GPIO_CFG_LEDGPIO_EN | LED_GPIO_CFG_LED2_FUN_SEL; ret = smsc75xx_write_reg(dev, LED_GPIO_CFG, buf); check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d", ret); ret = smsc75xx_write_reg(dev, FLOW, 0); check_warn_return(ret, "Failed to write FLOW: %d", ret); ret = smsc75xx_write_reg(dev, 
FCT_FLOW, 0); check_warn_return(ret, "Failed to write FCT_FLOW: %d", ret); /* Don't need rfe_ctl_lock during initialisation */ ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); check_warn_return(ret, "Failed to read RFE_CTL: %d", ret); pdata->rfe_ctl |= RFE_CTL_AB | RFE_CTL_DPF; ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); check_warn_return(ret, "Failed to write RFE_CTL: %d", ret); ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); check_warn_return(ret, "Failed to read RFE_CTL: %d", ret); netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x", pdata->rfe_ctl); /* Enable or disable checksum offload engines */ ethtool_op_set_tx_hw_csum(dev->net, DEFAULT_TX_CSUM_ENABLE); ret = smsc75xx_set_rx_csum_offload(dev); check_warn_return(ret, "Failed to set rx csum offload: %d", ret); smsc75xx_ethtool_set_tso(dev->net, DEFAULT_TSO_ENABLE); smsc75xx_set_multicast(dev->net); ret = smsc75xx_phy_initialize(dev); check_warn_return(ret, "Failed to initialize PHY: %d", ret); ret = smsc75xx_read_reg(dev, INT_EP_CTL, &buf); check_warn_return(ret, "Failed to read INT_EP_CTL: %d", ret); /* enable PHY interrupts */ buf |= INT_ENP_PHY_INT; ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf); check_warn_return(ret, "Failed to write INT_EP_CTL: %d", ret); ret = smsc75xx_read_reg(dev, MAC_TX, &buf); check_warn_return(ret, "Failed to read MAC_TX: %d", ret); buf |= MAC_TX_TXEN; ret = smsc75xx_write_reg(dev, MAC_TX, buf); check_warn_return(ret, "Failed to write MAC_TX: %d", ret); netif_dbg(dev, ifup, dev->net, "MAC_TX set to 0x%08x", buf); ret = smsc75xx_read_reg(dev, FCT_TX_CTL, &buf); check_warn_return(ret, "Failed to read FCT_TX_CTL: %d", ret); buf |= FCT_TX_CTL_EN; ret = smsc75xx_write_reg(dev, FCT_TX_CTL, buf); check_warn_return(ret, "Failed to write FCT_TX_CTL: %d", ret); netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x", buf); ret = smsc75xx_set_rx_max_frame_length(dev, 1514); check_warn_return(ret, "Failed to set max rx frame length"); ret = 
smsc75xx_read_reg(dev, MAC_RX, &buf); check_warn_return(ret, "Failed to read MAC_RX: %d", ret); buf |= MAC_RX_RXEN; ret = smsc75xx_write_reg(dev, MAC_RX, buf); check_warn_return(ret, "Failed to write MAC_RX: %d", ret); netif_dbg(dev, ifup, dev->net, "MAC_RX set to 0x%08x", buf); ret = smsc75xx_read_reg(dev, FCT_RX_CTL, &buf); check_warn_return(ret, "Failed to read FCT_RX_CTL: %d", ret); buf |= FCT_RX_CTL_EN; ret = smsc75xx_write_reg(dev, FCT_RX_CTL, buf); check_warn_return(ret, "Failed to write FCT_RX_CTL: %d", ret); netif_dbg(dev, ifup, dev->net, "FCT_RX_CTL set to 0x%08x", buf); netif_dbg(dev, ifup, dev->net, "smsc75xx_reset, return 0"); return 0; } static const struct net_device_ops smsc75xx_netdev_ops = { .ndo_open = usbnet_open, .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = smsc75xx_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = smsc75xx_ioctl, .ndo_set_multicast_list = smsc75xx_set_multicast, }; static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) { struct smsc75xx_priv *pdata = NULL; int ret; printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n"); ret = usbnet_get_endpoints(dev, intf); check_warn_return(ret, "usbnet_get_endpoints failed: %d", ret); dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc75xx_priv), GFP_KERNEL); pdata = (struct smsc75xx_priv *)(dev->data[0]); if (!pdata) { netdev_warn(dev->net, "Unable to allocate smsc75xx_priv"); return -ENOMEM; } pdata->dev = dev; spin_lock_init(&pdata->rfe_ctl_lock); mutex_init(&pdata->dataport_mutex); INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write); pdata->use_rx_csum = DEFAULT_RX_CSUM_ENABLE; /* We have to advertise SG otherwise TSO cannot be enabled */ dev->net->features |= NETIF_F_SG; /* Init all registers */ ret = smsc75xx_reset(dev); dev->net->netdev_ops = &smsc75xx_netdev_ops; dev->net->ethtool_ops = &smsc75xx_ethtool_ops; 
dev->net->flags |= IFF_MULTICAST; dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD; return 0; } static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); if (pdata) { netif_dbg(dev, ifdown, dev->net, "free pdata"); kfree(pdata); pdata = NULL; dev->data[0] = 0; } } static void smsc75xx_rx_csum_offload(struct sk_buff *skb, u32 rx_cmd_a, u32 rx_cmd_b) { if (unlikely(rx_cmd_a & RX_CMD_A_LCSM)) { skb->ip_summed = CHECKSUM_NONE; } else { skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT)); skb->ip_summed = CHECKSUM_COMPLETE; } } static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); while (skb->len > 0) { u32 rx_cmd_a, rx_cmd_b, align_count, size; struct sk_buff *ax_skb; unsigned char *packet; memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a)); le32_to_cpus(&rx_cmd_a); skb_pull(skb, 4); memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b)); le32_to_cpus(&rx_cmd_b); skb_pull(skb, 4 + NET_IP_ALIGN); packet = skb->data; /* get the packet length */ size = (rx_cmd_a & RX_CMD_A_LEN) - NET_IP_ALIGN; align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4; if (unlikely(rx_cmd_a & RX_CMD_A_RED)) { netif_dbg(dev, rx_err, dev->net, "Error rx_cmd_a=0x%08x", rx_cmd_a); dev->net->stats.rx_errors++; dev->net->stats.rx_dropped++; if (rx_cmd_a & RX_CMD_A_FCS) dev->net->stats.rx_crc_errors++; else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT)) dev->net->stats.rx_frame_errors++; } else { /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ if (unlikely(size > (ETH_FRAME_LEN + 12))) { netif_dbg(dev, rx_err, dev->net, "size err rx_cmd_a=0x%08x", rx_cmd_a); return 0; } /* last frame in this batch */ if (skb->len == size) { if (pdata->use_rx_csum) smsc75xx_rx_csum_offload(skb, rx_cmd_a, rx_cmd_b); else skb->ip_summed = CHECKSUM_NONE; skb_trim(skb, skb->len - 4); /* remove fcs */ skb->truesize = size + sizeof(struct sk_buff); return 
1; } ax_skb = skb_clone(skb, GFP_ATOMIC); if (unlikely(!ax_skb)) { netdev_warn(dev->net, "Error allocating skb"); return 0; } ax_skb->len = size; ax_skb->data = packet; skb_set_tail_pointer(ax_skb, size); if (pdata->use_rx_csum) smsc75xx_rx_csum_offload(ax_skb, rx_cmd_a, rx_cmd_b); else ax_skb->ip_summed = CHECKSUM_NONE; skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */ ax_skb->truesize = size + sizeof(struct sk_buff); usbnet_skb_return(dev, ax_skb); } skb_pull(skb, size); /* padding bytes before the next frame starts */ if (skb->len) skb_pull(skb, align_count); } if (unlikely(skb->len < 0)) { netdev_warn(dev->net, "invalid rx length<0 %d", skb->len); return 0; } return 1; } static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { u32 tx_cmd_a, tx_cmd_b; skb_linearize(skb); if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) { struct sk_buff *skb2 = skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags); dev_kfree_skb_any(skb); skb = skb2; if (!skb) return NULL; } tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS; if (skb->ip_summed == CHECKSUM_PARTIAL) tx_cmd_a |= TX_CMD_A_IPE | TX_CMD_A_TPE; if (skb_is_gso(skb)) { u16 mss = max(skb_shinfo(skb)->gso_size, TX_MSS_MIN); tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT) & TX_CMD_B_MSS; tx_cmd_a |= TX_CMD_A_LSO; } else { tx_cmd_b = 0; } skb_push(skb, 4); cpu_to_le32s(&tx_cmd_b); memcpy(skb->data, &tx_cmd_b, 4); skb_push(skb, 4); cpu_to_le32s(&tx_cmd_a); memcpy(skb->data, &tx_cmd_a, 4); return skb; } static const struct driver_info smsc75xx_info = { .description = "smsc75xx USB 2.0 Gigabit Ethernet", .bind = smsc75xx_bind, .unbind = smsc75xx_unbind, .link_reset = smsc75xx_link_reset, .reset = smsc75xx_reset, .rx_fixup = smsc75xx_rx_fixup, .tx_fixup = smsc75xx_tx_fixup, .status = smsc75xx_status, .flags = FLAG_ETHER | FLAG_SEND_ZLP, }; static const struct usb_device_id products[] = { { /* SMSC7500 USB Gigabit Ethernet Device */ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_PRODUCT_ID_LAN7500), 
.driver_info = (unsigned long) &smsc75xx_info, }, { /* SMSC7500 USB Gigabit Ethernet Device */ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_PRODUCT_ID_LAN7505), .driver_info = (unsigned long) &smsc75xx_info, }, { }, /* END */ }; MODULE_DEVICE_TABLE(usb, products); static struct usb_driver smsc75xx_driver = { .name = SMSC_CHIPNAME, .id_table = products, .probe = usbnet_probe, .suspend = usbnet_suspend, .resume = usbnet_resume, .disconnect = usbnet_disconnect, }; static int __init smsc75xx_init(void) { return usb_register(&smsc75xx_driver); } module_init(smsc75xx_init); static void __exit smsc75xx_exit(void) { usb_deregister(&smsc75xx_driver); } module_exit(smsc75xx_exit); MODULE_AUTHOR("Nancy Lin"); MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>"); MODULE_DESCRIPTION("SMSC75XX USB 2.0 Gigabit Ethernet Devices"); MODULE_LICENSE("GPL");
gpl-2.0
zeroblade1984/Yureka-64bit
drivers/media/dvb-frontends/nxt200x.c
1715
30280
/* * Support for NXT2002 and NXT2004 - VSB/QAM * * Copyright (C) 2005 Kirk Lapray <kirk.lapray@gmail.com> * Copyright (C) 2006 Michael Krufky <mkrufky@m1k.net> * based on nxt2002 by Taylor Jacob <rtjacob@earthlink.net> * and nxt2004 by Jean-Francois Thibert <jeanfrancois@sagetv.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ /* * NOTES ABOUT THIS DRIVER * * This Linux driver supports: * B2C2/BBTI Technisat Air2PC - ATSC (NXT2002) * AverTVHD MCE A180 (NXT2004) * ATI HDTV Wonder (NXT2004) * * This driver needs external firmware. Please use the command * "<kerneldir>/Documentation/dvb/get_dvb_firmware nxt2002" or * "<kerneldir>/Documentation/dvb/get_dvb_firmware nxt2004" to * download/extract the appropriate firmware, and then copy it to * /usr/lib/hotplug/firmware/ or /lib/firmware/ * (depending on configuration of firmware hotplug). 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* Max transfer size done by I2C transfer functions */ #define MAX_XFER_SIZE 256 #define NXT2002_DEFAULT_FIRMWARE "dvb-fe-nxt2002.fw" #define NXT2004_DEFAULT_FIRMWARE "dvb-fe-nxt2004.fw" #define CRC_CCIT_MASK 0x1021 #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/string.h> #include "dvb_frontend.h" #include "nxt200x.h" struct nxt200x_state { struct i2c_adapter* i2c; const struct nxt200x_config* config; struct dvb_frontend frontend; /* demodulator private data */ nxt_chip_type demod_chip; u8 initialised:1; }; static int debug; #define dprintk(args...) do { if (debug) pr_debug(args); } while (0) static int i2c_writebytes (struct nxt200x_state* state, u8 addr, u8 *buf, u8 len) { int err; struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = buf, .len = len }; if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) { pr_warn("%s: i2c write error (addr 0x%02x, err == %i)\n", __func__, addr, err); return -EREMOTEIO; } return 0; } static int i2c_readbytes(struct nxt200x_state *state, u8 addr, u8 *buf, u8 len) { int err; struct i2c_msg msg = { .addr = addr, .flags = I2C_M_RD, .buf = buf, .len = len }; if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) { pr_warn("%s: i2c read error (addr 0x%02x, err == %i)\n", __func__, addr, err); return -EREMOTEIO; } return 0; } static int nxt200x_writebytes (struct nxt200x_state* state, u8 reg, const u8 *buf, u8 len) { u8 buf2[MAX_XFER_SIZE]; int err; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf2, .len = len + 1 }; if (1 + len > sizeof(buf2)) { pr_warn("%s: i2c wr reg=%04x: len=%d is too big!\n", __func__, reg, len); return -EINVAL; } buf2[0] = reg; memcpy(&buf2[1], buf, len); if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) { pr_warn("%s: i2c write error (addr 0x%02x, err == %i)\n", __func__, state->config->demod_address, err); return -EREMOTEIO; } return 0; } static int 
nxt200x_readbytes(struct nxt200x_state *state, u8 reg, u8 *buf, u8 len) { u8 reg2 [] = { reg }; struct i2c_msg msg [] = { { .addr = state->config->demod_address, .flags = 0, .buf = reg2, .len = 1 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = buf, .len = len } }; int err; if ((err = i2c_transfer (state->i2c, msg, 2)) != 2) { pr_warn("%s: i2c read error (addr 0x%02x, err == %i)\n", __func__, state->config->demod_address, err); return -EREMOTEIO; } return 0; } static u16 nxt200x_crc(u16 crc, u8 c) { u8 i; u16 input = (u16) c & 0xFF; input<<=8; for(i=0; i<8; i++) { if((crc^input) & 0x8000) crc=(crc<<1)^CRC_CCIT_MASK; else crc<<=1; input<<=1; } return crc; } static int nxt200x_writereg_multibyte (struct nxt200x_state* state, u8 reg, u8* data, u8 len) { u8 attr, len2, buf; dprintk("%s\n", __func__); /* set mutli register register */ nxt200x_writebytes(state, 0x35, &reg, 1); /* send the actual data */ nxt200x_writebytes(state, 0x36, data, len); switch (state->demod_chip) { case NXT2002: len2 = len; buf = 0x02; break; case NXT2004: /* probably not right, but gives correct values */ attr = 0x02; if (reg & 0x80) { attr = attr << 1; if (reg & 0x04) attr = attr >> 1; } /* set write bit */ len2 = ((attr << 4) | 0x10) | len; buf = 0x80; break; default: return -EINVAL; break; } /* set multi register length */ nxt200x_writebytes(state, 0x34, &len2, 1); /* toggle the multireg write bit */ nxt200x_writebytes(state, 0x21, &buf, 1); nxt200x_readbytes(state, 0x21, &buf, 1); switch (state->demod_chip) { case NXT2002: if ((buf & 0x02) == 0) return 0; break; case NXT2004: if (buf == 0) return 0; break; default: return -EINVAL; break; } pr_warn("Error writing multireg register 0x%02X\n", reg); return 0; } static int nxt200x_readreg_multibyte (struct nxt200x_state* state, u8 reg, u8* data, u8 len) { int i; u8 buf, len2, attr; dprintk("%s\n", __func__); /* set mutli register register */ nxt200x_writebytes(state, 0x35, &reg, 1); switch (state->demod_chip) { case 
NXT2002: /* set multi register length */ len2 = len & 0x80; nxt200x_writebytes(state, 0x34, &len2, 1); /* read the actual data */ nxt200x_readbytes(state, reg, data, len); return 0; break; case NXT2004: /* probably not right, but gives correct values */ attr = 0x02; if (reg & 0x80) { attr = attr << 1; if (reg & 0x04) attr = attr >> 1; } /* set multi register length */ len2 = (attr << 4) | len; nxt200x_writebytes(state, 0x34, &len2, 1); /* toggle the multireg bit*/ buf = 0x80; nxt200x_writebytes(state, 0x21, &buf, 1); /* read the actual data */ for(i = 0; i < len; i++) { nxt200x_readbytes(state, 0x36 + i, &data[i], 1); } return 0; break; default: return -EINVAL; break; } } static void nxt200x_microcontroller_stop (struct nxt200x_state* state) { u8 buf, stopval, counter = 0; dprintk("%s\n", __func__); /* set correct stop value */ switch (state->demod_chip) { case NXT2002: stopval = 0x40; break; case NXT2004: stopval = 0x10; break; default: stopval = 0; break; } buf = 0x80; nxt200x_writebytes(state, 0x22, &buf, 1); while (counter < 20) { nxt200x_readbytes(state, 0x31, &buf, 1); if (buf & stopval) return; msleep(10); counter++; } pr_warn("Timeout waiting for nxt200x to stop. 
This is ok after " "firmware upload.\n"); return; } static void nxt200x_microcontroller_start (struct nxt200x_state* state) { u8 buf; dprintk("%s\n", __func__); buf = 0x00; nxt200x_writebytes(state, 0x22, &buf, 1); } static void nxt2004_microcontroller_init (struct nxt200x_state* state) { u8 buf[9]; u8 counter = 0; dprintk("%s\n", __func__); buf[0] = 0x00; nxt200x_writebytes(state, 0x2b, buf, 1); buf[0] = 0x70; nxt200x_writebytes(state, 0x34, buf, 1); buf[0] = 0x04; nxt200x_writebytes(state, 0x35, buf, 1); buf[0] = 0x01; buf[1] = 0x23; buf[2] = 0x45; buf[3] = 0x67; buf[4] = 0x89; buf[5] = 0xAB; buf[6] = 0xCD; buf[7] = 0xEF; buf[8] = 0xC0; nxt200x_writebytes(state, 0x36, buf, 9); buf[0] = 0x80; nxt200x_writebytes(state, 0x21, buf, 1); while (counter < 20) { nxt200x_readbytes(state, 0x21, buf, 1); if (buf[0] == 0) return; msleep(10); counter++; } pr_warn("Timeout waiting for nxt2004 to init.\n"); return; } static int nxt200x_writetuner (struct nxt200x_state* state, u8* data) { u8 buf, count = 0; dprintk("%s\n", __func__); dprintk("Tuner Bytes: %*ph\n", 4, data + 1); /* if NXT2004, write directly to tuner. if NXT2002, write through NXT chip. 
* direct write is required for Philips TUV1236D and ALPS TDHU2 */ switch (state->demod_chip) { case NXT2004: if (i2c_writebytes(state, data[0], data+1, 4)) pr_warn("error writing to tuner\n"); /* wait until we have a lock */ while (count < 20) { i2c_readbytes(state, data[0], &buf, 1); if (buf & 0x40) return 0; msleep(100); count++; } pr_warn("timeout waiting for tuner lock\n"); break; case NXT2002: /* set the i2c transfer speed to the tuner */ buf = 0x03; nxt200x_writebytes(state, 0x20, &buf, 1); /* setup to transfer 4 bytes via i2c */ buf = 0x04; nxt200x_writebytes(state, 0x34, &buf, 1); /* write actual tuner bytes */ nxt200x_writebytes(state, 0x36, data+1, 4); /* set tuner i2c address */ buf = data[0] << 1; nxt200x_writebytes(state, 0x35, &buf, 1); /* write UC Opmode to begin transfer */ buf = 0x80; nxt200x_writebytes(state, 0x21, &buf, 1); while (count < 20) { nxt200x_readbytes(state, 0x21, &buf, 1); if ((buf & 0x80)== 0x00) return 0; msleep(100); count++; } pr_warn("timeout error writing to tuner\n"); break; default: return -EINVAL; break; } return 0; } static void nxt200x_agc_reset(struct nxt200x_state* state) { u8 buf; dprintk("%s\n", __func__); switch (state->demod_chip) { case NXT2002: buf = 0x08; nxt200x_writebytes(state, 0x08, &buf, 1); buf = 0x00; nxt200x_writebytes(state, 0x08, &buf, 1); break; case NXT2004: nxt200x_readreg_multibyte(state, 0x08, &buf, 1); buf = 0x08; nxt200x_writereg_multibyte(state, 0x08, &buf, 1); buf = 0x00; nxt200x_writereg_multibyte(state, 0x08, &buf, 1); break; default: break; } return; } static int nxt2002_load_firmware (struct dvb_frontend* fe, const struct firmware *fw) { struct nxt200x_state* state = fe->demodulator_priv; u8 buf[3], written = 0, chunkpos = 0; u16 rambase, position, crc = 0; dprintk("%s\n", __func__); dprintk("Firmware is %zu bytes\n", fw->size); /* Get the RAM base for this nxt2002 */ nxt200x_readbytes(state, 0x10, buf, 1); if (buf[0] & 0x10) rambase = 0x1000; else rambase = 0x0000; dprintk("rambase on this 
nxt2002 is %04X\n", rambase); /* Hold the micro in reset while loading firmware */ buf[0] = 0x80; nxt200x_writebytes(state, 0x2B, buf, 1); for (position = 0; position < fw->size; position++) { if (written == 0) { crc = 0; chunkpos = 0x28; buf[0] = ((rambase + position) >> 8); buf[1] = (rambase + position) & 0xFF; buf[2] = 0x81; /* write starting address */ nxt200x_writebytes(state, 0x29, buf, 3); } written++; chunkpos++; if ((written % 4) == 0) nxt200x_writebytes(state, chunkpos, &fw->data[position-3], 4); crc = nxt200x_crc(crc, fw->data[position]); if ((written == 255) || (position+1 == fw->size)) { /* write remaining bytes of firmware */ nxt200x_writebytes(state, chunkpos+4-(written %4), &fw->data[position-(written %4) + 1], written %4); buf[0] = crc << 8; buf[1] = crc & 0xFF; /* write crc */ nxt200x_writebytes(state, 0x2C, buf, 2); /* do a read to stop things */ nxt200x_readbytes(state, 0x2A, buf, 1); /* set transfer mode to complete */ buf[0] = 0x80; nxt200x_writebytes(state, 0x2B, buf, 1); written = 0; } } return 0; }; static int nxt2004_load_firmware (struct dvb_frontend* fe, const struct firmware *fw) { struct nxt200x_state* state = fe->demodulator_priv; u8 buf[3]; u16 rambase, position, crc=0; dprintk("%s\n", __func__); dprintk("Firmware is %zu bytes\n", fw->size); /* set rambase */ rambase = 0x1000; /* hold the micro in reset while loading firmware */ buf[0] = 0x80; nxt200x_writebytes(state, 0x2B, buf,1); /* calculate firmware CRC */ for (position = 0; position < fw->size; position++) { crc = nxt200x_crc(crc, fw->data[position]); } buf[0] = rambase >> 8; buf[1] = rambase & 0xFF; buf[2] = 0x81; /* write starting address */ nxt200x_writebytes(state,0x29,buf,3); for (position = 0; position < fw->size;) { nxt200x_writebytes(state, 0x2C, &fw->data[position], fw->size-position > 255 ? 255 : fw->size-position); position += (fw->size-position > 255 ? 
255 : fw->size-position); } buf[0] = crc >> 8; buf[1] = crc & 0xFF; dprintk("firmware crc is 0x%02X 0x%02X\n", buf[0], buf[1]); /* write crc */ nxt200x_writebytes(state, 0x2C, buf,2); /* do a read to stop things */ nxt200x_readbytes(state, 0x2C, buf, 1); /* set transfer mode to complete */ buf[0] = 0x80; nxt200x_writebytes(state, 0x2B, buf,1); return 0; }; static int nxt200x_setup_frontend_parameters(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct nxt200x_state* state = fe->demodulator_priv; u8 buf[5]; /* stop the micro first */ nxt200x_microcontroller_stop(state); if (state->demod_chip == NXT2004) { /* make sure demod is set to digital */ buf[0] = 0x04; nxt200x_writebytes(state, 0x14, buf, 1); buf[0] = 0x00; nxt200x_writebytes(state, 0x17, buf, 1); } /* set additional params */ switch (p->modulation) { case QAM_64: case QAM_256: /* Set punctured clock for QAM */ /* This is just a guess since I am unable to test it */ if (state->config->set_ts_params) state->config->set_ts_params(fe, 1); break; case VSB_8: /* Set non-punctured clock for VSB */ if (state->config->set_ts_params) state->config->set_ts_params(fe, 0); break; default: return -EINVAL; break; } if (fe->ops.tuner_ops.calc_regs) { /* get tuning information */ fe->ops.tuner_ops.calc_regs(fe, buf, 5); /* write frequency information */ nxt200x_writetuner(state, buf); } /* reset the agc now that tuning has been completed */ nxt200x_agc_reset(state); /* set target power level */ switch (p->modulation) { case QAM_64: case QAM_256: buf[0] = 0x74; break; case VSB_8: buf[0] = 0x70; break; default: return -EINVAL; break; } nxt200x_writebytes(state, 0x42, buf, 1); /* configure sdm */ switch (state->demod_chip) { case NXT2002: buf[0] = 0x87; break; case NXT2004: buf[0] = 0x07; break; default: return -EINVAL; break; } nxt200x_writebytes(state, 0x57, buf, 1); /* write sdm1 input */ buf[0] = 0x10; buf[1] = 0x00; switch (state->demod_chip) { case NXT2002: 
nxt200x_writereg_multibyte(state, 0x58, buf, 2); break; case NXT2004: nxt200x_writebytes(state, 0x58, buf, 2); break; default: return -EINVAL; break; } /* write sdmx input */ switch (p->modulation) { case QAM_64: buf[0] = 0x68; break; case QAM_256: buf[0] = 0x64; break; case VSB_8: buf[0] = 0x60; break; default: return -EINVAL; break; } buf[1] = 0x00; switch (state->demod_chip) { case NXT2002: nxt200x_writereg_multibyte(state, 0x5C, buf, 2); break; case NXT2004: nxt200x_writebytes(state, 0x5C, buf, 2); break; default: return -EINVAL; break; } /* write adc power lpf fc */ buf[0] = 0x05; nxt200x_writebytes(state, 0x43, buf, 1); if (state->demod_chip == NXT2004) { /* write ??? */ buf[0] = 0x00; buf[1] = 0x00; nxt200x_writebytes(state, 0x46, buf, 2); } /* write accumulator2 input */ buf[0] = 0x80; buf[1] = 0x00; switch (state->demod_chip) { case NXT2002: nxt200x_writereg_multibyte(state, 0x4B, buf, 2); break; case NXT2004: nxt200x_writebytes(state, 0x4B, buf, 2); break; default: return -EINVAL; break; } /* write kg1 */ buf[0] = 0x00; nxt200x_writebytes(state, 0x4D, buf, 1); /* write sdm12 lpf fc */ buf[0] = 0x44; nxt200x_writebytes(state, 0x55, buf, 1); /* write agc control reg */ buf[0] = 0x04; nxt200x_writebytes(state, 0x41, buf, 1); if (state->demod_chip == NXT2004) { nxt200x_readreg_multibyte(state, 0x80, buf, 1); buf[0] = 0x24; nxt200x_writereg_multibyte(state, 0x80, buf, 1); /* soft reset? 
*/ nxt200x_readreg_multibyte(state, 0x08, buf, 1); buf[0] = 0x10; nxt200x_writereg_multibyte(state, 0x08, buf, 1); nxt200x_readreg_multibyte(state, 0x08, buf, 1); buf[0] = 0x00; nxt200x_writereg_multibyte(state, 0x08, buf, 1); nxt200x_readreg_multibyte(state, 0x80, buf, 1); buf[0] = 0x04; nxt200x_writereg_multibyte(state, 0x80, buf, 1); buf[0] = 0x00; nxt200x_writereg_multibyte(state, 0x81, buf, 1); buf[0] = 0x80; buf[1] = 0x00; buf[2] = 0x00; nxt200x_writereg_multibyte(state, 0x82, buf, 3); nxt200x_readreg_multibyte(state, 0x88, buf, 1); buf[0] = 0x11; nxt200x_writereg_multibyte(state, 0x88, buf, 1); nxt200x_readreg_multibyte(state, 0x80, buf, 1); buf[0] = 0x44; nxt200x_writereg_multibyte(state, 0x80, buf, 1); } /* write agc ucgp0 */ switch (p->modulation) { case QAM_64: buf[0] = 0x02; break; case QAM_256: buf[0] = 0x03; break; case VSB_8: buf[0] = 0x00; break; default: return -EINVAL; break; } nxt200x_writebytes(state, 0x30, buf, 1); /* write agc control reg */ buf[0] = 0x00; nxt200x_writebytes(state, 0x41, buf, 1); /* write accumulator2 input */ buf[0] = 0x80; buf[1] = 0x00; switch (state->demod_chip) { case NXT2002: nxt200x_writereg_multibyte(state, 0x49, buf, 2); nxt200x_writereg_multibyte(state, 0x4B, buf, 2); break; case NXT2004: nxt200x_writebytes(state, 0x49, buf, 2); nxt200x_writebytes(state, 0x4B, buf, 2); break; default: return -EINVAL; break; } /* write agc control reg */ buf[0] = 0x04; nxt200x_writebytes(state, 0x41, buf, 1); nxt200x_microcontroller_start(state); if (state->demod_chip == NXT2004) { nxt2004_microcontroller_init(state); /* ???? 
*/ buf[0] = 0xF0; buf[1] = 0x00; nxt200x_writebytes(state, 0x5C, buf, 2); } /* adjacent channel detection should be done here, but I don't have any stations with this need so I cannot test it */ return 0; } static int nxt200x_read_status(struct dvb_frontend* fe, fe_status_t* status) { struct nxt200x_state* state = fe->demodulator_priv; u8 lock; nxt200x_readbytes(state, 0x31, &lock, 1); *status = 0; if (lock & 0x20) { *status |= FE_HAS_SIGNAL; *status |= FE_HAS_CARRIER; *status |= FE_HAS_VITERBI; *status |= FE_HAS_SYNC; *status |= FE_HAS_LOCK; } return 0; } static int nxt200x_read_ber(struct dvb_frontend* fe, u32* ber) { struct nxt200x_state* state = fe->demodulator_priv; u8 b[3]; nxt200x_readreg_multibyte(state, 0xE6, b, 3); *ber = ((b[0] << 8) + b[1]) * 8; return 0; } static int nxt200x_read_signal_strength(struct dvb_frontend* fe, u16* strength) { struct nxt200x_state* state = fe->demodulator_priv; u8 b[2]; u16 temp = 0; /* setup to read cluster variance */ b[0] = 0x00; nxt200x_writebytes(state, 0xA1, b, 1); /* get multreg val */ nxt200x_readreg_multibyte(state, 0xA6, b, 2); temp = (b[0] << 8) | b[1]; *strength = ((0x7FFF - temp) & 0x0FFF) * 16; return 0; } static int nxt200x_read_snr(struct dvb_frontend* fe, u16* snr) { struct nxt200x_state* state = fe->demodulator_priv; u8 b[2]; u16 temp = 0, temp2; u32 snrdb = 0; /* setup to read cluster variance */ b[0] = 0x00; nxt200x_writebytes(state, 0xA1, b, 1); /* get multreg val from 0xA6 */ nxt200x_readreg_multibyte(state, 0xA6, b, 2); temp = (b[0] << 8) | b[1]; temp2 = 0x7FFF - temp; /* snr will be in db */ if (temp2 > 0x7F00) snrdb = 1000*24 + ( 1000*(30-24) * ( temp2 - 0x7F00 ) / ( 0x7FFF - 0x7F00 ) ); else if (temp2 > 0x7EC0) snrdb = 1000*18 + ( 1000*(24-18) * ( temp2 - 0x7EC0 ) / ( 0x7F00 - 0x7EC0 ) ); else if (temp2 > 0x7C00) snrdb = 1000*12 + ( 1000*(18-12) * ( temp2 - 0x7C00 ) / ( 0x7EC0 - 0x7C00 ) ); else snrdb = 1000*0 + ( 1000*(12-0) * ( temp2 - 0 ) / ( 0x7C00 - 0 ) ); /* the value reported back from the 
frontend will be FFFF=32db 0000=0db */ *snr = snrdb * (0xFFFF/32000); return 0; } static int nxt200x_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks) { struct nxt200x_state* state = fe->demodulator_priv; u8 b[3]; nxt200x_readreg_multibyte(state, 0xE6, b, 3); *ucblocks = b[2]; return 0; } static int nxt200x_sleep(struct dvb_frontend* fe) { return 0; } static int nxt2002_init(struct dvb_frontend* fe) { struct nxt200x_state* state = fe->demodulator_priv; const struct firmware *fw; int ret; u8 buf[2]; /* request the firmware, this will block until someone uploads it */ pr_debug("%s: Waiting for firmware upload (%s)...\n", __func__, NXT2002_DEFAULT_FIRMWARE); ret = request_firmware(&fw, NXT2002_DEFAULT_FIRMWARE, state->i2c->dev.parent); pr_debug("%s: Waiting for firmware upload(2)...\n", __func__); if (ret) { pr_err("%s: No firmware uploaded (timeout or file not found?)" "\n", __func__); return ret; } ret = nxt2002_load_firmware(fe, fw); release_firmware(fw); if (ret) { pr_err("%s: Writing firmware to device failed\n", __func__); return ret; } pr_info("%s: Firmware upload complete\n", __func__); /* Put the micro into reset */ nxt200x_microcontroller_stop(state); /* ensure transfer is complete */ buf[0]=0x00; nxt200x_writebytes(state, 0x2B, buf, 1); /* Put the micro into reset for real this time */ nxt200x_microcontroller_stop(state); /* soft reset everything (agc,frontend,eq,fec)*/ buf[0] = 0x0F; nxt200x_writebytes(state, 0x08, buf, 1); buf[0] = 0x00; nxt200x_writebytes(state, 0x08, buf, 1); /* write agc sdm configure */ buf[0] = 0xF1; nxt200x_writebytes(state, 0x57, buf, 1); /* write mod output format */ buf[0] = 0x20; nxt200x_writebytes(state, 0x09, buf, 1); /* write fec mpeg mode */ buf[0] = 0x7E; buf[1] = 0x00; nxt200x_writebytes(state, 0xE9, buf, 2); /* write mux selection */ buf[0] = 0x00; nxt200x_writebytes(state, 0xCC, buf, 1); return 0; } static int nxt2004_init(struct dvb_frontend* fe) { struct nxt200x_state* state = fe->demodulator_priv; const struct 
firmware *fw; int ret; u8 buf[3]; /* ??? */ buf[0]=0x00; nxt200x_writebytes(state, 0x1E, buf, 1); /* request the firmware, this will block until someone uploads it */ pr_debug("%s: Waiting for firmware upload (%s)...\n", __func__, NXT2004_DEFAULT_FIRMWARE); ret = request_firmware(&fw, NXT2004_DEFAULT_FIRMWARE, state->i2c->dev.parent); pr_debug("%s: Waiting for firmware upload(2)...\n", __func__); if (ret) { pr_err("%s: No firmware uploaded (timeout or file not found?)" "\n", __func__); return ret; } ret = nxt2004_load_firmware(fe, fw); release_firmware(fw); if (ret) { pr_err("%s: Writing firmware to device failed\n", __func__); return ret; } pr_info("%s: Firmware upload complete\n", __func__); /* ensure transfer is complete */ buf[0] = 0x01; nxt200x_writebytes(state, 0x19, buf, 1); nxt2004_microcontroller_init(state); nxt200x_microcontroller_stop(state); nxt200x_microcontroller_stop(state); nxt2004_microcontroller_init(state); nxt200x_microcontroller_stop(state); /* soft reset everything (agc,frontend,eq,fec)*/ buf[0] = 0xFF; nxt200x_writereg_multibyte(state, 0x08, buf, 1); buf[0] = 0x00; nxt200x_writereg_multibyte(state, 0x08, buf, 1); /* write agc sdm configure */ buf[0] = 0xD7; nxt200x_writebytes(state, 0x57, buf, 1); /* ???*/ buf[0] = 0x07; buf[1] = 0xfe; nxt200x_writebytes(state, 0x35, buf, 2); buf[0] = 0x12; nxt200x_writebytes(state, 0x34, buf, 1); buf[0] = 0x80; nxt200x_writebytes(state, 0x21, buf, 1); /* ???*/ buf[0] = 0x21; nxt200x_writebytes(state, 0x0A, buf, 1); /* ???*/ buf[0] = 0x01; nxt200x_writereg_multibyte(state, 0x80, buf, 1); /* write fec mpeg mode */ buf[0] = 0x7E; buf[1] = 0x00; nxt200x_writebytes(state, 0xE9, buf, 2); /* write mux selection */ buf[0] = 0x00; nxt200x_writebytes(state, 0xCC, buf, 1); /* ???*/ nxt200x_readreg_multibyte(state, 0x80, buf, 1); buf[0] = 0x00; nxt200x_writereg_multibyte(state, 0x80, buf, 1); /* soft reset? 
*/ nxt200x_readreg_multibyte(state, 0x08, buf, 1); buf[0] = 0x10; nxt200x_writereg_multibyte(state, 0x08, buf, 1); nxt200x_readreg_multibyte(state, 0x08, buf, 1); buf[0] = 0x00; nxt200x_writereg_multibyte(state, 0x08, buf, 1); /* ???*/ nxt200x_readreg_multibyte(state, 0x80, buf, 1); buf[0] = 0x01; nxt200x_writereg_multibyte(state, 0x80, buf, 1); buf[0] = 0x70; nxt200x_writereg_multibyte(state, 0x81, buf, 1); buf[0] = 0x31; buf[1] = 0x5E; buf[2] = 0x66; nxt200x_writereg_multibyte(state, 0x82, buf, 3); nxt200x_readreg_multibyte(state, 0x88, buf, 1); buf[0] = 0x11; nxt200x_writereg_multibyte(state, 0x88, buf, 1); nxt200x_readreg_multibyte(state, 0x80, buf, 1); buf[0] = 0x40; nxt200x_writereg_multibyte(state, 0x80, buf, 1); nxt200x_readbytes(state, 0x10, buf, 1); buf[0] = 0x10; nxt200x_writebytes(state, 0x10, buf, 1); nxt200x_readbytes(state, 0x0A, buf, 1); buf[0] = 0x21; nxt200x_writebytes(state, 0x0A, buf, 1); nxt2004_microcontroller_init(state); buf[0] = 0x21; nxt200x_writebytes(state, 0x0A, buf, 1); buf[0] = 0x7E; nxt200x_writebytes(state, 0xE9, buf, 1); buf[0] = 0x00; nxt200x_writebytes(state, 0xEA, buf, 1); nxt200x_readreg_multibyte(state, 0x80, buf, 1); buf[0] = 0x00; nxt200x_writereg_multibyte(state, 0x80, buf, 1); nxt200x_readreg_multibyte(state, 0x80, buf, 1); buf[0] = 0x00; nxt200x_writereg_multibyte(state, 0x80, buf, 1); /* soft reset? 
*/ nxt200x_readreg_multibyte(state, 0x08, buf, 1); buf[0] = 0x10; nxt200x_writereg_multibyte(state, 0x08, buf, 1); nxt200x_readreg_multibyte(state, 0x08, buf, 1); buf[0] = 0x00; nxt200x_writereg_multibyte(state, 0x08, buf, 1); nxt200x_readreg_multibyte(state, 0x80, buf, 1); buf[0] = 0x04; nxt200x_writereg_multibyte(state, 0x80, buf, 1); buf[0] = 0x00; nxt200x_writereg_multibyte(state, 0x81, buf, 1); buf[0] = 0x80; buf[1] = 0x00; buf[2] = 0x00; nxt200x_writereg_multibyte(state, 0x82, buf, 3); nxt200x_readreg_multibyte(state, 0x88, buf, 1); buf[0] = 0x11; nxt200x_writereg_multibyte(state, 0x88, buf, 1); nxt200x_readreg_multibyte(state, 0x80, buf, 1); buf[0] = 0x44; nxt200x_writereg_multibyte(state, 0x80, buf, 1); /* initialize tuner */ nxt200x_readbytes(state, 0x10, buf, 1); buf[0] = 0x12; nxt200x_writebytes(state, 0x10, buf, 1); buf[0] = 0x04; nxt200x_writebytes(state, 0x13, buf, 1); buf[0] = 0x00; nxt200x_writebytes(state, 0x16, buf, 1); buf[0] = 0x04; nxt200x_writebytes(state, 0x14, buf, 1); buf[0] = 0x00; nxt200x_writebytes(state, 0x14, buf, 1); nxt200x_writebytes(state, 0x17, buf, 1); nxt200x_writebytes(state, 0x14, buf, 1); nxt200x_writebytes(state, 0x17, buf, 1); return 0; } static int nxt200x_init(struct dvb_frontend* fe) { struct nxt200x_state* state = fe->demodulator_priv; int ret = 0; if (!state->initialised) { switch (state->demod_chip) { case NXT2002: ret = nxt2002_init(fe); break; case NXT2004: ret = nxt2004_init(fe); break; default: return -EINVAL; break; } state->initialised = 1; } return ret; } static int nxt200x_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fesettings) { fesettings->min_delay_ms = 500; fesettings->step_size = 0; fesettings->max_drift = 0; return 0; } static void nxt200x_release(struct dvb_frontend* fe) { struct nxt200x_state* state = fe->demodulator_priv; kfree(state); } static struct dvb_frontend_ops nxt200x_ops; struct dvb_frontend* nxt200x_attach(const struct nxt200x_config* config, struct 
i2c_adapter* i2c) { struct nxt200x_state* state = NULL; u8 buf [] = {0,0,0,0,0}; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct nxt200x_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ state->config = config; state->i2c = i2c; state->initialised = 0; /* read card id */ nxt200x_readbytes(state, 0x00, buf, 5); dprintk("NXT info: %*ph\n", 5, buf); /* set demod chip */ switch (buf[0]) { case 0x04: state->demod_chip = NXT2002; pr_info("NXT2002 Detected\n"); break; case 0x05: state->demod_chip = NXT2004; pr_info("NXT2004 Detected\n"); break; default: goto error; } /* make sure demod chip is supported */ switch (state->demod_chip) { case NXT2002: if (buf[0] != 0x04) goto error; /* device id */ if (buf[1] != 0x02) goto error; /* fab id */ if (buf[2] != 0x11) goto error; /* month */ if (buf[3] != 0x20) goto error; /* year msb */ if (buf[4] != 0x00) goto error; /* year lsb */ break; case NXT2004: if (buf[0] != 0x05) goto error; /* device id */ break; default: goto error; } /* create dvb_frontend */ memcpy(&state->frontend.ops, &nxt200x_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; error: kfree(state); pr_err("Unknown/Unsupported NXT chip: %*ph\n", 5, buf); return NULL; } static struct dvb_frontend_ops nxt200x_ops = { .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B }, .info = { .name = "Nextwave NXT200X VSB/QAM frontend", .frequency_min = 54000000, .frequency_max = 860000000, .frequency_stepsize = 166666, /* stepsize is just a guess */ .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_8VSB | FE_CAN_QAM_64 | FE_CAN_QAM_256 }, .release = nxt200x_release, .init = nxt200x_init, .sleep = nxt200x_sleep, .set_frontend = nxt200x_setup_frontend_parameters, .get_tune_settings = nxt200x_get_tune_settings, .read_status = nxt200x_read_status, .read_ber = nxt200x_read_ber, .read_signal_strength = 
nxt200x_read_signal_strength, .read_snr = nxt200x_read_snr, .read_ucblocks = nxt200x_read_ucblocks, }; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); MODULE_DESCRIPTION("NXT200X (ATSC 8VSB & ITU-T J.83 AnnexB 64/256 QAM) Demodulator Driver"); MODULE_AUTHOR("Kirk Lapray, Michael Krufky, Jean-Francois Thibert, and Taylor Jacob"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(nxt200x_attach);
gpl-2.0
raininja/android_kernel_asus_a500cg
arch/mips/mti-malta/malta-platform.c
2227
3381
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006, 07 MIPS Technologies, Inc.
 *   written by Ralf Baechle (ralf@linux-mips.org)
 *     written by Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2008 Wind River Systems, Inc.
 *   updated by Tiejun Chen <tiejun.chen@windriver.com>
 *
 * 1. Probe driver for the Malta's UART ports:
 *
 *   o 2 ports in the SMC SuperIO
 *   o 1 port in the CBUS UART, a discrete 16550 which normally is only used
 *     for bringups.
 *
 * We don't use 8250_platform.c on Malta as it would result in the CBUS
 * UART becoming ttyS0.
 *
 * 2. Register RTC-CMOS platform device on Malta.
 */
#include <linux/init.h>
#include <linux/serial_8250.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
#include <asm/mips-boards/maltaint.h>
#include <mtd/mtd-abi.h>

/* Template for the two legacy SMC SuperIO serial ports (ISA I/O space). */
#define SMC_PORT(base, int)						\
{									\
	.iobase		= base,						\
	.irq		= int,						\
	.uartclk	= 1843200,					\
	.iotype		= UPIO_PORT,					\
	.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,		\
	.regshift	= 0,						\
}

#define CBUS_UART_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP)

static struct plat_serial8250_port uart8250_data[] = {
	SMC_PORT(0x3F8, 4),
	SMC_PORT(0x2F8, 3),
	{
		.mapbase	= 0x1f000900,	/* The CBUS UART */
		.irq		= MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
		.uartclk	= 3686400,	/* Twice the usual clk! */
		.iotype		= UPIO_MEM32,
		.flags		= CBUS_UART_FLAGS,
		.regshift	= 3,
	},
	{ },					/* sentinel */
};

static struct platform_device malta_uart8250_device = {
	.name			= "serial8250",
	.id			= PLAT8250_DEV_PLATFORM,
	.dev			= {
		.platform_data	= uart8250_data,
	},
};

/* NOTE(review): deliberately non-static in the original; external code may
 * reference it, so the linkage is left unchanged. */
struct resource malta_rtc_resources[] = {
	{
		.start	= RTC_PORT(0),
		.end	= RTC_PORT(7),
		.flags	= IORESOURCE_IO,
	}, {
		.start	= RTC_IRQ,
		.end	= RTC_IRQ,
		.flags	= IORESOURCE_IRQ,
	}
};

static struct platform_device malta_rtc_device = {
	.name		= "rtc_cmos",
	.id		= -1,
	.resource	= malta_rtc_resources,
	.num_resources	= ARRAY_SIZE(malta_rtc_resources),
};

static struct mtd_partition malta_mtd_partitions[] = {
	{
		.name =		"YAMON",
		.offset =	0x0,
		.size =		0x100000,
		.mask_flags =	MTD_WRITEABLE
	}, {
		.name =		"User FS",
		.offset =	0x100000,
		.size =		0x2e0000
	}, {
		.name =		"Board Config",
		.offset =	0x3e0000,
		.size =		0x020000,
		.mask_flags =	MTD_WRITEABLE
	}
};

static struct physmap_flash_data malta_flash_data = {
	.width		= 4,
	.nr_parts	= ARRAY_SIZE(malta_mtd_partitions),
	.parts		= malta_mtd_partitions
};

static struct resource malta_flash_resource = {
	.start		= 0x1e000000,
	.end		= 0x1e3fffff,
	.flags		= IORESOURCE_MEM
};

static struct platform_device malta_flash_device = {
	.name		= "physmap-flash",
	.id		= 0,
	.dev		= {
		.platform_data	= &malta_flash_data,
	},
	.num_resources	= 1,
	.resource	= &malta_flash_resource,
};

static struct platform_device *malta_devices[] __initdata = {
	&malta_uart8250_device,
	&malta_rtc_device,
	&malta_flash_device,
};

/*
 * Register all Malta board devices (UARTs, RTC, flash) at device_initcall
 * time.  Returns 0 on success or the error from platform_add_devices().
 * The original "err = ...; if (err) return err; return 0;" sequence was
 * collapsed to a direct return — behaviorally identical, idiomatic.
 */
static int __init malta_add_devices(void)
{
	return platform_add_devices(malta_devices, ARRAY_SIZE(malta_devices));
}
device_initcall(malta_add_devices);
gpl-2.0
drewis/android_kernel_htc_ruby
drivers/media/video/pwc/pwc-misc.c
2739
4174
/* Linux driver for Philips webcam
   Various miscellaneous functions and tables.
   (C) 1999-2003 Nemosoft Unv.
   (C) 2004-2006 Luc Saillard (luc@saillard.org)

   NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
   driver and thus may have bugs that are not present in the original version.
   Please send bug reports and support requests to <luc@saillard.org>.
   The decompression routines have been implemented by reverse-engineering the
   Nemosoft binary pwcx module. Caveat emptor.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/

#include "pwc.h"

/* Table of the frame geometries the hardware knows about, indexed by PSZ_*. */
const struct pwc_coord pwc_image_sizes[PSZ_MAX] =
{
	{ 128,  96, 0 }, /* sqcif */
	{ 160, 120, 0 }, /* qsif */
	{ 176, 144, 0 }, /* qcif */
	{ 320, 240, 0 }, /* sif */
	{ 352, 288, 0 }, /* cif */
	{ 640, 480, 0 }, /* vga */
};

/* x,y -> PSZ_ */
int pwc_decode_size(struct pwc_device *pdev, int width, int height)
{
	int best, idx;

	/* Reject requests beyond the device ceiling first.  RAW and YUV420
	   modes have different limits: without the decompressor loaded (or
	   in RAW mode) the maximum viewable size is smaller. */
	if (pdev->pixfmt != V4L2_PIX_FMT_YUV420) {
		if (width > pdev->abs_max.x || height > pdev->abs_max.y) {
			PWC_DEBUG_SIZE("VIDEO_PALETTE_RAW: going beyond abs_max.\n");
			return -1;
		}
	} else {
		if (width > pdev->view_max.x || height > pdev->view_max.y) {
			PWC_DEBUG_SIZE("VIDEO_PALETTE_not RAW: going beyond view_max.\n");
			return -1;
		}
	}

	/* Scan the (ascending) size table; the last supported entry that
	   still fits is the largest camera mode not exceeding the request.
	   Stays -1 if nothing fits. */
	best = -1;
	for (idx = 0; idx < PSZ_MAX; idx++) {
		if (!(pdev->image_mask & (1 << idx)))
			continue;
		if (pwc_image_sizes[idx].x <= width &&
		    pwc_image_sizes[idx].y <= height)
			best = idx;
	}
	return best;
}

/* initialize variables depending on type and decompressor*/
void pwc_construct(struct pwc_device *pdev)
{
	if (DEVICE_USE_CODEC1(pdev->type)) {
		/* Codec 1 cameras: CIF-class sensor, no frame header/trailer. */
		pdev->view_min.x = 128;
		pdev->view_min.y = 96;
		pdev->view_max.x = 352;
		pdev->view_max.y = 288;
		pdev->abs_max.x  = 352;
		pdev->abs_max.y  = 288;
		pdev->image_mask = 1 << PSZ_SQCIF | 1 << PSZ_QCIF | 1 << PSZ_CIF;
		pdev->vcinterface = 2;
		pdev->vendpoint = 4;
		pdev->frame_header_size = 0;
		pdev->frame_trailer_size = 0;
	} else if (DEVICE_USE_CODEC3(pdev->type)) {
		/* Codec 3 (ToUCam-class): VGA sensor with framed transfers. */
		pdev->view_min.x = 160;
		pdev->view_min.y = 120;
		pdev->view_max.x = 640;
		pdev->view_max.y = 480;
		pdev->abs_max.x  = 640;
		pdev->abs_max.y  = 480;
		pdev->image_mask = 1 << PSZ_QSIF | 1 << PSZ_SIF | 1 << PSZ_VGA;
		pdev->vcinterface = 3;
		pdev->vendpoint = 5;
		pdev->frame_header_size = TOUCAM_HEADER_SIZE;
		pdev->frame_trailer_size = TOUCAM_TRAILER_SIZE;
	} else /* if (DEVICE_USE_CODEC2(pdev->type)) */ {
		pdev->view_min.x = 128;
		pdev->view_min.y = 96;
		/* Anthill bug #38: PWC always reports max size, even without PWCX */
		pdev->view_max.x = 640;
		pdev->view_max.y = 480;
		pdev->abs_max.x  = 640;
		pdev->abs_max.y  = 480;
		pdev->image_mask = 1 << PSZ_SQCIF | 1 << PSZ_QSIF |
				   1 << PSZ_QCIF | 1 << PSZ_SIF |
				   1 << PSZ_CIF | 1 << PSZ_VGA;
		pdev->vcinterface = 3;
		pdev->vendpoint = 4;
		pdev->frame_header_size = 0;
		pdev->frame_trailer_size = 0;
	}
	pdev->pixfmt = V4L2_PIX_FMT_YUV420; /* default */
	pdev->view_min.size = pdev->view_min.x * pdev->view_min.y;
	pdev->view_max.size = pdev->view_max.x * pdev->view_max.y;
	/* length of image, in YUV format; always allocate enough memory. */
	pdev->len_per_image = PAGE_ALIGN((pdev->abs_max.x * pdev->abs_max.y * 3) / 2);
}
gpl-2.0
brymaster5000/BrYCS_kernel2
arch/powerpc/platforms/83xx/mpc830x_rdb.c
2995
2315
/* * arch/powerpc/platforms/83xx/mpc830x_rdb.c * * Description: MPC830x RDB board specific routines. * This file is based on mpc831x_rdb.c * * Copyright (C) Freescale Semiconductor, Inc. 2009. All rights reserved. * Copyright (C) 2010. Ilya Yanok, Emcraft Systems, yanok@emcraft.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/pci.h> #include <linux/of_platform.h> #include <asm/time.h> #include <asm/ipic.h> #include <asm/udbg.h> #include <sysdev/fsl_pci.h> #include <sysdev/fsl_soc.h> #include "mpc83xx.h" /* * Setup the architecture */ static void __init mpc830x_rdb_setup_arch(void) { #ifdef CONFIG_PCI struct device_node *np; #endif if (ppc_md.progress) ppc_md.progress("mpc830x_rdb_setup_arch()", 0); #ifdef CONFIG_PCI for_each_compatible_node(np, "pci", "fsl,mpc8308-pcie") mpc83xx_add_bridge(np); #endif mpc831x_usb_cfg(); } static void __init mpc830x_rdb_init_IRQ(void) { struct device_node *np; np = of_find_node_by_type(NULL, "ipic"); if (!np) return; ipic_init(np, 0); /* Initialize the default interrupt mapping priorities, * in case the boot rom changed something on us. 
*/ ipic_set_default_priority(); } static const char *board[] __initdata = { "MPC8308RDB", "fsl,mpc8308rdb", "denx,mpc8308_p1m", NULL }; /* * Called very early, MMU is off, device-tree isn't unflattened */ static int __init mpc830x_rdb_probe(void) { return of_flat_dt_match(of_get_flat_dt_root(), board); } static struct of_device_id __initdata of_bus_ids[] = { { .compatible = "simple-bus" }, { .compatible = "gianfar" }, {}, }; static int __init declare_of_platform_devices(void) { of_platform_bus_probe(NULL, of_bus_ids, NULL); return 0; } machine_device_initcall(mpc830x_rdb, declare_of_platform_devices); define_machine(mpc830x_rdb) { .name = "MPC830x RDB", .probe = mpc830x_rdb_probe, .setup_arch = mpc830x_rdb_setup_arch, .init_IRQ = mpc830x_rdb_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, };
gpl-2.0
TheStrix/caf_msm8916
sound/pci/ice1712/prodigy_hifi.c
3251
32363
/* * ALSA driver for ICEnsemble VT1724 (Envy24HT) * * Lowlevel functions for Audiotrak Prodigy 7.1 Hifi * based on pontis.c * * Copyright (c) 2007 Julian Scheel <julian@jusst.de> * Copyright (c) 2007 allank * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/mutex.h> #include <sound/core.h> #include <sound/info.h> #include <sound/tlv.h> #include "ice1712.h" #include "envy24ht.h" #include "prodigy_hifi.h" struct prodigy_hifi_spec { unsigned short master[2]; unsigned short vol[8]; }; /* I2C addresses */ #define WM_DEV 0x34 /* WM8776 registers */ #define WM_HP_ATTEN_L 0x00 /* headphone left attenuation */ #define WM_HP_ATTEN_R 0x01 /* headphone left attenuation */ #define WM_HP_MASTER 0x02 /* headphone master (both channels), override LLR */ #define WM_DAC_ATTEN_L 0x03 /* digital left attenuation */ #define WM_DAC_ATTEN_R 0x04 #define WM_DAC_MASTER 0x05 #define WM_PHASE_SWAP 0x06 /* DAC phase swap */ #define WM_DAC_CTRL1 0x07 #define WM_DAC_MUTE 0x08 #define WM_DAC_CTRL2 0x09 #define WM_DAC_INT 0x0a #define WM_ADC_INT 0x0b #define WM_MASTER_CTRL 0x0c #define WM_POWERDOWN 0x0d #define WM_ADC_ATTEN_L 0x0e #define WM_ADC_ATTEN_R 0x0f #define WM_ALC_CTRL1 0x10 #define 
WM_ALC_CTRL2 0x11 #define WM_ALC_CTRL3 0x12 #define WM_NOISE_GATE 0x13 #define WM_LIMITER 0x14 #define WM_ADC_MUX 0x15 #define WM_OUT_MUX 0x16 #define WM_RESET 0x17 /* Analog Recording Source :- Mic, LineIn, CD/Video, */ /* implement capture source select control for WM8776 */ #define WM_AIN1 "AIN1" #define WM_AIN2 "AIN2" #define WM_AIN3 "AIN3" #define WM_AIN4 "AIN4" #define WM_AIN5 "AIN5" /* GPIO pins of envy24ht connected to wm8766 */ #define WM8766_SPI_CLK (1<<17) /* CLK, Pin97 on ICE1724 */ #define WM8766_SPI_MD (1<<16) /* DATA VT1724 -> WM8766, Pin96 */ #define WM8766_SPI_ML (1<<18) /* Latch, Pin98 */ /* WM8766 registers */ #define WM8766_DAC_CTRL 0x02 /* DAC Control */ #define WM8766_INT_CTRL 0x03 /* Interface Control */ #define WM8766_DAC_CTRL2 0x09 #define WM8766_DAC_CTRL3 0x0a #define WM8766_RESET 0x1f #define WM8766_LDA1 0x00 #define WM8766_LDA2 0x04 #define WM8766_LDA3 0x06 #define WM8766_RDA1 0x01 #define WM8766_RDA2 0x05 #define WM8766_RDA3 0x07 #define WM8766_MUTE1 0x0C #define WM8766_MUTE2 0x0F /* * Prodigy HD2 */ #define AK4396_ADDR 0x00 #define AK4396_CSN (1 << 8) /* CSN->GPIO8, pin 75 */ #define AK4396_CCLK (1 << 9) /* CCLK->GPIO9, pin 76 */ #define AK4396_CDTI (1 << 10) /* CDTI->GPIO10, pin 77 */ /* ak4396 registers */ #define AK4396_CTRL1 0x00 #define AK4396_CTRL2 0x01 #define AK4396_CTRL3 0x02 #define AK4396_LCH_ATT 0x03 #define AK4396_RCH_ATT 0x04 /* * get the current register value of WM codec */ static unsigned short wm_get(struct snd_ice1712 *ice, int reg) { reg <<= 1; return ((unsigned short)ice->akm[0].images[reg] << 8) | ice->akm[0].images[reg + 1]; } /* * set the register value of WM codec and remember it */ static void wm_put_nocache(struct snd_ice1712 *ice, int reg, unsigned short val) { unsigned short cval; cval = (reg << 9) | val; snd_vt1724_write_i2c(ice, WM_DEV, cval >> 8, cval & 0xff); } static void wm_put(struct snd_ice1712 *ice, int reg, unsigned short val) { wm_put_nocache(ice, reg, val); reg <<= 1; ice->akm[0].images[reg] = 
val >> 8; ice->akm[0].images[reg + 1] = val; } /* * write data in the SPI mode */ static void set_gpio_bit(struct snd_ice1712 *ice, unsigned int bit, int val) { unsigned int tmp = snd_ice1712_gpio_read(ice); if (val) tmp |= bit; else tmp &= ~bit; snd_ice1712_gpio_write(ice, tmp); } /* * SPI implementation for WM8766 codec - only writing supported, no readback */ static void wm8766_spi_send_word(struct snd_ice1712 *ice, unsigned int data) { int i; for (i = 0; i < 16; i++) { set_gpio_bit(ice, WM8766_SPI_CLK, 0); udelay(1); set_gpio_bit(ice, WM8766_SPI_MD, data & 0x8000); udelay(1); set_gpio_bit(ice, WM8766_SPI_CLK, 1); udelay(1); data <<= 1; } } static void wm8766_spi_write(struct snd_ice1712 *ice, unsigned int reg, unsigned int data) { unsigned int block; snd_ice1712_gpio_set_dir(ice, WM8766_SPI_MD| WM8766_SPI_CLK|WM8766_SPI_ML); snd_ice1712_gpio_set_mask(ice, ~(WM8766_SPI_MD| WM8766_SPI_CLK|WM8766_SPI_ML)); /* latch must be low when writing */ set_gpio_bit(ice, WM8766_SPI_ML, 0); block = (reg << 9) | (data & 0x1ff); wm8766_spi_send_word(ice, block); /* REGISTER ADDRESS */ /* release latch */ set_gpio_bit(ice, WM8766_SPI_ML, 1); udelay(1); /* restore */ snd_ice1712_gpio_set_mask(ice, ice->gpio.write_mask); snd_ice1712_gpio_set_dir(ice, ice->gpio.direction); } /* * serial interface for ak4396 - only writing supported, no readback */ static void ak4396_send_word(struct snd_ice1712 *ice, unsigned int data) { int i; for (i = 0; i < 16; i++) { set_gpio_bit(ice, AK4396_CCLK, 0); udelay(1); set_gpio_bit(ice, AK4396_CDTI, data & 0x8000); udelay(1); set_gpio_bit(ice, AK4396_CCLK, 1); udelay(1); data <<= 1; } } static void ak4396_write(struct snd_ice1712 *ice, unsigned int reg, unsigned int data) { unsigned int block; snd_ice1712_gpio_set_dir(ice, AK4396_CSN|AK4396_CCLK|AK4396_CDTI); snd_ice1712_gpio_set_mask(ice, ~(AK4396_CSN|AK4396_CCLK|AK4396_CDTI)); /* latch must be low when writing */ set_gpio_bit(ice, AK4396_CSN, 0); block = ((AK4396_ADDR & 0x03) << 14) | (1 << 13) | 
((reg & 0x1f) << 8) | (data & 0xff); ak4396_send_word(ice, block); /* REGISTER ADDRESS */ /* release latch */ set_gpio_bit(ice, AK4396_CSN, 1); udelay(1); /* restore */ snd_ice1712_gpio_set_mask(ice, ice->gpio.write_mask); snd_ice1712_gpio_set_dir(ice, ice->gpio.direction); } /* * ak4396 mixers */ /* * DAC volume attenuation mixer control (-64dB to 0dB) */ static int ak4396_dac_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; /* mute */ uinfo->value.integer.max = 0xFF; /* linear */ return 0; } static int ak4396_dac_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); struct prodigy_hifi_spec *spec = ice->spec; int i; for (i = 0; i < 2; i++) ucontrol->value.integer.value[i] = spec->vol[i]; return 0; } static int ak4396_dac_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); struct prodigy_hifi_spec *spec = ice->spec; int i; int change = 0; mutex_lock(&ice->gpio_mutex); for (i = 0; i < 2; i++) { if (ucontrol->value.integer.value[i] != spec->vol[i]) { spec->vol[i] = ucontrol->value.integer.value[i]; ak4396_write(ice, AK4396_LCH_ATT + i, spec->vol[i] & 0xff); change = 1; } } mutex_unlock(&ice->gpio_mutex); return change; } static const DECLARE_TLV_DB_SCALE(db_scale_wm_dac, -12700, 100, 1); static const DECLARE_TLV_DB_LINEAR(ak4396_db_scale, TLV_DB_GAIN_MUTE, 0); static struct snd_kcontrol_new prodigy_hd2_controls[] = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Front Playback Volume", .info = ak4396_dac_vol_info, .get = ak4396_dac_vol_get, .put = ak4396_dac_vol_put, .tlv = { .p = ak4396_db_scale }, }, }; /* --------------- */ /* * Logarithmic volume values for WM87*6 * Computed as 20 * Log10(255 / x) */ static 
const unsigned char wm_vol[256] = { 127, 48, 42, 39, 36, 34, 33, 31, 30, 29, 28, 27, 27, 26, 25, 25, 24, 24, 23, 23, 22, 22, 21, 21, 21, 20, 20, 20, 19, 19, 19, 18, 18, 18, 18, 17, 17, 17, 17, 16, 16, 16, 16, 15, 15, 15, 15, 15, 15, 14, 14, 14, 14, 14, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 12, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; #define WM_VOL_MAX (sizeof(wm_vol) - 1) #define WM_VOL_MUTE 0x8000 #define DAC_0dB 0xff #define DAC_RES 128 #define DAC_MIN (DAC_0dB - DAC_RES) static void wm_set_vol(struct snd_ice1712 *ice, unsigned int index, unsigned short vol, unsigned short master) { unsigned char nvol; if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE)) nvol = 0; else { nvol = (((vol & ~WM_VOL_MUTE) * (master & ~WM_VOL_MUTE)) / 128) & WM_VOL_MAX; nvol = (nvol ? (nvol + DAC_MIN) : 0) & 0xff; } wm_put(ice, index, nvol); wm_put_nocache(ice, index, 0x100 | nvol); } static void wm8766_set_vol(struct snd_ice1712 *ice, unsigned int index, unsigned short vol, unsigned short master) { unsigned char nvol; if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE)) nvol = 0; else { nvol = (((vol & ~WM_VOL_MUTE) * (master & ~WM_VOL_MUTE)) / 128) & WM_VOL_MAX; nvol = (nvol ? 
(nvol + DAC_MIN) : 0) & 0xff; } wm8766_spi_write(ice, index, (0x0100 | nvol)); } /* * DAC volume attenuation mixer control (-64dB to 0dB) */ static int wm_dac_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; /* mute */ uinfo->value.integer.max = DAC_RES; /* 0dB, 0.5dB step */ return 0; } static int wm_dac_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); struct prodigy_hifi_spec *spec = ice->spec; int i; for (i = 0; i < 2; i++) ucontrol->value.integer.value[i] = spec->vol[2 + i] & ~WM_VOL_MUTE; return 0; } static int wm_dac_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); struct prodigy_hifi_spec *spec = ice->spec; int i, idx, change = 0; mutex_lock(&ice->gpio_mutex); for (i = 0; i < 2; i++) { if (ucontrol->value.integer.value[i] != spec->vol[2 + i]) { idx = WM_DAC_ATTEN_L + i; spec->vol[2 + i] &= WM_VOL_MUTE; spec->vol[2 + i] |= ucontrol->value.integer.value[i]; wm_set_vol(ice, idx, spec->vol[2 + i], spec->master[i]); change = 1; } } mutex_unlock(&ice->gpio_mutex); return change; } /* * WM8766 DAC volume attenuation mixer control */ static int wm8766_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int voices = kcontrol->private_value >> 8; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = voices; uinfo->value.integer.min = 0; /* mute */ uinfo->value.integer.max = DAC_RES; /* 0dB */ return 0; } static int wm8766_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); struct prodigy_hifi_spec *spec = ice->spec; int i, ofs, voices; voices = kcontrol->private_value >> 8; ofs = kcontrol->private_value & 0xff; for (i = 0; i < voices; i++) ucontrol->value.integer.value[i] = 
spec->vol[ofs + i]; return 0; } static int wm8766_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); struct prodigy_hifi_spec *spec = ice->spec; int i, idx, ofs, voices; int change = 0; voices = kcontrol->private_value >> 8; ofs = kcontrol->private_value & 0xff; mutex_lock(&ice->gpio_mutex); for (i = 0; i < voices; i++) { if (ucontrol->value.integer.value[i] != spec->vol[ofs + i]) { idx = WM8766_LDA1 + ofs + i; spec->vol[ofs + i] &= WM_VOL_MUTE; spec->vol[ofs + i] |= ucontrol->value.integer.value[i]; wm8766_set_vol(ice, idx, spec->vol[ofs + i], spec->master[i]); change = 1; } } mutex_unlock(&ice->gpio_mutex); return change; } /* * Master volume attenuation mixer control / applied to WM8776+WM8766 */ static int wm_master_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = DAC_RES; return 0; } static int wm_master_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); struct prodigy_hifi_spec *spec = ice->spec; int i; for (i = 0; i < 2; i++) ucontrol->value.integer.value[i] = spec->master[i]; return 0; } static int wm_master_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); struct prodigy_hifi_spec *spec = ice->spec; int ch, change = 0; mutex_lock(&ice->gpio_mutex); for (ch = 0; ch < 2; ch++) { if (ucontrol->value.integer.value[ch] != spec->master[ch]) { spec->master[ch] = ucontrol->value.integer.value[ch]; /* Apply to front DAC */ wm_set_vol(ice, WM_DAC_ATTEN_L + ch, spec->vol[2 + ch], spec->master[ch]); wm8766_set_vol(ice, WM8766_LDA1 + ch, spec->vol[0 + ch], spec->master[ch]); wm8766_set_vol(ice, WM8766_LDA2 + ch, spec->vol[4 + ch], spec->master[ch]); wm8766_set_vol(ice, WM8766_LDA3 + 
ch, spec->vol[6 + ch], spec->master[ch]); change = 1; } } mutex_unlock(&ice->gpio_mutex); return change; } /* KONSTI */ static int wm_adc_mux_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static char* texts[32] = { "NULL", WM_AIN1, WM_AIN2, WM_AIN1 "+" WM_AIN2, WM_AIN3, WM_AIN1 "+" WM_AIN3, WM_AIN2 "+" WM_AIN3, WM_AIN1 "+" WM_AIN2 "+" WM_AIN3, WM_AIN4, WM_AIN1 "+" WM_AIN4, WM_AIN2 "+" WM_AIN4, WM_AIN1 "+" WM_AIN2 "+" WM_AIN4, WM_AIN3 "+" WM_AIN4, WM_AIN1 "+" WM_AIN3 "+" WM_AIN4, WM_AIN2 "+" WM_AIN3 "+" WM_AIN4, WM_AIN1 "+" WM_AIN2 "+" WM_AIN3 "+" WM_AIN4, WM_AIN5, WM_AIN1 "+" WM_AIN5, WM_AIN2 "+" WM_AIN5, WM_AIN1 "+" WM_AIN2 "+" WM_AIN5, WM_AIN3 "+" WM_AIN5, WM_AIN1 "+" WM_AIN3 "+" WM_AIN5, WM_AIN2 "+" WM_AIN3 "+" WM_AIN5, WM_AIN1 "+" WM_AIN2 "+" WM_AIN3 "+" WM_AIN5, WM_AIN4 "+" WM_AIN5, WM_AIN1 "+" WM_AIN4 "+" WM_AIN5, WM_AIN2 "+" WM_AIN4 "+" WM_AIN5, WM_AIN1 "+" WM_AIN2 "+" WM_AIN4 "+" WM_AIN5, WM_AIN3 "+" WM_AIN4 "+" WM_AIN5, WM_AIN1 "+" WM_AIN3 "+" WM_AIN4 "+" WM_AIN5, WM_AIN2 "+" WM_AIN3 "+" WM_AIN4 "+" WM_AIN5, WM_AIN1 "+" WM_AIN2 "+" WM_AIN3 "+" WM_AIN4 "+" WM_AIN5 }; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = 32; if (uinfo->value.enumerated.item > 31) uinfo->value.enumerated.item = 31; strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int wm_adc_mux_enum_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); mutex_lock(&ice->gpio_mutex); ucontrol->value.integer.value[0] = wm_get(ice, WM_ADC_MUX) & 0x1f; mutex_unlock(&ice->gpio_mutex); return 0; } static int wm_adc_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned short oval, nval; int change = 0; mutex_lock(&ice->gpio_mutex); oval = wm_get(ice, WM_ADC_MUX); nval = (oval & 0xe0) | ucontrol->value.integer.value[0]; 
if (nval != oval) { wm_put(ice, WM_ADC_MUX, nval); change = 1; } mutex_unlock(&ice->gpio_mutex); return change; } /* KONSTI */ /* * ADC gain mixer control (-64dB to 0dB) */ #define ADC_0dB 0xcf #define ADC_RES 128 #define ADC_MIN (ADC_0dB - ADC_RES) static int wm_adc_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; /* mute (-64dB) */ uinfo->value.integer.max = ADC_RES; /* 0dB, 0.5dB step */ return 0; } static int wm_adc_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned short val; int i; mutex_lock(&ice->gpio_mutex); for (i = 0; i < 2; i++) { val = wm_get(ice, WM_ADC_ATTEN_L + i) & 0xff; val = val > ADC_MIN ? (val - ADC_MIN) : 0; ucontrol->value.integer.value[i] = val; } mutex_unlock(&ice->gpio_mutex); return 0; } static int wm_adc_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned short ovol, nvol; int i, idx, change = 0; mutex_lock(&ice->gpio_mutex); for (i = 0; i < 2; i++) { nvol = ucontrol->value.integer.value[i]; nvol = nvol ? (nvol + ADC_MIN) : 0; idx = WM_ADC_ATTEN_L + i; ovol = wm_get(ice, idx) & 0xff; if (ovol != nvol) { wm_put(ice, idx, nvol); change = 1; } } mutex_unlock(&ice->gpio_mutex); return change; } /* * ADC input mux mixer control */ #define wm_adc_mux_info snd_ctl_boolean_mono_info static int wm_adc_mux_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); int bit = kcontrol->private_value; mutex_lock(&ice->gpio_mutex); ucontrol->value.integer.value[0] = (wm_get(ice, WM_ADC_MUX) & (1 << bit)) ? 
1 : 0; mutex_unlock(&ice->gpio_mutex); return 0; } static int wm_adc_mux_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); int bit = kcontrol->private_value; unsigned short oval, nval; int change; mutex_lock(&ice->gpio_mutex); nval = oval = wm_get(ice, WM_ADC_MUX); if (ucontrol->value.integer.value[0]) nval |= (1 << bit); else nval &= ~(1 << bit); change = nval != oval; if (change) { wm_put(ice, WM_ADC_MUX, nval); } mutex_unlock(&ice->gpio_mutex); return 0; } /* * Analog bypass (In -> Out) */ #define wm_bypass_info snd_ctl_boolean_mono_info static int wm_bypass_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); mutex_lock(&ice->gpio_mutex); ucontrol->value.integer.value[0] = (wm_get(ice, WM_OUT_MUX) & 0x04) ? 1 : 0; mutex_unlock(&ice->gpio_mutex); return 0; } static int wm_bypass_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned short val, oval; int change = 0; mutex_lock(&ice->gpio_mutex); val = oval = wm_get(ice, WM_OUT_MUX); if (ucontrol->value.integer.value[0]) val |= 0x04; else val &= ~0x04; if (val != oval) { wm_put(ice, WM_OUT_MUX, val); change = 1; } mutex_unlock(&ice->gpio_mutex); return change; } /* * Left/Right swap */ #define wm_chswap_info snd_ctl_boolean_mono_info static int wm_chswap_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); mutex_lock(&ice->gpio_mutex); ucontrol->value.integer.value[0] = (wm_get(ice, WM_DAC_CTRL1) & 0xf0) != 0x90; mutex_unlock(&ice->gpio_mutex); return 0; } static int wm_chswap_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned short val, oval; int change = 0; mutex_lock(&ice->gpio_mutex); oval = wm_get(ice, 
WM_DAC_CTRL1); val = oval & 0x0f; if (ucontrol->value.integer.value[0]) val |= 0x60; else val |= 0x90; if (val != oval) { wm_put(ice, WM_DAC_CTRL1, val); wm_put_nocache(ice, WM_DAC_CTRL1, val); change = 1; } mutex_unlock(&ice->gpio_mutex); return change; } /* * mixers */ static struct snd_kcontrol_new prodigy_hifi_controls[] = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Master Playback Volume", .info = wm_master_vol_info, .get = wm_master_vol_get, .put = wm_master_vol_put, .tlv = { .p = db_scale_wm_dac } }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Front Playback Volume", .info = wm_dac_vol_info, .get = wm_dac_vol_get, .put = wm_dac_vol_put, .tlv = { .p = db_scale_wm_dac }, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Rear Playback Volume", .info = wm8766_vol_info, .get = wm8766_vol_get, .put = wm8766_vol_put, .private_value = (2 << 8) | 0, .tlv = { .p = db_scale_wm_dac }, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Center Playback Volume", .info = wm8766_vol_info, .get = wm8766_vol_get, .put = wm8766_vol_put, .private_value = (1 << 8) | 4, .tlv = { .p = db_scale_wm_dac } }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "LFE Playback Volume", .info = wm8766_vol_info, .get = wm8766_vol_get, .put = wm8766_vol_put, .private_value = (1 << 8) | 5, .tlv = { .p = db_scale_wm_dac } }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Side Playback Volume", .info = wm8766_vol_info, .get = wm8766_vol_get, .put = wm8766_vol_put, .private_value = (2 << 8) | 6, .tlv = { .p = db_scale_wm_dac }, 
}, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Capture Volume", .info = wm_adc_vol_info, .get = wm_adc_vol_get, .put = wm_adc_vol_put, .tlv = { .p = db_scale_wm_dac }, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "CD Capture Switch", .info = wm_adc_mux_info, .get = wm_adc_mux_get, .put = wm_adc_mux_put, .private_value = 0, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Line Capture Switch", .info = wm_adc_mux_info, .get = wm_adc_mux_get, .put = wm_adc_mux_put, .private_value = 1, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Analog Bypass Switch", .info = wm_bypass_info, .get = wm_bypass_get, .put = wm_bypass_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Swap Output Channels", .info = wm_chswap_info, .get = wm_chswap_get, .put = wm_chswap_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Analog Capture Source", .info = wm_adc_mux_enum_info, .get = wm_adc_mux_enum_get, .put = wm_adc_mux_enum_put, }, }; /* * WM codec registers */ static void wm_proc_regs_write(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ice1712 *ice = entry->private_data; char line[64]; unsigned int reg, val; mutex_lock(&ice->gpio_mutex); while (!snd_info_get_line(buffer, line, sizeof(line))) { if (sscanf(line, "%x %x", &reg, &val) != 2) continue; if (reg <= 0x17 && val <= 0xffff) wm_put(ice, reg, val); } mutex_unlock(&ice->gpio_mutex); } static void wm_proc_regs_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ice1712 *ice = entry->private_data; int reg, val; mutex_lock(&ice->gpio_mutex); for (reg = 0; reg <= 0x17; reg++) { val = wm_get(ice, reg); snd_iprintf(buffer, "%02x = %04x\n", reg, val); } mutex_unlock(&ice->gpio_mutex); } static void wm_proc_init(struct snd_ice1712 *ice) { struct snd_info_entry *entry; if (!snd_card_proc_new(ice->card, "wm_codec", &entry)) { snd_info_set_text_ops(entry, ice, wm_proc_regs_read); 
entry->mode |= S_IWUSR; entry->c.text.write = wm_proc_regs_write; } } static int prodigy_hifi_add_controls(struct snd_ice1712 *ice) { unsigned int i; int err; for (i = 0; i < ARRAY_SIZE(prodigy_hifi_controls); i++) { err = snd_ctl_add(ice->card, snd_ctl_new1(&prodigy_hifi_controls[i], ice)); if (err < 0) return err; } wm_proc_init(ice); return 0; } static int prodigy_hd2_add_controls(struct snd_ice1712 *ice) { unsigned int i; int err; for (i = 0; i < ARRAY_SIZE(prodigy_hd2_controls); i++) { err = snd_ctl_add(ice->card, snd_ctl_new1(&prodigy_hd2_controls[i], ice)); if (err < 0) return err; } wm_proc_init(ice); return 0; } /* * initialize the chip */ static int prodigy_hifi_init(struct snd_ice1712 *ice) { static unsigned short wm_inits[] = { /* These come first to reduce init pop noise */ WM_ADC_MUX, 0x0003, /* ADC mute */ /* 0x00c0 replaced by 0x0003 */ WM_DAC_MUTE, 0x0001, /* DAC softmute */ WM_DAC_CTRL1, 0x0000, /* DAC mute */ WM_POWERDOWN, 0x0008, /* All power-up except HP */ WM_RESET, 0x0000, /* reset */ }; static unsigned short wm_inits2[] = { WM_MASTER_CTRL, 0x0022, /* 256fs, slave mode */ WM_DAC_INT, 0x0022, /* I2S, normal polarity, 24bit */ WM_ADC_INT, 0x0022, /* I2S, normal polarity, 24bit */ WM_DAC_CTRL1, 0x0090, /* DAC L/R */ WM_OUT_MUX, 0x0001, /* OUT DAC */ WM_HP_ATTEN_L, 0x0179, /* HP 0dB */ WM_HP_ATTEN_R, 0x0179, /* HP 0dB */ WM_DAC_ATTEN_L, 0x0000, /* DAC 0dB */ WM_DAC_ATTEN_L, 0x0100, /* DAC 0dB */ WM_DAC_ATTEN_R, 0x0000, /* DAC 0dB */ WM_DAC_ATTEN_R, 0x0100, /* DAC 0dB */ WM_PHASE_SWAP, 0x0000, /* phase normal */ #if 0 WM_DAC_MASTER, 0x0100, /* DAC master muted */ #endif WM_DAC_CTRL2, 0x0000, /* no deemphasis, no ZFLG */ WM_ADC_ATTEN_L, 0x0000, /* ADC muted */ WM_ADC_ATTEN_R, 0x0000, /* ADC muted */ #if 1 WM_ALC_CTRL1, 0x007b, /* */ WM_ALC_CTRL2, 0x0000, /* */ WM_ALC_CTRL3, 0x0000, /* */ WM_NOISE_GATE, 0x0000, /* */ #endif WM_DAC_MUTE, 0x0000, /* DAC unmute */ WM_ADC_MUX, 0x0003, /* ADC unmute, both CD/Line On */ }; static unsigned short 
wm8766_inits[] = { WM8766_RESET, 0x0000, WM8766_DAC_CTRL, 0x0120, WM8766_INT_CTRL, 0x0022, /* I2S Normal Mode, 24 bit */ WM8766_DAC_CTRL2, 0x0001, WM8766_DAC_CTRL3, 0x0080, WM8766_LDA1, 0x0100, WM8766_LDA2, 0x0100, WM8766_LDA3, 0x0100, WM8766_RDA1, 0x0100, WM8766_RDA2, 0x0100, WM8766_RDA3, 0x0100, WM8766_MUTE1, 0x0000, WM8766_MUTE2, 0x0000, }; struct prodigy_hifi_spec *spec; unsigned int i; ice->vt1720 = 0; ice->vt1724 = 1; ice->num_total_dacs = 8; ice->num_total_adcs = 1; /* HACK - use this as the SPDIF source. * don't call snd_ice1712_gpio_get/put(), otherwise it's overwritten */ ice->gpio.saved[0] = 0; /* to remember the register values */ ice->akm = kzalloc(sizeof(struct snd_akm4xxx), GFP_KERNEL); if (! ice->akm) return -ENOMEM; ice->akm_codecs = 1; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return -ENOMEM; ice->spec = spec; /* initialize WM8776 codec */ for (i = 0; i < ARRAY_SIZE(wm_inits); i += 2) wm_put(ice, wm_inits[i], wm_inits[i+1]); schedule_timeout_uninterruptible(1); for (i = 0; i < ARRAY_SIZE(wm_inits2); i += 2) wm_put(ice, wm_inits2[i], wm_inits2[i+1]); /* initialize WM8766 codec */ for (i = 0; i < ARRAY_SIZE(wm8766_inits); i += 2) wm8766_spi_write(ice, wm8766_inits[i], wm8766_inits[i+1]); return 0; } /* * initialize the chip */ static void ak4396_init(struct snd_ice1712 *ice) { static unsigned short ak4396_inits[] = { AK4396_CTRL1, 0x87, /* I2S Normal Mode, 24 bit */ AK4396_CTRL2, 0x02, AK4396_CTRL3, 0x00, AK4396_LCH_ATT, 0x00, AK4396_RCH_ATT, 0x00, }; unsigned int i; /* initialize ak4396 codec */ /* reset codec */ ak4396_write(ice, AK4396_CTRL1, 0x86); msleep(100); ak4396_write(ice, AK4396_CTRL1, 0x87); for (i = 0; i < ARRAY_SIZE(ak4396_inits); i += 2) ak4396_write(ice, ak4396_inits[i], ak4396_inits[i+1]); } #ifdef CONFIG_PM_SLEEP static int prodigy_hd2_resume(struct snd_ice1712 *ice) { /* initialize ak4396 codec and restore previous mixer volumes */ struct prodigy_hifi_spec *spec = ice->spec; int i; mutex_lock(&ice->gpio_mutex); 
ak4396_init(ice); for (i = 0; i < 2; i++) ak4396_write(ice, AK4396_LCH_ATT + i, spec->vol[i] & 0xff); mutex_unlock(&ice->gpio_mutex); return 0; } #endif static int prodigy_hd2_init(struct snd_ice1712 *ice) { struct prodigy_hifi_spec *spec; ice->vt1720 = 0; ice->vt1724 = 1; ice->num_total_dacs = 1; ice->num_total_adcs = 1; /* HACK - use this as the SPDIF source. * don't call snd_ice1712_gpio_get/put(), otherwise it's overwritten */ ice->gpio.saved[0] = 0; /* to remember the register values */ ice->akm = kzalloc(sizeof(struct snd_akm4xxx), GFP_KERNEL); if (! ice->akm) return -ENOMEM; ice->akm_codecs = 1; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return -ENOMEM; ice->spec = spec; #ifdef CONFIG_PM_SLEEP ice->pm_resume = &prodigy_hd2_resume; ice->pm_suspend_enabled = 1; #endif ak4396_init(ice); return 0; } static unsigned char prodigy71hifi_eeprom[] = { 0x4b, /* SYSCONF: clock 512, spdif-in/ADC, 4DACs */ 0x80, /* ACLINK: I2S */ 0xfc, /* I2S: vol, 96k, 24bit, 192k */ 0xc3, /* SPDIF: out-en, out-int, spdif-in */ 0xff, /* GPIO_DIR */ 0xff, /* GPIO_DIR1 */ 0x5f, /* GPIO_DIR2 */ 0x00, /* GPIO_MASK */ 0x00, /* GPIO_MASK1 */ 0x00, /* GPIO_MASK2 */ 0x00, /* GPIO_STATE */ 0x00, /* GPIO_STATE1 */ 0x00, /* GPIO_STATE2 */ }; static unsigned char prodigyhd2_eeprom[] = { 0x4b, /* SYSCONF: clock 512, spdif-in/ADC, 4DACs */ 0x80, /* ACLINK: I2S */ 0xfc, /* I2S: vol, 96k, 24bit, 192k */ 0xc3, /* SPDIF: out-en, out-int, spdif-in */ 0xff, /* GPIO_DIR */ 0xff, /* GPIO_DIR1 */ 0x5f, /* GPIO_DIR2 */ 0x00, /* GPIO_MASK */ 0x00, /* GPIO_MASK1 */ 0x00, /* GPIO_MASK2 */ 0x00, /* GPIO_STATE */ 0x00, /* GPIO_STATE1 */ 0x00, /* GPIO_STATE2 */ }; static unsigned char fortissimo4_eeprom[] = { 0x43, /* SYSCONF: clock 512, ADC, 4DACs */ 0x80, /* ACLINK: I2S */ 0xfc, /* I2S: vol, 96k, 24bit, 192k */ 0xc1, /* SPDIF: out-en, out-int */ 0xff, /* GPIO_DIR */ 0xff, /* GPIO_DIR1 */ 0x5f, /* GPIO_DIR2 */ 0x00, /* GPIO_MASK */ 0x00, /* GPIO_MASK1 */ 0x00, /* GPIO_MASK2 */ 0x00, /* GPIO_STATE */ 
0x00, /* GPIO_STATE1 */ 0x00, /* GPIO_STATE2 */ }; /* entry point */ struct snd_ice1712_card_info snd_vt1724_prodigy_hifi_cards[] = { { .subvendor = VT1724_SUBDEVICE_PRODIGY_HIFI, .name = "Audiotrak Prodigy 7.1 HiFi", .model = "prodigy71hifi", .chip_init = prodigy_hifi_init, .build_controls = prodigy_hifi_add_controls, .eeprom_size = sizeof(prodigy71hifi_eeprom), .eeprom_data = prodigy71hifi_eeprom, .driver = "Prodigy71HIFI", }, { .subvendor = VT1724_SUBDEVICE_PRODIGY_HD2, .name = "Audiotrak Prodigy HD2", .model = "prodigyhd2", .chip_init = prodigy_hd2_init, .build_controls = prodigy_hd2_add_controls, .eeprom_size = sizeof(prodigyhd2_eeprom), .eeprom_data = prodigyhd2_eeprom, .driver = "Prodigy71HD2", }, { .subvendor = VT1724_SUBDEVICE_FORTISSIMO4, .name = "Hercules Fortissimo IV", .model = "fortissimo4", .chip_init = prodigy_hifi_init, .build_controls = prodigy_hifi_add_controls, .eeprom_size = sizeof(fortissimo4_eeprom), .eeprom_data = fortissimo4_eeprom, .driver = "Fortissimo4", }, { } /* terminator */ };
gpl-2.0
CyanogenMod/android_kernel_motorola_msm8916
drivers/clk/mxs/clk-div.c
4019
2685
/* * Copyright 2012 Freescale Semiconductor, Inc. * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/err.h> #include <linux/slab.h> #include "clk.h" /** * struct clk_div - mxs integer divider clock * @divider: the parent class * @ops: pointer to clk_ops of parent class * @reg: register address * @busy: busy bit shift * * The mxs divider clock is a subclass of basic clk_divider with an * addtional busy bit. */ struct clk_div { struct clk_divider divider; const struct clk_ops *ops; void __iomem *reg; u8 busy; }; static inline struct clk_div *to_clk_div(struct clk_hw *hw) { struct clk_divider *divider = container_of(hw, struct clk_divider, hw); return container_of(divider, struct clk_div, divider); } static unsigned long clk_div_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_div *div = to_clk_div(hw); return div->ops->recalc_rate(&div->divider.hw, parent_rate); } static long clk_div_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) { struct clk_div *div = to_clk_div(hw); return div->ops->round_rate(&div->divider.hw, rate, prate); } static int clk_div_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct clk_div *div = to_clk_div(hw); int ret; ret = div->ops->set_rate(&div->divider.hw, rate, parent_rate); if (!ret) ret = mxs_clk_wait(div->reg, div->busy); return ret; } static struct clk_ops clk_div_ops = { .recalc_rate = clk_div_recalc_rate, .round_rate = clk_div_round_rate, .set_rate = clk_div_set_rate, }; struct clk *mxs_clk_div(const char *name, const char *parent_name, void __iomem *reg, u8 shift, u8 width, u8 busy) { struct clk_div *div; struct clk *clk; struct clk_init_data init; div = 
kzalloc(sizeof(*div), GFP_KERNEL); if (!div) return ERR_PTR(-ENOMEM); init.name = name; init.ops = &clk_div_ops; init.flags = CLK_SET_RATE_PARENT; init.parent_names = (parent_name ? &parent_name: NULL); init.num_parents = (parent_name ? 1 : 0); div->reg = reg; div->busy = busy; div->divider.reg = reg; div->divider.shift = shift; div->divider.width = width; div->divider.flags = CLK_DIVIDER_ONE_BASED; div->divider.lock = &mxs_lock; div->divider.hw.init = &init; div->ops = &clk_divider_ops; clk = clk_register(NULL, &div->divider.hw); if (IS_ERR(clk)) kfree(div); return clk; }
gpl-2.0
Lenovo-K3/android_kernel_lenovo_msm8916
drivers/ide/icside.c
4275
16635
/* * Copyright (c) 1996-2004 Russell King. * * Please note that this platform does not support 32-bit IDE IO. */ #include <linux/string.h> #include <linux/module.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/errno.h> #include <linux/ide.h> #include <linux/dma-mapping.h> #include <linux/device.h> #include <linux/init.h> #include <linux/scatterlist.h> #include <linux/io.h> #include <asm/dma.h> #include <asm/ecard.h> #define DRV_NAME "icside" #define ICS_IDENT_OFFSET 0x2280 #define ICS_ARCIN_V5_INTRSTAT 0x0000 #define ICS_ARCIN_V5_INTROFFSET 0x0004 #define ICS_ARCIN_V5_IDEOFFSET 0x2800 #define ICS_ARCIN_V5_IDEALTOFFSET 0x2b80 #define ICS_ARCIN_V5_IDESTEPPING 6 #define ICS_ARCIN_V6_IDEOFFSET_1 0x2000 #define ICS_ARCIN_V6_INTROFFSET_1 0x2200 #define ICS_ARCIN_V6_INTRSTAT_1 0x2290 #define ICS_ARCIN_V6_IDEALTOFFSET_1 0x2380 #define ICS_ARCIN_V6_IDEOFFSET_2 0x3000 #define ICS_ARCIN_V6_INTROFFSET_2 0x3200 #define ICS_ARCIN_V6_INTRSTAT_2 0x3290 #define ICS_ARCIN_V6_IDEALTOFFSET_2 0x3380 #define ICS_ARCIN_V6_IDESTEPPING 6 struct cardinfo { unsigned int dataoffset; unsigned int ctrloffset; unsigned int stepping; }; static struct cardinfo icside_cardinfo_v5 = { .dataoffset = ICS_ARCIN_V5_IDEOFFSET, .ctrloffset = ICS_ARCIN_V5_IDEALTOFFSET, .stepping = ICS_ARCIN_V5_IDESTEPPING, }; static struct cardinfo icside_cardinfo_v6_1 = { .dataoffset = ICS_ARCIN_V6_IDEOFFSET_1, .ctrloffset = ICS_ARCIN_V6_IDEALTOFFSET_1, .stepping = ICS_ARCIN_V6_IDESTEPPING, }; static struct cardinfo icside_cardinfo_v6_2 = { .dataoffset = ICS_ARCIN_V6_IDEOFFSET_2, .ctrloffset = ICS_ARCIN_V6_IDEALTOFFSET_2, .stepping = ICS_ARCIN_V6_IDESTEPPING, }; struct icside_state { unsigned int channel; unsigned int enabled; void __iomem *irq_port; void __iomem *ioc_base; unsigned int sel; unsigned int type; struct ide_host *host; }; #define ICS_TYPE_A3IN 0 #define ICS_TYPE_A3USER 1 #define ICS_TYPE_V6 3 #define ICS_TYPE_V5 15 #define ICS_TYPE_NOTYPE ((unsigned int)-1) /* 
---------------- Version 5 PCB Support Functions --------------------- */ /* Prototype: icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr) * Purpose : enable interrupts from card */ static void icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr) { struct icside_state *state = ec->irq_data; writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET); } /* Prototype: icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr) * Purpose : disable interrupts from card */ static void icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr) { struct icside_state *state = ec->irq_data; readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET); } static const expansioncard_ops_t icside_ops_arcin_v5 = { .irqenable = icside_irqenable_arcin_v5, .irqdisable = icside_irqdisable_arcin_v5, }; /* ---------------- Version 6 PCB Support Functions --------------------- */ /* Prototype: icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr) * Purpose : enable interrupts from card */ static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr) { struct icside_state *state = ec->irq_data; void __iomem *base = state->irq_port; state->enabled = 1; switch (state->channel) { case 0: writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1); readb(base + ICS_ARCIN_V6_INTROFFSET_2); break; case 1: writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2); readb(base + ICS_ARCIN_V6_INTROFFSET_1); break; } } /* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr) * Purpose : disable interrupts from card */ static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr) { struct icside_state *state = ec->irq_data; state->enabled = 0; readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1); readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2); } /* Prototype: icside_irqprobe(struct expansion_card *ec) * Purpose : detect an active interrupt from card */ static int icside_irqpending_arcin_v6(struct expansion_card *ec) { struct icside_state 
*state = ec->irq_data; return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 || readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1; } static const expansioncard_ops_t icside_ops_arcin_v6 = { .irqenable = icside_irqenable_arcin_v6, .irqdisable = icside_irqdisable_arcin_v6, .irqpending = icside_irqpending_arcin_v6, }; /* * Handle routing of interrupts. This is called before * we write the command to the drive. */ static void icside_maskproc(ide_drive_t *drive, int mask) { ide_hwif_t *hwif = drive->hwif; struct expansion_card *ec = ECARD_DEV(hwif->dev); struct icside_state *state = ecard_get_drvdata(ec); unsigned long flags; local_irq_save(flags); state->channel = hwif->channel; if (state->enabled && !mask) { switch (hwif->channel) { case 0: writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1); readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2); break; case 1: writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2); readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1); break; } } else { readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2); readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1); } local_irq_restore(flags); } static const struct ide_port_ops icside_v6_no_dma_port_ops = { .maskproc = icside_maskproc, }; #ifdef CONFIG_BLK_DEV_IDEDMA_ICS /* * SG-DMA support. * * Similar to the BM-DMA, but we use the RiscPCs IOMD DMA controllers. * There is only one DMA controller per card, which means that only * one drive can be accessed at one time. NOTE! We do not enforce that * here, but we rely on the main IDE driver spotting that both * interfaces use the same IRQ, which should guarantee this. */ /* * Configure the IOMD to give the appropriate timings for the transfer * mode being requested. We take the advice of the ATA standards, and * calculate the cycle time based on the transfer mode, and the EIDE * MW DMA specs that the drive provides in the IDENTIFY command. 
* * We have the following IOMD DMA modes to choose from: * * Type Active Recovery Cycle * A 250 (250) 312 (550) 562 (800) * B 187 250 437 * C 125 (125) 125 (375) 250 (500) * D 62 125 187 * * (figures in brackets are actual measured timings) * * However, we also need to take care of the read/write active and * recovery timings: * * Read Write * Mode Active -- Recovery -- Cycle IOMD type * MW0 215 50 215 480 A * MW1 80 50 50 150 C * MW2 70 25 25 120 C */ static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { unsigned long cycle_time = 0; int use_dma_info = 0; const u8 xfer_mode = drive->dma_mode; switch (xfer_mode) { case XFER_MW_DMA_2: cycle_time = 250; use_dma_info = 1; break; case XFER_MW_DMA_1: cycle_time = 250; use_dma_info = 1; break; case XFER_MW_DMA_0: cycle_time = 480; break; case XFER_SW_DMA_2: case XFER_SW_DMA_1: case XFER_SW_DMA_0: cycle_time = 480; break; } /* * If we're going to be doing MW_DMA_1 or MW_DMA_2, we should * take care to note the values in the ID... */ if (use_dma_info && drive->id[ATA_ID_EIDE_DMA_TIME] > cycle_time) cycle_time = drive->id[ATA_ID_EIDE_DMA_TIME]; ide_set_drivedata(drive, (void *)cycle_time); printk(KERN_INFO "%s: %s selected (peak %luMB/s)\n", drive->name, ide_xfer_verbose(xfer_mode), 2000 / (cycle_time ? cycle_time : (unsigned long) -1)); } static const struct ide_port_ops icside_v6_port_ops = { .set_dma_mode = icside_set_dma_mode, .maskproc = icside_maskproc, }; static void icside_dma_host_set(ide_drive_t *drive, int on) { } static int icside_dma_end(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct expansion_card *ec = ECARD_DEV(hwif->dev); disable_dma(ec->dma); return get_dma_residue(ec->dma) != 0; } static void icside_dma_start(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct expansion_card *ec = ECARD_DEV(hwif->dev); /* We can not enable DMA on both channels simultaneously. 
*/ BUG_ON(dma_channel_active(ec->dma)); enable_dma(ec->dma); } static int icside_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd) { ide_hwif_t *hwif = drive->hwif; struct expansion_card *ec = ECARD_DEV(hwif->dev); struct icside_state *state = ecard_get_drvdata(ec); unsigned int dma_mode; if (cmd->tf_flags & IDE_TFLAG_WRITE) dma_mode = DMA_MODE_WRITE; else dma_mode = DMA_MODE_READ; /* * We can not enable DMA on both channels. */ BUG_ON(dma_channel_active(ec->dma)); /* * Ensure that we have the right interrupt routed. */ icside_maskproc(drive, 0); /* * Route the DMA signals to the correct interface. */ writeb(state->sel | hwif->channel, state->ioc_base); /* * Select the correct timing for this drive. */ set_dma_speed(ec->dma, (unsigned long)ide_get_drivedata(drive)); /* * Tell the DMA engine about the SG table and * data direction. */ set_dma_sg(ec->dma, hwif->sg_table, cmd->sg_nents); set_dma_mode(ec->dma, dma_mode); return 0; } static int icside_dma_test_irq(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct expansion_card *ec = ECARD_DEV(hwif->dev); struct icside_state *state = ecard_get_drvdata(ec); return readb(state->irq_port + (hwif->channel ? 
ICS_ARCIN_V6_INTRSTAT_2 : ICS_ARCIN_V6_INTRSTAT_1)) & 1; } static int icside_dma_init(ide_hwif_t *hwif, const struct ide_port_info *d) { hwif->dmatable_cpu = NULL; hwif->dmatable_dma = 0; return 0; } static const struct ide_dma_ops icside_v6_dma_ops = { .dma_host_set = icside_dma_host_set, .dma_setup = icside_dma_setup, .dma_start = icside_dma_start, .dma_end = icside_dma_end, .dma_test_irq = icside_dma_test_irq, .dma_lost_irq = ide_dma_lost_irq, }; #endif static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d) { return -EOPNOTSUPP; } static void icside_setup_ports(struct ide_hw *hw, void __iomem *base, struct cardinfo *info, struct expansion_card *ec) { unsigned long port = (unsigned long)base + info->dataoffset; hw->io_ports.data_addr = port; hw->io_ports.error_addr = port + (1 << info->stepping); hw->io_ports.nsect_addr = port + (2 << info->stepping); hw->io_ports.lbal_addr = port + (3 << info->stepping); hw->io_ports.lbam_addr = port + (4 << info->stepping); hw->io_ports.lbah_addr = port + (5 << info->stepping); hw->io_ports.device_addr = port + (6 << info->stepping); hw->io_ports.status_addr = port + (7 << info->stepping); hw->io_ports.ctl_addr = (unsigned long)base + info->ctrloffset; hw->irq = ec->irq; hw->dev = &ec->dev; } static const struct ide_port_info icside_v5_port_info = { .host_flags = IDE_HFLAG_NO_DMA, .chipset = ide_acorn, }; static int icside_register_v5(struct icside_state *state, struct expansion_card *ec) { void __iomem *base; struct ide_host *host; struct ide_hw hw, *hws[] = { &hw }; int ret; base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); if (!base) return -ENOMEM; state->irq_port = base; ec->irqaddr = base + ICS_ARCIN_V5_INTRSTAT; ec->irqmask = 1; ecard_setirq(ec, &icside_ops_arcin_v5, state); /* * Be on the safe side - disable interrupts */ icside_irqdisable_arcin_v5(ec, 0); icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec); host = ide_host_alloc(&icside_v5_port_info, hws, 1); if (host == NULL) return -ENODEV; 
state->host = host; ecard_set_drvdata(ec, state); ret = ide_host_register(host, &icside_v5_port_info, hws); if (ret) goto err_free; return 0; err_free: ide_host_free(host); ecard_set_drvdata(ec, NULL); return ret; } static const struct ide_port_info icside_v6_port_info __initconst = { .init_dma = icside_dma_off_init, .port_ops = &icside_v6_no_dma_port_ops, .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, .mwdma_mask = ATA_MWDMA2, .swdma_mask = ATA_SWDMA2, .chipset = ide_acorn, }; static int icside_register_v6(struct icside_state *state, struct expansion_card *ec) { void __iomem *ioc_base, *easi_base; struct ide_host *host; unsigned int sel = 0; int ret; struct ide_hw hw[2], *hws[] = { &hw[0], &hw[1] }; struct ide_port_info d = icside_v6_port_info; ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); if (!ioc_base) { ret = -ENOMEM; goto out; } easi_base = ioc_base; if (ecard_resource_flags(ec, ECARD_RES_EASI)) { easi_base = ecardm_iomap(ec, ECARD_RES_EASI, 0, 0); if (!easi_base) { ret = -ENOMEM; goto out; } /* * Enable access to the EASI region. 
*/ sel = 1 << 5; } writeb(sel, ioc_base); ecard_setirq(ec, &icside_ops_arcin_v6, state); state->irq_port = easi_base; state->ioc_base = ioc_base; state->sel = sel; /* * Be on the safe side - disable interrupts */ icside_irqdisable_arcin_v6(ec, 0); icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec); icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec); host = ide_host_alloc(&d, hws, 2); if (host == NULL) return -ENODEV; state->host = host; ecard_set_drvdata(ec, state); #ifdef CONFIG_BLK_DEV_IDEDMA_ICS if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) { d.init_dma = icside_dma_init; d.port_ops = &icside_v6_port_ops; d.dma_ops = &icside_v6_dma_ops; } #endif ret = ide_host_register(host, &d, hws); if (ret) goto err_free; return 0; err_free: ide_host_free(host); if (d.dma_ops) free_dma(ec->dma); ecard_set_drvdata(ec, NULL); out: return ret; } static int icside_probe(struct expansion_card *ec, const struct ecard_id *id) { struct icside_state *state; void __iomem *idmem; int ret; ret = ecard_request_resources(ec); if (ret) goto out; state = kzalloc(sizeof(struct icside_state), GFP_KERNEL); if (!state) { ret = -ENOMEM; goto release; } state->type = ICS_TYPE_NOTYPE; idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); if (idmem) { unsigned int type; type = readb(idmem + ICS_IDENT_OFFSET) & 1; type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1; type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2; type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3; ecardm_iounmap(ec, idmem); state->type = type; } switch (state->type) { case ICS_TYPE_A3IN: dev_warn(&ec->dev, "A3IN unsupported\n"); ret = -ENODEV; break; case ICS_TYPE_A3USER: dev_warn(&ec->dev, "A3USER unsupported\n"); ret = -ENODEV; break; case ICS_TYPE_V5: ret = icside_register_v5(state, ec); break; case ICS_TYPE_V6: ret = icside_register_v6(state, ec); break; default: dev_warn(&ec->dev, "unknown interface type\n"); ret = -ENODEV; break; } if (ret == 0) goto out; kfree(state); 
release: ecard_release_resources(ec); out: return ret; } static void icside_remove(struct expansion_card *ec) { struct icside_state *state = ecard_get_drvdata(ec); switch (state->type) { case ICS_TYPE_V5: /* FIXME: tell IDE to stop using the interface */ /* Disable interrupts */ icside_irqdisable_arcin_v5(ec, 0); break; case ICS_TYPE_V6: /* FIXME: tell IDE to stop using the interface */ if (ec->dma != NO_DMA) free_dma(ec->dma); /* Disable interrupts */ icside_irqdisable_arcin_v6(ec, 0); /* Reset the ROM pointer/EASI selection */ writeb(0, state->ioc_base); break; } ecard_set_drvdata(ec, NULL); kfree(state); ecard_release_resources(ec); } static void icside_shutdown(struct expansion_card *ec) { struct icside_state *state = ecard_get_drvdata(ec); unsigned long flags; /* * Disable interrupts from this card. We need to do * this before disabling EASI since we may be accessing * this register via that region. */ local_irq_save(flags); ec->ops->irqdisable(ec, 0); local_irq_restore(flags); /* * Reset the ROM pointer so that we can read the ROM * after a soft reboot. This also disables access to * the IDE taskfile via the EASI region. */ if (state->ioc_base) writeb(0, state->ioc_base); } static const struct ecard_id icside_ids[] = { { MANU_ICS, PROD_ICS_IDE }, { MANU_ICS2, PROD_ICS2_IDE }, { 0xffff, 0xffff } }; static struct ecard_driver icside_driver = { .probe = icside_probe, .remove = icside_remove, .shutdown = icside_shutdown, .id_table = icside_ids, .drv = { .name = "icside", }, }; static int __init icside_init(void) { return ecard_register_driver(&icside_driver); } static void __exit icside_exit(void) { ecard_remove_driver(&icside_driver); } MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("ICS IDE driver"); module_init(icside_init); module_exit(icside_exit);
gpl-2.0
ztemt/z5s_mini_H113_kernel
arch/arm/mach-msm/board-sapphire-rfkill.c
4531
2657
/* linux/arch/arm/mach-msm/board-sapphire-rfkill.c * Copyright (C) 2007-2009 HTC Corporation. * Author: Thomas Tsai <thomas_tsai@htc.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* Control bluetooth power for sapphire platform */ #include <linux/platform_device.h> #include <linux/module.h> #include <linux/device.h> #include <linux/rfkill.h> #include <linux/delay.h> #include <linux/gpio.h> #include <asm/mach-types.h> #include "gpio_chip.h" #include "board-sapphire.h" static struct rfkill *bt_rfk; static const char bt_name[] = "brf6300"; extern int sapphire_bt_fastclock_power(int on); static int bluetooth_set_power(void *data, bool blocked) { if (!blocked) { sapphire_bt_fastclock_power(1); gpio_set_value(SAPPHIRE_GPIO_BT_32K_EN, 1); udelay(10); gpio_direction_output(101, 1); } else { gpio_direction_output(101, 0); gpio_set_value(SAPPHIRE_GPIO_BT_32K_EN, 0); sapphire_bt_fastclock_power(0); } return 0; } static struct rfkill_ops sapphire_rfkill_ops = { .set_block = bluetooth_set_power, }; static int sapphire_rfkill_probe(struct platform_device *pdev) { int rc = 0; bool default_state = true; /* off */ bluetooth_set_power(NULL, default_state); bt_rfk = rfkill_alloc(bt_name, &pdev->dev, RFKILL_TYPE_BLUETOOTH, &sapphire_rfkill_ops, NULL); if (!bt_rfk) return -ENOMEM; /* userspace cannot take exclusive control */ rfkill_set_states(bt_rfk, default_state, false); rc = rfkill_register(bt_rfk); if (rc) rfkill_destroy(bt_rfk); return rc; } static int sapphire_rfkill_remove(struct platform_device *dev) { rfkill_unregister(bt_rfk); rfkill_destroy(bt_rfk); return 0; } static 
struct platform_driver sapphire_rfkill_driver = { .probe = sapphire_rfkill_probe, .remove = sapphire_rfkill_remove, .driver = { .name = "sapphire_rfkill", .owner = THIS_MODULE, }, }; static int __init sapphire_rfkill_init(void) { return platform_driver_register(&sapphire_rfkill_driver); } static void __exit sapphire_rfkill_exit(void) { platform_driver_unregister(&sapphire_rfkill_driver); } module_init(sapphire_rfkill_init); module_exit(sapphire_rfkill_exit); MODULE_DESCRIPTION("sapphire rfkill"); MODULE_AUTHOR("Nick Pelly <npelly@google.com>"); MODULE_LICENSE("GPL");
gpl-2.0
TheTypoMaster/ubuntu-utopic
net/netfilter/xt_time.c
4531
8327
/* * xt_time * Copyright © CC Computer Consultants GmbH, 2007 * * based on ipt_time by Fabrice MARIE <fabrice@netfilter.org> * This is a module which is used for time matching * It is using some modified code from dietlibc (localtime() function) * that you can find at http://www.fefe.de/dietlibc/ * This file is distributed under the terms of the GNU General Public * License (GPL). Copies of the GPL can be obtained from gnu.org/gpl. */ #include <linux/ktime.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/types.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_time.h> struct xtm { u_int8_t month; /* (1-12) */ u_int8_t monthday; /* (1-31) */ u_int8_t weekday; /* (1-7) */ u_int8_t hour; /* (0-23) */ u_int8_t minute; /* (0-59) */ u_int8_t second; /* (0-59) */ unsigned int dse; }; extern struct timezone sys_tz; /* ouch */ static const u_int16_t days_since_year[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, }; static const u_int16_t days_since_leapyear[] = { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, }; /* * Since time progresses forward, it is best to organize this array in reverse, * to minimize lookup time. 
*/ enum { DSE_FIRST = 2039, SECONDS_PER_DAY = 86400, }; static const u_int16_t days_since_epoch[] = { /* 2039 - 2030 */ 25202, 24837, 24472, 24106, 23741, 23376, 23011, 22645, 22280, 21915, /* 2029 - 2020 */ 21550, 21184, 20819, 20454, 20089, 19723, 19358, 18993, 18628, 18262, /* 2019 - 2010 */ 17897, 17532, 17167, 16801, 16436, 16071, 15706, 15340, 14975, 14610, /* 2009 - 2000 */ 14245, 13879, 13514, 13149, 12784, 12418, 12053, 11688, 11323, 10957, /* 1999 - 1990 */ 10592, 10227, 9862, 9496, 9131, 8766, 8401, 8035, 7670, 7305, /* 1989 - 1980 */ 6940, 6574, 6209, 5844, 5479, 5113, 4748, 4383, 4018, 3652, /* 1979 - 1970 */ 3287, 2922, 2557, 2191, 1826, 1461, 1096, 730, 365, 0, }; static inline bool is_leap(unsigned int y) { return y % 4 == 0 && (y % 100 != 0 || y % 400 == 0); } /* * Each network packet has a (nano)seconds-since-the-epoch (SSTE) timestamp. * Since we match against days and daytime, the SSTE value needs to be * computed back into human-readable dates. * * This is done in three separate functions so that the most expensive * calculations are done last, in case a "simple match" can be found earlier. */ static inline unsigned int localtime_1(struct xtm *r, time_t time) { unsigned int v, w; /* Each day has 86400s, so finding the hour/minute is actually easy. */ v = time % SECONDS_PER_DAY; r->second = v % 60; w = v / 60; r->minute = w % 60; r->hour = w / 60; return v; } static inline void localtime_2(struct xtm *r, time_t time) { /* * Here comes the rest (weekday, monthday). First, divide the SSTE * by seconds-per-day to get the number of _days_ since the epoch. */ r->dse = time / 86400; /* * 1970-01-01 (w=0) was a Thursday (4). * -1 and +1 map Sunday properly onto 7. */ r->weekday = (4 + r->dse - 1) % 7 + 1; } static void localtime_3(struct xtm *r, time_t time) { unsigned int year, i, w = r->dse; /* * In each year, a certain number of days-since-the-epoch have passed. * Find the year that is closest to said days. 
* * Consider, for example, w=21612 (2029-03-04). Loop will abort on * dse[i] <= w, which happens when dse[i] == 21550. This implies * year == 2009. w will then be 62. */ for (i = 0, year = DSE_FIRST; days_since_epoch[i] > w; ++i, --year) /* just loop */; w -= days_since_epoch[i]; /* * By now we have the current year, and the day of the year. * r->yearday = w; * * On to finding the month (like above). In each month, a certain * number of days-since-New Year have passed, and find the closest * one. * * Consider w=62 (in a non-leap year). Loop will abort on * dsy[i] < w, which happens when dsy[i] == 31+28 (i == 2). * Concludes i == 2, i.e. 3rd month => March. * * (A different approach to use would be to subtract a monthlength * from w repeatedly while counting.) */ if (is_leap(year)) { /* use days_since_leapyear[] in a leap year */ for (i = ARRAY_SIZE(days_since_leapyear) - 1; i > 0 && days_since_leapyear[i] > w; --i) /* just loop */; r->monthday = w - days_since_leapyear[i] + 1; } else { for (i = ARRAY_SIZE(days_since_year) - 1; i > 0 && days_since_year[i] > w; --i) /* just loop */; r->monthday = w - days_since_year[i] + 1; } r->month = i + 1; } static bool time_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_time_info *info = par->matchinfo; unsigned int packet_time; struct xtm current_time; s64 stamp; /* * We cannot use get_seconds() instead of __net_timestamp() here. * Suppose you have two rules: * 1. match before 13:00 * 2. match after 13:00 * If you match against processing time (get_seconds) it * may happen that the same packet matches both rules if * it arrived at the right moment before 13:00. 
*/ if (skb->tstamp.tv64 == 0) __net_timestamp((struct sk_buff *)skb); stamp = ktime_to_ns(skb->tstamp); stamp = div_s64(stamp, NSEC_PER_SEC); if (info->flags & XT_TIME_LOCAL_TZ) /* Adjust for local timezone */ stamp -= 60 * sys_tz.tz_minuteswest; /* * xt_time will match when _all_ of the following hold: * - 'now' is in the global time range date_start..date_end * - 'now' is in the monthday mask * - 'now' is in the weekday mask * - 'now' is in the daytime range time_start..time_end * (and by default, libxt_time will set these so as to match) */ if (stamp < info->date_start || stamp > info->date_stop) return false; packet_time = localtime_1(&current_time, stamp); if (info->daytime_start < info->daytime_stop) { if (packet_time < info->daytime_start || packet_time > info->daytime_stop) return false; } else { if (packet_time < info->daytime_start && packet_time > info->daytime_stop) return false; /** if user asked to ignore 'next day', then e.g. * '1 PM Wed, August 1st' should be treated * like 'Tue 1 PM July 31st'. * * This also causes * 'Monday, "23:00 to 01:00", to match for 2 hours, starting * Monday 23:00 to Tuesday 01:00. 
*/ if ((info->flags & XT_TIME_CONTIGUOUS) && packet_time <= info->daytime_stop) stamp -= SECONDS_PER_DAY; } localtime_2(&current_time, stamp); if (!(info->weekdays_match & (1 << current_time.weekday))) return false; /* Do not spend time computing monthday if all days match anyway */ if (info->monthdays_match != XT_TIME_ALL_MONTHDAYS) { localtime_3(&current_time, stamp); if (!(info->monthdays_match & (1 << current_time.monthday))) return false; } return true; } static int time_mt_check(const struct xt_mtchk_param *par) { const struct xt_time_info *info = par->matchinfo; if (info->daytime_start > XT_TIME_MAX_DAYTIME || info->daytime_stop > XT_TIME_MAX_DAYTIME) { pr_info("invalid argument - start or " "stop time greater than 23:59:59\n"); return -EDOM; } if (info->flags & ~XT_TIME_ALL_FLAGS) { pr_info("unknown flags 0x%x\n", info->flags & ~XT_TIME_ALL_FLAGS); return -EINVAL; } if ((info->flags & XT_TIME_CONTIGUOUS) && info->daytime_start < info->daytime_stop) return -EINVAL; return 0; } static struct xt_match xt_time_mt_reg __read_mostly = { .name = "time", .family = NFPROTO_UNSPEC, .match = time_mt, .checkentry = time_mt_check, .matchsize = sizeof(struct xt_time_info), .me = THIS_MODULE, }; static int __init time_mt_init(void) { int minutes = sys_tz.tz_minuteswest; if (minutes < 0) /* east of Greenwich */ printk(KERN_INFO KBUILD_MODNAME ": kernel timezone is +%02d%02d\n", -minutes / 60, -minutes % 60); else /* west of Greenwich */ printk(KERN_INFO KBUILD_MODNAME ": kernel timezone is -%02d%02d\n", minutes / 60, minutes % 60); return xt_register_match(&xt_time_mt_reg); } static void __exit time_mt_exit(void) { xt_unregister_match(&xt_time_mt_reg); } module_init(time_mt_init); module_exit(time_mt_exit); MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>"); MODULE_DESCRIPTION("Xtables: time-based matching"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_time"); MODULE_ALIAS("ip6t_time");
gpl-2.0
mohamaadhosein/VIPER-KERNEL-D802
drivers/gpu/drm/exynos/exynos_drm_plane.c
4787
4240
/* * Copyright (C) 2011 Samsung Electronics Co.Ltd * Authors: Joonyoung Shim <jy0922.shim@samsung.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include "drmP.h" #include "exynos_drm.h" #include "exynos_drm_crtc.h" #include "exynos_drm_drv.h" #include "exynos_drm_encoder.h" struct exynos_plane { struct drm_plane base; struct exynos_drm_overlay overlay; bool enabled; }; static const uint32_t formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_NV12, DRM_FORMAT_NV12M, DRM_FORMAT_NV12MT, }; static int exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h) { struct exynos_plane *exynos_plane = container_of(plane, struct exynos_plane, base); struct exynos_drm_overlay *overlay = &exynos_plane->overlay; struct exynos_drm_crtc_pos pos; unsigned int x = src_x >> 16; unsigned int y = src_y >> 16; int ret; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos)); pos.crtc_x = crtc_x; pos.crtc_y = crtc_y; pos.crtc_w = crtc_w; pos.crtc_h = crtc_h; pos.fb_x = x; pos.fb_y = y; /* TODO: scale feature */ ret = exynos_drm_overlay_update(overlay, fb, &crtc->mode, &pos); if (ret < 0) return ret; exynos_drm_fn_encoder(crtc, overlay, exynos_drm_encoder_crtc_mode_set); exynos_drm_fn_encoder(crtc, &overlay->zpos, exynos_drm_encoder_crtc_plane_commit); exynos_plane->enabled = true; return 0; } static int exynos_disable_plane(struct drm_plane *plane) { struct exynos_plane *exynos_plane = container_of(plane, struct exynos_plane, base); struct exynos_drm_overlay *overlay = &exynos_plane->overlay; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); if 
(!exynos_plane->enabled) return 0; exynos_drm_fn_encoder(plane->crtc, &overlay->zpos, exynos_drm_encoder_crtc_disable); exynos_plane->enabled = false; exynos_plane->overlay.zpos = DEFAULT_ZPOS; return 0; } static void exynos_plane_destroy(struct drm_plane *plane) { struct exynos_plane *exynos_plane = container_of(plane, struct exynos_plane, base); DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); exynos_disable_plane(plane); drm_plane_cleanup(plane); kfree(exynos_plane); } static struct drm_plane_funcs exynos_plane_funcs = { .update_plane = exynos_update_plane, .disable_plane = exynos_disable_plane, .destroy = exynos_plane_destroy, }; int exynos_plane_init(struct drm_device *dev, unsigned int nr) { struct exynos_plane *exynos_plane; uint32_t possible_crtcs; exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL); if (!exynos_plane) return -ENOMEM; /* all CRTCs are available */ possible_crtcs = (1 << MAX_CRTC) - 1; exynos_plane->overlay.zpos = DEFAULT_ZPOS; return drm_plane_init(dev, &exynos_plane->base, possible_crtcs, &exynos_plane_funcs, formats, ARRAY_SIZE(formats), false); } int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_exynos_plane_set_zpos *zpos_req = data; struct drm_mode_object *obj; struct drm_plane *plane; struct exynos_plane *exynos_plane; int ret = 0; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; if (zpos_req->zpos < 0 || zpos_req->zpos >= MAX_PLANE) { if (zpos_req->zpos != DEFAULT_ZPOS) { DRM_ERROR("zpos not within limits\n"); return -EINVAL; } } mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, zpos_req->plane_id, DRM_MODE_OBJECT_PLANE); if (!obj) { DRM_DEBUG_KMS("Unknown plane ID %d\n", zpos_req->plane_id); ret = -EINVAL; goto out; } plane = obj_to_plane(obj); exynos_plane = container_of(plane, struct exynos_plane, base); exynos_plane->overlay.zpos = zpos_req->zpos; out: 
mutex_unlock(&dev->mode_config.mutex); return ret; }
gpl-2.0
Smando87/TF201_kernel_src
arch/m68k/lib/uaccess.c
4787
4273
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/module.h> #include <asm/uaccess.h> unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n) { unsigned long tmp, res; asm volatile ("\n" " tst.l %0\n" " jeq 2f\n" "1: moves.l (%1)+,%3\n" " move.l %3,(%2)+\n" " subq.l #1,%0\n" " jne 1b\n" "2: btst #1,%5\n" " jeq 4f\n" "3: moves.w (%1)+,%3\n" " move.w %3,(%2)+\n" "4: btst #0,%5\n" " jeq 6f\n" "5: moves.b (%1)+,%3\n" " move.b %3,(%2)+\n" "6:\n" " .section .fixup,\"ax\"\n" " .even\n" "10: move.l %0,%3\n" "7: clr.l (%2)+\n" " subq.l #1,%3\n" " jne 7b\n" " lsl.l #2,%0\n" " btst #1,%5\n" " jeq 8f\n" "30: clr.w (%2)+\n" " addq.l #2,%0\n" "8: btst #0,%5\n" " jeq 6b\n" "50: clr.b (%2)+\n" " addq.l #1,%0\n" " jra 6b\n" " .previous\n" "\n" " .section __ex_table,\"a\"\n" " .align 4\n" " .long 1b,10b\n" " .long 3b,30b\n" " .long 5b,50b\n" " .previous" : "=d" (res), "+a" (from), "+a" (to), "=&r" (tmp) : "0" (n / 4), "d" (n & 3)); return res; } EXPORT_SYMBOL(__generic_copy_from_user); unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n) { unsigned long tmp, res; asm volatile ("\n" " tst.l %0\n" " jeq 4f\n" "1: move.l (%1)+,%3\n" "2: moves.l %3,(%2)+\n" "3: subq.l #1,%0\n" " jne 1b\n" "4: btst #1,%5\n" " jeq 6f\n" " move.w (%1)+,%3\n" "5: moves.w %3,(%2)+\n" "6: btst #0,%5\n" " jeq 8f\n" " move.b (%1)+,%3\n" "7: moves.b %3,(%2)+\n" "8:\n" " .section .fixup,\"ax\"\n" " .even\n" "20: lsl.l #2,%0\n" "50: add.l %5,%0\n" " jra 8b\n" " .previous\n" "\n" " .section __ex_table,\"a\"\n" " .align 4\n" " .long 2b,20b\n" " .long 3b,20b\n" " .long 5b,50b\n" " .long 6b,50b\n" " .long 7b,50b\n" " .long 8b,50b\n" " .previous" : "=d" (res), "+a" (from), "+a" (to), "=&r" (tmp) : "0" (n / 4), "d" (n & 3)); return res; } EXPORT_SYMBOL(__generic_copy_to_user); /* * Copy a null terminated string 
from userspace. */ long strncpy_from_user(char *dst, const char __user *src, long count) { long res; char c; if (count <= 0) return count; asm volatile ("\n" "1: moves.b (%2)+,%4\n" " move.b %4,(%1)+\n" " jeq 2f\n" " subq.l #1,%3\n" " jne 1b\n" "2: sub.l %3,%0\n" "3:\n" " .section .fixup,\"ax\"\n" " .even\n" "10: move.l %5,%0\n" " jra 3b\n" " .previous\n" "\n" " .section __ex_table,\"a\"\n" " .align 4\n" " .long 1b,10b\n" " .previous" : "=d" (res), "+a" (dst), "+a" (src), "+r" (count), "=&d" (c) : "i" (-EFAULT), "0" (count)); return res; } EXPORT_SYMBOL(strncpy_from_user); /* * Return the size of a string (including the ending 0) * * Return 0 on exception, a value greater than N if too long */ long strnlen_user(const char __user *src, long n) { char c; long res; asm volatile ("\n" "1: subq.l #1,%1\n" " jmi 3f\n" "2: moves.b (%0)+,%2\n" " tst.b %2\n" " jne 1b\n" " jra 4f\n" "\n" "3: addq.l #1,%0\n" "4: sub.l %4,%0\n" "5:\n" " .section .fixup,\"ax\"\n" " .even\n" "20: sub.l %0,%0\n" " jra 5b\n" " .previous\n" "\n" " .section __ex_table,\"a\"\n" " .align 4\n" " .long 2b,20b\n" " .previous\n" : "=&a" (res), "+d" (n), "=&d" (c) : "0" (src), "r" (src)); return res; } EXPORT_SYMBOL(strnlen_user); /* * Zero Userspace */ unsigned long __clear_user(void __user *to, unsigned long n) { unsigned long res; asm volatile ("\n" " tst.l %0\n" " jeq 3f\n" "1: moves.l %2,(%1)+\n" "2: subq.l #1,%0\n" " jne 1b\n" "3: btst #1,%4\n" " jeq 5f\n" "4: moves.w %2,(%1)+\n" "5: btst #0,%4\n" " jeq 7f\n" "6: moves.b %2,(%1)\n" "7:\n" " .section .fixup,\"ax\"\n" " .even\n" "10: lsl.l #2,%0\n" "40: add.l %4,%0\n" " jra 7b\n" " .previous\n" "\n" " .section __ex_table,\"a\"\n" " .align 4\n" " .long 1b,10b\n" " .long 2b,10b\n" " .long 4b,40b\n" " .long 5b,40b\n" " .long 6b,40b\n" " .long 7b,40b\n" " .previous" : "=d" (res), "+a" (to) : "r" (0), "0" (n / 4), "d" (n & 3)); return res; } EXPORT_SYMBOL(__clear_user);
gpl-2.0
helicopter88/android_kernel_lge_hammerhead
drivers/staging/comedi/drivers/ni_tio.c
8115
49313
/* comedi/drivers/ni_tio.c Support for NI general purpose counters Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: ni_tio Description: National Instruments general purpose counters Devices: Author: J.P. Mellor <jpmellor@rose-hulman.edu>, Herman.Bruyninckx@mech.kuleuven.ac.be, Wim.Meeussen@mech.kuleuven.ac.be, Klaas.Gadeyne@mech.kuleuven.ac.be, Frank Mori Hess <fmhess@users.sourceforge.net> Updated: Thu Nov 16 09:50:32 EST 2006 Status: works This module is not used directly by end-users. Rather, it is used by other drivers (for example ni_660x and ni_pcimio) to provide support for NI's general purpose counters. It was originally based on the counter code from ni_660x.c and ni_mio_common.c. 
References: DAQ 660x Register-Level Programmer Manual (NI 370505A-01) DAQ 6601/6602 User Manual (NI 322137B-01) 340934b.pdf DAQ-STC reference manual */ /* TODO: Support use of both banks X and Y */ #include "ni_tio_internal.h" static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter, unsigned generic_clock_source); static unsigned ni_tio_generic_clock_src_select(const struct ni_gpct *counter); MODULE_AUTHOR("Comedi <comedi@comedi.org>"); MODULE_DESCRIPTION("Comedi support for NI general-purpose counters"); MODULE_LICENSE("GPL"); static inline enum Gi_Counting_Mode_Reg_Bits Gi_Alternate_Sync_Bit(enum ni_gpct_variant variant) { switch (variant) { case ni_gpct_variant_e_series: return 0; break; case ni_gpct_variant_m_series: return Gi_M_Series_Alternate_Sync_Bit; break; case ni_gpct_variant_660x: return Gi_660x_Alternate_Sync_Bit; break; default: BUG(); break; } return 0; } static inline enum Gi_Counting_Mode_Reg_Bits Gi_Prescale_X2_Bit(enum ni_gpct_variant variant) { switch (variant) { case ni_gpct_variant_e_series: return 0; break; case ni_gpct_variant_m_series: return Gi_M_Series_Prescale_X2_Bit; break; case ni_gpct_variant_660x: return Gi_660x_Prescale_X2_Bit; break; default: BUG(); break; } return 0; } static inline enum Gi_Counting_Mode_Reg_Bits Gi_Prescale_X8_Bit(enum ni_gpct_variant variant) { switch (variant) { case ni_gpct_variant_e_series: return 0; break; case ni_gpct_variant_m_series: return Gi_M_Series_Prescale_X8_Bit; break; case ni_gpct_variant_660x: return Gi_660x_Prescale_X8_Bit; break; default: BUG(); break; } return 0; } static inline enum Gi_Counting_Mode_Reg_Bits Gi_HW_Arm_Select_Mask(enum ni_gpct_variant variant) { switch (variant) { case ni_gpct_variant_e_series: return 0; break; case ni_gpct_variant_m_series: return Gi_M_Series_HW_Arm_Select_Mask; break; case ni_gpct_variant_660x: return Gi_660x_HW_Arm_Select_Mask; break; default: BUG(); break; } return 0; } /* clock sources for ni_660x boards, get bits with Gi_Source_Select_Bits() 
*/ enum ni_660x_clock_source { NI_660x_Timebase_1_Clock = 0x0, /* 20MHz */ NI_660x_Source_Pin_i_Clock = 0x1, NI_660x_Next_Gate_Clock = 0xa, NI_660x_Timebase_2_Clock = 0x12, /* 100KHz */ NI_660x_Next_TC_Clock = 0x13, NI_660x_Timebase_3_Clock = 0x1e, /* 80MHz */ NI_660x_Logic_Low_Clock = 0x1f, }; static const unsigned ni_660x_max_rtsi_channel = 6; static inline unsigned NI_660x_RTSI_Clock(unsigned n) { BUG_ON(n > ni_660x_max_rtsi_channel); return 0xb + n; } static const unsigned ni_660x_max_source_pin = 7; static inline unsigned NI_660x_Source_Pin_Clock(unsigned n) { BUG_ON(n > ni_660x_max_source_pin); return 0x2 + n; } /* clock sources for ni e and m series boards, get bits with Gi_Source_Select_Bits() */ enum ni_m_series_clock_source { NI_M_Series_Timebase_1_Clock = 0x0, /* 20MHz */ NI_M_Series_Timebase_2_Clock = 0x12, /* 100KHz */ NI_M_Series_Next_TC_Clock = 0x13, NI_M_Series_Next_Gate_Clock = 0x14, /* when Gi_Src_SubSelect = 0 */ NI_M_Series_PXI_Star_Trigger_Clock = 0x14, /* when Gi_Src_SubSelect = 1 */ NI_M_Series_PXI10_Clock = 0x1d, NI_M_Series_Timebase_3_Clock = 0x1e, /* 80MHz, when Gi_Src_SubSelect = 0 */ NI_M_Series_Analog_Trigger_Out_Clock = 0x1e, /* when Gi_Src_SubSelect = 1 */ NI_M_Series_Logic_Low_Clock = 0x1f, }; static const unsigned ni_m_series_max_pfi_channel = 15; static inline unsigned NI_M_Series_PFI_Clock(unsigned n) { BUG_ON(n > ni_m_series_max_pfi_channel); if (n < 10) return 1 + n; else return 0xb + n; } static const unsigned ni_m_series_max_rtsi_channel = 7; static inline unsigned NI_M_Series_RTSI_Clock(unsigned n) { BUG_ON(n > ni_m_series_max_rtsi_channel); if (n == 7) return 0x1b; else return 0xb + n; } enum ni_660x_gate_select { NI_660x_Source_Pin_i_Gate_Select = 0x0, NI_660x_Gate_Pin_i_Gate_Select = 0x1, NI_660x_Next_SRC_Gate_Select = 0xa, NI_660x_Next_Out_Gate_Select = 0x14, NI_660x_Logic_Low_Gate_Select = 0x1f, }; static const unsigned ni_660x_max_gate_pin = 7; static inline unsigned NI_660x_Gate_Pin_Gate_Select(unsigned n) { BUG_ON(n > 
ni_660x_max_gate_pin); return 0x2 + n; } static inline unsigned NI_660x_RTSI_Gate_Select(unsigned n) { BUG_ON(n > ni_660x_max_rtsi_channel); return 0xb + n; } enum ni_m_series_gate_select { NI_M_Series_Timestamp_Mux_Gate_Select = 0x0, NI_M_Series_AI_START2_Gate_Select = 0x12, NI_M_Series_PXI_Star_Trigger_Gate_Select = 0x13, NI_M_Series_Next_Out_Gate_Select = 0x14, NI_M_Series_AI_START1_Gate_Select = 0x1c, NI_M_Series_Next_SRC_Gate_Select = 0x1d, NI_M_Series_Analog_Trigger_Out_Gate_Select = 0x1e, NI_M_Series_Logic_Low_Gate_Select = 0x1f, }; static inline unsigned NI_M_Series_RTSI_Gate_Select(unsigned n) { BUG_ON(n > ni_m_series_max_rtsi_channel); if (n == 7) return 0x1b; return 0xb + n; } static inline unsigned NI_M_Series_PFI_Gate_Select(unsigned n) { BUG_ON(n > ni_m_series_max_pfi_channel); if (n < 10) return 1 + n; return 0xb + n; } static inline unsigned Gi_Source_Select_Bits(unsigned source) { return (source << Gi_Source_Select_Shift) & Gi_Source_Select_Mask; } static inline unsigned Gi_Gate_Select_Bits(unsigned gate_select) { return (gate_select << Gi_Gate_Select_Shift) & Gi_Gate_Select_Mask; } enum ni_660x_second_gate_select { NI_660x_Source_Pin_i_Second_Gate_Select = 0x0, NI_660x_Up_Down_Pin_i_Second_Gate_Select = 0x1, NI_660x_Next_SRC_Second_Gate_Select = 0xa, NI_660x_Next_Out_Second_Gate_Select = 0x14, NI_660x_Selected_Gate_Second_Gate_Select = 0x1e, NI_660x_Logic_Low_Second_Gate_Select = 0x1f, }; static const unsigned ni_660x_max_up_down_pin = 7; static inline unsigned NI_660x_Up_Down_Pin_Second_Gate_Select(unsigned n) { BUG_ON(n > ni_660x_max_up_down_pin); return 0x2 + n; } static inline unsigned NI_660x_RTSI_Second_Gate_Select(unsigned n) { BUG_ON(n > ni_660x_max_rtsi_channel); return 0xb + n; } static const unsigned int counter_status_mask = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING; static int __init ni_tio_init_module(void) { return 0; } module_init(ni_tio_init_module); static void __exit ni_tio_cleanup_module(void) { } 
module_exit(ni_tio_cleanup_module); struct ni_gpct_device *ni_gpct_device_construct(struct comedi_device *dev, void (*write_register) (struct ni_gpct * counter, unsigned bits, enum ni_gpct_register reg), unsigned (*read_register) (struct ni_gpct *counter, enum ni_gpct_register reg), enum ni_gpct_variant variant, unsigned num_counters) { unsigned i; struct ni_gpct_device *counter_dev = kzalloc(sizeof(struct ni_gpct_device), GFP_KERNEL); if (counter_dev == NULL) return NULL; counter_dev->dev = dev; counter_dev->write_register = write_register; counter_dev->read_register = read_register; counter_dev->variant = variant; spin_lock_init(&counter_dev->regs_lock); BUG_ON(num_counters == 0); counter_dev->counters = kzalloc(sizeof(struct ni_gpct) * num_counters, GFP_KERNEL); if (counter_dev->counters == NULL) { kfree(counter_dev); return NULL; } for (i = 0; i < num_counters; ++i) { counter_dev->counters[i].counter_dev = counter_dev; spin_lock_init(&counter_dev->counters[i].lock); } counter_dev->num_counters = num_counters; return counter_dev; } EXPORT_SYMBOL_GPL(ni_gpct_device_construct); void ni_gpct_device_destroy(struct ni_gpct_device *counter_dev) { if (counter_dev->counters == NULL) return; kfree(counter_dev->counters); kfree(counter_dev); } EXPORT_SYMBOL_GPL(ni_gpct_device_destroy); static int ni_tio_second_gate_registers_present(const struct ni_gpct_device *counter_dev) { switch (counter_dev->variant) { case ni_gpct_variant_e_series: return 0; break; case ni_gpct_variant_m_series: case ni_gpct_variant_660x: return 1; break; default: BUG(); break; } return 0; } static void ni_tio_reset_count_and_disarm(struct ni_gpct *counter) { write_register(counter, Gi_Reset_Bit(counter->counter_index), NITIO_Gxx_Joint_Reset_Reg(counter->counter_index)); } void ni_tio_init_counter(struct ni_gpct *counter) { struct ni_gpct_device *counter_dev = counter->counter_dev; ni_tio_reset_count_and_disarm(counter); /* initialize counter registers */ 
counter_dev->regs[NITIO_Gi_Autoincrement_Reg(counter->counter_index)] = 0x0; write_register(counter, counter_dev-> regs[NITIO_Gi_Autoincrement_Reg(counter->counter_index)], NITIO_Gi_Autoincrement_Reg(counter->counter_index)); ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index), ~0, Gi_Synchronize_Gate_Bit); ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index), ~0, 0); counter_dev->regs[NITIO_Gi_LoadA_Reg(counter->counter_index)] = 0x0; write_register(counter, counter_dev-> regs[NITIO_Gi_LoadA_Reg(counter->counter_index)], NITIO_Gi_LoadA_Reg(counter->counter_index)); counter_dev->regs[NITIO_Gi_LoadB_Reg(counter->counter_index)] = 0x0; write_register(counter, counter_dev-> regs[NITIO_Gi_LoadB_Reg(counter->counter_index)], NITIO_Gi_LoadB_Reg(counter->counter_index)); ni_tio_set_bits(counter, NITIO_Gi_Input_Select_Reg(counter->counter_index), ~0, 0); if (ni_tio_counting_mode_registers_present(counter_dev)) { ni_tio_set_bits(counter, NITIO_Gi_Counting_Mode_Reg(counter-> counter_index), ~0, 0); } if (ni_tio_second_gate_registers_present(counter_dev)) { counter_dev-> regs[NITIO_Gi_Second_Gate_Reg(counter->counter_index)] = 0x0; write_register(counter, counter_dev-> regs[NITIO_Gi_Second_Gate_Reg (counter->counter_index)], NITIO_Gi_Second_Gate_Reg(counter-> counter_index)); } ni_tio_set_bits(counter, NITIO_Gi_DMA_Config_Reg(counter->counter_index), ~0, 0x0); ni_tio_set_bits(counter, NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index), ~0, 0x0); } EXPORT_SYMBOL_GPL(ni_tio_init_counter); static unsigned int ni_tio_counter_status(struct ni_gpct *counter) { unsigned int status = 0; const unsigned bits = read_register(counter, NITIO_Gxx_Status_Reg(counter-> counter_index)); if (bits & Gi_Armed_Bit(counter->counter_index)) { status |= COMEDI_COUNTER_ARMED; if (bits & Gi_Counting_Bit(counter->counter_index)) status |= COMEDI_COUNTER_COUNTING; } return status; } static void ni_tio_set_sync_mode(struct ni_gpct *counter, int force_alt_sync) { struct 
ni_gpct_device *counter_dev = counter->counter_dev; const unsigned counting_mode_reg = NITIO_Gi_Counting_Mode_Reg(counter->counter_index); static const uint64_t min_normal_sync_period_ps = 25000; const uint64_t clock_period_ps = ni_tio_clock_period_ps(counter, ni_tio_generic_clock_src_select (counter)); if (ni_tio_counting_mode_registers_present(counter_dev) == 0) return; switch (ni_tio_get_soft_copy(counter, counting_mode_reg) & Gi_Counting_Mode_Mask) { case Gi_Counting_Mode_QuadratureX1_Bits: case Gi_Counting_Mode_QuadratureX2_Bits: case Gi_Counting_Mode_QuadratureX4_Bits: case Gi_Counting_Mode_Sync_Source_Bits: force_alt_sync = 1; break; default: break; } /* It's not clear what we should do if clock_period is unknown, so we are not using the alt sync bit in that case, but allow the caller to decide by using the force_alt_sync parameter. */ if (force_alt_sync || (clock_period_ps && clock_period_ps < min_normal_sync_period_ps)) { ni_tio_set_bits(counter, counting_mode_reg, Gi_Alternate_Sync_Bit(counter_dev->variant), Gi_Alternate_Sync_Bit(counter_dev->variant)); } else { ni_tio_set_bits(counter, counting_mode_reg, Gi_Alternate_Sync_Bit(counter_dev->variant), 0x0); } } static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode) { struct ni_gpct_device *counter_dev = counter->counter_dev; unsigned mode_reg_mask; unsigned mode_reg_values; unsigned input_select_bits = 0; /* these bits map directly on to the mode register */ static const unsigned mode_reg_direct_mask = NI_GPCT_GATE_ON_BOTH_EDGES_BIT | NI_GPCT_EDGE_GATE_MODE_MASK | NI_GPCT_STOP_MODE_MASK | NI_GPCT_OUTPUT_MODE_MASK | NI_GPCT_HARDWARE_DISARM_MASK | NI_GPCT_LOADING_ON_TC_BIT | NI_GPCT_LOADING_ON_GATE_BIT | NI_GPCT_LOAD_B_SELECT_BIT; mode_reg_mask = mode_reg_direct_mask | Gi_Reload_Source_Switching_Bit; mode_reg_values = mode & mode_reg_direct_mask; switch (mode & NI_GPCT_RELOAD_SOURCE_MASK) { case NI_GPCT_RELOAD_SOURCE_FIXED_BITS: break; case NI_GPCT_RELOAD_SOURCE_SWITCHING_BITS: 
mode_reg_values |= Gi_Reload_Source_Switching_Bit; break; case NI_GPCT_RELOAD_SOURCE_GATE_SELECT_BITS: input_select_bits |= Gi_Gate_Select_Load_Source_Bit; mode_reg_mask |= Gi_Gating_Mode_Mask; mode_reg_values |= Gi_Level_Gating_Bits; break; default: break; } ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index), mode_reg_mask, mode_reg_values); if (ni_tio_counting_mode_registers_present(counter_dev)) { unsigned counting_mode_bits = 0; counting_mode_bits |= (mode >> NI_GPCT_COUNTING_MODE_SHIFT) & Gi_Counting_Mode_Mask; counting_mode_bits |= ((mode >> NI_GPCT_INDEX_PHASE_BITSHIFT) << Gi_Index_Phase_Bitshift) & Gi_Index_Phase_Mask; if (mode & NI_GPCT_INDEX_ENABLE_BIT) counting_mode_bits |= Gi_Index_Mode_Bit; ni_tio_set_bits(counter, NITIO_Gi_Counting_Mode_Reg(counter-> counter_index), Gi_Counting_Mode_Mask | Gi_Index_Phase_Mask | Gi_Index_Mode_Bit, counting_mode_bits); ni_tio_set_sync_mode(counter, 0); } ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index), Gi_Up_Down_Mask, (mode >> NI_GPCT_COUNTING_DIRECTION_SHIFT) << Gi_Up_Down_Shift); if (mode & NI_GPCT_OR_GATE_BIT) input_select_bits |= Gi_Or_Gate_Bit; if (mode & NI_GPCT_INVERT_OUTPUT_BIT) input_select_bits |= Gi_Output_Polarity_Bit; ni_tio_set_bits(counter, NITIO_Gi_Input_Select_Reg(counter->counter_index), Gi_Gate_Select_Load_Source_Bit | Gi_Or_Gate_Bit | Gi_Output_Polarity_Bit, input_select_bits); return 0; } int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger) { struct ni_gpct_device *counter_dev = counter->counter_dev; unsigned command_transient_bits = 0; if (arm) { switch (start_trigger) { case NI_GPCT_ARM_IMMEDIATE: command_transient_bits |= Gi_Arm_Bit; break; case NI_GPCT_ARM_PAIRED_IMMEDIATE: command_transient_bits |= Gi_Arm_Bit | Gi_Arm_Copy_Bit; break; default: break; } if (ni_tio_counting_mode_registers_present(counter_dev)) { unsigned counting_mode_bits = 0; switch (start_trigger) { case NI_GPCT_ARM_IMMEDIATE: case NI_GPCT_ARM_PAIRED_IMMEDIATE: break; 
default: if (start_trigger & NI_GPCT_ARM_UNKNOWN) { /* pass-through the least significant bits so we can figure out what select later */ unsigned hw_arm_select_bits = (start_trigger << Gi_HW_Arm_Select_Shift) & Gi_HW_Arm_Select_Mask (counter_dev->variant); counting_mode_bits |= Gi_HW_Arm_Enable_Bit | hw_arm_select_bits; } else { return -EINVAL; } break; } ni_tio_set_bits(counter, NITIO_Gi_Counting_Mode_Reg (counter->counter_index), Gi_HW_Arm_Select_Mask (counter_dev->variant) | Gi_HW_Arm_Enable_Bit, counting_mode_bits); } } else { command_transient_bits |= Gi_Disarm_Bit; } ni_tio_set_bits_transient(counter, NITIO_Gi_Command_Reg(counter->counter_index), 0, 0, command_transient_bits); return 0; } EXPORT_SYMBOL_GPL(ni_tio_arm); static unsigned ni_660x_source_select_bits(unsigned int clock_source) { unsigned ni_660x_clock; unsigned i; const unsigned clock_select_bits = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK; switch (clock_select_bits) { case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS: ni_660x_clock = NI_660x_Timebase_1_Clock; break; case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS: ni_660x_clock = NI_660x_Timebase_2_Clock; break; case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS: ni_660x_clock = NI_660x_Timebase_3_Clock; break; case NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS: ni_660x_clock = NI_660x_Logic_Low_Clock; break; case NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS: ni_660x_clock = NI_660x_Source_Pin_i_Clock; break; case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS: ni_660x_clock = NI_660x_Next_Gate_Clock; break; case NI_GPCT_NEXT_TC_CLOCK_SRC_BITS: ni_660x_clock = NI_660x_Next_TC_Clock; break; default: for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) { if (clock_select_bits == NI_GPCT_RTSI_CLOCK_SRC_BITS(i)) { ni_660x_clock = NI_660x_RTSI_Clock(i); break; } } if (i <= ni_660x_max_rtsi_channel) break; for (i = 0; i <= ni_660x_max_source_pin; ++i) { if (clock_select_bits == NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(i)) { ni_660x_clock = NI_660x_Source_Pin_Clock(i); break; } } if (i <= ni_660x_max_source_pin) break; 
ni_660x_clock = 0; BUG(); break; } return Gi_Source_Select_Bits(ni_660x_clock); } static unsigned ni_m_series_source_select_bits(unsigned int clock_source) { unsigned ni_m_series_clock; unsigned i; const unsigned clock_select_bits = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK; switch (clock_select_bits) { case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS: ni_m_series_clock = NI_M_Series_Timebase_1_Clock; break; case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS: ni_m_series_clock = NI_M_Series_Timebase_2_Clock; break; case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS: ni_m_series_clock = NI_M_Series_Timebase_3_Clock; break; case NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS: ni_m_series_clock = NI_M_Series_Logic_Low_Clock; break; case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS: ni_m_series_clock = NI_M_Series_Next_Gate_Clock; break; case NI_GPCT_NEXT_TC_CLOCK_SRC_BITS: ni_m_series_clock = NI_M_Series_Next_TC_Clock; break; case NI_GPCT_PXI10_CLOCK_SRC_BITS: ni_m_series_clock = NI_M_Series_PXI10_Clock; break; case NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS: ni_m_series_clock = NI_M_Series_PXI_Star_Trigger_Clock; break; case NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS: ni_m_series_clock = NI_M_Series_Analog_Trigger_Out_Clock; break; default: for (i = 0; i <= ni_m_series_max_rtsi_channel; ++i) { if (clock_select_bits == NI_GPCT_RTSI_CLOCK_SRC_BITS(i)) { ni_m_series_clock = NI_M_Series_RTSI_Clock(i); break; } } if (i <= ni_m_series_max_rtsi_channel) break; for (i = 0; i <= ni_m_series_max_pfi_channel; ++i) { if (clock_select_bits == NI_GPCT_PFI_CLOCK_SRC_BITS(i)) { ni_m_series_clock = NI_M_Series_PFI_Clock(i); break; } } if (i <= ni_m_series_max_pfi_channel) break; printk(KERN_ERR "invalid clock source 0x%lx\n", (unsigned long)clock_source); BUG(); ni_m_series_clock = 0; break; } return Gi_Source_Select_Bits(ni_m_series_clock); }; static void ni_tio_set_source_subselect(struct ni_gpct *counter, unsigned int clock_source) { struct ni_gpct_device *counter_dev = counter->counter_dev; const unsigned second_gate_reg = 
NITIO_Gi_Second_Gate_Reg(counter->counter_index); if (counter_dev->variant != ni_gpct_variant_m_series) return; switch (clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK) { /* Gi_Source_Subselect is zero */ case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS: case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS: counter_dev->regs[second_gate_reg] &= ~Gi_Source_Subselect_Bit; break; /* Gi_Source_Subselect is one */ case NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS: case NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS: counter_dev->regs[second_gate_reg] |= Gi_Source_Subselect_Bit; break; /* Gi_Source_Subselect doesn't matter */ default: return; break; } write_register(counter, counter_dev->regs[second_gate_reg], second_gate_reg); } static int ni_tio_set_clock_src(struct ni_gpct *counter, unsigned int clock_source, unsigned int period_ns) { struct ni_gpct_device *counter_dev = counter->counter_dev; unsigned input_select_bits = 0; static const uint64_t pico_per_nano = 1000; /*FIXME: validate clock source */ switch (counter_dev->variant) { case ni_gpct_variant_660x: input_select_bits |= ni_660x_source_select_bits(clock_source); break; case ni_gpct_variant_e_series: case ni_gpct_variant_m_series: input_select_bits |= ni_m_series_source_select_bits(clock_source); break; default: BUG(); break; } if (clock_source & NI_GPCT_INVERT_CLOCK_SRC_BIT) input_select_bits |= Gi_Source_Polarity_Bit; ni_tio_set_bits(counter, NITIO_Gi_Input_Select_Reg(counter->counter_index), Gi_Source_Select_Mask | Gi_Source_Polarity_Bit, input_select_bits); ni_tio_set_source_subselect(counter, clock_source); if (ni_tio_counting_mode_registers_present(counter_dev)) { const unsigned prescaling_mode = clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK; unsigned counting_mode_bits = 0; switch (prescaling_mode) { case NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS: break; case NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS: counting_mode_bits |= Gi_Prescale_X2_Bit(counter_dev->variant); break; case NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS: counting_mode_bits |= 
Gi_Prescale_X8_Bit(counter_dev->variant); break; default: return -EINVAL; break; } ni_tio_set_bits(counter, NITIO_Gi_Counting_Mode_Reg(counter-> counter_index), Gi_Prescale_X2_Bit(counter_dev->variant) | Gi_Prescale_X8_Bit(counter_dev->variant), counting_mode_bits); } counter->clock_period_ps = pico_per_nano * period_ns; ni_tio_set_sync_mode(counter, 0); return 0; } static unsigned ni_tio_clock_src_modifiers(const struct ni_gpct *counter) { struct ni_gpct_device *counter_dev = counter->counter_dev; const unsigned counting_mode_bits = ni_tio_get_soft_copy(counter, NITIO_Gi_Counting_Mode_Reg (counter-> counter_index)); unsigned bits = 0; if (ni_tio_get_soft_copy(counter, NITIO_Gi_Input_Select_Reg (counter->counter_index)) & Gi_Source_Polarity_Bit) bits |= NI_GPCT_INVERT_CLOCK_SRC_BIT; if (counting_mode_bits & Gi_Prescale_X2_Bit(counter_dev->variant)) bits |= NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS; if (counting_mode_bits & Gi_Prescale_X8_Bit(counter_dev->variant)) bits |= NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS; return bits; } static unsigned ni_m_series_clock_src_select(const struct ni_gpct *counter) { struct ni_gpct_device *counter_dev = counter->counter_dev; const unsigned second_gate_reg = NITIO_Gi_Second_Gate_Reg(counter->counter_index); unsigned clock_source = 0; unsigned i; const unsigned input_select = (ni_tio_get_soft_copy(counter, NITIO_Gi_Input_Select_Reg (counter->counter_index)) & Gi_Source_Select_Mask) >> Gi_Source_Select_Shift; switch (input_select) { case NI_M_Series_Timebase_1_Clock: clock_source = NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS; break; case NI_M_Series_Timebase_2_Clock: clock_source = NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS; break; case NI_M_Series_Timebase_3_Clock: if (counter_dev->regs[second_gate_reg] & Gi_Source_Subselect_Bit) clock_source = NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS; else clock_source = NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS; break; case NI_M_Series_Logic_Low_Clock: clock_source = NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS; break; case 
NI_M_Series_Next_Gate_Clock: if (counter_dev->regs[second_gate_reg] & Gi_Source_Subselect_Bit) clock_source = NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS; else clock_source = NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS; break; case NI_M_Series_PXI10_Clock: clock_source = NI_GPCT_PXI10_CLOCK_SRC_BITS; break; case NI_M_Series_Next_TC_Clock: clock_source = NI_GPCT_NEXT_TC_CLOCK_SRC_BITS; break; default: for (i = 0; i <= ni_m_series_max_rtsi_channel; ++i) { if (input_select == NI_M_Series_RTSI_Clock(i)) { clock_source = NI_GPCT_RTSI_CLOCK_SRC_BITS(i); break; } } if (i <= ni_m_series_max_rtsi_channel) break; for (i = 0; i <= ni_m_series_max_pfi_channel; ++i) { if (input_select == NI_M_Series_PFI_Clock(i)) { clock_source = NI_GPCT_PFI_CLOCK_SRC_BITS(i); break; } } if (i <= ni_m_series_max_pfi_channel) break; BUG(); break; } clock_source |= ni_tio_clock_src_modifiers(counter); return clock_source; } static unsigned ni_660x_clock_src_select(const struct ni_gpct *counter) { unsigned clock_source = 0; unsigned i; const unsigned input_select = (ni_tio_get_soft_copy(counter, NITIO_Gi_Input_Select_Reg (counter->counter_index)) & Gi_Source_Select_Mask) >> Gi_Source_Select_Shift; switch (input_select) { case NI_660x_Timebase_1_Clock: clock_source = NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS; break; case NI_660x_Timebase_2_Clock: clock_source = NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS; break; case NI_660x_Timebase_3_Clock: clock_source = NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS; break; case NI_660x_Logic_Low_Clock: clock_source = NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS; break; case NI_660x_Source_Pin_i_Clock: clock_source = NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS; break; case NI_660x_Next_Gate_Clock: clock_source = NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS; break; case NI_660x_Next_TC_Clock: clock_source = NI_GPCT_NEXT_TC_CLOCK_SRC_BITS; break; default: for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) { if (input_select == NI_660x_RTSI_Clock(i)) { clock_source = NI_GPCT_RTSI_CLOCK_SRC_BITS(i); break; } } if (i <= ni_660x_max_rtsi_channel) 
break; for (i = 0; i <= ni_660x_max_source_pin; ++i) { if (input_select == NI_660x_Source_Pin_Clock(i)) { clock_source = NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(i); break; } } if (i <= ni_660x_max_source_pin) break; BUG(); break; } clock_source |= ni_tio_clock_src_modifiers(counter); return clock_source; } static unsigned ni_tio_generic_clock_src_select(const struct ni_gpct *counter) { switch (counter->counter_dev->variant) { case ni_gpct_variant_e_series: case ni_gpct_variant_m_series: return ni_m_series_clock_src_select(counter); break; case ni_gpct_variant_660x: return ni_660x_clock_src_select(counter); break; default: BUG(); break; } return 0; } static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter, unsigned generic_clock_source) { uint64_t clock_period_ps; switch (generic_clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK) { case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS: clock_period_ps = 50000; break; case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS: clock_period_ps = 10000000; break; case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS: clock_period_ps = 12500; break; case NI_GPCT_PXI10_CLOCK_SRC_BITS: clock_period_ps = 100000; break; default: /* clock period is specified by user with prescaling already taken into account. 
*/ return counter->clock_period_ps; break; } switch (generic_clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) { case NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS: break; case NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS: clock_period_ps *= 2; break; case NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS: clock_period_ps *= 8; break; default: BUG(); break; } return clock_period_ps; } static void ni_tio_get_clock_src(struct ni_gpct *counter, unsigned int *clock_source, unsigned int *period_ns) { static const unsigned pico_per_nano = 1000; uint64_t temp64; *clock_source = ni_tio_generic_clock_src_select(counter); temp64 = ni_tio_clock_period_ps(counter, *clock_source); do_div(temp64, pico_per_nano); *period_ns = temp64; } static void ni_tio_set_first_gate_modifiers(struct ni_gpct *counter, unsigned int gate_source) { const unsigned mode_mask = Gi_Gate_Polarity_Bit | Gi_Gating_Mode_Mask; unsigned mode_values = 0; if (gate_source & CR_INVERT) mode_values |= Gi_Gate_Polarity_Bit; if (gate_source & CR_EDGE) mode_values |= Gi_Rising_Edge_Gating_Bits; else mode_values |= Gi_Level_Gating_Bits; ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index), mode_mask, mode_values); } static int ni_660x_set_first_gate(struct ni_gpct *counter, unsigned int gate_source) { const unsigned selected_gate = CR_CHAN(gate_source); /* bits of selected_gate that may be meaningful to input select register */ const unsigned selected_gate_mask = 0x1f; unsigned ni_660x_gate_select; unsigned i; switch (selected_gate) { case NI_GPCT_NEXT_SOURCE_GATE_SELECT: ni_660x_gate_select = NI_660x_Next_SRC_Gate_Select; break; case NI_GPCT_NEXT_OUT_GATE_SELECT: case NI_GPCT_LOGIC_LOW_GATE_SELECT: case NI_GPCT_SOURCE_PIN_i_GATE_SELECT: case NI_GPCT_GATE_PIN_i_GATE_SELECT: ni_660x_gate_select = selected_gate & selected_gate_mask; break; default: for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) { if (selected_gate == NI_GPCT_RTSI_GATE_SELECT(i)) { ni_660x_gate_select = selected_gate & selected_gate_mask; break; } } if (i <= 
ni_660x_max_rtsi_channel) break; for (i = 0; i <= ni_660x_max_gate_pin; ++i) { if (selected_gate == NI_GPCT_GATE_PIN_GATE_SELECT(i)) { ni_660x_gate_select = selected_gate & selected_gate_mask; break; } } if (i <= ni_660x_max_gate_pin) break; return -EINVAL; break; } ni_tio_set_bits(counter, NITIO_Gi_Input_Select_Reg(counter->counter_index), Gi_Gate_Select_Mask, Gi_Gate_Select_Bits(ni_660x_gate_select)); return 0; } static int ni_m_series_set_first_gate(struct ni_gpct *counter, unsigned int gate_source) { const unsigned selected_gate = CR_CHAN(gate_source); /* bits of selected_gate that may be meaningful to input select register */ const unsigned selected_gate_mask = 0x1f; unsigned ni_m_series_gate_select; unsigned i; switch (selected_gate) { case NI_GPCT_TIMESTAMP_MUX_GATE_SELECT: case NI_GPCT_AI_START2_GATE_SELECT: case NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT: case NI_GPCT_NEXT_OUT_GATE_SELECT: case NI_GPCT_AI_START1_GATE_SELECT: case NI_GPCT_NEXT_SOURCE_GATE_SELECT: case NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT: case NI_GPCT_LOGIC_LOW_GATE_SELECT: ni_m_series_gate_select = selected_gate & selected_gate_mask; break; default: for (i = 0; i <= ni_m_series_max_rtsi_channel; ++i) { if (selected_gate == NI_GPCT_RTSI_GATE_SELECT(i)) { ni_m_series_gate_select = selected_gate & selected_gate_mask; break; } } if (i <= ni_m_series_max_rtsi_channel) break; for (i = 0; i <= ni_m_series_max_pfi_channel; ++i) { if (selected_gate == NI_GPCT_PFI_GATE_SELECT(i)) { ni_m_series_gate_select = selected_gate & selected_gate_mask; break; } } if (i <= ni_m_series_max_pfi_channel) break; return -EINVAL; break; } ni_tio_set_bits(counter, NITIO_Gi_Input_Select_Reg(counter->counter_index), Gi_Gate_Select_Mask, Gi_Gate_Select_Bits(ni_m_series_gate_select)); return 0; } static int ni_660x_set_second_gate(struct ni_gpct *counter, unsigned int gate_source) { struct ni_gpct_device *counter_dev = counter->counter_dev; const unsigned second_gate_reg = NITIO_Gi_Second_Gate_Reg(counter->counter_index); 
const unsigned selected_second_gate = CR_CHAN(gate_source); /* bits of second_gate that may be meaningful to second gate register */ static const unsigned selected_second_gate_mask = 0x1f; unsigned ni_660x_second_gate_select; unsigned i; switch (selected_second_gate) { case NI_GPCT_SOURCE_PIN_i_GATE_SELECT: case NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT: case NI_GPCT_SELECTED_GATE_GATE_SELECT: case NI_GPCT_NEXT_OUT_GATE_SELECT: case NI_GPCT_LOGIC_LOW_GATE_SELECT: ni_660x_second_gate_select = selected_second_gate & selected_second_gate_mask; break; case NI_GPCT_NEXT_SOURCE_GATE_SELECT: ni_660x_second_gate_select = NI_660x_Next_SRC_Second_Gate_Select; break; default: for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) { if (selected_second_gate == NI_GPCT_RTSI_GATE_SELECT(i)) { ni_660x_second_gate_select = selected_second_gate & selected_second_gate_mask; break; } } if (i <= ni_660x_max_rtsi_channel) break; for (i = 0; i <= ni_660x_max_up_down_pin; ++i) { if (selected_second_gate == NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i)) { ni_660x_second_gate_select = selected_second_gate & selected_second_gate_mask; break; } } if (i <= ni_660x_max_up_down_pin) break; return -EINVAL; break; } counter_dev->regs[second_gate_reg] |= Gi_Second_Gate_Mode_Bit; counter_dev->regs[second_gate_reg] &= ~Gi_Second_Gate_Select_Mask; counter_dev->regs[second_gate_reg] |= Gi_Second_Gate_Select_Bits(ni_660x_second_gate_select); write_register(counter, counter_dev->regs[second_gate_reg], second_gate_reg); return 0; } static int ni_m_series_set_second_gate(struct ni_gpct *counter, unsigned int gate_source) { struct ni_gpct_device *counter_dev = counter->counter_dev; const unsigned second_gate_reg = NITIO_Gi_Second_Gate_Reg(counter->counter_index); const unsigned selected_second_gate = CR_CHAN(gate_source); /* bits of second_gate that may be meaningful to second gate register */ static const unsigned selected_second_gate_mask = 0x1f; unsigned ni_m_series_second_gate_select; /* FIXME: We don't know what the m-series 
second gate codes are, so we'll just pass the bits through for now. */ switch (selected_second_gate) { default: ni_m_series_second_gate_select = selected_second_gate & selected_second_gate_mask; break; } counter_dev->regs[second_gate_reg] |= Gi_Second_Gate_Mode_Bit; counter_dev->regs[second_gate_reg] &= ~Gi_Second_Gate_Select_Mask; counter_dev->regs[second_gate_reg] |= Gi_Second_Gate_Select_Bits(ni_m_series_second_gate_select); write_register(counter, counter_dev->regs[second_gate_reg], second_gate_reg); return 0; } int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index, unsigned int gate_source) { struct ni_gpct_device *counter_dev = counter->counter_dev; const unsigned second_gate_reg = NITIO_Gi_Second_Gate_Reg(counter->counter_index); switch (gate_index) { case 0: if (CR_CHAN(gate_source) == NI_GPCT_DISABLED_GATE_SELECT) { ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter-> counter_index), Gi_Gating_Mode_Mask, Gi_Gating_Disabled_Bits); return 0; } ni_tio_set_first_gate_modifiers(counter, gate_source); switch (counter_dev->variant) { case ni_gpct_variant_e_series: case ni_gpct_variant_m_series: return ni_m_series_set_first_gate(counter, gate_source); break; case ni_gpct_variant_660x: return ni_660x_set_first_gate(counter, gate_source); break; default: BUG(); break; } break; case 1: if (ni_tio_second_gate_registers_present(counter_dev) == 0) return -EINVAL; if (CR_CHAN(gate_source) == NI_GPCT_DISABLED_GATE_SELECT) { counter_dev->regs[second_gate_reg] &= ~Gi_Second_Gate_Mode_Bit; write_register(counter, counter_dev->regs[second_gate_reg], second_gate_reg); return 0; } if (gate_source & CR_INVERT) { counter_dev->regs[second_gate_reg] |= Gi_Second_Gate_Polarity_Bit; } else { counter_dev->regs[second_gate_reg] &= ~Gi_Second_Gate_Polarity_Bit; } switch (counter_dev->variant) { case ni_gpct_variant_m_series: return ni_m_series_set_second_gate(counter, gate_source); break; case ni_gpct_variant_660x: return ni_660x_set_second_gate(counter, gate_source); 
break; default: BUG(); break; } break; default: return -EINVAL; break; } return 0; } EXPORT_SYMBOL_GPL(ni_tio_set_gate_src); static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned index, unsigned int source) { struct ni_gpct_device *counter_dev = counter->counter_dev; if (counter_dev->variant == ni_gpct_variant_m_series) { unsigned int abz_reg, shift, mask; abz_reg = NITIO_Gi_ABZ_Reg(counter->counter_index); switch (index) { case NI_GPCT_SOURCE_ENCODER_A: shift = 10; break; case NI_GPCT_SOURCE_ENCODER_B: shift = 5; break; case NI_GPCT_SOURCE_ENCODER_Z: shift = 0; break; default: return -EINVAL; break; } mask = 0x1f << shift; if (source > 0x1f) { /* Disable gate */ source = 0x1f; } counter_dev->regs[abz_reg] &= ~mask; counter_dev->regs[abz_reg] |= (source << shift) & mask; write_register(counter, counter_dev->regs[abz_reg], abz_reg); /* printk("%s %x %d %d\n", __func__, counter_dev->regs[abz_reg], index, source); */ return 0; } return -EINVAL; } static unsigned ni_660x_first_gate_to_generic_gate_source(unsigned ni_660x_gate_select) { unsigned i; switch (ni_660x_gate_select) { case NI_660x_Source_Pin_i_Gate_Select: return NI_GPCT_SOURCE_PIN_i_GATE_SELECT; break; case NI_660x_Gate_Pin_i_Gate_Select: return NI_GPCT_GATE_PIN_i_GATE_SELECT; break; case NI_660x_Next_SRC_Gate_Select: return NI_GPCT_NEXT_SOURCE_GATE_SELECT; break; case NI_660x_Next_Out_Gate_Select: return NI_GPCT_NEXT_OUT_GATE_SELECT; break; case NI_660x_Logic_Low_Gate_Select: return NI_GPCT_LOGIC_LOW_GATE_SELECT; break; default: for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) { if (ni_660x_gate_select == NI_660x_RTSI_Gate_Select(i)) { return NI_GPCT_RTSI_GATE_SELECT(i); break; } } if (i <= ni_660x_max_rtsi_channel) break; for (i = 0; i <= ni_660x_max_gate_pin; ++i) { if (ni_660x_gate_select == NI_660x_Gate_Pin_Gate_Select(i)) { return NI_GPCT_GATE_PIN_GATE_SELECT(i); break; } } if (i <= ni_660x_max_gate_pin) break; BUG(); break; } return 0; }; static unsigned 
ni_m_series_first_gate_to_generic_gate_source(unsigned ni_m_series_gate_select) { unsigned i; switch (ni_m_series_gate_select) { case NI_M_Series_Timestamp_Mux_Gate_Select: return NI_GPCT_TIMESTAMP_MUX_GATE_SELECT; break; case NI_M_Series_AI_START2_Gate_Select: return NI_GPCT_AI_START2_GATE_SELECT; break; case NI_M_Series_PXI_Star_Trigger_Gate_Select: return NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT; break; case NI_M_Series_Next_Out_Gate_Select: return NI_GPCT_NEXT_OUT_GATE_SELECT; break; case NI_M_Series_AI_START1_Gate_Select: return NI_GPCT_AI_START1_GATE_SELECT; break; case NI_M_Series_Next_SRC_Gate_Select: return NI_GPCT_NEXT_SOURCE_GATE_SELECT; break; case NI_M_Series_Analog_Trigger_Out_Gate_Select: return NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT; break; case NI_M_Series_Logic_Low_Gate_Select: return NI_GPCT_LOGIC_LOW_GATE_SELECT; break; default: for (i = 0; i <= ni_m_series_max_rtsi_channel; ++i) { if (ni_m_series_gate_select == NI_M_Series_RTSI_Gate_Select(i)) { return NI_GPCT_RTSI_GATE_SELECT(i); break; } } if (i <= ni_m_series_max_rtsi_channel) break; for (i = 0; i <= ni_m_series_max_pfi_channel; ++i) { if (ni_m_series_gate_select == NI_M_Series_PFI_Gate_Select(i)) { return NI_GPCT_PFI_GATE_SELECT(i); break; } } if (i <= ni_m_series_max_pfi_channel) break; BUG(); break; } return 0; }; static unsigned ni_660x_second_gate_to_generic_gate_source(unsigned ni_660x_gate_select) { unsigned i; switch (ni_660x_gate_select) { case NI_660x_Source_Pin_i_Second_Gate_Select: return NI_GPCT_SOURCE_PIN_i_GATE_SELECT; break; case NI_660x_Up_Down_Pin_i_Second_Gate_Select: return NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT; break; case NI_660x_Next_SRC_Second_Gate_Select: return NI_GPCT_NEXT_SOURCE_GATE_SELECT; break; case NI_660x_Next_Out_Second_Gate_Select: return NI_GPCT_NEXT_OUT_GATE_SELECT; break; case NI_660x_Selected_Gate_Second_Gate_Select: return NI_GPCT_SELECTED_GATE_GATE_SELECT; break; case NI_660x_Logic_Low_Second_Gate_Select: return NI_GPCT_LOGIC_LOW_GATE_SELECT; break; default: 
for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) { if (ni_660x_gate_select == NI_660x_RTSI_Second_Gate_Select(i)) { return NI_GPCT_RTSI_GATE_SELECT(i); break; } } if (i <= ni_660x_max_rtsi_channel) break; for (i = 0; i <= ni_660x_max_up_down_pin; ++i) { if (ni_660x_gate_select == NI_660x_Up_Down_Pin_Second_Gate_Select(i)) { return NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i); break; } } if (i <= ni_660x_max_up_down_pin) break; BUG(); break; } return 0; }; static unsigned ni_m_series_second_gate_to_generic_gate_source(unsigned ni_m_series_gate_select) { /*FIXME: the second gate sources for the m series are undocumented, so we just return * the raw bits for now. */ switch (ni_m_series_gate_select) { default: return ni_m_series_gate_select; break; } return 0; }; static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned gate_index, unsigned int *gate_source) { struct ni_gpct_device *counter_dev = counter->counter_dev; const unsigned mode_bits = ni_tio_get_soft_copy(counter, NITIO_Gi_Mode_Reg (counter-> counter_index)); const unsigned second_gate_reg = NITIO_Gi_Second_Gate_Reg(counter->counter_index); unsigned gate_select_bits; switch (gate_index) { case 0: if ((mode_bits & Gi_Gating_Mode_Mask) == Gi_Gating_Disabled_Bits) { *gate_source = NI_GPCT_DISABLED_GATE_SELECT; return 0; } else { gate_select_bits = (ni_tio_get_soft_copy(counter, NITIO_Gi_Input_Select_Reg (counter->counter_index)) & Gi_Gate_Select_Mask) >> Gi_Gate_Select_Shift; } switch (counter_dev->variant) { case ni_gpct_variant_e_series: case ni_gpct_variant_m_series: *gate_source = ni_m_series_first_gate_to_generic_gate_source (gate_select_bits); break; case ni_gpct_variant_660x: *gate_source = ni_660x_first_gate_to_generic_gate_source (gate_select_bits); break; default: BUG(); break; } if (mode_bits & Gi_Gate_Polarity_Bit) *gate_source |= CR_INVERT; if ((mode_bits & Gi_Gating_Mode_Mask) != Gi_Level_Gating_Bits) *gate_source |= CR_EDGE; break; case 1: if ((mode_bits & Gi_Gating_Mode_Mask) == 
Gi_Gating_Disabled_Bits || (counter_dev->regs[second_gate_reg] & Gi_Second_Gate_Mode_Bit) == 0) { *gate_source = NI_GPCT_DISABLED_GATE_SELECT; return 0; } else { gate_select_bits = (counter_dev->regs[second_gate_reg] & Gi_Second_Gate_Select_Mask) >> Gi_Second_Gate_Select_Shift; } switch (counter_dev->variant) { case ni_gpct_variant_e_series: case ni_gpct_variant_m_series: *gate_source = ni_m_series_second_gate_to_generic_gate_source (gate_select_bits); break; case ni_gpct_variant_660x: *gate_source = ni_660x_second_gate_to_generic_gate_source (gate_select_bits); break; default: BUG(); break; } if (counter_dev->regs[second_gate_reg] & Gi_Second_Gate_Polarity_Bit) { *gate_source |= CR_INVERT; } /* second gate can't have edge/level mode set independently */ if ((mode_bits & Gi_Gating_Mode_Mask) != Gi_Level_Gating_Bits) *gate_source |= CR_EDGE; break; default: return -EINVAL; break; } return 0; } int ni_tio_insn_config(struct ni_gpct *counter, struct comedi_insn *insn, unsigned int *data) { switch (data[0]) { case INSN_CONFIG_SET_COUNTER_MODE: return ni_tio_set_counter_mode(counter, data[1]); break; case INSN_CONFIG_ARM: return ni_tio_arm(counter, 1, data[1]); break; case INSN_CONFIG_DISARM: ni_tio_arm(counter, 0, 0); return 0; break; case INSN_CONFIG_GET_COUNTER_STATUS: data[1] = ni_tio_counter_status(counter); data[2] = counter_status_mask; return 0; break; case INSN_CONFIG_SET_CLOCK_SRC: return ni_tio_set_clock_src(counter, data[1], data[2]); break; case INSN_CONFIG_GET_CLOCK_SRC: ni_tio_get_clock_src(counter, &data[1], &data[2]); return 0; break; case INSN_CONFIG_SET_GATE_SRC: return ni_tio_set_gate_src(counter, data[1], data[2]); break; case INSN_CONFIG_GET_GATE_SRC: return ni_tio_get_gate_src(counter, data[1], &data[2]); break; case INSN_CONFIG_SET_OTHER_SRC: return ni_tio_set_other_src(counter, data[1], data[2]); break; case INSN_CONFIG_RESET: ni_tio_reset_count_and_disarm(counter); return 0; break; default: break; } return -EINVAL; } 
EXPORT_SYMBOL_GPL(ni_tio_insn_config); int ni_tio_rinsn(struct ni_gpct *counter, struct comedi_insn *insn, unsigned int *data) { struct ni_gpct_device *counter_dev = counter->counter_dev; const unsigned channel = CR_CHAN(insn->chanspec); unsigned first_read; unsigned second_read; unsigned correct_read; if (insn->n < 1) return 0; switch (channel) { case 0: ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index), Gi_Save_Trace_Bit, 0); ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index), Gi_Save_Trace_Bit, Gi_Save_Trace_Bit); /* The count doesn't get latched until the next clock edge, so it is possible the count may change (once) while we are reading. Since the read of the SW_Save_Reg isn't atomic (apparently even when it's a 32 bit register according to 660x docs), we need to read twice and make sure the reading hasn't changed. If it has, a third read will be correct since the count value will definitely have latched by then. */ first_read = read_register(counter, NITIO_Gi_SW_Save_Reg(counter->counter_index)); second_read = read_register(counter, NITIO_Gi_SW_Save_Reg(counter->counter_index)); if (first_read != second_read) correct_read = read_register(counter, NITIO_Gi_SW_Save_Reg(counter-> counter_index)); else correct_read = first_read; data[0] = correct_read; return 0; break; case 1: data[0] = counter_dev-> regs[NITIO_Gi_LoadA_Reg(counter->counter_index)]; break; case 2: data[0] = counter_dev-> regs[NITIO_Gi_LoadB_Reg(counter->counter_index)]; break; } return 0; } EXPORT_SYMBOL_GPL(ni_tio_rinsn); static unsigned ni_tio_next_load_register(struct ni_gpct *counter) { const unsigned bits = read_register(counter, NITIO_Gxx_Status_Reg(counter-> counter_index)); if (bits & Gi_Next_Load_Source_Bit(counter->counter_index)) return NITIO_Gi_LoadB_Reg(counter->counter_index); else return NITIO_Gi_LoadA_Reg(counter->counter_index); } int ni_tio_winsn(struct ni_gpct *counter, struct comedi_insn *insn, unsigned int *data) { struct ni_gpct_device 
*counter_dev = counter->counter_dev; const unsigned channel = CR_CHAN(insn->chanspec); unsigned load_reg; if (insn->n < 1) return 0; switch (channel) { case 0: /* Unsafe if counter is armed. Should probably check status and return -EBUSY if armed. */ /* Don't disturb load source select, just use whichever load register is already selected. */ load_reg = ni_tio_next_load_register(counter); write_register(counter, data[0], load_reg); ni_tio_set_bits_transient(counter, NITIO_Gi_Command_Reg(counter-> counter_index), 0, 0, Gi_Load_Bit); /* restore state of load reg to whatever the user set last set it to */ write_register(counter, counter_dev->regs[load_reg], load_reg); break; case 1: counter_dev->regs[NITIO_Gi_LoadA_Reg(counter->counter_index)] = data[0]; write_register(counter, data[0], NITIO_Gi_LoadA_Reg(counter->counter_index)); break; case 2: counter_dev->regs[NITIO_Gi_LoadB_Reg(counter->counter_index)] = data[0]; write_register(counter, data[0], NITIO_Gi_LoadB_Reg(counter->counter_index)); break; default: return -EINVAL; break; } return 0; } EXPORT_SYMBOL_GPL(ni_tio_winsn);
gpl-2.0
zparallax/amplitude_aosp_12_1
block/genhd.c
180
44935
/* * gendisk handling */ #include <linux/module.h> #include <linux/fs.h> #include <linux/genhd.h> #include <linux/kdev_t.h> #include <linux/kernel.h> #include <linux/blkdev.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/kmod.h> #include <linux/kobj_map.h> #include <linux/mutex.h> #include <linux/idr.h> #include <linux/log2.h> #include "blk.h" static DEFINE_MUTEX(block_class_lock); struct kobject *block_depr; /* for extended dynamic devt allocation, currently only one major is used */ #define MAX_EXT_DEVT (1 << MINORBITS) /* For extended devt allocation. ext_devt_mutex prevents look up * results from going away underneath its user. */ static DEFINE_MUTEX(ext_devt_mutex); static DEFINE_IDR(ext_devt_idr); static struct device_type disk_type; static void disk_alloc_events(struct gendisk *disk); static void disk_add_events(struct gendisk *disk); static void disk_del_events(struct gendisk *disk); static void disk_release_events(struct gendisk *disk); /** * disk_get_part - get partition * @disk: disk to look partition from * @partno: partition number * * Look for partition @partno from @disk. If found, increment * reference count and return it. * * CONTEXT: * Don't care. * * RETURNS: * Pointer to the found partition on success, NULL if not found. */ struct hd_struct *disk_get_part(struct gendisk *disk, int partno) { struct hd_struct *part = NULL; struct disk_part_tbl *ptbl; if (unlikely(partno < 0)) return NULL; rcu_read_lock(); ptbl = rcu_dereference(disk->part_tbl); if (likely(partno < ptbl->len)) { part = rcu_dereference(ptbl->part[partno]); if (part) get_device(part_to_dev(part)); } rcu_read_unlock(); return part; } EXPORT_SYMBOL_GPL(disk_get_part); /** * disk_part_iter_init - initialize partition iterator * @piter: iterator to initialize * @disk: disk to iterate over * @flags: DISK_PITER_* flags * * Initialize @piter so that it iterates over partitions of @disk. 
* * CONTEXT: * Don't care. */ void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk, unsigned int flags) { struct disk_part_tbl *ptbl; rcu_read_lock(); ptbl = rcu_dereference(disk->part_tbl); piter->disk = disk; piter->part = NULL; if (flags & DISK_PITER_REVERSE) piter->idx = ptbl->len - 1; else if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0)) piter->idx = 0; else piter->idx = 1; piter->flags = flags; rcu_read_unlock(); } EXPORT_SYMBOL_GPL(disk_part_iter_init); /** * disk_part_iter_next - proceed iterator to the next partition and return it * @piter: iterator of interest * * Proceed @piter to the next partition and return it. * * CONTEXT: * Don't care. */ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter) { struct disk_part_tbl *ptbl; int inc, end; /* put the last partition */ disk_put_part(piter->part); piter->part = NULL; /* get part_tbl */ rcu_read_lock(); ptbl = rcu_dereference(piter->disk->part_tbl); /* determine iteration parameters */ if (piter->flags & DISK_PITER_REVERSE) { inc = -1; if (piter->flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0)) end = -1; else end = 0; } else { inc = 1; end = ptbl->len; } /* iterate to the next partition */ for (; piter->idx != end; piter->idx += inc) { struct hd_struct *part; part = rcu_dereference(ptbl->part[piter->idx]); if (!part) continue; if (!part->nr_sects && !(piter->flags & DISK_PITER_INCL_EMPTY) && !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 && piter->idx == 0)) continue; get_device(part_to_dev(part)); piter->part = part; piter->idx += inc; break; } rcu_read_unlock(); return piter->part; } EXPORT_SYMBOL_GPL(disk_part_iter_next); /** * disk_part_iter_exit - finish up partition iteration * @piter: iter of interest * * Called when iteration is over. Cleans up @piter. * * CONTEXT: * Don't care. 
*/ void disk_part_iter_exit(struct disk_part_iter *piter) { disk_put_part(piter->part); piter->part = NULL; } EXPORT_SYMBOL_GPL(disk_part_iter_exit); static inline int sector_in_part(struct hd_struct *part, sector_t sector) { return part->start_sect <= sector && sector < part->start_sect + part->nr_sects; } /** * disk_map_sector_rcu - map sector to partition * @disk: gendisk of interest * @sector: sector to map * * Find out which partition @sector maps to on @disk. This is * primarily used for stats accounting. * * CONTEXT: * RCU read locked. The returned partition pointer is valid only * while preemption is disabled. * * RETURNS: * Found partition on success, part0 is returned if no partition matches */ struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector) { struct disk_part_tbl *ptbl; struct hd_struct *part; int i; ptbl = rcu_dereference(disk->part_tbl); part = rcu_dereference(ptbl->last_lookup); if (part && sector_in_part(part, sector)) return part; for (i = 1; i < ptbl->len; i++) { part = rcu_dereference(ptbl->part[i]); if (part && sector_in_part(part, sector)) { rcu_assign_pointer(ptbl->last_lookup, part); return part; } } return &disk->part0; } EXPORT_SYMBOL_GPL(disk_map_sector_rcu); /* * Can be deleted altogether. Later. 
* */ static struct blk_major_name { struct blk_major_name *next; int major; char name[16]; } *major_names[BLKDEV_MAJOR_HASH_SIZE]; /* index in the above - for now: assume no multimajor ranges */ static inline int major_to_index(unsigned major) { return major % BLKDEV_MAJOR_HASH_SIZE; } #ifdef CONFIG_PROC_FS void blkdev_show(struct seq_file *seqf, off_t offset) { struct blk_major_name *dp; if (offset < BLKDEV_MAJOR_HASH_SIZE) { mutex_lock(&block_class_lock); for (dp = major_names[offset]; dp; dp = dp->next) seq_printf(seqf, "%3d %s\n", dp->major, dp->name); mutex_unlock(&block_class_lock); } } #endif /* CONFIG_PROC_FS */ /** * register_blkdev - register a new block device * * @major: the requested major device number [1..255]. If @major=0, try to * allocate any unused major number. * @name: the name of the new block device as a zero terminated string * * The @name must be unique within the system. * * The return value depends on the @major input parameter. * - if a major device number was requested in range [1..255] then the * function returns zero on success, or a negative error code * - if any unused major number was requested with @major=0 parameter * then the return value is the allocated major number in range * [1..255] or a negative error code otherwise */ int register_blkdev(unsigned int major, const char *name) { struct blk_major_name **n, *p; int index, ret = 0; mutex_lock(&block_class_lock); /* temporary */ if (major == 0) { for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) { if (major_names[index] == NULL) break; } if (index == 0) { printk("register_blkdev: failed to get major for %s\n", name); ret = -EBUSY; goto out; } major = index; ret = major; } p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL); if (p == NULL) { ret = -ENOMEM; goto out; } p->major = major; strlcpy(p->name, name, sizeof(p->name)); p->next = NULL; index = major_to_index(major); for (n = &major_names[index]; *n; n = &(*n)->next) { if ((*n)->major == major) break; } if 
(!*n) *n = p; else ret = -EBUSY; if (ret < 0) { printk("register_blkdev: cannot get major %d for %s\n", major, name); kfree(p); } out: mutex_unlock(&block_class_lock); return ret; } EXPORT_SYMBOL(register_blkdev); void unregister_blkdev(unsigned int major, const char *name) { struct blk_major_name **n; struct blk_major_name *p = NULL; int index = major_to_index(major); mutex_lock(&block_class_lock); for (n = &major_names[index]; *n; n = &(*n)->next) if ((*n)->major == major) break; if (!*n || strcmp((*n)->name, name)) { WARN_ON(1); } else { p = *n; *n = p->next; } mutex_unlock(&block_class_lock); kfree(p); } EXPORT_SYMBOL(unregister_blkdev); static struct kobj_map *bdev_map; /** * blk_mangle_minor - scatter minor numbers apart * @minor: minor number to mangle * * Scatter consecutively allocated @minor number apart if MANGLE_DEVT * is enabled. Mangling twice gives the original value. * * RETURNS: * Mangled value. * * CONTEXT: * Don't care. */ static int blk_mangle_minor(int minor) { #ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT int i; for (i = 0; i < MINORBITS / 2; i++) { int low = minor & (1 << i); int high = minor & (1 << (MINORBITS - 1 - i)); int distance = MINORBITS - 1 - 2 * i; minor ^= low | high; /* clear both bits */ low <<= distance; /* swap the positions */ high >>= distance; minor |= low | high; /* and set */ } #endif return minor; } /** * blk_alloc_devt - allocate a dev_t for a partition * @part: partition to allocate dev_t for * @devt: out parameter for resulting dev_t * * Allocate a dev_t for block device. * * RETURNS: * 0 on success, allocated dev_t is returned in *@devt. -errno on * failure. * * CONTEXT: * Might sleep. */ int blk_alloc_devt(struct hd_struct *part, dev_t *devt) { struct gendisk *disk = part_to_disk(part); int idx, rc; /* in consecutive minor range? 
*/ if (part->partno < disk->minors) { *devt = MKDEV(disk->major, disk->first_minor + part->partno); return 0; } /* allocate ext devt */ do { if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL)) return -ENOMEM; rc = idr_get_new(&ext_devt_idr, part, &idx); } while (rc == -EAGAIN); if (rc) return rc; if (idx > MAX_EXT_DEVT) { idr_remove(&ext_devt_idr, idx); return -EBUSY; } *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx)); return 0; } /** * blk_free_devt - free a dev_t * @devt: dev_t to free * * Free @devt which was allocated using blk_alloc_devt(). * * CONTEXT: * Might sleep. */ void blk_free_devt(dev_t devt) { might_sleep(); if (devt == MKDEV(0, 0)) return; if (MAJOR(devt) == BLOCK_EXT_MAJOR) { mutex_lock(&ext_devt_mutex); idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); mutex_unlock(&ext_devt_mutex); } } static char *bdevt_str(dev_t devt, char *buf) { if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) { char tbuf[BDEVT_SIZE]; snprintf(tbuf, BDEVT_SIZE, "%02x%02x", MAJOR(devt), MINOR(devt)); snprintf(buf, BDEVT_SIZE, "%-9s", tbuf); } else snprintf(buf, BDEVT_SIZE, "%03x:%05x", MAJOR(devt), MINOR(devt)); return buf; } /* * Register device numbers dev..(dev+range-1) * range must be nonzero * The hash chain is sorted on range, so that subranges can override. 
*/ void blk_register_region(dev_t devt, unsigned long range, struct module *module, struct kobject *(*probe)(dev_t, int *, void *), int (*lock)(dev_t, void *), void *data) { kobj_map(bdev_map, devt, range, module, probe, lock, data); } EXPORT_SYMBOL(blk_register_region); void blk_unregister_region(dev_t devt, unsigned long range) { kobj_unmap(bdev_map, devt, range); } EXPORT_SYMBOL(blk_unregister_region); static struct kobject *exact_match(dev_t devt, int *partno, void *data) { struct gendisk *p = data; return &disk_to_dev(p)->kobj; } static int exact_lock(dev_t devt, void *data) { struct gendisk *p = data; if (!get_disk(p)) return -1; return 0; } static void register_disk(struct gendisk *disk) { struct device *ddev = disk_to_dev(disk); struct block_device *bdev; struct disk_part_iter piter; struct hd_struct *part; int err; ddev->parent = disk->driverfs_dev; dev_set_name(ddev, disk->disk_name); /* delay uevents, until we scanned partition table */ dev_set_uevent_suppress(ddev, 1); if (device_add(ddev)) return; if (!sysfs_deprecated) { err = sysfs_create_link(block_depr, &ddev->kobj, kobject_name(&ddev->kobj)); if (err) { device_del(ddev); return; } } disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj); disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj); /* No minors to use for partitions */ if (!disk_part_scan_enabled(disk)) goto exit; /* No such device (e.g., media were just removed) */ if (!get_capacity(disk)) goto exit; bdev = bdget_disk(disk, 0); if (!bdev) goto exit; bdev->bd_invalidated = 1; err = blkdev_get(bdev, FMODE_READ, NULL); if (err < 0) goto exit; blkdev_put(bdev, FMODE_READ); exit: /* announce disk after possible partitions are created */ dev_set_uevent_suppress(ddev, 0); kobject_uevent(&ddev->kobj, KOBJ_ADD); /* announce possible partitions */ disk_part_iter_init(&piter, disk, 0); while ((part = disk_part_iter_next(&piter))) kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD); disk_part_iter_exit(&piter); } /** * 
add_disk - add partitioning information to kernel list * @disk: per-device partitioning information * * This function registers the partitioning information in @disk * with the kernel. * * FIXME: error handling */ void add_disk(struct gendisk *disk) { struct backing_dev_info *bdi; dev_t devt; int retval; /* minors == 0 indicates to use ext devt from part0 and should * be accompanied with EXT_DEVT flag. Make sure all * parameters make sense. */ WARN_ON(disk->minors && !(disk->major || disk->first_minor)); WARN_ON(!disk->minors && !(disk->flags & GENHD_FL_EXT_DEVT)); disk->flags |= GENHD_FL_UP; retval = blk_alloc_devt(&disk->part0, &devt); if (retval) { WARN_ON(1); return; } disk_to_dev(disk)->devt = devt; /* ->major and ->first_minor aren't supposed to be * dereferenced from here on, but set them just in case. */ disk->major = MAJOR(devt); disk->first_minor = MINOR(devt); disk_alloc_events(disk); /* Register BDI before referencing it from bdev */ bdi = &disk->queue->backing_dev_info; bdi_register_dev(bdi, disk_devt(disk)); blk_register_region(disk_devt(disk), disk->minors, NULL, exact_match, exact_lock, disk); register_disk(disk); blk_register_queue(disk); /* * Take an extra ref on queue which will be put on disk_release() * so that it sticks around as long as @disk is there. 
*/ WARN_ON_ONCE(!blk_get_queue(disk->queue)); retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj, "bdi"); WARN_ON(retval); disk_add_events(disk); } EXPORT_SYMBOL(add_disk); void del_gendisk(struct gendisk *disk) { struct disk_part_iter piter; struct hd_struct *part; disk_del_events(disk); /* invalidate stuff */ disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE); while ((part = disk_part_iter_next(&piter))) { invalidate_partition(disk, part->partno); delete_partition(disk, part->partno); } disk_part_iter_exit(&piter); invalidate_partition(disk, 0); blk_free_devt(disk_to_dev(disk)->devt); set_capacity(disk, 0); disk->flags &= ~GENHD_FL_UP; sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); bdi_unregister(&disk->queue->backing_dev_info); blk_unregister_queue(disk); blk_unregister_region(disk_devt(disk), disk->minors); part_stat_set_all(&disk->part0, 0); disk->part0.stamp = 0; kobject_put(disk->part0.holder_dir); kobject_put(disk->slave_dir); disk->driverfs_dev = NULL; if (!sysfs_deprecated) sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk))); device_del(disk_to_dev(disk)); } EXPORT_SYMBOL(del_gendisk); /** * get_gendisk - get partitioning information for a given device * @devt: device to get partitioning information for * @partno: returned partition index * * This function gets the structure containing partitioning * information for the given device @devt. 
*/ struct gendisk *get_gendisk(dev_t devt, int *partno) { struct gendisk *disk = NULL; if (MAJOR(devt) != BLOCK_EXT_MAJOR) { struct kobject *kobj; kobj = kobj_lookup(bdev_map, devt, partno); if (kobj) disk = dev_to_disk(kobj_to_dev(kobj)); } else { struct hd_struct *part; mutex_lock(&ext_devt_mutex); part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); if (part && get_disk(part_to_disk(part))) { *partno = part->partno; disk = part_to_disk(part); } mutex_unlock(&ext_devt_mutex); } return disk; } EXPORT_SYMBOL(get_gendisk); /** * bdget_disk - do bdget() by gendisk and partition number * @disk: gendisk of interest * @partno: partition number * * Find partition @partno from @disk, do bdget() on it. * * CONTEXT: * Don't care. * * RETURNS: * Resulting block_device on success, NULL on failure. */ struct block_device *bdget_disk(struct gendisk *disk, int partno) { struct hd_struct *part; struct block_device *bdev = NULL; part = disk_get_part(disk, partno); if (part) bdev = bdget(part_devt(part)); disk_put_part(part); return bdev; } EXPORT_SYMBOL(bdget_disk); /* * print a full list of all partitions - intended for places where the root * filesystem can't be mounted and thus to give the victim some idea of what * went wrong */ void __init printk_all_partitions(void) { struct class_dev_iter iter; struct device *dev; class_dev_iter_init(&iter, &block_class, NULL, &disk_type); while ((dev = class_dev_iter_next(&iter))) { struct gendisk *disk = dev_to_disk(dev); struct disk_part_iter piter; struct hd_struct *part; char name_buf[BDEVNAME_SIZE]; char devt_buf[BDEVT_SIZE]; char uuid_buf[PARTITION_META_INFO_UUIDLTH * 2 + 5]; /* * Don't show empty devices or things that have been * suppressed */ if (get_capacity(disk) == 0 || (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)) continue; /* * Note, unlike /proc/partitions, I am showing the * numbers in hex - the same format as the root= * option takes. 
*/ disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); while ((part = disk_part_iter_next(&piter))) { bool is_part0 = part == &disk->part0; uuid_buf[0] = '\0'; if (part->info) snprintf(uuid_buf, sizeof(uuid_buf), "%pU", part->info->uuid); printk("%s%s %10llu %s %s", is_part0 ? "" : " ", bdevt_str(part_devt(part), devt_buf), (unsigned long long)part->nr_sects >> 1, disk_name(disk, part->partno, name_buf), uuid_buf); if (is_part0) { if (disk->driverfs_dev != NULL && disk->driverfs_dev->driver != NULL) printk(" driver: %s\n", disk->driverfs_dev->driver->name); else printk(" (driver?)\n"); } else printk("\n"); } disk_part_iter_exit(&piter); } class_dev_iter_exit(&iter); } #ifdef CONFIG_PROC_FS /* iterator */ static void *disk_seqf_start(struct seq_file *seqf, loff_t *pos) { loff_t skip = *pos; struct class_dev_iter *iter; struct device *dev; iter = kmalloc(sizeof(*iter), GFP_KERNEL); if (!iter) return ERR_PTR(-ENOMEM); seqf->private = iter; class_dev_iter_init(iter, &block_class, NULL, &disk_type); do { dev = class_dev_iter_next(iter); if (!dev) return NULL; } while (skip--); return dev_to_disk(dev); } static void *disk_seqf_next(struct seq_file *seqf, void *v, loff_t *pos) { struct device *dev; (*pos)++; dev = class_dev_iter_next(seqf->private); if (dev) return dev_to_disk(dev); return NULL; } static void disk_seqf_stop(struct seq_file *seqf, void *v) { struct class_dev_iter *iter = seqf->private; /* stop is called even after start failed :-( */ if (iter) { class_dev_iter_exit(iter); kfree(iter); } } static void *show_partition_start(struct seq_file *seqf, loff_t *pos) { static void *p; p = disk_seqf_start(seqf, pos); if (!IS_ERR_OR_NULL(p) && !*pos) seq_puts(seqf, "major minor #blocks name\n\n"); return p; } static int show_partition(struct seq_file *seqf, void *v) { struct gendisk *sgp = v; struct disk_part_iter piter; struct hd_struct *part; char buf[BDEVNAME_SIZE]; /* Don't show non-partitionable removeable devices or empty devices */ if (!get_capacity(sgp) 
|| (!disk_max_parts(sgp) && (sgp->flags & GENHD_FL_REMOVABLE))) return 0; if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO) return 0; /* show the full disk and all non-0 size partitions of it */ disk_part_iter_init(&piter, sgp, DISK_PITER_INCL_PART0); while ((part = disk_part_iter_next(&piter))) seq_printf(seqf, "%4d %7d %10llu %s\n", MAJOR(part_devt(part)), MINOR(part_devt(part)), (unsigned long long)part->nr_sects >> 1, disk_name(sgp, part->partno, buf)); disk_part_iter_exit(&piter); return 0; } static const struct seq_operations partitions_op = { .start = show_partition_start, .next = disk_seqf_next, .stop = disk_seqf_stop, .show = show_partition }; static int partitions_open(struct inode *inode, struct file *file) { return seq_open(file, &partitions_op); } static const struct file_operations proc_partitions_operations = { .open = partitions_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif static struct kobject *base_probe(dev_t devt, int *partno, void *data) { if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0) /* Make old-style 2.4 aliases work */ request_module("block-major-%d", MAJOR(devt)); return NULL; } static int __init genhd_device_init(void) { int error, ret; block_class.dev_kobj = sysfs_dev_block_kobj; error = class_register(&block_class); if (unlikely(error)) return error; bdev_map = kobj_map_init(base_probe, &block_class_lock); blk_dev_init(); ret = register_blkdev(BLOCK_EXT_MAJOR, "blkext"); if(ret) return ret; /* create top-level block dir */ if (!sysfs_deprecated) block_depr = kobject_create_and_add("block", NULL); return 0; } subsys_initcall(genhd_device_init); static ssize_t disk_range_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); return sprintf(buf, "%d\n", disk->minors); } static ssize_t disk_ext_range_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); return sprintf(buf, 
"%d\n", disk_max_parts(disk)); } static ssize_t disk_removable_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); return sprintf(buf, "%d\n", (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0)); } static ssize_t disk_ro_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); return sprintf(buf, "%d\n", get_disk_ro(disk) ? 1 : 0); } static ssize_t disk_capability_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); return sprintf(buf, "%x\n", disk->flags); } static ssize_t disk_alignment_offset_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue)); } static ssize_t disk_discard_alignment_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue)); } static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL); static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL); static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL); static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL); static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL); static DEVICE_ATTR(discard_alignment, S_IRUGO, disk_discard_alignment_show, NULL); static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL); #ifdef CONFIG_FAIL_MAKE_REQUEST static struct device_attribute dev_attr_fail = __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); #endif #ifdef CONFIG_FAIL_IO_TIMEOUT static struct device_attribute dev_attr_fail_timeout = __ATTR(io-timeout-fail, S_IRUGO|S_IWUSR, 
part_timeout_show, part_timeout_store); #endif static struct attribute *disk_attrs[] = { &dev_attr_range.attr, &dev_attr_ext_range.attr, &dev_attr_removable.attr, &dev_attr_ro.attr, &dev_attr_size.attr, &dev_attr_alignment_offset.attr, &dev_attr_discard_alignment.attr, &dev_attr_capability.attr, &dev_attr_stat.attr, &dev_attr_inflight.attr, #ifdef CONFIG_FAIL_MAKE_REQUEST &dev_attr_fail.attr, #endif #ifdef CONFIG_FAIL_IO_TIMEOUT &dev_attr_fail_timeout.attr, #endif NULL }; static struct attribute_group disk_attr_group = { .attrs = disk_attrs, }; static const struct attribute_group *disk_attr_groups[] = { &disk_attr_group, NULL }; /** * disk_replace_part_tbl - replace disk->part_tbl in RCU-safe way * @disk: disk to replace part_tbl for * @new_ptbl: new part_tbl to install * * Replace disk->part_tbl with @new_ptbl in RCU-safe way. The * original ptbl is freed using RCU callback. * * LOCKING: * Matching bd_mutx locked. */ static void disk_replace_part_tbl(struct gendisk *disk, struct disk_part_tbl *new_ptbl) { struct disk_part_tbl *old_ptbl = disk->part_tbl; rcu_assign_pointer(disk->part_tbl, new_ptbl); if (old_ptbl) { rcu_assign_pointer(old_ptbl->last_lookup, NULL); kfree_rcu(old_ptbl, rcu_head); } } /** * disk_expand_part_tbl - expand disk->part_tbl * @disk: disk to expand part_tbl for * @partno: expand such that this partno can fit in * * Expand disk->part_tbl such that @partno can fit in. disk->part_tbl * uses RCU to allow unlocked dereferencing for stats and other stuff. * * LOCKING: * Matching bd_mutex locked, might sleep. * * RETURNS: * 0 on success, -errno on failure. */ int disk_expand_part_tbl(struct gendisk *disk, int partno) { struct disk_part_tbl *old_ptbl = disk->part_tbl; struct disk_part_tbl *new_ptbl; int len = old_ptbl ? 
old_ptbl->len : 0; int target = partno + 1; size_t size; int i; /* disk_max_parts() is zero during initialization, ignore if so */ if (disk_max_parts(disk) && target > disk_max_parts(disk)) return -EINVAL; if (target <= len) return 0; size = sizeof(*new_ptbl) + target * sizeof(new_ptbl->part[0]); new_ptbl = kzalloc_node(size, GFP_KERNEL, disk->node_id); if (!new_ptbl) return -ENOMEM; new_ptbl->len = target; for (i = 0; i < len; i++) rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]); disk_replace_part_tbl(disk, new_ptbl); return 0; } static void disk_release(struct device *dev) { struct gendisk *disk = dev_to_disk(dev); disk_release_events(disk); kfree(disk->random); disk_replace_part_tbl(disk, NULL); free_part_stats(&disk->part0); free_part_info(&disk->part0); if (disk->queue) blk_put_queue(disk->queue); kfree(disk); } static int disk_uevent(struct device *dev, struct kobj_uevent_env *env) { struct gendisk *disk = dev_to_disk(dev); struct disk_part_iter piter; struct hd_struct *part; int cnt = 0; disk_part_iter_init(&piter, disk, 0); while((part = disk_part_iter_next(&piter))) cnt++; disk_part_iter_exit(&piter); add_uevent_var(env, "NPARTS=%u", cnt); #ifdef CONFIG_USB_STORAGE_DETECT if (disk->interfaces == GENHD_IF_USB) { add_uevent_var(env, "MEDIAPRST=%d", disk->media_present); printk(KERN_INFO "%s %d, disk->media_present=%d, cnt=%d\n", __func__, __LINE__, disk->media_present, cnt); } #endif return 0; } struct class block_class = { .name = "block", }; static char *block_devnode(struct device *dev, umode_t *mode) { struct gendisk *disk = dev_to_disk(dev); if (disk->devnode) return disk->devnode(disk, mode); return NULL; } static struct device_type disk_type = { .name = "disk", .groups = disk_attr_groups, .release = disk_release, .devnode = block_devnode, .uevent = disk_uevent, }; #ifdef CONFIG_PROC_FS /* * aggregate disk stat collector. Uses the same stats that the sysfs * entries do, above, but makes them available through one seq_file. 
* * The output looks suspiciously like /proc/partitions with a bunch of * extra fields. */ static int diskstats_show(struct seq_file *seqf, void *v) { struct gendisk *gp = v; struct disk_part_iter piter; struct hd_struct *hd; char buf[BDEVNAME_SIZE]; int cpu; /* if (&disk_to_dev(gp)->kobj.entry == block_class.devices.next) seq_puts(seqf, "major minor name" " rio rmerge rsect ruse wio wmerge " "wsect wuse running use aveq" "\n\n"); */ disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0); while ((hd = disk_part_iter_next(&piter))) { cpu = part_stat_lock(); part_round_stats(cpu, hd); part_stat_unlock(); seq_printf(seqf, "%4d %7d %s %lu %lu %lu " "%u %lu %lu %lu %u %u %u %u\n", MAJOR(part_devt(hd)), MINOR(part_devt(hd)), disk_name(gp, hd->partno, buf), part_stat_read(hd, ios[READ]), part_stat_read(hd, merges[READ]), part_stat_read(hd, sectors[READ]), jiffies_to_msecs(part_stat_read(hd, ticks[READ])), part_stat_read(hd, ios[WRITE]), part_stat_read(hd, merges[WRITE]), part_stat_read(hd, sectors[WRITE]), jiffies_to_msecs(part_stat_read(hd, ticks[WRITE])), part_in_flight(hd), jiffies_to_msecs(part_stat_read(hd, io_ticks)), jiffies_to_msecs(part_stat_read(hd, time_in_queue)) ); } disk_part_iter_exit(&piter); return 0; } static const struct seq_operations diskstats_op = { .start = disk_seqf_start, .next = disk_seqf_next, .stop = disk_seqf_stop, .show = diskstats_show }; static int diskstats_open(struct inode *inode, struct file *file) { return seq_open(file, &diskstats_op); } static const struct file_operations proc_diskstats_operations = { .open = diskstats_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int __init proc_genhd_init(void) { proc_create("diskstats", 0, NULL, &proc_diskstats_operations); proc_create("partitions", 0, NULL, &proc_partitions_operations); return 0; } module_init(proc_genhd_init); #endif /* CONFIG_PROC_FS */ dev_t blk_lookup_devt(const char *name, int partno) { dev_t devt = MKDEV(0, 0); struct class_dev_iter 
iter; struct device *dev; class_dev_iter_init(&iter, &block_class, NULL, &disk_type); while ((dev = class_dev_iter_next(&iter))) { struct gendisk *disk = dev_to_disk(dev); struct hd_struct *part; if (strcmp(dev_name(dev), name)) continue; if (partno < disk->minors) { /* We need to return the right devno, even * if the partition doesn't exist yet. */ devt = MKDEV(MAJOR(dev->devt), MINOR(dev->devt) + partno); break; } part = disk_get_part(disk, partno); if (part) { devt = part_devt(part); disk_put_part(part); break; } disk_put_part(part); } class_dev_iter_exit(&iter); return devt; } EXPORT_SYMBOL(blk_lookup_devt); struct gendisk *alloc_disk(int minors) { return alloc_disk_node(minors, -1); } EXPORT_SYMBOL(alloc_disk); struct gendisk *alloc_disk_node(int minors, int node_id) { struct gendisk *disk; disk = kmalloc_node(sizeof(struct gendisk), GFP_KERNEL | __GFP_ZERO, node_id); if (disk) { if (!init_part_stats(&disk->part0)) { kfree(disk); return NULL; } disk->node_id = node_id; if (disk_expand_part_tbl(disk, 0)) { free_part_stats(&disk->part0); kfree(disk); return NULL; } disk->part_tbl->part[0] = &disk->part0; hd_ref_init(&disk->part0); disk->minors = minors; rand_initialize_disk(disk); disk_to_dev(disk)->class = &block_class; disk_to_dev(disk)->type = &disk_type; device_initialize(disk_to_dev(disk)); } return disk; } EXPORT_SYMBOL(alloc_disk_node); struct kobject *get_disk(struct gendisk *disk) { struct module *owner; struct kobject *kobj; if (!disk->fops) return NULL; owner = disk->fops->owner; if (owner && !try_module_get(owner)) return NULL; kobj = kobject_get(&disk_to_dev(disk)->kobj); if (kobj == NULL) { module_put(owner); return NULL; } return kobj; } EXPORT_SYMBOL(get_disk); void put_disk(struct gendisk *disk) { if (disk) kobject_put(&disk_to_dev(disk)->kobj); } EXPORT_SYMBOL(put_disk); static void set_disk_ro_uevent(struct gendisk *gd, int ro) { char event[] = "DISK_RO=1"; char *envp[] = { event, NULL }; if (!ro) event[8] = '0'; 
kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp); } void set_device_ro(struct block_device *bdev, int flag) { bdev->bd_part->policy = flag; } EXPORT_SYMBOL(set_device_ro); void set_disk_ro(struct gendisk *disk, int flag) { struct disk_part_iter piter; struct hd_struct *part; if (disk->part0.policy != flag) { set_disk_ro_uevent(disk, flag); disk->part0.policy = flag; } disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY); while ((part = disk_part_iter_next(&piter))) part->policy = flag; disk_part_iter_exit(&piter); } EXPORT_SYMBOL(set_disk_ro); int bdev_read_only(struct block_device *bdev) { if (!bdev) return 0; return bdev->bd_part->policy; } EXPORT_SYMBOL(bdev_read_only); int invalidate_partition(struct gendisk *disk, int partno) { int res = 0; struct block_device *bdev = bdget_disk(disk, partno); if (bdev) { fsync_bdev(bdev); res = __invalidate_device(bdev, true); bdput(bdev); } return res; } EXPORT_SYMBOL(invalidate_partition); /* * Disk events - monitor disk events like media change and eject request. 
*/ struct disk_events { struct list_head node; /* all disk_event's */ struct gendisk *disk; /* the associated disk */ spinlock_t lock; struct mutex block_mutex; /* protects blocking */ int block; /* event blocking depth */ unsigned int pending; /* events already sent out */ unsigned int clearing; /* events being cleared */ long poll_msecs; /* interval, -1 for default */ struct delayed_work dwork; }; static const char *disk_events_strs[] = { [ilog2(DISK_EVENT_MEDIA_CHANGE)] = "media_change", [ilog2(DISK_EVENT_EJECT_REQUEST)] = "eject_request", }; static char *disk_uevents[] = { [ilog2(DISK_EVENT_MEDIA_CHANGE)] = "DISK_MEDIA_CHANGE=1", [ilog2(DISK_EVENT_EJECT_REQUEST)] = "DISK_EJECT_REQUEST=1", }; /* list of all disk_events */ static DEFINE_MUTEX(disk_events_mutex); static LIST_HEAD(disk_events); /* disable in-kernel polling by default */ static unsigned long disk_events_dfl_poll_msecs = 0; static unsigned long disk_events_poll_jiffies(struct gendisk *disk) { struct disk_events *ev = disk->ev; long intv_msecs = 0; /* * If device-specific poll interval is set, always use it. If * the default is being used, poll iff there are events which * can't be monitored asynchronously. */ if (ev->poll_msecs >= 0) intv_msecs = ev->poll_msecs; else if (disk->events & ~disk->async_events) intv_msecs = disk_events_dfl_poll_msecs; return msecs_to_jiffies(intv_msecs); } /** * disk_block_events - block and flush disk event checking * @disk: disk to block events for * * On return from this function, it is guaranteed that event checking * isn't in progress and won't happen until unblocked by * disk_unblock_events(). Events blocking is counted and the actual * unblocking happens after the matching number of unblocks are done. * * Note that this intentionally does not block event checking from * disk_clear_events(). * * CONTEXT: * Might sleep. 
*/ void disk_block_events(struct gendisk *disk) { struct disk_events *ev = disk->ev; unsigned long flags; bool cancel; if (!ev) return; /* * Outer mutex ensures that the first blocker completes canceling * the event work before further blockers are allowed to finish. */ mutex_lock(&ev->block_mutex); spin_lock_irqsave(&ev->lock, flags); cancel = !ev->block++; spin_unlock_irqrestore(&ev->lock, flags); if (cancel) cancel_delayed_work_sync(&disk->ev->dwork); mutex_unlock(&ev->block_mutex); } static void __disk_unblock_events(struct gendisk *disk, bool check_now) { struct disk_events *ev = disk->ev; unsigned long intv; unsigned long flags; spin_lock_irqsave(&ev->lock, flags); if (WARN_ON_ONCE(ev->block <= 0)) goto out_unlock; if (--ev->block) goto out_unlock; /* * Not exactly a latency critical operation, set poll timer * slack to 25% and kick event check. */ intv = disk_events_poll_jiffies(disk); set_timer_slack(&ev->dwork.timer, intv / 4); if (check_now) queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0); else if (intv) queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv); out_unlock: spin_unlock_irqrestore(&ev->lock, flags); } /** * disk_unblock_events - unblock disk event checking * @disk: disk to unblock events for * * Undo disk_block_events(). When the block count reaches zero, it * starts events polling if configured. * * CONTEXT: * Don't care. Safe to call from irq context. */ void disk_unblock_events(struct gendisk *disk) { if (disk->ev) __disk_unblock_events(disk, false); } /** * disk_flush_events - schedule immediate event checking and flushing * @disk: disk to check and flush events for * @mask: events to flush * * Schedule immediate event checking on @disk if not blocked. Events in * @mask are scheduled to be cleared from the driver. Note that this * doesn't clear the events from @disk->ev. * * CONTEXT: * If @mask is non-zero must be called with bdev->bd_mutex held. 
*/ void disk_flush_events(struct gendisk *disk, unsigned int mask) { struct disk_events *ev = disk->ev; if (!ev) return; spin_lock_irq(&ev->lock); ev->clearing |= mask; if (!ev->block) { cancel_delayed_work(&ev->dwork); queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0); } spin_unlock_irq(&ev->lock); } /** * disk_clear_events - synchronously check, clear and return pending events * @disk: disk to fetch and clear events from * @mask: mask of events to be fetched and clearted * * Disk events are synchronously checked and pending events in @mask * are cleared and returned. This ignores the block count. * * CONTEXT: * Might sleep. */ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask) { const struct block_device_operations *bdops = disk->fops; struct disk_events *ev = disk->ev; unsigned int pending; if (!ev) { /* for drivers still using the old ->media_changed method */ if ((mask & DISK_EVENT_MEDIA_CHANGE) && bdops->media_changed && bdops->media_changed(disk)) return DISK_EVENT_MEDIA_CHANGE; return 0; } /* tell the workfn about the events being cleared */ spin_lock_irq(&ev->lock); ev->clearing |= mask; spin_unlock_irq(&ev->lock); /* uncondtionally schedule event check and wait for it to finish */ disk_block_events(disk); queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0); flush_delayed_work(&ev->dwork); __disk_unblock_events(disk, false); /* then, fetch and clear pending events */ spin_lock_irq(&ev->lock); WARN_ON_ONCE(ev->clearing & mask); /* cleared by workfn */ pending = ev->pending & mask; ev->pending &= ~mask; spin_unlock_irq(&ev->lock); return pending; } static void disk_events_workfn(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct disk_events *ev = container_of(dwork, struct disk_events, dwork); struct gendisk *disk = ev->disk; char *envp[ARRAY_SIZE(disk_uevents) + 1] = { }; unsigned int clearing = ev->clearing; unsigned int events = 0; unsigned long intv; int nr_events = 0, i; #ifdef 
CONFIG_USB_STORAGE_DETECT if (disk->interfaces != GENHD_IF_USB) /* check events */ events = disk->fops->check_events(disk, clearing); #endif /* accumulate pending events and schedule next poll if necessary */ spin_lock_irq(&ev->lock); events &= ~ev->pending; ev->pending |= events; ev->clearing &= ~clearing; intv = disk_events_poll_jiffies(disk); if (!ev->block && intv) queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv); spin_unlock_irq(&ev->lock); /* * Tell userland about new events. Only the events listed in * @disk->events are reported. Unlisted events are processed the * same internally but never get reported to userland. */ for (i = 0; i < ARRAY_SIZE(disk_uevents); i++) if (events & disk->events & (1 << i)) envp[nr_events++] = disk_uevents[i]; #ifdef CONFIG_USB_STORAGE_DETECT if (disk->interfaces != GENHD_IF_USB) { if (nr_events) kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp); } #endif } /* * A disk events enabled device has the following sysfs nodes under * its /sys/block/X/ directory. 
* * events : list of all supported events * events_async : list of events which can be detected w/o polling * events_poll_msecs : polling interval, 0: disable, -1: system default */ static ssize_t __disk_events_show(unsigned int events, char *buf) { const char *delim = ""; ssize_t pos = 0; int i; for (i = 0; i < ARRAY_SIZE(disk_events_strs); i++) if (events & (1 << i)) { pos += sprintf(buf + pos, "%s%s", delim, disk_events_strs[i]); delim = " "; } if (pos) pos += sprintf(buf + pos, "\n"); return pos; } static ssize_t disk_events_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); return __disk_events_show(disk->events, buf); } static ssize_t disk_events_async_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); return __disk_events_show(disk->async_events, buf); } static ssize_t disk_events_poll_msecs_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); return sprintf(buf, "%ld\n", disk->ev->poll_msecs); } static ssize_t disk_events_poll_msecs_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct gendisk *disk = dev_to_disk(dev); long intv; if (!count || !sscanf(buf, "%ld", &intv)) return -EINVAL; if (intv < 0 && intv != -1) return -EINVAL; disk_block_events(disk); disk->ev->poll_msecs = intv; __disk_unblock_events(disk, true); return count; } static const DEVICE_ATTR(events, S_IRUGO, disk_events_show, NULL); static const DEVICE_ATTR(events_async, S_IRUGO, disk_events_async_show, NULL); static const DEVICE_ATTR(events_poll_msecs, S_IRUGO|S_IWUSR, disk_events_poll_msecs_show, disk_events_poll_msecs_store); static const struct attribute *disk_events_attrs[] = { &dev_attr_events.attr, &dev_attr_events_async.attr, &dev_attr_events_poll_msecs.attr, NULL, }; /* * The default polling interval can be specified by the kernel * parameter 
block.events_dfl_poll_msecs which defaults to 0 * (disable). This can also be modified runtime by writing to * /sys/module/block/events_dfl_poll_msecs. */ static int disk_events_set_dfl_poll_msecs(const char *val, const struct kernel_param *kp) { struct disk_events *ev; int ret; ret = param_set_ulong(val, kp); if (ret < 0) return ret; mutex_lock(&disk_events_mutex); list_for_each_entry(ev, &disk_events, node) disk_flush_events(ev->disk, 0); mutex_unlock(&disk_events_mutex); return 0; } static const struct kernel_param_ops disk_events_dfl_poll_msecs_param_ops = { .set = disk_events_set_dfl_poll_msecs, .get = param_get_ulong, }; #undef MODULE_PARAM_PREFIX #define MODULE_PARAM_PREFIX "block." module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops, &disk_events_dfl_poll_msecs, 0644); /* * disk_{alloc|add|del|release}_events - initialize and destroy disk_events. */ static void disk_alloc_events(struct gendisk *disk) { struct disk_events *ev; if (!disk->fops->check_events) return; ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) { pr_warn("%s: failed to initialize events\n", disk->disk_name); return; } INIT_LIST_HEAD(&ev->node); ev->disk = disk; spin_lock_init(&ev->lock); mutex_init(&ev->block_mutex); ev->block = 1; ev->poll_msecs = -1; INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn); disk->ev = ev; } static void disk_add_events(struct gendisk *disk) { if (!disk->ev) return; /* FIXME: error handling */ if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0) pr_warn("%s: failed to create sysfs files for events\n", disk->disk_name); mutex_lock(&disk_events_mutex); list_add_tail(&disk->ev->node, &disk_events); mutex_unlock(&disk_events_mutex); /* * Block count is initialized to 1 and the following initial * unblock kicks it into action. 
*/ __disk_unblock_events(disk, true); } static void disk_del_events(struct gendisk *disk) { if (!disk->ev) return; disk_block_events(disk); mutex_lock(&disk_events_mutex); list_del_init(&disk->ev->node); mutex_unlock(&disk_events_mutex); sysfs_remove_files(&disk_to_dev(disk)->kobj, disk_events_attrs); } static void disk_release_events(struct gendisk *disk) { /* the block count should be 1 from disk_del_events() */ WARN_ON_ONCE(disk->ev && disk->ev->block != 1); kfree(disk->ev); }
gpl-2.0
siis/pfwall
net/mac80211/wme.c
180
3688
/* * Copyright 2004, Instant802 Networks, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/module.h> #include <linux/if_arp.h> #include <linux/types.h> #include <net/ip.h> #include <net/pkt_sched.h> #include <net/mac80211.h> #include "ieee80211_i.h" #include "wme.h" /* Default mapping in classifier to work with default * queue setup. */ const int ieee802_1d_to_ac[8] = { IEEE80211_AC_BE, IEEE80211_AC_BK, IEEE80211_AC_BK, IEEE80211_AC_BE, IEEE80211_AC_VI, IEEE80211_AC_VI, IEEE80211_AC_VO, IEEE80211_AC_VO }; static int wme_downgrade_ac(struct sk_buff *skb) { switch (skb->priority) { case 6: case 7: skb->priority = 5; /* VO -> VI */ return 0; case 4: case 5: skb->priority = 3; /* VI -> BE */ return 0; case 0: case 3: skb->priority = 2; /* BE -> BK */ return 0; default: return -1; } } /* Indicate which queue to use. 
*/ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct sta_info *sta = NULL; const u8 *ra = NULL; bool qos = false; if (local->hw.queues < 4 || skb->len < 6) { skb->priority = 0; /* required for correct WPA/11i MIC */ return min_t(u16, local->hw.queues - 1, IEEE80211_AC_BE); } rcu_read_lock(); switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: sta = rcu_dereference(sdata->u.vlan.sta); if (sta) { qos = test_sta_flag(sta, WLAN_STA_WME); break; } case NL80211_IFTYPE_AP: ra = skb->data; break; case NL80211_IFTYPE_WDS: ra = sdata->u.wds.remote_addr; break; #ifdef CONFIG_MAC80211_MESH case NL80211_IFTYPE_MESH_POINT: ra = skb->data; break; #endif case NL80211_IFTYPE_STATION: ra = sdata->u.mgd.bssid; break; case NL80211_IFTYPE_ADHOC: ra = skb->data; break; default: break; } if (!sta && ra && !is_multicast_ether_addr(ra)) { sta = sta_info_get(sdata, ra); if (sta) qos = test_sta_flag(sta, WLAN_STA_WME); } rcu_read_unlock(); if (!qos) { skb->priority = 0; /* required for correct WPA/11i MIC */ return IEEE80211_AC_BE; } /* use the data classifier to determine what 802.1d tag the * data frame has */ skb->priority = cfg80211_classify8021d(skb); return ieee80211_downgrade_queue(local, skb); } u16 ieee80211_downgrade_queue(struct ieee80211_local *local, struct sk_buff *skb) { /* in case we are a client verify acm is not set for this ac */ while (unlikely(local->wmm_acm & BIT(skb->priority))) { if (wme_downgrade_ac(skb)) { /* * This should not really happen. The AP has marked all * lower ACs to require admission control which is not * a reasonable configuration. Allow the frame to be * transmitted using AC_BK as a workaround. 
*/ break; } } /* look up which queue to use for frames with this 1d tag */ return ieee802_1d_to_ac[skb->priority]; } void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (void *)skb->data; /* Fill in the QoS header if there is one. */ if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *p = ieee80211_get_qos_ctl(hdr); u8 ack_policy = 0, tid; tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; if (unlikely(sdata->local->wifi_wme_noack_test)) ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK; /* qos header is 2 bytes */ *p++ = ack_policy | tid; *p = ieee80211_vif_is_mesh(&sdata->vif) ? (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8) : 0; } }
gpl-2.0
androidarmv6/zte-kernel-msm7x27
arch/m68knommu/mm/kmap.c
948
1153
/* * linux/arch/m68knommu/mm/kmap.c * * Copyright (C) 2000 Lineo, <davidm@snapgear.com> * Copyright (C) 2000-2002 David McCullough <davidm@snapgear.com> */ #include <linux/mm.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/types.h> #include <linux/vmalloc.h> #include <asm/setup.h> #include <asm/segment.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/io.h> #include <asm/system.h> #undef DEBUG /* * Map some physical address range into the kernel address space. */ void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag) { return (void *)physaddr; } /* * Unmap a ioremap()ed region again. */ void iounmap(void *addr) { } /* * __iounmap unmaps nearly everything, so be careful * it doesn't free currently pointer/page tables anymore but it * wans't used anyway and might be added later. */ void __iounmap(void *addr, unsigned long size) { } /* * Set new cache mode for some kernel address space. * The caller must push data for that range itself, if such data may already * be in the cache. */ void kernel_set_cachemode(void *addr, unsigned long size, int cmode) { }
gpl-2.0
UnknownzD/I9103_XW_Kernel
sound/pci/ctxfi/ctpcm.c
948
11247
/** * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved. * * This source file is released under GPL v2 license (no other versions). * See the COPYING file included in the main directory of this source * distribution for the license terms and conditions. * * @File ctpcm.c * * @Brief * This file contains the definition of the pcm device functions. * * @Author Liu Chun * @Date Apr 2 2008 * */ #include "ctpcm.h" #include "cttimer.h" #include <linux/slab.h> #include <sound/pcm.h> /* Hardware descriptions for playback */ static struct snd_pcm_hardware ct_pcm_playback_hw = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE), .formats = (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_FLOAT_LE), .rates = (SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_192000), .rate_min = 8000, .rate_max = 192000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = (64), .period_bytes_max = (128*1024), .periods_min = 2, .periods_max = 1024, .fifo_size = 0, }; static struct snd_pcm_hardware ct_spdif_passthru_playback_hw = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE), .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = (SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_32000), .rate_min = 32000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = (64), .period_bytes_max = (128*1024), .periods_min = 2, .periods_max = 1024, .fifo_size = 0, }; /* Hardware descriptions for capture */ static struct snd_pcm_hardware ct_pcm_capture_hw = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP_VALID), .formats = (SNDRV_PCM_FMTBIT_U8 | 
SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_FLOAT_LE), .rates = (SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_96000), .rate_min = 8000, .rate_max = 96000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = (384), .period_bytes_max = (64*1024), .periods_min = 2, .periods_max = 1024, .fifo_size = 0, }; static void ct_atc_pcm_interrupt(struct ct_atc_pcm *atc_pcm) { struct ct_atc_pcm *apcm = atc_pcm; if (!apcm->substream) return; snd_pcm_period_elapsed(apcm->substream); } static void ct_atc_pcm_free_substream(struct snd_pcm_runtime *runtime) { struct ct_atc_pcm *apcm = runtime->private_data; struct ct_atc *atc = snd_pcm_substream_chip(apcm->substream); atc->pcm_release_resources(atc, apcm); ct_timer_instance_free(apcm->timer); kfree(apcm); runtime->private_data = NULL; } /* pcm playback operations */ static int ct_pcm_playback_open(struct snd_pcm_substream *substream) { struct ct_atc *atc = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct ct_atc_pcm *apcm; int err; apcm = kzalloc(sizeof(*apcm), GFP_KERNEL); if (!apcm) return -ENOMEM; apcm->substream = substream; apcm->interrupt = ct_atc_pcm_interrupt; runtime->private_data = apcm; runtime->private_free = ct_atc_pcm_free_substream; if (IEC958 == substream->pcm->device) { runtime->hw = ct_spdif_passthru_playback_hw; atc->spdif_out_passthru(atc, 1); } else { runtime->hw = ct_pcm_playback_hw; if (FRONT == substream->pcm->device) runtime->hw.channels_max = 8; } err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (err < 0) { kfree(apcm); return err; } err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 1024, UINT_MAX); if (err < 0) { kfree(apcm); return err; } apcm->timer = ct_timer_instance_new(atc->timer, apcm); if (!apcm->timer) return -ENOMEM; return 0; } static int ct_pcm_playback_close(struct snd_pcm_substream *substream) { struct 
ct_atc *atc = snd_pcm_substream_chip(substream); /* TODO: Notify mixer inactive. */ if (IEC958 == substream->pcm->device) atc->spdif_out_passthru(atc, 0); /* The ct_atc_pcm object will be freed by runtime->private_free */ return 0; } static int ct_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct ct_atc *atc = snd_pcm_substream_chip(substream); struct ct_atc_pcm *apcm = substream->runtime->private_data; int err; err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); if (err < 0) return err; /* clear previous resources */ atc->pcm_release_resources(atc, apcm); return err; } static int ct_pcm_hw_free(struct snd_pcm_substream *substream) { struct ct_atc *atc = snd_pcm_substream_chip(substream); struct ct_atc_pcm *apcm = substream->runtime->private_data; /* clear previous resources */ atc->pcm_release_resources(atc, apcm); /* Free snd-allocated pages */ return snd_pcm_lib_free_pages(substream); } static int ct_pcm_playback_prepare(struct snd_pcm_substream *substream) { int err; struct ct_atc *atc = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct ct_atc_pcm *apcm = runtime->private_data; if (IEC958 == substream->pcm->device) err = atc->spdif_passthru_playback_prepare(atc, apcm); else err = atc->pcm_playback_prepare(atc, apcm); if (err < 0) { printk(KERN_ERR "ctxfi: Preparing pcm playback failed!!!\n"); return err; } return 0; } static int ct_pcm_playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct ct_atc *atc = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct ct_atc_pcm *apcm = runtime->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: atc->pcm_playback_start(atc, apcm); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: atc->pcm_playback_stop(atc, apcm); break; 
default: break; } return 0; } static snd_pcm_uframes_t ct_pcm_playback_pointer(struct snd_pcm_substream *substream) { unsigned long position; struct ct_atc *atc = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct ct_atc_pcm *apcm = runtime->private_data; /* Read out playback position */ position = atc->pcm_playback_position(atc, apcm); position = bytes_to_frames(runtime, position); if (position >= runtime->buffer_size) position = 0; return position; } /* pcm capture operations */ static int ct_pcm_capture_open(struct snd_pcm_substream *substream) { struct ct_atc *atc = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct ct_atc_pcm *apcm; int err; apcm = kzalloc(sizeof(*apcm), GFP_KERNEL); if (!apcm) return -ENOMEM; apcm->started = 0; apcm->substream = substream; apcm->interrupt = ct_atc_pcm_interrupt; runtime->private_data = apcm; runtime->private_free = ct_atc_pcm_free_substream; runtime->hw = ct_pcm_capture_hw; runtime->hw.rate_max = atc->rsr * atc->msr; err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (err < 0) { kfree(apcm); return err; } err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 1024, UINT_MAX); if (err < 0) { kfree(apcm); return err; } apcm->timer = ct_timer_instance_new(atc->timer, apcm); if (!apcm->timer) return -ENOMEM; return 0; } static int ct_pcm_capture_close(struct snd_pcm_substream *substream) { /* The ct_atc_pcm object will be freed by runtime->private_free */ /* TODO: Notify mixer inactive. 
*/ return 0; } static int ct_pcm_capture_prepare(struct snd_pcm_substream *substream) { int err; struct ct_atc *atc = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct ct_atc_pcm *apcm = runtime->private_data; err = atc->pcm_capture_prepare(atc, apcm); if (err < 0) { printk(KERN_ERR "ctxfi: Preparing pcm capture failed!!!\n"); return err; } return 0; } static int ct_pcm_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct ct_atc *atc = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct ct_atc_pcm *apcm = runtime->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: atc->pcm_capture_start(atc, apcm); break; case SNDRV_PCM_TRIGGER_STOP: atc->pcm_capture_stop(atc, apcm); break; default: atc->pcm_capture_stop(atc, apcm); break; } return 0; } static snd_pcm_uframes_t ct_pcm_capture_pointer(struct snd_pcm_substream *substream) { unsigned long position; struct ct_atc *atc = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct ct_atc_pcm *apcm = runtime->private_data; /* Read out playback position */ position = atc->pcm_capture_position(atc, apcm); position = bytes_to_frames(runtime, position); if (position >= runtime->buffer_size) position = 0; return position; } /* PCM operators for playback */ static struct snd_pcm_ops ct_pcm_playback_ops = { .open = ct_pcm_playback_open, .close = ct_pcm_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = ct_pcm_hw_params, .hw_free = ct_pcm_hw_free, .prepare = ct_pcm_playback_prepare, .trigger = ct_pcm_playback_trigger, .pointer = ct_pcm_playback_pointer, .page = snd_pcm_sgbuf_ops_page, }; /* PCM operators for capture */ static struct snd_pcm_ops ct_pcm_capture_ops = { .open = ct_pcm_capture_open, .close = ct_pcm_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = ct_pcm_hw_params, .hw_free = ct_pcm_hw_free, .prepare = ct_pcm_capture_prepare, .trigger = 
ct_pcm_capture_trigger, .pointer = ct_pcm_capture_pointer, .page = snd_pcm_sgbuf_ops_page, }; /* Create ALSA pcm device */ int ct_alsa_pcm_create(struct ct_atc *atc, enum CTALSADEVS device, const char *device_name) { struct snd_pcm *pcm; int err; int playback_count, capture_count; playback_count = (IEC958 == device) ? 1 : 8; capture_count = (FRONT == device) ? 1 : 0; err = snd_pcm_new(atc->card, "ctxfi", device, playback_count, capture_count, &pcm); if (err < 0) { printk(KERN_ERR "ctxfi: snd_pcm_new failed!! Err=%d\n", err); return err; } pcm->private_data = atc; pcm->info_flags = 0; pcm->dev_subclass = SNDRV_PCM_SUBCLASS_GENERIC_MIX; strlcpy(pcm->name, device_name, sizeof(pcm->name)); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &ct_pcm_playback_ops); if (FRONT == device) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &ct_pcm_capture_ops); snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG, snd_dma_pci_data(atc->pci), 128*1024, 128*1024); #ifdef CONFIG_PM atc->pcms[device] = pcm; #endif return 0; }
gpl-2.0
Victor-android/kernel_huawei
security/lsm_audit.c
1204
8893
/* * common LSM auditing functions * * Based on code written for SELinux by : * Stephen Smalley, <sds@epoch.ncsc.mil> * James Morris <jmorris@redhat.com> * Author : Etienne Basset, <etienne.basset@ensta.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/fs.h> #include <linux/init.h> #include <net/sock.h> #include <linux/un.h> #include <net/af_unix.h> #include <linux/audit.h> #include <linux/ipv6.h> #include <linux/ip.h> #include <net/ip.h> #include <net/ipv6.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/dccp.h> #include <linux/sctp.h> #include <linux/lsm_audit.h> /** * ipv4_skb_to_auditdata : fill auditdata from skb * @skb : the skb * @ad : the audit data to fill * @proto : the layer 4 protocol * * return 0 on success */ int ipv4_skb_to_auditdata(struct sk_buff *skb, struct common_audit_data *ad, u8 *proto) { int ret = 0; struct iphdr *ih; ih = ip_hdr(skb); if (ih == NULL) return -EINVAL; ad->u.net.v4info.saddr = ih->saddr; ad->u.net.v4info.daddr = ih->daddr; if (proto) *proto = ih->protocol; /* non initial fragment */ if (ntohs(ih->frag_off) & IP_OFFSET) return 0; switch (ih->protocol) { case IPPROTO_TCP: { struct tcphdr *th = tcp_hdr(skb); if (th == NULL) break; ad->u.net.sport = th->source; ad->u.net.dport = th->dest; break; } case IPPROTO_UDP: { struct udphdr *uh = udp_hdr(skb); if (uh == NULL) break; ad->u.net.sport = uh->source; ad->u.net.dport = uh->dest; break; } case IPPROTO_DCCP: { struct dccp_hdr *dh = dccp_hdr(skb); if (dh == NULL) break; ad->u.net.sport = dh->dccph_sport; ad->u.net.dport = dh->dccph_dport; break; } case IPPROTO_SCTP: { struct sctphdr *sh = sctp_hdr(skb); if (sh == NULL) break; ad->u.net.sport = sh->source; ad->u.net.dport = sh->dest; break; } default: ret = 
-EINVAL; } return ret; } #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) /** * ipv6_skb_to_auditdata : fill auditdata from skb * @skb : the skb * @ad : the audit data to fill * @proto : the layer 4 protocol * * return 0 on success */ int ipv6_skb_to_auditdata(struct sk_buff *skb, struct common_audit_data *ad, u8 *proto) { int offset, ret = 0; struct ipv6hdr *ip6; u8 nexthdr; ip6 = ipv6_hdr(skb); if (ip6 == NULL) return -EINVAL; ipv6_addr_copy(&ad->u.net.v6info.saddr, &ip6->saddr); ipv6_addr_copy(&ad->u.net.v6info.daddr, &ip6->daddr); ret = 0; /* IPv6 can have several extension header before the Transport header * skip them */ offset = skb_network_offset(skb); offset += sizeof(*ip6); nexthdr = ip6->nexthdr; offset = ipv6_skip_exthdr(skb, offset, &nexthdr); if (offset < 0) return 0; if (proto) *proto = nexthdr; switch (nexthdr) { case IPPROTO_TCP: { struct tcphdr _tcph, *th; th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph); if (th == NULL) break; ad->u.net.sport = th->source; ad->u.net.dport = th->dest; break; } case IPPROTO_UDP: { struct udphdr _udph, *uh; uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph); if (uh == NULL) break; ad->u.net.sport = uh->source; ad->u.net.dport = uh->dest; break; } case IPPROTO_DCCP: { struct dccp_hdr _dccph, *dh; dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph); if (dh == NULL) break; ad->u.net.sport = dh->dccph_sport; ad->u.net.dport = dh->dccph_dport; break; } case IPPROTO_SCTP: { struct sctphdr _sctph, *sh; sh = skb_header_pointer(skb, offset, sizeof(_sctph), &_sctph); if (sh == NULL) break; ad->u.net.sport = sh->source; ad->u.net.dport = sh->dest; break; } default: ret = -EINVAL; } return ret; } #endif static inline void print_ipv6_addr(struct audit_buffer *ab, struct in6_addr *addr, __be16 port, char *name1, char *name2) { if (!ipv6_addr_any(addr)) audit_log_format(ab, " %s=%pI6c", name1, addr); if (port) audit_log_format(ab, " %s=%d", name2, ntohs(port)); } static inline void 
print_ipv4_addr(struct audit_buffer *ab, __be32 addr, __be16 port, char *name1, char *name2) { if (addr) audit_log_format(ab, " %s=%pI4", name1, &addr); if (port) audit_log_format(ab, " %s=%d", name2, ntohs(port)); } /** * dump_common_audit_data - helper to dump common audit data * @a : common audit data * */ static void dump_common_audit_data(struct audit_buffer *ab, struct common_audit_data *a) { struct inode *inode = NULL; struct task_struct *tsk = current; if (a->tsk) tsk = a->tsk; if (tsk && tsk->pid) { audit_log_format(ab, " pid=%d comm=", tsk->pid); audit_log_untrustedstring(ab, tsk->comm); } switch (a->type) { case LSM_AUDIT_DATA_NONE: return; case LSM_AUDIT_DATA_IPC: audit_log_format(ab, " key=%d ", a->u.ipc_id); break; case LSM_AUDIT_DATA_CAP: audit_log_format(ab, " capability=%d ", a->u.cap); break; case LSM_AUDIT_DATA_FS: if (a->u.fs.path.dentry) { struct dentry *dentry = a->u.fs.path.dentry; if (a->u.fs.path.mnt) { audit_log_d_path(ab, "path=", &a->u.fs.path); } else { audit_log_format(ab, " name="); audit_log_untrustedstring(ab, dentry->d_name.name); } inode = dentry->d_inode; } else if (a->u.fs.inode) { struct dentry *dentry; inode = a->u.fs.inode; dentry = d_find_alias(inode); if (dentry) { audit_log_format(ab, " name="); audit_log_untrustedstring(ab, dentry->d_name.name); dput(dentry); } } if (inode) audit_log_format(ab, " dev=%s ino=%lu", inode->i_sb->s_id, inode->i_ino); break; case LSM_AUDIT_DATA_TASK: tsk = a->u.tsk; if (tsk && tsk->pid) { audit_log_format(ab, " pid=%d comm=", tsk->pid); audit_log_untrustedstring(ab, tsk->comm); } break; case LSM_AUDIT_DATA_NET: if (a->u.net.sk) { struct sock *sk = a->u.net.sk; struct unix_sock *u; int len = 0; char *p = NULL; switch (sk->sk_family) { case AF_INET: { struct inet_sock *inet = inet_sk(sk); print_ipv4_addr(ab, inet->inet_rcv_saddr, inet->inet_sport, "laddr", "lport"); print_ipv4_addr(ab, inet->inet_daddr, inet->inet_dport, "faddr", "fport"); break; } case AF_INET6: { struct inet_sock *inet = 
inet_sk(sk); struct ipv6_pinfo *inet6 = inet6_sk(sk); print_ipv6_addr(ab, &inet6->rcv_saddr, inet->inet_sport, "laddr", "lport"); print_ipv6_addr(ab, &inet6->daddr, inet->inet_dport, "faddr", "fport"); break; } case AF_UNIX: u = unix_sk(sk); if (u->dentry) { struct path path = { .dentry = u->dentry, .mnt = u->mnt }; audit_log_d_path(ab, "path=", &path); break; } if (!u->addr) break; len = u->addr->len-sizeof(short); p = &u->addr->name->sun_path[0]; audit_log_format(ab, " path="); if (*p) audit_log_untrustedstring(ab, p); else audit_log_n_hex(ab, p, len); break; } } switch (a->u.net.family) { case AF_INET: print_ipv4_addr(ab, a->u.net.v4info.saddr, a->u.net.sport, "saddr", "src"); print_ipv4_addr(ab, a->u.net.v4info.daddr, a->u.net.dport, "daddr", "dest"); break; case AF_INET6: print_ipv6_addr(ab, &a->u.net.v6info.saddr, a->u.net.sport, "saddr", "src"); print_ipv6_addr(ab, &a->u.net.v6info.daddr, a->u.net.dport, "daddr", "dest"); break; } if (a->u.net.netif > 0) { struct net_device *dev; /* NOTE: we always use init's namespace */ dev = dev_get_by_index(&init_net, a->u.net.netif); if (dev) { audit_log_format(ab, " netif=%s", dev->name); dev_put(dev); } } break; #ifdef CONFIG_KEYS case LSM_AUDIT_DATA_KEY: audit_log_format(ab, " key_serial=%u", a->u.key_struct.key); if (a->u.key_struct.key_desc) { audit_log_format(ab, " key_desc="); audit_log_untrustedstring(ab, a->u.key_struct.key_desc); } break; #endif case LSM_AUDIT_DATA_KMOD: audit_log_format(ab, " kmod="); audit_log_untrustedstring(ab, a->u.kmod_name); break; } /* switch (a->type) */ } /** * common_lsm_audit - generic LSM auditing function * @a: auxiliary audit data * * setup the audit buffer for common security information * uses callback to print LSM specific information */ void common_lsm_audit(struct common_audit_data *a) { struct audit_buffer *ab; if (a == NULL) return; /* we use GFP_ATOMIC so we won't sleep */ ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_AVC); if (ab == NULL) return; if 
(a->lsm_pre_audit) a->lsm_pre_audit(ab, a); dump_common_audit_data(ab, a); if (a->lsm_post_audit) a->lsm_post_audit(ab, a); audit_log_end(ab); }
gpl-2.0
halfline/linux
drivers/net/wireless/ath/ath9k/ar9003_rtt.c
1972
7186
/* * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "hw.h" #include "hw-ops.h" #include "ar9003_phy.h" #include "ar9003_rtt.h" #define RTT_RESTORE_TIMEOUT 1000 #define RTT_ACCESS_TIMEOUT 100 #define RTT_BAD_VALUE 0x0bad0bad /* * RTT (Radio Retention Table) hardware implementation information * * There is an internal table (i.e. the rtt) for each chain (or bank). * Each table contains 6 entries and each entry is corresponding to * a specific calibration parameter as depicted below. * 0~2 - DC offset DAC calibration: loop, low, high (offsetI/Q_...) 
* 3 - Filter cal (filterfc) * 4 - RX gain settings * 5 - Peak detector offset calibration (agc_caldac) */ void ar9003_hw_rtt_enable(struct ath_hw *ah) { REG_WRITE(ah, AR_PHY_RTT_CTRL, 1); } void ar9003_hw_rtt_disable(struct ath_hw *ah) { REG_WRITE(ah, AR_PHY_RTT_CTRL, 0); } void ar9003_hw_rtt_set_mask(struct ath_hw *ah, u32 rtt_mask) { REG_RMW_FIELD(ah, AR_PHY_RTT_CTRL, AR_PHY_RTT_CTRL_RESTORE_MASK, rtt_mask); } bool ar9003_hw_rtt_force_restore(struct ath_hw *ah) { if (!ath9k_hw_wait(ah, AR_PHY_RTT_CTRL, AR_PHY_RTT_CTRL_FORCE_RADIO_RESTORE, 0, RTT_RESTORE_TIMEOUT)) return false; REG_RMW_FIELD(ah, AR_PHY_RTT_CTRL, AR_PHY_RTT_CTRL_FORCE_RADIO_RESTORE, 1); if (!ath9k_hw_wait(ah, AR_PHY_RTT_CTRL, AR_PHY_RTT_CTRL_FORCE_RADIO_RESTORE, 0, RTT_RESTORE_TIMEOUT)) return false; return true; } static void ar9003_hw_rtt_load_hist_entry(struct ath_hw *ah, u8 chain, u32 index, u32 data28) { u32 val; val = SM(data28, AR_PHY_RTT_SW_RTT_TABLE_DATA); REG_WRITE(ah, AR_PHY_RTT_TABLE_SW_INTF_1_B(chain), val); val = SM(0, AR_PHY_RTT_SW_RTT_TABLE_ACCESS) | SM(1, AR_PHY_RTT_SW_RTT_TABLE_WRITE) | SM(index, AR_PHY_RTT_SW_RTT_TABLE_ADDR); REG_WRITE(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain), val); udelay(1); val |= SM(1, AR_PHY_RTT_SW_RTT_TABLE_ACCESS); REG_WRITE(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain), val); udelay(1); if (!ath9k_hw_wait(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain), AR_PHY_RTT_SW_RTT_TABLE_ACCESS, 0, RTT_ACCESS_TIMEOUT)) return; val &= ~SM(1, AR_PHY_RTT_SW_RTT_TABLE_WRITE); REG_WRITE(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain), val); udelay(1); ath9k_hw_wait(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain), AR_PHY_RTT_SW_RTT_TABLE_ACCESS, 0, RTT_ACCESS_TIMEOUT); } void ar9003_hw_rtt_load_hist(struct ath_hw *ah) { int chain, i; for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { if (!(ah->caps.rx_chainmask & (1 << chain))) continue; for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) { ar9003_hw_rtt_load_hist_entry(ah, chain, i, ah->caldata->rtt_table[chain][i]); ath_dbg(ath9k_hw_common(ah), CALIBRATE, "Load RTT 
value at idx %d, chain %d: 0x%x\n", i, chain, ah->caldata->rtt_table[chain][i]); } } } static void ar9003_hw_patch_rtt(struct ath_hw *ah, int index, int chain) { int agc, caldac; if (!test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags)) return; if ((index != 5) || (chain >= 2)) return; agc = REG_READ_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE); if (!agc) return; caldac = ah->caldata->caldac[chain]; ah->caldata->rtt_table[chain][index] &= 0xFFFF05FF; caldac = (caldac & 0x20) | ((caldac & 0x1F) << 7); ah->caldata->rtt_table[chain][index] |= (caldac << 4); } static int ar9003_hw_rtt_fill_hist_entry(struct ath_hw *ah, u8 chain, u32 index) { u32 val; val = SM(0, AR_PHY_RTT_SW_RTT_TABLE_ACCESS) | SM(0, AR_PHY_RTT_SW_RTT_TABLE_WRITE) | SM(index, AR_PHY_RTT_SW_RTT_TABLE_ADDR); REG_WRITE(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain), val); udelay(1); val |= SM(1, AR_PHY_RTT_SW_RTT_TABLE_ACCESS); REG_WRITE(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain), val); udelay(1); if (!ath9k_hw_wait(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain), AR_PHY_RTT_SW_RTT_TABLE_ACCESS, 0, RTT_ACCESS_TIMEOUT)) return RTT_BAD_VALUE; val = MS(REG_READ(ah, AR_PHY_RTT_TABLE_SW_INTF_1_B(chain)), AR_PHY_RTT_SW_RTT_TABLE_DATA); return val; } void ar9003_hw_rtt_fill_hist(struct ath_hw *ah) { int chain, i; for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { if (!(ah->caps.rx_chainmask & (1 << chain))) continue; for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) { ah->caldata->rtt_table[chain][i] = ar9003_hw_rtt_fill_hist_entry(ah, chain, i); ar9003_hw_patch_rtt(ah, i, chain); ath_dbg(ath9k_hw_common(ah), CALIBRATE, "RTT value at idx %d, chain %d is: 0x%x\n", i, chain, ah->caldata->rtt_table[chain][i]); } } set_bit(RTT_DONE, &ah->caldata->cal_flags); } void ar9003_hw_rtt_clear_hist(struct ath_hw *ah) { int chain, i; for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { if (!(ah->caps.rx_chainmask & (1 << chain))) continue; for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) ar9003_hw_rtt_load_hist_entry(ah, chain, i, 
0); } if (ah->caldata) clear_bit(RTT_DONE, &ah->caldata->cal_flags); } bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan) { bool restore; if (!ah->caldata) return false; if (test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags)) { if (IS_CHAN_2GHZ(chan)){ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0), AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, ah->caldata->caldac[0]); REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1), AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, ah->caldata->caldac[1]); } else { REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0), AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR, ah->caldata->caldac[0]); REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1), AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR, ah->caldata->caldac[1]); } REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1), AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1); REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0), AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1); } if (!test_bit(RTT_DONE, &ah->caldata->cal_flags)) return false; ar9003_hw_rtt_enable(ah); if (test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags)) ar9003_hw_rtt_set_mask(ah, 0x30); else ar9003_hw_rtt_set_mask(ah, 0x10); if (!ath9k_hw_rfbus_req(ah)) { ath_err(ath9k_hw_common(ah), "Could not stop baseband\n"); restore = false; goto fail; } ar9003_hw_rtt_load_hist(ah); restore = ar9003_hw_rtt_force_restore(ah); fail: ath9k_hw_rfbus_done(ah); ar9003_hw_rtt_disable(ah); return restore; }
gpl-2.0
lirokoa/android_kernel_samsung_n80xx
drivers/char/ttyprintk.c
1972
5337
/* * linux/drivers/char/ttyprintk.c * * Copyright (C) 2010 Samo Pogacnik * * This program is free software; you can redistribute it and/or modify * it under the smems of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. */ /* * This pseudo device allows user to make printk messages. It is possible * to store "console" messages inline with kernel messages for better analyses * of the boot process, for example. */ #include <linux/device.h> #include <linux/serial.h> #include <linux/tty.h> struct ttyprintk_port { struct tty_port port; struct mutex port_write_mutex; }; static struct ttyprintk_port tpk_port; /* * Our simple preformatting supports transparent output of (time-stamped) * printk messages (also suitable for logging service): * - any cr is replaced by nl * - adds a ttyprintk source tag in front of each line * - too long message is fragmeted, with '\'nl between fragments * - TPK_STR_SIZE isn't really the write_room limiting factor, bcause * it is emptied on the fly during preformatting. 
*/ #define TPK_STR_SIZE 508 /* should be bigger then max expected line length */ #define TPK_MAX_ROOM 4096 /* we could assume 4K for instance */ static const char *tpk_tag = "[U] "; /* U for User */ static int tpk_curr; static int tpk_printk(const unsigned char *buf, int count) { static char tmp[TPK_STR_SIZE + 4]; int i = tpk_curr; if (buf == NULL) { /* flush tmp[] */ if (tpk_curr > 0) { /* non nl or cr terminated message - add nl */ tmp[tpk_curr + 0] = '\n'; tmp[tpk_curr + 1] = '\0'; printk(KERN_INFO "%s%s", tpk_tag, tmp); tpk_curr = 0; } return i; } for (i = 0; i < count; i++) { tmp[tpk_curr] = buf[i]; if (tpk_curr < TPK_STR_SIZE) { switch (buf[i]) { case '\r': /* replace cr with nl */ tmp[tpk_curr + 0] = '\n'; tmp[tpk_curr + 1] = '\0'; printk(KERN_INFO "%s%s", tpk_tag, tmp); tpk_curr = 0; if (buf[i + 1] == '\n') i++; break; case '\n': tmp[tpk_curr + 1] = '\0'; printk(KERN_INFO "%s%s", tpk_tag, tmp); tpk_curr = 0; break; default: tpk_curr++; } } else { /* end of tmp buffer reached: cut the message in two */ tmp[tpk_curr + 1] = '\\'; tmp[tpk_curr + 2] = '\n'; tmp[tpk_curr + 3] = '\0'; printk(KERN_INFO "%s%s", tpk_tag, tmp); tpk_curr = 0; } } return count; } /* * TTY operations open function. */ static int tpk_open(struct tty_struct *tty, struct file *filp) { tty->driver_data = &tpk_port; return tty_port_open(&tpk_port.port, tty, filp); } /* * TTY operations close function. */ static void tpk_close(struct tty_struct *tty, struct file *filp) { struct ttyprintk_port *tpkp = tty->driver_data; mutex_lock(&tpkp->port_write_mutex); /* flush tpk_printk buffer */ tpk_printk(NULL, 0); mutex_unlock(&tpkp->port_write_mutex); tty_port_close(&tpkp->port, tty, filp); } /* * TTY operations write function. 
*/ static int tpk_write(struct tty_struct *tty, const unsigned char *buf, int count) { struct ttyprintk_port *tpkp = tty->driver_data; int ret; /* exclusive use of tpk_printk within this tty */ mutex_lock(&tpkp->port_write_mutex); ret = tpk_printk(buf, count); mutex_unlock(&tpkp->port_write_mutex); return ret; } /* * TTY operations write_room function. */ static int tpk_write_room(struct tty_struct *tty) { return TPK_MAX_ROOM; } /* * TTY operations ioctl function. */ static int tpk_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct ttyprintk_port *tpkp = tty->driver_data; if (!tpkp) return -EINVAL; switch (cmd) { /* Stop TIOCCONS */ case TIOCCONS: return -EOPNOTSUPP; default: return -ENOIOCTLCMD; } return 0; } static const struct tty_operations ttyprintk_ops = { .open = tpk_open, .close = tpk_close, .write = tpk_write, .write_room = tpk_write_room, .ioctl = tpk_ioctl, }; struct tty_port_operations null_ops = { }; static struct tty_driver *ttyprintk_driver; static int __init ttyprintk_init(void) { int ret = -ENOMEM; void *rp; ttyprintk_driver = alloc_tty_driver(1); if (!ttyprintk_driver) return ret; ttyprintk_driver->owner = THIS_MODULE; ttyprintk_driver->driver_name = "ttyprintk"; ttyprintk_driver->name = "ttyprintk"; ttyprintk_driver->major = TTYAUX_MAJOR; ttyprintk_driver->minor_start = 3; ttyprintk_driver->num = 1; ttyprintk_driver->type = TTY_DRIVER_TYPE_CONSOLE; ttyprintk_driver->init_termios = tty_std_termios; ttyprintk_driver->init_termios.c_oflag = OPOST | OCRNL | ONOCR | ONLRET; ttyprintk_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; tty_set_operations(ttyprintk_driver, &ttyprintk_ops); ret = tty_register_driver(ttyprintk_driver); if (ret < 0) { printk(KERN_ERR "Couldn't register ttyprintk driver\n"); goto error; } /* create our unnumbered device */ rp = device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 3), NULL, ttyprintk_driver->name); if (IS_ERR(rp)) { printk(KERN_ERR "Couldn't create 
ttyprintk device\n"); ret = PTR_ERR(rp); goto error; } tty_port_init(&tpk_port.port); tpk_port.port.ops = &null_ops; mutex_init(&tpk_port.port_write_mutex); return 0; error: put_tty_driver(ttyprintk_driver); ttyprintk_driver = NULL; return ret; } module_init(ttyprintk_init);
gpl-2.0
manishj-patel/netbook_kernel_3.4.5_plus
drivers/edac/amd64_edac.c
3252
74456
#include "amd64_edac.h" #include <asm/amd_nb.h> static struct edac_pci_ctl_info *amd64_ctl_pci; static int report_gart_errors; module_param(report_gart_errors, int, 0644); /* * Set by command line parameter. If BIOS has enabled the ECC, this override is * cleared to prevent re-enabling the hardware by this driver. */ static int ecc_enable_override; module_param(ecc_enable_override, int, 0644); static struct msr __percpu *msrs; /* * count successfully initialized driver instances for setup_pci_device() */ static atomic_t drv_instances = ATOMIC_INIT(0); /* Per-node driver instances */ static struct mem_ctl_info **mcis; static struct ecc_settings **ecc_stngs; /* * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching- * or higher value'. * *FIXME: Produce a better mapping/linearisation. */ struct scrubrate { u32 scrubval; /* bit pattern for scrub rate */ u32 bandwidth; /* bandwidth consumed (bytes/sec) */ } scrubrates[] = { { 0x01, 1600000000UL}, { 0x02, 800000000UL}, { 0x03, 400000000UL}, { 0x04, 200000000UL}, { 0x05, 100000000UL}, { 0x06, 50000000UL}, { 0x07, 25000000UL}, { 0x08, 12284069UL}, { 0x09, 6274509UL}, { 0x0A, 3121951UL}, { 0x0B, 1560975UL}, { 0x0C, 781440UL}, { 0x0D, 390720UL}, { 0x0E, 195300UL}, { 0x0F, 97650UL}, { 0x10, 48854UL}, { 0x11, 24427UL}, { 0x12, 12213UL}, { 0x13, 6101UL}, { 0x14, 3051UL}, { 0x15, 1523UL}, { 0x16, 761UL}, { 0x00, 0UL}, /* scrubbing off */ }; static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset, u32 *val, const char *func) { int err = 0; err = pci_read_config_dword(pdev, offset, val); if (err) amd64_warn("%s: error reading F%dx%03x.\n", func, PCI_FUNC(pdev->devfn), offset); return err; } int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, u32 val, const char *func) { int err = 0; err = pci_write_config_dword(pdev, offset, val); if (err) amd64_warn("%s: error writing to F%dx%03x.\n", func, 
PCI_FUNC(pdev->devfn), offset); return err; } /* * * Depending on the family, F2 DCT reads need special handling: * * K8: has a single DCT only * * F10h: each DCT has its own set of regs * DCT0 -> F2x040.. * DCT1 -> F2x140.. * * F15h: we select which DCT we access using F1x10C[DctCfgSel] * */ static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, const char *func) { if (addr >= 0x100) return -EINVAL; return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func); } static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, const char *func) { return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func); } /* * Select DCT to which PCI cfg accesses are routed */ static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct) { u32 reg = 0; amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg); reg &= 0xfffffffe; reg |= dct; amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg); } static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, const char *func) { u8 dct = 0; if (addr >= 0x140 && addr <= 0x1a0) { dct = 1; addr -= 0x100; } f15h_select_dct(pvt, dct); return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func); } /* * Memory scrubber control interface. For K8, memory scrubbing is handled by * hardware and can involve L2 cache, dcache as well as the main memory. With * F10, this is extended to L3 cache scrubbing on CPU models sporting that * functionality. * * This causes the "units" for the scrubbing speed to vary from 64 byte blocks * (dram) over to cache lines. This is nasty, so we will use bandwidth in * bytes/sec for the setting. * * Currently, we only do dram scrubbing. If the scrubbing is done in software on * other archs, we might not have access to the caches directly. */ /* * scan the scrub rate mapping table for a close or matching bandwidth value to * issue. If requested is too big, then use last maximum value found. 
*/ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) { u32 scrubval; int i; /* * map the configured rate (new_bw) to a value specific to the AMD64 * memory controller and apply to register. Search for the first * bandwidth entry that is greater or equal than the setting requested * and program that. If at last entry, turn off DRAM scrubbing. */ for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { /* * skip scrub rates which aren't recommended * (see F10 BKDG, F3x58) */ if (scrubrates[i].scrubval < min_rate) continue; if (scrubrates[i].bandwidth <= new_bw) break; /* * if no suitable bandwidth found, turn off DRAM scrubbing * entirely by falling back to the last element in the * scrubrates array. */ } scrubval = scrubrates[i].scrubval; pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F); if (scrubval) return scrubrates[i].bandwidth; return 0; } static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw) { struct amd64_pvt *pvt = mci->pvt_info; u32 min_scrubrate = 0x5; if (boot_cpu_data.x86 == 0xf) min_scrubrate = 0x0; /* F15h Erratum #505 */ if (boot_cpu_data.x86 == 0x15) f15h_select_dct(pvt, 0); return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate); } static int amd64_get_scrub_rate(struct mem_ctl_info *mci) { struct amd64_pvt *pvt = mci->pvt_info; u32 scrubval = 0; int i, retval = -EINVAL; /* F15h Erratum #505 */ if (boot_cpu_data.x86 == 0x15) f15h_select_dct(pvt, 0); amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); scrubval = scrubval & 0x001F; for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { if (scrubrates[i].scrubval == scrubval) { retval = scrubrates[i].bandwidth; break; } } return retval; } /* * returns true if the SysAddr given by sys_addr matches the * DRAM base/limit associated with node_id */ static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, unsigned nid) { u64 addr; /* The K8 treats this as a 40-bit value. However, bits 63-40 will be * all ones if the most significant implemented address bit is 1. 
* Here we discard bits 63-40. See section 3.4.2 of AMD publication * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1 * Application Programming. */ addr = sys_addr & 0x000000ffffffffffull; return ((addr >= get_dram_base(pvt, nid)) && (addr <= get_dram_limit(pvt, nid))); } /* * Attempt to map a SysAddr to a node. On success, return a pointer to the * mem_ctl_info structure for the node that the SysAddr maps to. * * On failure, return NULL. */ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, u64 sys_addr) { struct amd64_pvt *pvt; unsigned node_id; u32 intlv_en, bits; /* * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section * 3.4.4.2) registers to map the SysAddr to a node ID. */ pvt = mci->pvt_info; /* * The value of this field should be the same for all DRAM Base * registers. Therefore we arbitrarily choose to read it from the * register for node 0. */ intlv_en = dram_intlv_en(pvt, 0); if (intlv_en == 0) { for (node_id = 0; node_id < DRAM_RANGES; node_id++) { if (amd64_base_limit_match(pvt, sys_addr, node_id)) goto found; } goto err_no_match; } if (unlikely((intlv_en != 0x01) && (intlv_en != 0x03) && (intlv_en != 0x07))) { amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en); return NULL; } bits = (((u32) sys_addr) >> 12) & intlv_en; for (node_id = 0; ; ) { if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits) break; /* intlv_sel field matches */ if (++node_id >= DRAM_RANGES) goto err_no_match; } /* sanity test for sys_addr */ if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address" "range for node %d with node interleaving enabled.\n", __func__, sys_addr, node_id); return NULL; } found: return edac_mc_find((int)node_id); err_no_match: debugf2("sys_addr 0x%lx doesn't match any node\n", (unsigned long)sys_addr); return NULL; } /* * compute the CS base address of the @csrow on the DRAM controller @dct. 
* For details see F2x[5C:40] in the processor's BKDG */ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct, u64 *base, u64 *mask) { u64 csbase, csmask, base_bits, mask_bits; u8 addr_shift; if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) { csbase = pvt->csels[dct].csbases[csrow]; csmask = pvt->csels[dct].csmasks[csrow]; base_bits = GENMASK(21, 31) | GENMASK(9, 15); mask_bits = GENMASK(21, 29) | GENMASK(9, 15); addr_shift = 4; } else { csbase = pvt->csels[dct].csbases[csrow]; csmask = pvt->csels[dct].csmasks[csrow >> 1]; addr_shift = 8; if (boot_cpu_data.x86 == 0x15) base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13); else base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13); } *base = (csbase & base_bits) << addr_shift; *mask = ~0ULL; /* poke holes for the csmask */ *mask &= ~(mask_bits << addr_shift); /* OR them in */ *mask |= (csmask & mask_bits) << addr_shift; } #define for_each_chip_select(i, dct, pvt) \ for (i = 0; i < pvt->csels[dct].b_cnt; i++) #define chip_select_base(i, dct, pvt) \ pvt->csels[dct].csbases[i] #define for_each_chip_select_mask(i, dct, pvt) \ for (i = 0; i < pvt->csels[dct].m_cnt; i++) /* * @input_addr is an InputAddr associated with the node given by mci. Return the * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr). 
*/ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) { struct amd64_pvt *pvt; int csrow; u64 base, mask; pvt = mci->pvt_info; for_each_chip_select(csrow, 0, pvt) { if (!csrow_enabled(csrow, 0, pvt)) continue; get_cs_base_and_mask(pvt, csrow, 0, &base, &mask); mask = ~mask; if ((input_addr & mask) == (base & mask)) { debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n", (unsigned long)input_addr, csrow, pvt->mc_node_id); return csrow; } } debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n", (unsigned long)input_addr, pvt->mc_node_id); return -1; } /* * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094) * for the node represented by mci. Info is passed back in *hole_base, * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if * info is invalid. Info may be invalid for either of the following reasons: * * - The revision of the node is not E or greater. In this case, the DRAM Hole * Address Register does not exist. * * - The DramHoleValid bit is cleared in the DRAM Hole Address Register, * indicating that its contents are not valid. * * The values passed back in *hole_base, *hole_offset, and *hole_size are * complete 32-bit values despite the fact that the bitfields in the DHAR * only represent bits 31-24 of the base and offset values. 
*/ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, u64 *hole_offset, u64 *hole_size) { struct amd64_pvt *pvt = mci->pvt_info; u64 base; /* only revE and later have the DRAM Hole Address Register */ if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) { debugf1(" revision %d for node %d does not support DHAR\n", pvt->ext_model, pvt->mc_node_id); return 1; } /* valid for Fam10h and above */ if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) { debugf1(" Dram Memory Hoisting is DISABLED on this system\n"); return 1; } if (!dhar_valid(pvt)) { debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n", pvt->mc_node_id); return 1; } /* This node has Memory Hoisting */ /* +------------------+--------------------+--------------------+----- * | memory | DRAM hole | relocated | * | [0, (x - 1)] | [x, 0xffffffff] | addresses from | * | | | DRAM hole | * | | | [0x100000000, | * | | | (0x100000000+ | * | | | (0xffffffff-x))] | * +------------------+--------------------+--------------------+----- * * Above is a diagram of physical memory showing the DRAM hole and the * relocated addresses from the DRAM hole. As shown, the DRAM hole * starts at address x (the base address) and extends through address * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the * addresses in the hole so that they start at 0x100000000. */ base = dhar_base(pvt); *hole_base = base; *hole_size = (0x1ull << 32) - base; if (boot_cpu_data.x86 > 0xf) *hole_offset = f10_dhar_offset(pvt); else *hole_offset = k8_dhar_offset(pvt); debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n", pvt->mc_node_id, (unsigned long)*hole_base, (unsigned long)*hole_offset, (unsigned long)*hole_size); return 0; } EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info); /* * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is * assumed that sys_addr maps to the node given by mci. * * The first part of section 3.4.4 (p. 
70) shows how the DRAM Base (section * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled, * then it is also involved in translating a SysAddr to a DramAddr. Sections * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting. * These parts of the documentation are unclear. I interpret them as follows: * * When node n receives a SysAddr, it processes the SysAddr as follows: * * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM * Limit registers for node n. If the SysAddr is not within the range * specified by the base and limit values, then node n ignores the Sysaddr * (since it does not map to node n). Otherwise continue to step 2 below. * * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is * disabled so skip to step 3 below. Otherwise see if the SysAddr is within * the range of relocated addresses (starting at 0x100000000) from the DRAM * hole. If not, skip to step 3 below. Else get the value of the * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the * offset defined by this value from the SysAddr. * * 3. Obtain the base address for node n from the DRAMBase field of the DRAM * Base register for node n. To obtain the DramAddr, subtract the base * address from the SysAddr, as shown near the start of section 3.4.4 (p.70). 
*/ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr) { struct amd64_pvt *pvt = mci->pvt_info; u64 dram_base, hole_base, hole_offset, hole_size, dram_addr; int ret = 0; dram_base = get_dram_base(pvt, pvt->mc_node_id); ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size); if (!ret) { if ((sys_addr >= (1ull << 32)) && (sys_addr < ((1ull << 32) + hole_size))) { /* use DHAR to translate SysAddr to DramAddr */ dram_addr = sys_addr - hole_offset; debugf2("using DHAR to translate SysAddr 0x%lx to " "DramAddr 0x%lx\n", (unsigned long)sys_addr, (unsigned long)dram_addr); return dram_addr; } } /* * Translate the SysAddr to a DramAddr as shown near the start of * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8 * only deals with 40-bit values. Therefore we discard bits 63-40 of * sys_addr below. If bit 39 of sys_addr is 1 then the bits we * discard are all 1s. Otherwise the bits we discard are all 0s. See * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture * Programmer's Manual Volume 1 Application Programming. */ dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base; debugf2("using DRAM Base register to translate SysAddr 0x%lx to " "DramAddr 0x%lx\n", (unsigned long)sys_addr, (unsigned long)dram_addr); return dram_addr; } /* * @intlv_en is the value of the IntlvEn field from a DRAM Base register * (section 3.4.4.1). Return the number of bits from a SysAddr that are used * for node interleaving. */ static int num_node_interleave_bits(unsigned intlv_en) { static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 }; int n; BUG_ON(intlv_en > 7); n = intlv_shift_table[intlv_en]; return n; } /* Translate the DramAddr given by @dram_addr to an InputAddr. */ static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr) { struct amd64_pvt *pvt; int intlv_shift; u64 input_addr; pvt = mci->pvt_info; /* * See the start of section 3.4.4 (p. 
70, BKDG #26094, K8, revA-E) * concerning translating a DramAddr to an InputAddr. */ intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0)); input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) + (dram_addr & 0xfff); debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n", intlv_shift, (unsigned long)dram_addr, (unsigned long)input_addr); return input_addr; } /* * Translate the SysAddr represented by @sys_addr to an InputAddr. It is * assumed that @sys_addr maps to the node given by mci. */ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr) { u64 input_addr; input_addr = dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr)); debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n", (unsigned long)sys_addr, (unsigned long)input_addr); return input_addr; } /* * @input_addr is an InputAddr associated with the node represented by mci. * Translate @input_addr to a DramAddr and return the result. */ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) { struct amd64_pvt *pvt; unsigned node_id, intlv_shift; u64 bits, dram_addr; u32 intlv_sel; /* * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E) * shows how to translate a DramAddr to an InputAddr. Here we reverse * this procedure. When translating from a DramAddr to an InputAddr, the * bits used for node interleaving are discarded. Here we recover these * bits from the IntlvSel field of the DRAM Limit register (section * 3.4.4.2) for the node that input_addr is associated with. 
*/ pvt = mci->pvt_info; node_id = pvt->mc_node_id; BUG_ON(node_id > 7); intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0)); if (intlv_shift == 0) { debugf1(" InputAddr 0x%lx translates to DramAddr of " "same value\n", (unsigned long)input_addr); return input_addr; } bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) + (input_addr & 0xfff); intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1); dram_addr = bits + (intlv_sel << 12); debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx " "(%d node interleave bits)\n", (unsigned long)input_addr, (unsigned long)dram_addr, intlv_shift); return dram_addr; } /* * @dram_addr is a DramAddr that maps to the node represented by mci. Convert * @dram_addr to a SysAddr. */ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr) { struct amd64_pvt *pvt = mci->pvt_info; u64 hole_base, hole_offset, hole_size, base, sys_addr; int ret = 0; ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size); if (!ret) { if ((dram_addr >= hole_base) && (dram_addr < (hole_base + hole_size))) { sys_addr = dram_addr + hole_offset; debugf1("using DHAR to translate DramAddr 0x%lx to " "SysAddr 0x%lx\n", (unsigned long)dram_addr, (unsigned long)sys_addr); return sys_addr; } } base = get_dram_base(pvt, pvt->mc_node_id); sys_addr = dram_addr + base; /* * The sys_addr we have computed up to this point is a 40-bit value * because the k8 deals with 40-bit values. However, the value we are * supposed to return is a full 64-bit physical address. The AMD * x86-64 architecture specifies that the most significant implemented * address bit through bit 63 of a physical address must be either all * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a * 64-bit value below. See section 3.4.2 of AMD publication 24592: * AMD x86-64 Architecture Programmer's Manual Volume 1 Application * Programming. 
*/ sys_addr |= ~((sys_addr & (1ull << 39)) - 1); debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n", pvt->mc_node_id, (unsigned long)dram_addr, (unsigned long)sys_addr); return sys_addr; } /* * @input_addr is an InputAddr associated with the node given by mci. Translate * @input_addr to a SysAddr. */ static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci, u64 input_addr) { return dram_addr_to_sys_addr(mci, input_addr_to_dram_addr(mci, input_addr)); } /* * Find the minimum and maximum InputAddr values that map to the given @csrow. * Pass back these values in *input_addr_min and *input_addr_max. */ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow, u64 *input_addr_min, u64 *input_addr_max) { struct amd64_pvt *pvt; u64 base, mask; pvt = mci->pvt_info; BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt)); get_cs_base_and_mask(pvt, csrow, 0, &base, &mask); *input_addr_min = base & ~mask; *input_addr_max = base | mask; } /* Map the Error address to a PAGE and PAGE OFFSET. */ static inline void error_address_to_page_and_offset(u64 error_address, u32 *page, u32 *offset) { *page = (u32) (error_address >> PAGE_SHIFT); *offset = ((u32) error_address) & ~PAGE_MASK; } /* * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers * of a node that detected an ECC memory error. mci represents the node that * the error address maps to (possibly different from the node that detected * the error). Return the number of the csrow that sys_addr maps to, or -1 on * error. 
*/ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr) { int csrow; csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr)); if (csrow == -1) amd64_mc_err(mci, "Failed to translate InputAddr to csrow for " "address 0x%lx\n", (unsigned long)sys_addr); return csrow; } static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16); /* * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs * are ECC capable. */ static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt) { u8 bit; unsigned long edac_cap = EDAC_FLAG_NONE; bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F) ? 19 : 17; if (pvt->dclr0 & BIT(bit)) edac_cap = EDAC_FLAG_SECDED; return edac_cap; } static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8); static void amd64_dump_dramcfg_low(u32 dclr, int chan) { debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr); debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n", (dclr & BIT(16)) ? "un" : "", (dclr & BIT(19)) ? "yes" : "no"); debugf1(" PAR/ERR parity: %s\n", (dclr & BIT(8)) ? "enabled" : "disabled"); if (boot_cpu_data.x86 == 0x10) debugf1(" DCT 128bit mode width: %s\n", (dclr & BIT(11)) ? "128b" : "64b"); debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n", (dclr & BIT(12)) ? "yes" : "no", (dclr & BIT(13)) ? "yes" : "no", (dclr & BIT(14)) ? "yes" : "no", (dclr & BIT(15)) ? "yes" : "no"); } /* Display and decode various NB registers for debug purposes. */ static void dump_misc_regs(struct amd64_pvt *pvt) { debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); debugf1(" NB two channel DRAM capable: %s\n", (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no"); debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n", (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no", (pvt->nbcap & NBCAP_CHIPKILL) ? 
"yes" : "no"); amd64_dump_dramcfg_low(pvt->dclr0, 0); debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare); debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, " "offset: 0x%08x\n", pvt->dhar, dhar_base(pvt), (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt) : f10_dhar_offset(pvt)); debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no"); amd64_debug_display_dimm_sizes(pvt, 0); /* everything below this point is Fam10h and above */ if (boot_cpu_data.x86 == 0xf) return; amd64_debug_display_dimm_sizes(pvt, 1); amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4")); /* Only if NOT ganged does dclr1 have valid info */ if (!dct_ganging_enabled(pvt)) amd64_dump_dramcfg_low(pvt->dclr1, 1); } /* * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60] */ static void prep_chip_selects(struct amd64_pvt *pvt) { if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) { pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8; pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8; } else { pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8; pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4; } } /* * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers */ static void read_dct_base_mask(struct amd64_pvt *pvt) { int cs; prep_chip_selects(pvt); for_each_chip_select(cs, 0, pvt) { int reg0 = DCSB0 + (cs * 4); int reg1 = DCSB1 + (cs * 4); u32 *base0 = &pvt->csels[0].csbases[cs]; u32 *base1 = &pvt->csels[1].csbases[cs]; if (!amd64_read_dct_pci_cfg(pvt, reg0, base0)) debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n", cs, *base0, reg0); if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt)) continue; if (!amd64_read_dct_pci_cfg(pvt, reg1, base1)) debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n", cs, *base1, reg1); } for_each_chip_select_mask(cs, 0, pvt) { int reg0 = DCSM0 + (cs * 4); int reg1 = DCSM1 + (cs * 4); u32 *mask0 = &pvt->csels[0].csmasks[cs]; u32 *mask1 = &pvt->csels[1].csmasks[cs]; if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0)) debugf0(" DCSM0[%d]=0x%08x reg: 
F2x%x\n", cs, *mask0, reg0); if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt)) continue; if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1)) debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n", cs, *mask1, reg1); } } static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs) { enum mem_type type; /* F15h supports only DDR3 */ if (boot_cpu_data.x86 >= 0x15) type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) { if (pvt->dchr0 & DDR3_MODE) type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; else type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2; } else { type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR; } amd64_info("CS%d: %s\n", cs, edac_mem_types[type]); return type; } /* Get the number of DCT channels the memory controller is using. */ static int k8_early_channel_count(struct amd64_pvt *pvt) { int flag; if (pvt->ext_model >= K8_REV_F) /* RevF (NPT) and later */ flag = pvt->dclr0 & WIDTH_128; else /* RevE and earlier */ flag = pvt->dclr0 & REVE_WIDTH_128; /* not used */ pvt->dclr1 = 0; return (flag) ? 
2 : 1; } /* On F10h and later ErrAddr is MC4_ADDR[47:1] */ static u64 get_error_address(struct mce *m) { struct cpuinfo_x86 *c = &boot_cpu_data; u64 addr; u8 start_bit = 1; u8 end_bit = 47; if (c->x86 == 0xf) { start_bit = 3; end_bit = 39; } addr = m->addr & GENMASK(start_bit, end_bit); /* * Erratum 637 workaround */ if (c->x86 == 0x15) { struct amd64_pvt *pvt; u64 cc6_base, tmp_addr; u32 tmp; u8 mce_nid, intlv_en; if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7) return addr; mce_nid = amd_get_nb_id(m->extcpu); pvt = mcis[mce_nid]->pvt_info; amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp); intlv_en = tmp >> 21 & 0x7; /* add [47:27] + 3 trailing bits */ cc6_base = (tmp & GENMASK(0, 20)) << 3; /* reverse and add DramIntlvEn */ cc6_base |= intlv_en ^ 0x7; /* pin at [47:24] */ cc6_base <<= 24; if (!intlv_en) return cc6_base | (addr & GENMASK(0, 23)); amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp); /* faster log2 */ tmp_addr = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1); /* OR DramIntlvSel into bits [14:12] */ tmp_addr |= (tmp & GENMASK(21, 23)) >> 9; /* add remaining [11:0] bits from original MC4_ADDR */ tmp_addr |= addr & GENMASK(0, 11); return cc6_base | tmp_addr; } return addr; } static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range) { struct cpuinfo_x86 *c = &boot_cpu_data; int off = range << 3; amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo); amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo); if (c->x86 == 0xf) return; if (!dram_rw(pvt, range)) return; amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi); amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi); /* Factor in CC6 save area by reading dst node's limit reg */ if (c->x86 == 0x15) { struct pci_dev *f1 = NULL; u8 nid = dram_dst_node(pvt, range); u32 llim; f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1)); if (WARN_ON(!f1)) return; 
amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim); pvt->ranges[range].lim.lo &= GENMASK(0, 15); /* {[39:27],111b} */ pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16; pvt->ranges[range].lim.hi &= GENMASK(0, 7); /* [47:40] */ pvt->ranges[range].lim.hi |= llim >> 13; pci_dev_put(f1); } } static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, u16 syndrome) { struct mem_ctl_info *src_mci; struct amd64_pvt *pvt = mci->pvt_info; int channel, csrow; u32 page, offset; /* CHIPKILL enabled */ if (pvt->nbcfg & NBCFG_CHIPKILL) { channel = get_channel_from_ecc_syndrome(mci, syndrome); if (channel < 0) { /* * Syndrome didn't map, so we don't know which of the * 2 DIMMs is in error. So we need to ID 'both' of them * as suspect. */ amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible " "error reporting race\n", syndrome); edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); return; } } else { /* * non-chipkill ecc mode * * The k8 documentation is unclear about how to determine the * channel number when using non-chipkill memory. This method * was obtained from email communication with someone at AMD. * (Wish the email was placed in this comment - norsk) */ channel = ((sys_addr & BIT(3)) != 0); } /* * Find out which node the error address belongs to. This may be * different from the node that detected the error. 
*/ src_mci = find_mc_by_sys_addr(mci, sys_addr); if (!src_mci) { amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n", (unsigned long)sys_addr); edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); return; } /* Now map the sys_addr to a CSROW */ csrow = sys_addr_to_csrow(src_mci, sys_addr); if (csrow < 0) { edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR); } else { error_address_to_page_and_offset(sys_addr, &page, &offset); edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow, channel, EDAC_MOD_STR); } } static int ddr2_cs_size(unsigned i, bool dct_width) { unsigned shift = 0; if (i <= 2) shift = i; else if (!(i & 0x1)) shift = i >> 1; else shift = (i + 1) >> 1; return 128 << (shift + !!dct_width); } static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, unsigned cs_mode) { u32 dclr = dct ? pvt->dclr1 : pvt->dclr0; if (pvt->ext_model >= K8_REV_F) { WARN_ON(cs_mode > 11); return ddr2_cs_size(cs_mode, dclr & WIDTH_128); } else if (pvt->ext_model >= K8_REV_D) { unsigned diff; WARN_ON(cs_mode > 10); /* * the below calculation, besides trying to win an obfuscated C * contest, maps cs_mode values to DIMM chip select sizes. The * mappings are: * * cs_mode CS size (mb) * ======= ============ * 0 32 * 1 64 * 2 128 * 3 128 * 4 256 * 5 512 * 6 256 * 7 512 * 8 1024 * 9 1024 * 10 2048 * * Basically, it calculates a value with which to shift the * smallest CS size of 32MB. * * ddr[23]_cs_size have a similar purpose. */ diff = cs_mode/3 + (unsigned)(cs_mode > 5); return 32 << (cs_mode - diff); } else { WARN_ON(cs_mode > 6); return 32 << cs_mode; } } /* * Get the number of DCT channels in use. 
* * Return: * number of Memory Channels in operation * Pass back: * contents of the DCL0_LOW register */ static int f1x_early_channel_count(struct amd64_pvt *pvt) { int i, j, channels = 0; /* On F10h, if we are in 128 bit mode, then we are using 2 channels */ if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128)) return 2; /* * Need to check if in unganged mode: In such, there are 2 channels, * but they are not in 128 bit mode and thus the above 'dclr0' status * bit will be OFF. * * Need to check DCT0[0] and DCT1[0] to see if only one of them has * their CSEnable bit on. If so, then SINGLE DIMM case. */ debugf0("Data width is not 128 bits - need more decoding\n"); /* * Check DRAM Bank Address Mapping values for each DIMM to see if there * is more than just one DIMM present in unganged mode. Need to check * both controllers since DIMMs can be placed in either one. */ for (i = 0; i < 2; i++) { u32 dbam = (i ? pvt->dbam1 : pvt->dbam0); for (j = 0; j < 4; j++) { if (DBAM_DIMM(j, dbam) > 0) { channels++; break; } } } if (channels > 2) channels = 2; amd64_info("MCT channel count: %d\n", channels); return channels; } static int ddr3_cs_size(unsigned i, bool dct_width) { unsigned shift = 0; int cs_size = 0; if (i == 0 || i == 3 || i == 4) cs_size = -1; else if (i <= 2) shift = i; else if (i == 12) shift = 7; else if (!(i & 0x1)) shift = i >> 1; else shift = (i + 1) >> 1; if (cs_size != -1) cs_size = (128 * (1 << !!dct_width)) << shift; return cs_size; } static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, unsigned cs_mode) { u32 dclr = dct ? 
pvt->dclr1 : pvt->dclr0;

	WARN_ON(cs_mode > 11);

	/* DDR3 mode is flagged in the DCT configuration high registers */
	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
	else
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}

/*
 * F15h supports only 64bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode)
{
	WARN_ON(cs_mode > 12);

	/* always DDR3, never 128-bit wide */
	return ddr3_cs_size(cs_mode, false);
}

/*
 * Read and dump the DCT select low/high registers (F2x110/F2x114) into
 * pvt->dct_sel_lo / pvt->dct_sel_hi. K8 (family 0xf) has no such
 * registers, so nothing to do there.
 */
static void read_dram_ctl_register(struct amd64_pvt *pvt)
{

	if (boot_cpu_data.x86 == 0xf)
		return;

	if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
		debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
			pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

		debugf0(" DCTs operate in %s mode.\n",
			(dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

		if (!dct_ganging_enabled(pvt))
			debugf0(" Address range split per DCT: %s\n",
				(dct_high_range_enabled(pvt) ? "yes" : "no"));

		debugf0(" data interleave for ECC: %s, "
			"DRAM cleared since last warm reset: %s\n",
			(dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			(dct_memory_cleared(pvt) ? "yes" : "no"));

		debugf0(" channel interleave: %s, "
			"interleave bits selector: 0x%x\n",
			(dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			dct_sel_interleave_addr(pvt));
	}

	amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
}

/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
 * Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				bool hi_range_sel, u8 intlv_en)
{
	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;

	/* ganged: both DCTs act as one, always DCT0 */
	if (dct_ganging_enabled(pvt))
		return 0;

	/* high range addresses go to the DCT selected by F2x110[1] */
	if (hi_range_sel)
		return dct_sel_high;

	/*
	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
	 */
	if (dct_interleave_enabled(pvt)) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		/* return DCT select function: 0=DCT0, 1=DCT1 */
		if (!intlv_addr)
			return sys_addr >> 6 & 1;

		if (intlv_addr & 0x2) {
			u8 shift = intlv_addr & 0x1 ?
9 : 6; u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2; return ((sys_addr >> shift) & 1) ^ temp; } return (sys_addr >> (12 + hweight8(intlv_en))) & 1; } if (dct_high_range_enabled(pvt)) return ~dct_sel_high & 1; return 0; } /* Convert the sys_addr to the normalized DCT address */ static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range, u64 sys_addr, bool hi_rng, u32 dct_sel_base_addr) { u64 chan_off; u64 dram_base = get_dram_base(pvt, range); u64 hole_off = f10_dhar_offset(pvt); u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16; if (hi_rng) { /* * if * base address of high range is below 4Gb * (bits [47:27] at [31:11]) * DRAM address space on this DCT is hoisted above 4Gb && * sys_addr > 4Gb * * remove hole offset from sys_addr * else * remove high range offset from sys_addr */ if ((!(dct_sel_base_addr >> 16) || dct_sel_base_addr < dhar_base(pvt)) && dhar_valid(pvt) && (sys_addr >= BIT_64(32))) chan_off = hole_off; else chan_off = dct_sel_base_off; } else { /* * if * we have a valid hole && * sys_addr > 4Gb * * remove hole * else * remove dram base to normalize to DCT address */ if (dhar_valid(pvt) && (sys_addr >= BIT_64(32))) chan_off = hole_off; else chan_off = dram_base; } return (sys_addr & GENMASK(6,47)) - (chan_off & GENMASK(23,47)); } /* * checks if the csrow passed in is marked as SPARED, if so returns the new * spare row */ static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow) { int tmp_cs; if (online_spare_swap_done(pvt, dct) && csrow == online_spare_bad_dramcs(pvt, dct)) { for_each_chip_select(tmp_cs, dct, pvt) { if (chip_select_base(tmp_cs, dct, pvt) & 0x2) { csrow = tmp_cs; break; } } } return csrow; } /* * Iterate over the DRAM DCT "base" and "mask" registers looking for a * SystemAddr match on the specified 'ChannelSelect' and 'NodeID' * * Return: * -EINVAL: NOT FOUND * 0..csrow = Chip-Select Row */ static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct) { struct mem_ctl_info 
*mci; struct amd64_pvt *pvt; u64 cs_base, cs_mask; int cs_found = -EINVAL; int csrow; mci = mcis[nid]; if (!mci) return cs_found; pvt = mci->pvt_info; debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct); for_each_chip_select(csrow, dct, pvt) { if (!csrow_enabled(csrow, dct, pvt)) continue; get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask); debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n", csrow, cs_base, cs_mask); cs_mask = ~cs_mask; debugf1(" (InputAddr & ~CSMask)=0x%llx " "(CSBase & ~CSMask)=0x%llx\n", (in_addr & cs_mask), (cs_base & cs_mask)); if ((in_addr & cs_mask) == (cs_base & cs_mask)) { cs_found = f10_process_possible_spare(pvt, dct, csrow); debugf1(" MATCH csrow=%d\n", cs_found); break; } } return cs_found; } /* * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is * swapped with a region located at the bottom of memory so that the GPU can use * the interleaved region and thus two channels. */ static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr) { u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr; if (boot_cpu_data.x86 == 0x10) { /* only revC3 and revE have that feature */ if (boot_cpu_data.x86_model < 4 || (boot_cpu_data.x86_model < 0xa && boot_cpu_data.x86_mask < 3)) return sys_addr; } amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg); if (!(swap_reg & 0x1)) return sys_addr; swap_base = (swap_reg >> 3) & 0x7f; swap_limit = (swap_reg >> 11) & 0x7f; rgn_size = (swap_reg >> 20) & 0x7f; tmp_addr = sys_addr >> 27; if (!(sys_addr >> 34) && (((tmp_addr >= swap_base) && (tmp_addr <= swap_limit)) || (tmp_addr < rgn_size))) return sys_addr ^ (u64)swap_base << 27; return sys_addr; } /* For a given @dram_range, check if @sys_addr falls within it. 
*/ static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range, u64 sys_addr, int *nid, int *chan_sel) { int cs_found = -EINVAL; u64 chan_addr; u32 dct_sel_base; u8 channel; bool high_range = false; u8 node_id = dram_dst_node(pvt, range); u8 intlv_en = dram_intlv_en(pvt, range); u32 intlv_sel = dram_intlv_sel(pvt, range); debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n", range, sys_addr, get_dram_limit(pvt, range)); if (dhar_valid(pvt) && dhar_base(pvt) <= sys_addr && sys_addr < BIT_64(32)) { amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n", sys_addr); return -EINVAL; } if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en))) return -EINVAL; sys_addr = f1x_swap_interleaved_region(pvt, sys_addr); dct_sel_base = dct_sel_baseaddr(pvt); /* * check whether addresses >= DctSelBaseAddr[47:27] are to be used to * select between DCT0 and DCT1. */ if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt) && ((sys_addr >> 27) >= (dct_sel_base >> 11))) high_range = true; channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en); chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr, high_range, dct_sel_base); /* Remove node interleaving, see F1x120 */ if (intlv_en) chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) | (chan_addr & 0xfff); /* remove channel interleave */ if (dct_interleave_enabled(pvt) && !dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt)) { if (dct_sel_interleave_addr(pvt) != 1) { if (dct_sel_interleave_addr(pvt) == 0x3) /* hash 9 */ chan_addr = ((chan_addr >> 10) << 9) | (chan_addr & 0x1ff); else /* A[6] or hash 6 */ chan_addr = ((chan_addr >> 7) << 6) | (chan_addr & 0x3f); } else /* A[12] */ chan_addr = ((chan_addr >> 13) << 12) | (chan_addr & 0xfff); } debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr); cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel); if (cs_found >= 0) { *nid = node_id; *chan_sel = channel; } return cs_found; } static int f1x_translate_sysaddr_to_cs(struct 
amd64_pvt *pvt, u64 sys_addr, int *node, int *chan_sel) { int cs_found = -EINVAL; unsigned range; for (range = 0; range < DRAM_RANGES; range++) { if (!dram_rw(pvt, range)) continue; if ((get_dram_base(pvt, range) <= sys_addr) && (get_dram_limit(pvt, range) >= sys_addr)) { cs_found = f1x_match_to_this_node(pvt, range, sys_addr, node, chan_sel); if (cs_found >= 0) break; } } return cs_found; } /* * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW). * * The @sys_addr is usually an error address received from the hardware * (MCX_ADDR). */ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, u16 syndrome) { struct amd64_pvt *pvt = mci->pvt_info; u32 page, offset; int nid, csrow, chan = 0; csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); if (csrow < 0) { edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); return; } error_address_to_page_and_offset(sys_addr, &page, &offset); /* * We need the syndromes for channel detection only when we're * ganged. Otherwise @chan should already contain the channel at * this point. */ if (dct_ganging_enabled(pvt)) chan = get_channel_from_ecc_syndrome(mci, syndrome); if (chan >= 0) edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan, EDAC_MOD_STR); else /* * Channel unknown, report all channels on this CSROW as failed. */ for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++) edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan, EDAC_MOD_STR); } /* * debug routine to display the memory sizes of all logical DIMMs and its * CSROWs */ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) { int dimm, size0, size1, factor = 0; u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; u32 dbam = ctrl ? 
pvt->dbam1 : pvt->dbam0; if (boot_cpu_data.x86 == 0xf) { if (pvt->dclr0 & WIDTH_128) factor = 1; /* K8 families < revF not supported yet */ if (pvt->ext_model < K8_REV_F) return; else WARN_ON(ctrl != 0); } dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0; dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases : pvt->csels[0].csbases; debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam); edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); /* Dump memory sizes for DIMM and its CSROWs */ for (dimm = 0; dimm < 4; dimm++) { size0 = 0; if (dcsb[dimm*2] & DCSB_CS_ENABLE) size0 = pvt->ops->dbam_to_cs(pvt, ctrl, DBAM_DIMM(dimm, dbam)); size1 = 0; if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE) size1 = pvt->ops->dbam_to_cs(pvt, ctrl, DBAM_DIMM(dimm, dbam)); amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", dimm * 2, size0 << factor, dimm * 2 + 1, size1 << factor); } } static struct amd64_family_type amd64_family_types[] = { [K8_CPUS] = { .ctl_name = "K8", .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC, .ops = { .early_channel_count = k8_early_channel_count, .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, .dbam_to_cs = k8_dbam_to_chip_select, .read_dct_pci_cfg = k8_read_dct_pci_cfg, } }, [F10_CPUS] = { .ctl_name = "F10h", .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP, .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC, .ops = { .early_channel_count = f1x_early_channel_count, .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, .dbam_to_cs = f10_dbam_to_chip_select, .read_dct_pci_cfg = f10_read_dct_pci_cfg, } }, [F15_CPUS] = { .ctl_name = "F15h", .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1, .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3, .ops = { .early_channel_count = f1x_early_channel_count, .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, .dbam_to_cs = f15_dbam_to_chip_select, .read_dct_pci_cfg = f15_read_dct_pci_cfg, } }, }; static struct pci_dev *pci_get_related_function(unsigned int vendor, unsigned int device, struct pci_dev 
*related) { struct pci_dev *dev = NULL; dev = pci_get_device(vendor, device, dev); while (dev) { if ((dev->bus->number == related->bus->number) && (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn))) break; dev = pci_get_device(vendor, device, dev); } return dev; } /* * These are tables of eigenvectors (one per line) which can be used for the * construction of the syndrome tables. The modified syndrome search algorithm * uses those to find the symbol in error and thus the DIMM. * * Algorithm courtesy of Ross LaFetra from AMD. */ static u16 x4_vectors[] = { 0x2f57, 0x1afe, 0x66cc, 0xdd88, 0x11eb, 0x3396, 0x7f4c, 0xeac8, 0x0001, 0x0002, 0x0004, 0x0008, 0x1013, 0x3032, 0x4044, 0x8088, 0x106b, 0x30d6, 0x70fc, 0xe0a8, 0x4857, 0xc4fe, 0x13cc, 0x3288, 0x1ac5, 0x2f4a, 0x5394, 0xa1e8, 0x1f39, 0x251e, 0xbd6c, 0x6bd8, 0x15c1, 0x2a42, 0x89ac, 0x4758, 0x2b03, 0x1602, 0x4f0c, 0xca08, 0x1f07, 0x3a0e, 0x6b04, 0xbd08, 0x8ba7, 0x465e, 0x244c, 0x1cc8, 0x2b87, 0x164e, 0x642c, 0xdc18, 0x40b9, 0x80de, 0x1094, 0x20e8, 0x27db, 0x1eb6, 0x9dac, 0x7b58, 0x11c1, 0x2242, 0x84ac, 0x4c58, 0x1be5, 0x2d7a, 0x5e34, 0xa718, 0x4b39, 0x8d1e, 0x14b4, 0x28d8, 0x4c97, 0xc87e, 0x11fc, 0x33a8, 0x8e97, 0x497e, 0x2ffc, 0x1aa8, 0x16b3, 0x3d62, 0x4f34, 0x8518, 0x1e2f, 0x391a, 0x5cac, 0xf858, 0x1d9f, 0x3b7a, 0x572c, 0xfe18, 0x15f5, 0x2a5a, 0x5264, 0xa3b8, 0x1dbb, 0x3b66, 0x715c, 0xe3f8, 0x4397, 0xc27e, 0x17fc, 0x3ea8, 0x1617, 0x3d3e, 0x6464, 0xb8b8, 0x23ff, 0x12aa, 0xab6c, 0x56d8, 0x2dfb, 0x1ba6, 0x913c, 0x7328, 0x185d, 0x2ca6, 0x7914, 0x9e28, 0x171b, 0x3e36, 0x7d7c, 0xebe8, 0x4199, 0x82ee, 0x19f4, 0x2e58, 0x4807, 0xc40e, 0x130c, 0x3208, 0x1905, 0x2e0a, 0x5804, 0xac08, 0x213f, 0x132a, 0xadfc, 0x5ba8, 0x19a9, 0x2efe, 0xb5cc, 0x6f88, }; static u16 x8_vectors[] = { 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480, 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80, 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80, 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 
0xab80, 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780, 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080, 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080, 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080, 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80, 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580, 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880, 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280, 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180, 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580, 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280, 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180, 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000, }; static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs, unsigned v_dim) { unsigned int i, err_sym; for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) { u16 s = syndrome; unsigned v_idx = err_sym * v_dim; unsigned v_end = (err_sym + 1) * v_dim; /* walk over all 16 bits of the syndrome */ for (i = 1; i < (1U << 16); i <<= 1) { /* if bit is set in that eigenvector... */ if (v_idx < v_end && vectors[v_idx] & i) { u16 ev_comp = vectors[v_idx++]; /* ... and bit set in the modified syndrome, */ if (s & i) { /* remove it. 
*/ s ^= ev_comp; if (!s) return err_sym; } } else if (s & i) /* can't get to zero, move to next symbol */ break; } } debugf0("syndrome(%x) not found\n", syndrome); return -1; } static int map_err_sym_to_channel(int err_sym, int sym_size) { if (sym_size == 4) switch (err_sym) { case 0x20: case 0x21: return 0; break; case 0x22: case 0x23: return 1; break; default: return err_sym >> 4; break; } /* x8 symbols */ else switch (err_sym) { /* imaginary bits not in a DIMM */ case 0x10: WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n", err_sym); return -1; break; case 0x11: return 0; break; case 0x12: return 1; break; default: return err_sym >> 3; break; } return -1; } static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome) { struct amd64_pvt *pvt = mci->pvt_info; int err_sym = -1; if (pvt->ecc_sym_sz == 8) err_sym = decode_syndrome(syndrome, x8_vectors, ARRAY_SIZE(x8_vectors), pvt->ecc_sym_sz); else if (pvt->ecc_sym_sz == 4) err_sym = decode_syndrome(syndrome, x4_vectors, ARRAY_SIZE(x4_vectors), pvt->ecc_sym_sz); else { amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz); return err_sym; } return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz); } /* * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR * ADDRESS and process. 
 */
static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 sys_addr;
	u16 syndrome;

	/* Ensure that the Error Address is VALID */
	if (!(m->status & MCI_STATUS_ADDRV)) {
		amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
		return;
	}

	sys_addr = get_error_address(m);
	syndrome = extract_syndrome(m->status);

	amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);

	/* family-specific mapping of sys_addr to node/csrow/channel */
	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome);
}

/* Handle any Un-correctable Errors (UEs) */
static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
{
	struct mem_ctl_info *log_mci, *src_mci = NULL;
	int csrow;
	u64 sys_addr;
	u32 page, offset;

	/* log against the detecting MC unless the owning node can be found */
	log_mci = mci;

	if (!(m->status & MCI_STATUS_ADDRV)) {
		amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
		return;
	}

	sys_addr = get_error_address(m);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
*/ src_mci = find_mc_by_sys_addr(mci, sys_addr); if (!src_mci) { amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n", (unsigned long)sys_addr); edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); return; } log_mci = src_mci; csrow = sys_addr_to_csrow(log_mci, sys_addr); if (csrow < 0) { amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n", (unsigned long)sys_addr); edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); } else { error_address_to_page_and_offset(sys_addr, &page, &offset); edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR); } } static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, struct mce *m) { u16 ec = EC(m->status); u8 xec = XEC(m->status, 0x1f); u8 ecc_type = (m->status >> 45) & 0x3; /* Bail early out if this was an 'observed' error */ if (PP(ec) == NBSL_PP_OBS) return; /* Do only ECC errors */ if (xec && xec != F10_NBSL_EXT_ERR_ECC) return; if (ecc_type == 2) amd64_handle_ce(mci, m); else if (ecc_type == 1) amd64_handle_ue(mci, m); } void amd64_decode_bus_error(int node_id, struct mce *m) { __amd64_decode_bus_error(mcis[node_id], m); } /* * Use pvt->F2 which contains the F2 CPU PCI device to get the related * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error. 
*/ static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id) { /* Reserve the ADDRESS MAP Device */ pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2); if (!pvt->F1) { amd64_err("error address map device not found: " "vendor %x device 0x%x (broken BIOS?)\n", PCI_VENDOR_ID_AMD, f1_id); return -ENODEV; } /* Reserve the MISC Device */ pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2); if (!pvt->F3) { pci_dev_put(pvt->F1); pvt->F1 = NULL; amd64_err("error F3 device not found: " "vendor %x device 0x%x (broken BIOS?)\n", PCI_VENDOR_ID_AMD, f3_id); return -ENODEV; } debugf1("F1: %s\n", pci_name(pvt->F1)); debugf1("F2: %s\n", pci_name(pvt->F2)); debugf1("F3: %s\n", pci_name(pvt->F3)); return 0; } static void free_mc_sibling_devs(struct amd64_pvt *pvt) { pci_dev_put(pvt->F1); pci_dev_put(pvt->F3); } /* * Retrieve the hardware registers of the memory controller (this includes the * 'Address Map' and 'Misc' device regs) */ static void read_mc_regs(struct amd64_pvt *pvt) { struct cpuinfo_x86 *c = &boot_cpu_data; u64 msr_val; u32 tmp; unsigned range; /* * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since * those are Read-As-Zero */ rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem); debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem); /* check first whether TOP_MEM2 is enabled */ rdmsrl(MSR_K8_SYSCFG, msr_val); if (msr_val & (1U << 21)) { rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2); debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2); } else debugf0(" TOP_MEM2 disabled.\n"); amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap); read_dram_ctl_register(pvt); for (range = 0; range < DRAM_RANGES; range++) { u8 rw; /* read settings for this DRAM range */ read_dram_base_limit_regs(pvt, range); rw = dram_rw(pvt, range); if (!rw) continue; debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n", range, get_dram_base(pvt, range), get_dram_limit(pvt, range)); debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n", 
dram_intlv_en(pvt, range) ? "Enabled" : "Disabled", (rw & 0x1) ? "R" : "-", (rw & 0x2) ? "W" : "-", dram_intlv_sel(pvt, range), dram_dst_node(pvt, range)); } read_dct_base_mask(pvt); amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar); amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0); amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare); amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0); amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0); if (!dct_ganging_enabled(pvt)) { amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1); amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1); } pvt->ecc_sym_sz = 4; if (c->x86 >= 0x10) { amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp); amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1); /* F10h, revD and later can do x8 ECC too */ if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25)) pvt->ecc_sym_sz = 8; } dump_misc_regs(pvt); } /* * NOTE: CPU Revision Dependent code * * Input: * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1) * k8 private pointer to --> * DRAM Bank Address mapping register * node_id * DCL register where dual_channel_active is * * The DBAM register consists of 4 sets of 4 bits each definitions: * * Bits: CSROWs * 0-3 CSROWs 0 and 1 * 4-7 CSROWs 2 and 3 * 8-11 CSROWs 4 and 5 * 12-15 CSROWs 6 and 7 * * Values range from: 0 to 15 * The meaning of the values depends on CPU revision and dual-channel state, * see relevant BKDG more info. * * The memory controller provides for total of only 8 CSROWs in its current * architecture. Each "pair" of CSROWs normally represents just one DIMM in * single channel or two (2) DIMMs in dual channel mode. * * The following code logic collapses the various tables for CSROW based on CPU * revision. * * Returns: * The number of PAGE_SIZE pages on the specified CSROW number it * encompasses * */ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) { u32 cs_mode, nr_pages; u32 dbam = dct ? 
pvt->dbam1 : pvt->dbam0; /* * The math on this doesn't look right on the surface because x/2*4 can * be simplified to x*2 but this expression makes use of the fact that * it is integral math where 1/2=0. This intermediate value becomes the * number of bits to shift the DBAM register to extract the proper CSROW * field. */ cs_mode = (dbam >> ((csrow_nr / 2) * 4)) & 0xF; nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT); debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode); debugf0(" nr_pages= %u channel-count = %d\n", nr_pages, pvt->channel_count); return nr_pages; } /* * Initialize the array of csrow attribute instances, based on the values * from pci config hardware registers. */ static int init_csrows(struct mem_ctl_info *mci) { struct csrow_info *csrow; struct amd64_pvt *pvt = mci->pvt_info; u64 input_addr_min, input_addr_max, sys_addr, base, mask; u32 val; int i, empty = 1; amd64_read_pci_cfg(pvt->F3, NBCFG, &val); pvt->nbcfg = val; debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", pvt->mc_node_id, val, !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE)); for_each_chip_select(i, 0, pvt) { csrow = &mci->csrows[i]; if (!csrow_enabled(i, 0, pvt) && !csrow_enabled(i, 1, pvt)) { debugf1("----CSROW %d EMPTY for node %d\n", i, pvt->mc_node_id); continue; } debugf1("----CSROW %d VALID for MC node %d\n", i, pvt->mc_node_id); empty = 0; if (csrow_enabled(i, 0, pvt)) csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i); if (csrow_enabled(i, 1, pvt)) csrow->nr_pages += amd64_csrow_nr_pages(pvt, 1, i); find_csrow_limits(mci, i, &input_addr_min, &input_addr_max); sys_addr = input_addr_to_sys_addr(mci, input_addr_min); csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT); sys_addr = input_addr_to_sys_addr(mci, input_addr_max); csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT); get_cs_base_and_mask(pvt, i, 0, &base, &mask); csrow->page_mask = ~mask; /* 8 bytes of resolution */ csrow->mtype = amd64_determine_memory_type(pvt, i); 
debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i); debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n", (unsigned long)input_addr_min, (unsigned long)input_addr_max); debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n", (unsigned long)sys_addr, csrow->page_mask); debugf1(" nr_pages: %u first_page: 0x%lx " "last_page: 0x%lx\n", (unsigned)csrow->nr_pages, csrow->first_page, csrow->last_page); /* * determine whether CHIPKILL or JUST ECC or NO ECC is operating */ if (pvt->nbcfg & NBCFG_ECC_ENABLE) csrow->edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ? EDAC_S4ECD4ED : EDAC_SECDED; else csrow->edac_mode = EDAC_NONE; } return empty; } /* get all cores on this DCT */ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid) { int cpu; for_each_online_cpu(cpu) if (amd_get_nb_id(cpu) == nid) cpumask_set_cpu(cpu, mask); } /* check MCG_CTL on all the cpus on this node */ static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid) { cpumask_var_t mask; int cpu, nbe; bool ret = false; if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { amd64_warn("%s: Error allocating mask\n", __func__); return false; } get_cpus_on_this_dct_cpumask(mask, nid); rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs); for_each_cpu(cpu, mask) { struct msr *reg = per_cpu_ptr(msrs, cpu); nbe = reg->l & MSR_MCGCTL_NBE; debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", cpu, reg->q, (nbe ? 
"enabled" : "disabled")); if (!nbe) goto out; } ret = true; out: free_cpumask_var(mask); return ret; } static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on) { cpumask_var_t cmask; int cpu; if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) { amd64_warn("%s: error allocating mask\n", __func__); return false; } get_cpus_on_this_dct_cpumask(cmask, nid); rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); for_each_cpu(cpu, cmask) { struct msr *reg = per_cpu_ptr(msrs, cpu); if (on) { if (reg->l & MSR_MCGCTL_NBE) s->flags.nb_mce_enable = 1; reg->l |= MSR_MCGCTL_NBE; } else { /* * Turn off NB MCE reporting only when it was off before */ if (!s->flags.nb_mce_enable) reg->l &= ~MSR_MCGCTL_NBE; } } wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); free_cpumask_var(cmask); return 0; } static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid, struct pci_dev *F3) { bool ret = true; u32 value, mask = 0x3; /* UECC/CECC enable */ if (toggle_ecc_err_reporting(s, nid, ON)) { amd64_warn("Error enabling ECC reporting over MCGCTL!\n"); return false; } amd64_read_pci_cfg(F3, NBCTL, &value); s->old_nbctl = value & mask; s->nbctl_valid = true; value |= mask; amd64_write_pci_cfg(F3, NBCTL, value); amd64_read_pci_cfg(F3, NBCFG, &value); debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", nid, value, !!(value & NBCFG_ECC_ENABLE)); if (!(value & NBCFG_ECC_ENABLE)) { amd64_warn("DRAM ECC disabled on this node, enabling...\n"); s->flags.nb_ecc_prev = 0; /* Attempt to turn on DRAM ECC Enable */ value |= NBCFG_ECC_ENABLE; amd64_write_pci_cfg(F3, NBCFG, value); amd64_read_pci_cfg(F3, NBCFG, &value); if (!(value & NBCFG_ECC_ENABLE)) { amd64_warn("Hardware rejected DRAM ECC enable," "check memory DIMM configuration.\n"); ret = false; } else { amd64_info("Hardware accepted DRAM ECC Enable\n"); } } else { s->flags.nb_ecc_prev = 1; } debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", nid, value, !!(value & NBCFG_ECC_ENABLE)); return ret; } static void 
restore_ecc_error_reporting(struct ecc_settings *s, u8 nid, struct pci_dev *F3)
{
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	/* nothing to undo if enable_ecc_error_reporting() never saved NBCTL */
	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, NBCFG, &value);
		value &= ~NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}

/*
 * EDAC requires that the BIOS have ECC enabled before
 * taking over the processing of ECC errors. A command line
 * option allows to force-enable hardware ECC later in
 * enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";

/*
 * Check that DRAM ECC is enabled in NBCFG and the NB MCE bank is on for
 * node @nid - both must hold for the driver to take over error handling.
 */
static bool ecc_enabled(struct pci_dev *F3, u8 nid)
{
	u32 value;
	u8 ecc_en = 0;
	bool nb_mce_en = false;

	amd64_read_pci_cfg(F3, NBCFG, &value);

	ecc_en = !!(value & NBCFG_ECC_ENABLE);
	amd64_info("DRAM ECC %s.\n", (ecc_en ?
"enabled" : "disabled")); nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid); if (!nb_mce_en) amd64_notice("NB MCE bank disabled, set MSR " "0x%08x[4] on node %d to enable.\n", MSR_IA32_MCG_CTL, nid); if (!ecc_en || !nb_mce_en) { amd64_notice("%s", ecc_msg); return false; } return true; } struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) + ARRAY_SIZE(amd64_inj_attrs) + 1]; struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } }; static void set_mc_sysfs_attrs(struct mem_ctl_info *mci) { unsigned int i = 0, j = 0; for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++) sysfs_attrs[i] = amd64_dbg_attrs[i]; if (boot_cpu_data.x86 >= 0x10) for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++) sysfs_attrs[i] = amd64_inj_attrs[j]; sysfs_attrs[i] = terminator; mci->mc_driver_sysfs_attributes = sysfs_attrs; } static void setup_mci_misc_attrs(struct mem_ctl_info *mci, struct amd64_family_type *fam) { struct amd64_pvt *pvt = mci->pvt_info; mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; mci->edac_ctl_cap = EDAC_FLAG_NONE; if (pvt->nbcap & NBCAP_SECDED) mci->edac_ctl_cap |= EDAC_FLAG_SECDED; if (pvt->nbcap & NBCAP_CHIPKILL) mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; mci->edac_cap = amd64_determine_edac_cap(pvt); mci->mod_name = EDAC_MOD_STR; mci->mod_ver = EDAC_AMD64_VERSION; mci->ctl_name = fam->ctl_name; mci->dev_name = pci_name(pvt->F2); mci->ctl_page_to_phys = NULL; /* memory scrubber interface */ mci->set_sdram_scrub_rate = amd64_set_scrub_rate; mci->get_sdram_scrub_rate = amd64_get_scrub_rate; } /* * returns a pointer to the family descriptor on success, NULL otherwise. 
 */
static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
{
	u8 fam = boot_cpu_data.x86;
	struct amd64_family_type *fam_type = NULL;

	switch (fam) {
	case 0xf:
		fam_type	= &amd64_family_types[K8_CPUS];
		pvt->ops	= &amd64_family_types[K8_CPUS].ops;
		break;

	case 0x10:
		fam_type	= &amd64_family_types[F10_CPUS];
		pvt->ops	= &amd64_family_types[F10_CPUS].ops;
		break;

	case 0x15:
		fam_type	= &amd64_family_types[F15_CPUS];
		pvt->ops	= &amd64_family_types[F15_CPUS].ops;
		break;

	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	/* extended model distinguishes K8 revisions (revE vs revF+) */
	pvt->ext_model = boot_cpu_data.x86_model >> 4;

	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
		     (fam == 0xf ?
				(pvt->ext_model >= K8_REV_F  ? "revF or later "
							     : "revE or earlier ")
				 : ""), pvt->mc_node_id);
	return fam_type;
}

/*
 * Set up one EDAC memory-controller instance for the node owning DRAM
 * controller device @F2: allocate the driver-private data, pick the
 * per-family ops, reserve the sibling F1/F3 PCI functions and read the
 * hardware registers. Returns 0 on success, negative errno otherwise.
 */
static int amd64_init_one_instance(struct pci_dev *F2)
{
	struct amd64_pvt *pvt = NULL;
	struct amd64_family_type *fam_type = NULL;
	struct mem_ctl_info *mci = NULL;
	int err = 0, ret;
	u8 nid = get_node_id(F2);

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_ret;

	pvt->mc_node_id	= nid;
	pvt->F2 = F2;

	ret = -EINVAL;
	fam_type = amd64_per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	ret = -ENODEV;
	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
	if (err)
		goto err_free;

	read_mc_regs(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
*/ ret = -EINVAL; pvt->channel_count = pvt->ops->early_channel_count(pvt); if (pvt->channel_count < 0) goto err_siblings; ret = -ENOMEM; mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid); if (!mci) goto err_siblings; mci->pvt_info = pvt; mci->dev = &pvt->F2->dev; setup_mci_misc_attrs(mci, fam_type); if (init_csrows(mci)) mci->edac_cap = EDAC_FLAG_NONE; set_mc_sysfs_attrs(mci); ret = -ENODEV; if (edac_mc_add_mc(mci)) { debugf1("failed edac_mc_add_mc()\n"); goto err_add_mc; } /* register stuff with EDAC MCE */ if (report_gart_errors) amd_report_gart_errors(true); amd_register_ecc_decoder(amd64_decode_bus_error); mcis[nid] = mci; atomic_inc(&drv_instances); return 0; err_add_mc: edac_mc_free(mci); err_siblings: free_mc_sibling_devs(pvt); err_free: kfree(pvt); err_ret: return ret; } static int __devinit amd64_probe_one_instance(struct pci_dev *pdev, const struct pci_device_id *mc_type) { u8 nid = get_node_id(pdev); struct pci_dev *F3 = node_to_amd_nb(nid)->misc; struct ecc_settings *s; int ret = 0; ret = pci_enable_device(pdev); if (ret < 0) { debugf0("ret=%d\n", ret); return -EIO; } ret = -ENOMEM; s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL); if (!s) goto err_out; ecc_stngs[nid] = s; if (!ecc_enabled(F3, nid)) { ret = -ENODEV; if (!ecc_enable_override) goto err_enable; amd64_warn("Forcing ECC on!\n"); if (!enable_ecc_error_reporting(s, nid, F3)) goto err_enable; } ret = amd64_init_one_instance(pdev); if (ret < 0) { amd64_err("Error probing instance: %d\n", nid); restore_ecc_error_reporting(s, nid, F3); } return ret; err_enable: kfree(s); ecc_stngs[nid] = NULL; err_out: return ret; } static void __devexit amd64_remove_one_instance(struct pci_dev *pdev) { struct mem_ctl_info *mci; struct amd64_pvt *pvt; u8 nid = get_node_id(pdev); struct pci_dev *F3 = node_to_amd_nb(nid)->misc; struct ecc_settings *s = ecc_stngs[nid]; /* Remove from EDAC CORE tracking list */ mci = edac_mc_del_mc(&pdev->dev); if (!mci) return; pvt = mci->pvt_info; 
restore_ecc_error_reporting(s, nid, F3); free_mc_sibling_devs(pvt); /* unregister from EDAC MCE */ amd_report_gart_errors(false); amd_unregister_ecc_decoder(amd64_decode_bus_error); kfree(ecc_stngs[nid]); ecc_stngs[nid] = NULL; /* Free the EDAC CORE resources */ mci->pvt_info = NULL; mcis[nid] = NULL; kfree(pvt); edac_mc_free(mci); } /* * This table is part of the interface for loading drivers for PCI devices. The * PCI core identifies what devices are on a system during boot, and then * inquiry this table to see if this driver is for a given device found. */ static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = { { .vendor = PCI_VENDOR_ID_AMD, .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .class = 0, .class_mask = 0, }, { .vendor = PCI_VENDOR_ID_AMD, .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .class = 0, .class_mask = 0, }, { .vendor = PCI_VENDOR_ID_AMD, .device = PCI_DEVICE_ID_AMD_15H_NB_F2, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .class = 0, .class_mask = 0, }, {0, } }; MODULE_DEVICE_TABLE(pci, amd64_pci_table); static struct pci_driver amd64_pci_driver = { .name = EDAC_MOD_STR, .probe = amd64_probe_one_instance, .remove = __devexit_p(amd64_remove_one_instance), .id_table = amd64_pci_table, }; static void setup_pci_device(void) { struct mem_ctl_info *mci; struct amd64_pvt *pvt; if (amd64_ctl_pci) return; mci = mcis[0]; if (mci) { pvt = mci->pvt_info; amd64_ctl_pci = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR); if (!amd64_ctl_pci) { pr_warning("%s(): Unable to create PCI control\n", __func__); pr_warning("%s(): PCI error report via EDAC not set\n", __func__); } } } static int __init amd64_edac_init(void) { int err = -ENODEV; printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION); opstate_init(); if (amd_cache_northbridges() < 0) goto err_ret; err = -ENOMEM; mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL); ecc_stngs = 
kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL); if (!(mcis && ecc_stngs)) goto err_free; msrs = msrs_alloc(); if (!msrs) goto err_free; err = pci_register_driver(&amd64_pci_driver); if (err) goto err_pci; err = -ENODEV; if (!atomic_read(&drv_instances)) goto err_no_instances; setup_pci_device(); return 0; err_no_instances: pci_unregister_driver(&amd64_pci_driver); err_pci: msrs_free(msrs); msrs = NULL; err_free: kfree(mcis); mcis = NULL; kfree(ecc_stngs); ecc_stngs = NULL; err_ret: return err; } static void __exit amd64_edac_exit(void) { if (amd64_ctl_pci) edac_pci_release_generic_ctl(amd64_ctl_pci); pci_unregister_driver(&amd64_pci_driver); kfree(ecc_stngs); ecc_stngs = NULL; kfree(mcis); mcis = NULL; msrs_free(msrs); msrs = NULL; } module_init(amd64_edac_init); module_exit(amd64_edac_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, " "Dave Peterson, Thayne Harbaugh"); MODULE_DESCRIPTION("MC support for AMD64 memory controllers - " EDAC_AMD64_VERSION); module_param(edac_op_state, int, 0444); MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
gpl-2.0
AccentureMobilityServices/kernel
drivers/net/phy/smsc.c
3252
6773
/* * drivers/net/phy/smsc.c * * Driver for SMSC PHYs * * Author: Herbert Valerio Riedel * * Copyright (c) 2006 Herbert Valerio Riedel <hvr@gnu.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * Support added for SMSC LAN8187 and LAN8700 by steve.glendinning@smsc.com * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/phy.h> #include <linux/netdevice.h> #define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */ #define MII_LAN83C185_IM 30 /* Interrupt Mask */ #define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */ #define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */ #define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */ #define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */ #define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */ #define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */ #define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */ #define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */ #define MII_LAN83C185_ISF_INT_ALL (0x0e) #define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \ (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \ MII_LAN83C185_ISF_INT7) #define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */ static int smsc_phy_config_intr(struct phy_device *phydev) { int rc = phy_write (phydev, MII_LAN83C185_IM, ((PHY_INTERRUPT_ENABLED == phydev->interrupts) ? MII_LAN83C185_ISF_INT_PHYLIB_EVENTS : 0)); return rc < 0 ? rc : 0; } static int smsc_phy_ack_interrupt(struct phy_device *phydev) { int rc = phy_read (phydev, MII_LAN83C185_ISF); return rc < 0 ? 
rc : 0; } static int smsc_phy_config_init(struct phy_device *phydev) { int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); if (rc < 0) return rc; /* Enable energy detect mode for this SMSC Transceivers */ rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, rc | MII_LAN83C185_EDPWRDOWN); if (rc < 0) return rc; return smsc_phy_ack_interrupt (phydev); } static int lan911x_config_init(struct phy_device *phydev) { return smsc_phy_ack_interrupt(phydev); } static struct phy_driver lan83c185_driver = { .phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */ .phy_id_mask = 0xfffffff0, .name = "SMSC LAN83C185", .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause), .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, /* basic functions */ .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .config_init = smsc_phy_config_init, /* IRQ related */ .ack_interrupt = smsc_phy_ack_interrupt, .config_intr = smsc_phy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, .driver = { .owner = THIS_MODULE, } }; static struct phy_driver lan8187_driver = { .phy_id = 0x0007c0b0, /* OUI=0x00800f, Model#=0x0b */ .phy_id_mask = 0xfffffff0, .name = "SMSC LAN8187", .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause), .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, /* basic functions */ .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .config_init = smsc_phy_config_init, /* IRQ related */ .ack_interrupt = smsc_phy_ack_interrupt, .config_intr = smsc_phy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, .driver = { .owner = THIS_MODULE, } }; static struct phy_driver lan8700_driver = { .phy_id = 0x0007c0c0, /* OUI=0x00800f, Model#=0x0c */ .phy_id_mask = 0xfffffff0, .name = "SMSC LAN8700", .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause), .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, /* basic functions */ .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, 
.config_init = smsc_phy_config_init, /* IRQ related */ .ack_interrupt = smsc_phy_ack_interrupt, .config_intr = smsc_phy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, .driver = { .owner = THIS_MODULE, } }; static struct phy_driver lan911x_int_driver = { .phy_id = 0x0007c0d0, /* OUI=0x00800f, Model#=0x0d */ .phy_id_mask = 0xfffffff0, .name = "SMSC LAN911x Internal PHY", .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause), .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, /* basic functions */ .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .config_init = lan911x_config_init, /* IRQ related */ .ack_interrupt = smsc_phy_ack_interrupt, .config_intr = smsc_phy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, .driver = { .owner = THIS_MODULE, } }; static struct phy_driver lan8710_driver = { .phy_id = 0x0007c0f0, /* OUI=0x00800f, Model#=0x0f */ .phy_id_mask = 0xfffffff0, .name = "SMSC LAN8710/LAN8720", .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause), .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, /* basic functions */ .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .config_init = smsc_phy_config_init, /* IRQ related */ .ack_interrupt = smsc_phy_ack_interrupt, .config_intr = smsc_phy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, .driver = { .owner = THIS_MODULE, } }; static int __init smsc_init(void) { int ret; ret = phy_driver_register (&lan83c185_driver); if (ret) goto err1; ret = phy_driver_register (&lan8187_driver); if (ret) goto err2; ret = phy_driver_register (&lan8700_driver); if (ret) goto err3; ret = phy_driver_register (&lan911x_int_driver); if (ret) goto err4; ret = phy_driver_register (&lan8710_driver); if (ret) goto err5; return 0; err5: phy_driver_unregister (&lan911x_int_driver); err4: phy_driver_unregister (&lan8700_driver); err3: phy_driver_unregister (&lan8187_driver); err2: phy_driver_unregister 
(&lan83c185_driver); err1: return ret; } static void __exit smsc_exit(void) { phy_driver_unregister (&lan8710_driver); phy_driver_unregister (&lan911x_int_driver); phy_driver_unregister (&lan8700_driver); phy_driver_unregister (&lan8187_driver); phy_driver_unregister (&lan83c185_driver); } MODULE_DESCRIPTION("SMSC PHY driver"); MODULE_AUTHOR("Herbert Valerio Riedel"); MODULE_LICENSE("GPL"); module_init(smsc_init); module_exit(smsc_exit); static struct mdio_device_id __maybe_unused smsc_tbl[] = { { 0x0007c0a0, 0xfffffff0 }, { 0x0007c0b0, 0xfffffff0 }, { 0x0007c0c0, 0xfffffff0 }, { 0x0007c0d0, 0xfffffff0 }, { 0x0007c0f0, 0xfffffff0 }, { } }; MODULE_DEVICE_TABLE(mdio, smsc_tbl);
gpl-2.0
vijay03/optfs
drivers/staging/octeon/cvmx-interrupt-decodes.c
4788
13959
/***********************license start*************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2009 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. * Contact Cavium Networks for more information ***********************license end**************************************/ /* * * Automatically generated functions useful for enabling * and decoding RSL_INT_BLOCKS interrupts. * */ #include <asm/octeon/octeon.h> #include "cvmx-gmxx-defs.h" #include "cvmx-pcsx-defs.h" #include "cvmx-pcsxx-defs.h" #include "cvmx-spxx-defs.h" #include "cvmx-stxx-defs.h" #ifndef PRINT_ERROR #define PRINT_ERROR(format, ...) 
#endif /** * __cvmx_interrupt_gmxx_rxx_int_en_enable enables all interrupt bits in cvmx_gmxx_rxx_int_en_t */ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block) { union cvmx_gmxx_rxx_int_en gmx_rx_int_en; cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, block), cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, block))); gmx_rx_int_en.u64 = 0; if (OCTEON_IS_MODEL(OCTEON_CN56XX)) { /* Skipping gmx_rx_int_en.s.reserved_29_63 */ gmx_rx_int_en.s.hg2cc = 1; gmx_rx_int_en.s.hg2fld = 1; gmx_rx_int_en.s.undat = 1; gmx_rx_int_en.s.uneop = 1; gmx_rx_int_en.s.unsop = 1; gmx_rx_int_en.s.bad_term = 1; gmx_rx_int_en.s.bad_seq = 1; gmx_rx_int_en.s.rem_fault = 1; gmx_rx_int_en.s.loc_fault = 1; gmx_rx_int_en.s.pause_drp = 1; /* Skipping gmx_rx_int_en.s.reserved_16_18 */ /*gmx_rx_int_en.s.ifgerr = 1; */ /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ gmx_rx_int_en.s.ovrerr = 1; /* Skipping gmx_rx_int_en.s.reserved_9_9 */ gmx_rx_int_en.s.skperr = 1; gmx_rx_int_en.s.rcverr = 1; /* Skipping gmx_rx_int_en.s.reserved_5_6 */ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */ gmx_rx_int_en.s.jabber = 1; /* Skipping gmx_rx_int_en.s.reserved_2_2 */ gmx_rx_int_en.s.carext = 1; /* Skipping gmx_rx_int_en.s.reserved_0_0 */ } if (OCTEON_IS_MODEL(OCTEON_CN30XX)) { /* Skipping gmx_rx_int_en.s.reserved_19_63 */ /*gmx_rx_int_en.s.phy_dupx = 1; */ /*gmx_rx_int_en.s.phy_spd = 1; */ /*gmx_rx_int_en.s.phy_link = 1; */ /*gmx_rx_int_en.s.ifgerr = 1; */ /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ gmx_rx_int_en.s.ovrerr = 1; gmx_rx_int_en.s.niberr = 1; 
gmx_rx_int_en.s.skperr = 1; gmx_rx_int_en.s.rcverr = 1; /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */ gmx_rx_int_en.s.alnerr = 1; /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */ gmx_rx_int_en.s.jabber = 1; gmx_rx_int_en.s.maxerr = 1; gmx_rx_int_en.s.carext = 1; gmx_rx_int_en.s.minerr = 1; } if (OCTEON_IS_MODEL(OCTEON_CN50XX)) { /* Skipping gmx_rx_int_en.s.reserved_20_63 */ gmx_rx_int_en.s.pause_drp = 1; /*gmx_rx_int_en.s.phy_dupx = 1; */ /*gmx_rx_int_en.s.phy_spd = 1; */ /*gmx_rx_int_en.s.phy_link = 1; */ /*gmx_rx_int_en.s.ifgerr = 1; */ /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ gmx_rx_int_en.s.ovrerr = 1; gmx_rx_int_en.s.niberr = 1; gmx_rx_int_en.s.skperr = 1; gmx_rx_int_en.s.rcverr = 1; /* Skipping gmx_rx_int_en.s.reserved_6_6 */ gmx_rx_int_en.s.alnerr = 1; /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */ gmx_rx_int_en.s.jabber = 1; /* Skipping gmx_rx_int_en.s.reserved_2_2 */ gmx_rx_int_en.s.carext = 1; /* Skipping gmx_rx_int_en.s.reserved_0_0 */ } if (OCTEON_IS_MODEL(OCTEON_CN38XX)) { /* Skipping gmx_rx_int_en.s.reserved_19_63 */ /*gmx_rx_int_en.s.phy_dupx = 1; */ /*gmx_rx_int_en.s.phy_spd = 1; */ /*gmx_rx_int_en.s.phy_link = 1; */ /*gmx_rx_int_en.s.ifgerr = 1; */ /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ gmx_rx_int_en.s.ovrerr = 1; gmx_rx_int_en.s.niberr = 1; gmx_rx_int_en.s.skperr = 1; gmx_rx_int_en.s.rcverr = 1; /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */ gmx_rx_int_en.s.alnerr = 1; /*gmx_rx_int_en.s.fcserr = 1; // 
FCS errors are handled when we get work */ gmx_rx_int_en.s.jabber = 1; gmx_rx_int_en.s.maxerr = 1; gmx_rx_int_en.s.carext = 1; gmx_rx_int_en.s.minerr = 1; } if (OCTEON_IS_MODEL(OCTEON_CN31XX)) { /* Skipping gmx_rx_int_en.s.reserved_19_63 */ /*gmx_rx_int_en.s.phy_dupx = 1; */ /*gmx_rx_int_en.s.phy_spd = 1; */ /*gmx_rx_int_en.s.phy_link = 1; */ /*gmx_rx_int_en.s.ifgerr = 1; */ /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ gmx_rx_int_en.s.ovrerr = 1; gmx_rx_int_en.s.niberr = 1; gmx_rx_int_en.s.skperr = 1; gmx_rx_int_en.s.rcverr = 1; /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */ gmx_rx_int_en.s.alnerr = 1; /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */ gmx_rx_int_en.s.jabber = 1; gmx_rx_int_en.s.maxerr = 1; gmx_rx_int_en.s.carext = 1; gmx_rx_int_en.s.minerr = 1; } if (OCTEON_IS_MODEL(OCTEON_CN58XX)) { /* Skipping gmx_rx_int_en.s.reserved_20_63 */ gmx_rx_int_en.s.pause_drp = 1; /*gmx_rx_int_en.s.phy_dupx = 1; */ /*gmx_rx_int_en.s.phy_spd = 1; */ /*gmx_rx_int_en.s.phy_link = 1; */ /*gmx_rx_int_en.s.ifgerr = 1; */ /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ gmx_rx_int_en.s.ovrerr = 1; gmx_rx_int_en.s.niberr = 1; gmx_rx_int_en.s.skperr = 1; gmx_rx_int_en.s.rcverr = 1; /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */ gmx_rx_int_en.s.alnerr = 1; /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */ gmx_rx_int_en.s.jabber = 1; gmx_rx_int_en.s.maxerr = 1; gmx_rx_int_en.s.carext = 1; gmx_rx_int_en.s.minerr = 1; } if (OCTEON_IS_MODEL(OCTEON_CN52XX)) 
{ /* Skipping gmx_rx_int_en.s.reserved_29_63 */ gmx_rx_int_en.s.hg2cc = 1; gmx_rx_int_en.s.hg2fld = 1; gmx_rx_int_en.s.undat = 1; gmx_rx_int_en.s.uneop = 1; gmx_rx_int_en.s.unsop = 1; gmx_rx_int_en.s.bad_term = 1; gmx_rx_int_en.s.bad_seq = 0; gmx_rx_int_en.s.rem_fault = 1; gmx_rx_int_en.s.loc_fault = 0; gmx_rx_int_en.s.pause_drp = 1; /* Skipping gmx_rx_int_en.s.reserved_16_18 */ /*gmx_rx_int_en.s.ifgerr = 1; */ /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ gmx_rx_int_en.s.ovrerr = 1; /* Skipping gmx_rx_int_en.s.reserved_9_9 */ gmx_rx_int_en.s.skperr = 1; gmx_rx_int_en.s.rcverr = 1; /* Skipping gmx_rx_int_en.s.reserved_5_6 */ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */ gmx_rx_int_en.s.jabber = 1; /* Skipping gmx_rx_int_en.s.reserved_2_2 */ gmx_rx_int_en.s.carext = 1; /* Skipping gmx_rx_int_en.s.reserved_0_0 */ } cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, block), gmx_rx_int_en.u64); } /** * __cvmx_interrupt_pcsx_intx_en_reg_enable enables all interrupt bits in cvmx_pcsx_intx_en_reg_t */ void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block) { union cvmx_pcsx_intx_en_reg pcs_int_en_reg; cvmx_write_csr(CVMX_PCSX_INTX_REG(index, block), cvmx_read_csr(CVMX_PCSX_INTX_REG(index, block))); pcs_int_en_reg.u64 = 0; if (OCTEON_IS_MODEL(OCTEON_CN56XX)) { /* Skipping pcs_int_en_reg.s.reserved_12_63 */ /*pcs_int_en_reg.s.dup = 1; // This happens during normal operation */ pcs_int_en_reg.s.sync_bad_en = 1; pcs_int_en_reg.s.an_bad_en = 1; pcs_int_en_reg.s.rxlock_en = 1; pcs_int_en_reg.s.rxbad_en = 1; /*pcs_int_en_reg.s.rxerr_en = 1; // This happens during normal operation */ pcs_int_en_reg.s.txbad_en = 1; pcs_int_en_reg.s.txfifo_en = 1; pcs_int_en_reg.s.txfifu_en = 1; pcs_int_en_reg.s.an_err_en = 1; /*pcs_int_en_reg.s.xmit_en = 
1; // This happens during normal operation */ /*pcs_int_en_reg.s.lnkspd_en = 1; // This happens during normal operation */ } if (OCTEON_IS_MODEL(OCTEON_CN52XX)) { /* Skipping pcs_int_en_reg.s.reserved_12_63 */ /*pcs_int_en_reg.s.dup = 1; // This happens during normal operation */ pcs_int_en_reg.s.sync_bad_en = 1; pcs_int_en_reg.s.an_bad_en = 1; pcs_int_en_reg.s.rxlock_en = 1; pcs_int_en_reg.s.rxbad_en = 1; /*pcs_int_en_reg.s.rxerr_en = 1; // This happens during normal operation */ pcs_int_en_reg.s.txbad_en = 1; pcs_int_en_reg.s.txfifo_en = 1; pcs_int_en_reg.s.txfifu_en = 1; pcs_int_en_reg.s.an_err_en = 1; /*pcs_int_en_reg.s.xmit_en = 1; // This happens during normal operation */ /*pcs_int_en_reg.s.lnkspd_en = 1; // This happens during normal operation */ } cvmx_write_csr(CVMX_PCSX_INTX_EN_REG(index, block), pcs_int_en_reg.u64); } /** * __cvmx_interrupt_pcsxx_int_en_reg_enable enables all interrupt bits in cvmx_pcsxx_int_en_reg_t */ void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index) { union cvmx_pcsxx_int_en_reg pcsx_int_en_reg; cvmx_write_csr(CVMX_PCSXX_INT_REG(index), cvmx_read_csr(CVMX_PCSXX_INT_REG(index))); pcsx_int_en_reg.u64 = 0; if (OCTEON_IS_MODEL(OCTEON_CN56XX)) { /* Skipping pcsx_int_en_reg.s.reserved_6_63 */ pcsx_int_en_reg.s.algnlos_en = 1; pcsx_int_en_reg.s.synlos_en = 1; pcsx_int_en_reg.s.bitlckls_en = 1; pcsx_int_en_reg.s.rxsynbad_en = 1; pcsx_int_en_reg.s.rxbad_en = 1; pcsx_int_en_reg.s.txflt_en = 1; } if (OCTEON_IS_MODEL(OCTEON_CN52XX)) { /* Skipping pcsx_int_en_reg.s.reserved_6_63 */ pcsx_int_en_reg.s.algnlos_en = 1; pcsx_int_en_reg.s.synlos_en = 1; pcsx_int_en_reg.s.bitlckls_en = 0; /* Happens if XAUI module is not installed */ pcsx_int_en_reg.s.rxsynbad_en = 1; pcsx_int_en_reg.s.rxbad_en = 1; pcsx_int_en_reg.s.txflt_en = 1; } cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(index), pcsx_int_en_reg.u64); } /** * __cvmx_interrupt_spxx_int_msk_enable enables all interrupt bits in cvmx_spxx_int_msk_t */ void __cvmx_interrupt_spxx_int_msk_enable(int index) 
{ union cvmx_spxx_int_msk spx_int_msk; cvmx_write_csr(CVMX_SPXX_INT_REG(index), cvmx_read_csr(CVMX_SPXX_INT_REG(index))); spx_int_msk.u64 = 0; if (OCTEON_IS_MODEL(OCTEON_CN38XX)) { /* Skipping spx_int_msk.s.reserved_12_63 */ spx_int_msk.s.calerr = 1; spx_int_msk.s.syncerr = 1; spx_int_msk.s.diperr = 1; spx_int_msk.s.tpaovr = 1; spx_int_msk.s.rsverr = 1; spx_int_msk.s.drwnng = 1; spx_int_msk.s.clserr = 1; spx_int_msk.s.spiovr = 1; /* Skipping spx_int_msk.s.reserved_2_3 */ spx_int_msk.s.abnorm = 1; spx_int_msk.s.prtnxa = 1; } if (OCTEON_IS_MODEL(OCTEON_CN58XX)) { /* Skipping spx_int_msk.s.reserved_12_63 */ spx_int_msk.s.calerr = 1; spx_int_msk.s.syncerr = 1; spx_int_msk.s.diperr = 1; spx_int_msk.s.tpaovr = 1; spx_int_msk.s.rsverr = 1; spx_int_msk.s.drwnng = 1; spx_int_msk.s.clserr = 1; spx_int_msk.s.spiovr = 1; /* Skipping spx_int_msk.s.reserved_2_3 */ spx_int_msk.s.abnorm = 1; spx_int_msk.s.prtnxa = 1; } cvmx_write_csr(CVMX_SPXX_INT_MSK(index), spx_int_msk.u64); } /** * __cvmx_interrupt_stxx_int_msk_enable enables all interrupt bits in cvmx_stxx_int_msk_t */ void __cvmx_interrupt_stxx_int_msk_enable(int index) { union cvmx_stxx_int_msk stx_int_msk; cvmx_write_csr(CVMX_STXX_INT_REG(index), cvmx_read_csr(CVMX_STXX_INT_REG(index))); stx_int_msk.u64 = 0; if (OCTEON_IS_MODEL(OCTEON_CN38XX)) { /* Skipping stx_int_msk.s.reserved_8_63 */ stx_int_msk.s.frmerr = 1; stx_int_msk.s.unxfrm = 1; stx_int_msk.s.nosync = 1; stx_int_msk.s.diperr = 1; stx_int_msk.s.datovr = 1; stx_int_msk.s.ovrbst = 1; stx_int_msk.s.calpar1 = 1; stx_int_msk.s.calpar0 = 1; } if (OCTEON_IS_MODEL(OCTEON_CN58XX)) { /* Skipping stx_int_msk.s.reserved_8_63 */ stx_int_msk.s.frmerr = 1; stx_int_msk.s.unxfrm = 1; stx_int_msk.s.nosync = 1; stx_int_msk.s.diperr = 1; stx_int_msk.s.datovr = 1; stx_int_msk.s.ovrbst = 1; stx_int_msk.s.calpar1 = 1; stx_int_msk.s.calpar0 = 1; } cvmx_write_csr(CVMX_STXX_INT_MSK(index), stx_int_msk.u64); }
gpl-2.0
highRPM/IM-A910S_msm8974_kernel-source
drivers/gpu/drm/exynos/exynos_hdmi.c
4788
68213
/* * Copyright (C) 2011 Samsung Electronics Co.Ltd * Authors: * Seung-Woo Kim <sw0312.kim@samsung.com> * Inki Dae <inki.dae@samsung.com> * Joonyoung Shim <jy0922.shim@samsung.com> * * Based on drivers/media/video/s5p-tv/hdmi_drv.c * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include "drmP.h" #include "drm_edid.h" #include "drm_crtc_helper.h" #include "regs-hdmi.h" #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/delay.h> #include <linux/pm_runtime.h> #include <linux/clk.h> #include <linux/regulator/consumer.h> #include <drm/exynos_drm.h> #include "exynos_drm_drv.h" #include "exynos_drm_hdmi.h" #include "exynos_hdmi.h" #define MAX_WIDTH 1920 #define MAX_HEIGHT 1080 #define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev)) struct hdmi_resources { struct clk *hdmi; struct clk *sclk_hdmi; struct clk *sclk_pixel; struct clk *sclk_hdmiphy; struct clk *hdmiphy; struct regulator_bulk_data *regul_bulk; int regul_count; }; struct hdmi_context { struct device *dev; struct drm_device *drm_dev; struct fb_videomode *default_timing; unsigned int is_v13:1; unsigned int default_win; unsigned int default_bpp; bool hpd_handle; bool enabled; struct resource *regs_res; void __iomem *regs; unsigned int irq; struct workqueue_struct *wq; struct work_struct hotplug_work; struct i2c_client *ddc_port; struct i2c_client *hdmiphy_port; /* current hdmiphy conf index */ int cur_conf; struct hdmi_resources res; void *parent_ctx; }; /* HDMI Version 1.3 */ static const u8 hdmiphy_v13_conf27[32] = { 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40, 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87, 
0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0, 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, }; static const u8 hdmiphy_v13_conf27_027[32] = { 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64, 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87, 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0, 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, }; static const u8 hdmiphy_v13_conf74_175[32] = { 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B, 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9, 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0, 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00, }; static const u8 hdmiphy_v13_conf74_25[32] = { 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40, 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba, 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0, 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00, }; static const u8 hdmiphy_v13_conf148_5[32] = { 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40, 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba, 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0, 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00, }; struct hdmi_v13_tg_regs { u8 cmd; u8 h_fsz_l; u8 h_fsz_h; u8 hact_st_l; u8 hact_st_h; u8 hact_sz_l; u8 hact_sz_h; u8 v_fsz_l; u8 v_fsz_h; u8 vsync_l; u8 vsync_h; u8 vsync2_l; u8 vsync2_h; u8 vact_st_l; u8 vact_st_h; u8 vact_sz_l; u8 vact_sz_h; u8 field_chg_l; u8 field_chg_h; u8 vact_st2_l; u8 vact_st2_h; u8 vsync_top_hdmi_l; u8 vsync_top_hdmi_h; u8 vsync_bot_hdmi_l; u8 vsync_bot_hdmi_h; u8 field_top_hdmi_l; u8 field_top_hdmi_h; u8 field_bot_hdmi_l; u8 field_bot_hdmi_h; }; struct hdmi_v13_core_regs { u8 h_blank[2]; u8 v_blank[3]; u8 h_v_line[3]; u8 vsync_pol[1]; u8 int_pro_mode[1]; u8 v_blank_f[3]; u8 h_sync_gen[3]; u8 v_sync_gen1[3]; u8 v_sync_gen2[3]; u8 v_sync_gen3[3]; }; struct hdmi_v13_preset_conf { struct hdmi_v13_core_regs core; struct hdmi_v13_tg_regs tg; }; struct hdmi_v13_conf { int width; int height; int vrefresh; bool interlace; const u8 *hdmiphy_data; const struct hdmi_v13_preset_conf *conf; }; static const 
struct hdmi_v13_preset_conf hdmi_v13_conf_480p = { .core = { .h_blank = {0x8a, 0x00}, .v_blank = {0x0d, 0x6a, 0x01}, .h_v_line = {0x0d, 0xa2, 0x35}, .vsync_pol = {0x01}, .int_pro_mode = {0x00}, .v_blank_f = {0x00, 0x00, 0x00}, .h_sync_gen = {0x0e, 0x30, 0x11}, .v_sync_gen1 = {0x0f, 0x90, 0x00}, /* other don't care */ }, .tg = { 0x00, /* cmd */ 0x5a, 0x03, /* h_fsz */ 0x8a, 0x00, 0xd0, 0x02, /* hact */ 0x0d, 0x02, /* v_fsz */ 0x01, 0x00, 0x33, 0x02, /* vsync */ 0x2d, 0x00, 0xe0, 0x01, /* vact */ 0x33, 0x02, /* field_chg */ 0x49, 0x02, /* vact_st2 */ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */ 0x01, 0x00, 0x33, 0x02, /* field top/bot */ }, }; static const struct hdmi_v13_preset_conf hdmi_v13_conf_720p60 = { .core = { .h_blank = {0x72, 0x01}, .v_blank = {0xee, 0xf2, 0x00}, .h_v_line = {0xee, 0x22, 0x67}, .vsync_pol = {0x00}, .int_pro_mode = {0x00}, .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */ .h_sync_gen = {0x6c, 0x50, 0x02}, .v_sync_gen1 = {0x0a, 0x50, 0x00}, .v_sync_gen2 = {0x01, 0x10, 0x00}, .v_sync_gen3 = {0x01, 0x10, 0x00}, /* other don't care */ }, .tg = { 0x00, /* cmd */ 0x72, 0x06, /* h_fsz */ 0x71, 0x01, 0x01, 0x05, /* hact */ 0xee, 0x02, /* v_fsz */ 0x01, 0x00, 0x33, 0x02, /* vsync */ 0x1e, 0x00, 0xd0, 0x02, /* vact */ 0x33, 0x02, /* field_chg */ 0x49, 0x02, /* vact_st2 */ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */ 0x01, 0x00, 0x33, 0x02, /* field top/bot */ }, }; static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080i50 = { .core = { .h_blank = {0xd0, 0x02}, .v_blank = {0x32, 0xB2, 0x00}, .h_v_line = {0x65, 0x04, 0xa5}, .vsync_pol = {0x00}, .int_pro_mode = {0x01}, .v_blank_f = {0x49, 0x2A, 0x23}, .h_sync_gen = {0x0E, 0xEA, 0x08}, .v_sync_gen1 = {0x07, 0x20, 0x00}, .v_sync_gen2 = {0x39, 0x42, 0x23}, .v_sync_gen3 = {0x38, 0x87, 0x73}, /* other don't care */ }, .tg = { 0x00, /* cmd */ 0x50, 0x0A, /* h_fsz */ 0xCF, 0x02, 0x81, 0x07, /* hact */ 0x65, 0x04, /* v_fsz */ 0x01, 0x00, 0x33, 0x02, /* vsync */ 0x16, 0x00, 0x1c, 0x02, /* vact */ 0x33, 0x02, 
/* field_chg */ 0x49, 0x02, /* vact_st2 */ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */ 0x01, 0x00, 0x33, 0x02, /* field top/bot */ }, }; static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p50 = { .core = { .h_blank = {0xd0, 0x02}, .v_blank = {0x65, 0x6c, 0x01}, .h_v_line = {0x65, 0x04, 0xa5}, .vsync_pol = {0x00}, .int_pro_mode = {0x00}, .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */ .h_sync_gen = {0x0e, 0xea, 0x08}, .v_sync_gen1 = {0x09, 0x40, 0x00}, .v_sync_gen2 = {0x01, 0x10, 0x00}, .v_sync_gen3 = {0x01, 0x10, 0x00}, /* other don't care */ }, .tg = { 0x00, /* cmd */ 0x50, 0x0A, /* h_fsz */ 0xCF, 0x02, 0x81, 0x07, /* hact */ 0x65, 0x04, /* v_fsz */ 0x01, 0x00, 0x33, 0x02, /* vsync */ 0x2d, 0x00, 0x38, 0x04, /* vact */ 0x33, 0x02, /* field_chg */ 0x48, 0x02, /* vact_st2 */ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */ 0x01, 0x00, 0x33, 0x02, /* field top/bot */ }, }; static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080i60 = { .core = { .h_blank = {0x18, 0x01}, .v_blank = {0x32, 0xB2, 0x00}, .h_v_line = {0x65, 0x84, 0x89}, .vsync_pol = {0x00}, .int_pro_mode = {0x01}, .v_blank_f = {0x49, 0x2A, 0x23}, .h_sync_gen = {0x56, 0x08, 0x02}, .v_sync_gen1 = {0x07, 0x20, 0x00}, .v_sync_gen2 = {0x39, 0x42, 0x23}, .v_sync_gen3 = {0xa4, 0x44, 0x4a}, /* other don't care */ }, .tg = { 0x00, /* cmd */ 0x98, 0x08, /* h_fsz */ 0x17, 0x01, 0x81, 0x07, /* hact */ 0x65, 0x04, /* v_fsz */ 0x01, 0x00, 0x33, 0x02, /* vsync */ 0x16, 0x00, 0x1c, 0x02, /* vact */ 0x33, 0x02, /* field_chg */ 0x49, 0x02, /* vact_st2 */ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */ 0x01, 0x00, 0x33, 0x02, /* field top/bot */ }, }; static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = { .core = { .h_blank = {0x18, 0x01}, .v_blank = {0x65, 0x6c, 0x01}, .h_v_line = {0x65, 0x84, 0x89}, .vsync_pol = {0x00}, .int_pro_mode = {0x00}, .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */ .h_sync_gen = {0x56, 0x08, 0x02}, .v_sync_gen1 = {0x09, 0x40, 0x00}, .v_sync_gen2 = {0x01, 0x10, 0x00}, 
.v_sync_gen3 = {0x01, 0x10, 0x00}, /* other don't care */ }, .tg = { 0x00, /* cmd */ 0x98, 0x08, /* h_fsz */ 0x17, 0x01, 0x81, 0x07, /* hact */ 0x65, 0x04, /* v_fsz */ 0x01, 0x00, 0x33, 0x02, /* vsync */ 0x2d, 0x00, 0x38, 0x04, /* vact */ 0x33, 0x02, /* field_chg */ 0x48, 0x02, /* vact_st2 */ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */ 0x01, 0x00, 0x33, 0x02, /* field top/bot */ }, }; static const struct hdmi_v13_conf hdmi_v13_confs[] = { { 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 }, { 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 }, { 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p }, { 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 }, { 1920, 1080, 50, false, hdmiphy_v13_conf148_5, &hdmi_v13_conf_1080p50 }, { 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 }, { 1920, 1080, 60, false, hdmiphy_v13_conf148_5, &hdmi_v13_conf_1080p60 }, }; /* HDMI Version 1.4 */ static const u8 hdmiphy_conf27_027[32] = { 0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08, 0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }; static const u8 hdmiphy_conf74_25[32] = { 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08, 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80, 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }; static const u8 hdmiphy_conf148_5[32] = { 0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08, 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80, 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00, }; struct hdmi_tg_regs { u8 cmd; u8 h_fsz_l; u8 h_fsz_h; u8 hact_st_l; u8 hact_st_h; u8 hact_sz_l; u8 hact_sz_h; u8 v_fsz_l; u8 v_fsz_h; u8 vsync_l; u8 vsync_h; u8 vsync2_l; u8 vsync2_h; u8 vact_st_l; u8 vact_st_h; u8 vact_sz_l; u8 vact_sz_h; u8 field_chg_l; u8 field_chg_h; u8 vact_st2_l; u8 vact_st2_h; u8 
vact_st3_l; u8 vact_st3_h; u8 vact_st4_l; u8 vact_st4_h; u8 vsync_top_hdmi_l; u8 vsync_top_hdmi_h; u8 vsync_bot_hdmi_l; u8 vsync_bot_hdmi_h; u8 field_top_hdmi_l; u8 field_top_hdmi_h; u8 field_bot_hdmi_l; u8 field_bot_hdmi_h; u8 tg_3d; }; struct hdmi_core_regs { u8 h_blank[2]; u8 v2_blank[2]; u8 v1_blank[2]; u8 v_line[2]; u8 h_line[2]; u8 hsync_pol[1]; u8 vsync_pol[1]; u8 int_pro_mode[1]; u8 v_blank_f0[2]; u8 v_blank_f1[2]; u8 h_sync_start[2]; u8 h_sync_end[2]; u8 v_sync_line_bef_2[2]; u8 v_sync_line_bef_1[2]; u8 v_sync_line_aft_2[2]; u8 v_sync_line_aft_1[2]; u8 v_sync_line_aft_pxl_2[2]; u8 v_sync_line_aft_pxl_1[2]; u8 v_blank_f2[2]; /* for 3D mode */ u8 v_blank_f3[2]; /* for 3D mode */ u8 v_blank_f4[2]; /* for 3D mode */ u8 v_blank_f5[2]; /* for 3D mode */ u8 v_sync_line_aft_3[2]; u8 v_sync_line_aft_4[2]; u8 v_sync_line_aft_5[2]; u8 v_sync_line_aft_6[2]; u8 v_sync_line_aft_pxl_3[2]; u8 v_sync_line_aft_pxl_4[2]; u8 v_sync_line_aft_pxl_5[2]; u8 v_sync_line_aft_pxl_6[2]; u8 vact_space_1[2]; u8 vact_space_2[2]; u8 vact_space_3[2]; u8 vact_space_4[2]; u8 vact_space_5[2]; u8 vact_space_6[2]; }; struct hdmi_preset_conf { struct hdmi_core_regs core; struct hdmi_tg_regs tg; }; struct hdmi_conf { int width; int height; int vrefresh; bool interlace; const u8 *hdmiphy_data; const struct hdmi_preset_conf *conf; }; static const struct hdmi_preset_conf hdmi_conf_480p60 = { .core = { .h_blank = {0x8a, 0x00}, .v2_blank = {0x0d, 0x02}, .v1_blank = {0x2d, 0x00}, .v_line = {0x0d, 0x02}, .h_line = {0x5a, 0x03}, .hsync_pol = {0x01}, .vsync_pol = {0x01}, .int_pro_mode = {0x00}, .v_blank_f0 = {0xff, 0xff}, .v_blank_f1 = {0xff, 0xff}, .h_sync_start = {0x0e, 0x00}, .h_sync_end = {0x4c, 0x00}, .v_sync_line_bef_2 = {0x0f, 0x00}, .v_sync_line_bef_1 = {0x09, 0x00}, .v_sync_line_aft_2 = {0xff, 0xff}, .v_sync_line_aft_1 = {0xff, 0xff}, .v_sync_line_aft_pxl_2 = {0xff, 0xff}, .v_sync_line_aft_pxl_1 = {0xff, 0xff}, .v_blank_f2 = {0xff, 0xff}, .v_blank_f3 = {0xff, 0xff}, .v_blank_f4 = {0xff, 0xff}, 
.v_blank_f5 = {0xff, 0xff}, .v_sync_line_aft_3 = {0xff, 0xff}, .v_sync_line_aft_4 = {0xff, 0xff}, .v_sync_line_aft_5 = {0xff, 0xff}, .v_sync_line_aft_6 = {0xff, 0xff}, .v_sync_line_aft_pxl_3 = {0xff, 0xff}, .v_sync_line_aft_pxl_4 = {0xff, 0xff}, .v_sync_line_aft_pxl_5 = {0xff, 0xff}, .v_sync_line_aft_pxl_6 = {0xff, 0xff}, .vact_space_1 = {0xff, 0xff}, .vact_space_2 = {0xff, 0xff}, .vact_space_3 = {0xff, 0xff}, .vact_space_4 = {0xff, 0xff}, .vact_space_5 = {0xff, 0xff}, .vact_space_6 = {0xff, 0xff}, /* other don't care */ }, .tg = { 0x00, /* cmd */ 0x5a, 0x03, /* h_fsz */ 0x8a, 0x00, 0xd0, 0x02, /* hact */ 0x0d, 0x02, /* v_fsz */ 0x01, 0x00, 0x33, 0x02, /* vsync */ 0x2d, 0x00, 0xe0, 0x01, /* vact */ 0x33, 0x02, /* field_chg */ 0x48, 0x02, /* vact_st2 */ 0x00, 0x00, /* vact_st3 */ 0x00, 0x00, /* vact_st4 */ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */ 0x01, 0x00, 0x33, 0x02, /* field top/bot */ 0x00, /* 3d FP */ }, }; static const struct hdmi_preset_conf hdmi_conf_720p50 = { .core = { .h_blank = {0xbc, 0x02}, .v2_blank = {0xee, 0x02}, .v1_blank = {0x1e, 0x00}, .v_line = {0xee, 0x02}, .h_line = {0xbc, 0x07}, .hsync_pol = {0x00}, .vsync_pol = {0x00}, .int_pro_mode = {0x00}, .v_blank_f0 = {0xff, 0xff}, .v_blank_f1 = {0xff, 0xff}, .h_sync_start = {0xb6, 0x01}, .h_sync_end = {0xde, 0x01}, .v_sync_line_bef_2 = {0x0a, 0x00}, .v_sync_line_bef_1 = {0x05, 0x00}, .v_sync_line_aft_2 = {0xff, 0xff}, .v_sync_line_aft_1 = {0xff, 0xff}, .v_sync_line_aft_pxl_2 = {0xff, 0xff}, .v_sync_line_aft_pxl_1 = {0xff, 0xff}, .v_blank_f2 = {0xff, 0xff}, .v_blank_f3 = {0xff, 0xff}, .v_blank_f4 = {0xff, 0xff}, .v_blank_f5 = {0xff, 0xff}, .v_sync_line_aft_3 = {0xff, 0xff}, .v_sync_line_aft_4 = {0xff, 0xff}, .v_sync_line_aft_5 = {0xff, 0xff}, .v_sync_line_aft_6 = {0xff, 0xff}, .v_sync_line_aft_pxl_3 = {0xff, 0xff}, .v_sync_line_aft_pxl_4 = {0xff, 0xff}, .v_sync_line_aft_pxl_5 = {0xff, 0xff}, .v_sync_line_aft_pxl_6 = {0xff, 0xff}, .vact_space_1 = {0xff, 0xff}, .vact_space_2 = {0xff, 0xff}, 
.vact_space_3 = {0xff, 0xff}, .vact_space_4 = {0xff, 0xff}, .vact_space_5 = {0xff, 0xff}, .vact_space_6 = {0xff, 0xff}, /* other don't care */ }, .tg = { 0x00, /* cmd */ 0xbc, 0x07, /* h_fsz */ 0xbc, 0x02, 0x00, 0x05, /* hact */ 0xee, 0x02, /* v_fsz */ 0x01, 0x00, 0x33, 0x02, /* vsync */ 0x1e, 0x00, 0xd0, 0x02, /* vact */ 0x33, 0x02, /* field_chg */ 0x48, 0x02, /* vact_st2 */ 0x00, 0x00, /* vact_st3 */ 0x00, 0x00, /* vact_st4 */ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */ 0x01, 0x00, 0x33, 0x02, /* field top/bot */ 0x00, /* 3d FP */ }, }; static const struct hdmi_preset_conf hdmi_conf_720p60 = { .core = { .h_blank = {0x72, 0x01}, .v2_blank = {0xee, 0x02}, .v1_blank = {0x1e, 0x00}, .v_line = {0xee, 0x02}, .h_line = {0x72, 0x06}, .hsync_pol = {0x00}, .vsync_pol = {0x00}, .int_pro_mode = {0x00}, .v_blank_f0 = {0xff, 0xff}, .v_blank_f1 = {0xff, 0xff}, .h_sync_start = {0x6c, 0x00}, .h_sync_end = {0x94, 0x00}, .v_sync_line_bef_2 = {0x0a, 0x00}, .v_sync_line_bef_1 = {0x05, 0x00}, .v_sync_line_aft_2 = {0xff, 0xff}, .v_sync_line_aft_1 = {0xff, 0xff}, .v_sync_line_aft_pxl_2 = {0xff, 0xff}, .v_sync_line_aft_pxl_1 = {0xff, 0xff}, .v_blank_f2 = {0xff, 0xff}, .v_blank_f3 = {0xff, 0xff}, .v_blank_f4 = {0xff, 0xff}, .v_blank_f5 = {0xff, 0xff}, .v_sync_line_aft_3 = {0xff, 0xff}, .v_sync_line_aft_4 = {0xff, 0xff}, .v_sync_line_aft_5 = {0xff, 0xff}, .v_sync_line_aft_6 = {0xff, 0xff}, .v_sync_line_aft_pxl_3 = {0xff, 0xff}, .v_sync_line_aft_pxl_4 = {0xff, 0xff}, .v_sync_line_aft_pxl_5 = {0xff, 0xff}, .v_sync_line_aft_pxl_6 = {0xff, 0xff}, .vact_space_1 = {0xff, 0xff}, .vact_space_2 = {0xff, 0xff}, .vact_space_3 = {0xff, 0xff}, .vact_space_4 = {0xff, 0xff}, .vact_space_5 = {0xff, 0xff}, .vact_space_6 = {0xff, 0xff}, /* other don't care */ }, .tg = { 0x00, /* cmd */ 0x72, 0x06, /* h_fsz */ 0x72, 0x01, 0x00, 0x05, /* hact */ 0xee, 0x02, /* v_fsz */ 0x01, 0x00, 0x33, 0x02, /* vsync */ 0x1e, 0x00, 0xd0, 0x02, /* vact */ 0x33, 0x02, /* field_chg */ 0x48, 0x02, /* vact_st2 */ 0x00, 0x00, /* 
vact_st3 */ 0x00, 0x00, /* vact_st4 */ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */ 0x01, 0x00, 0x33, 0x02, /* field top/bot */ 0x00, /* 3d FP */ }, }; static const struct hdmi_preset_conf hdmi_conf_1080i50 = { .core = { .h_blank = {0xd0, 0x02}, .v2_blank = {0x32, 0x02}, .v1_blank = {0x16, 0x00}, .v_line = {0x65, 0x04}, .h_line = {0x50, 0x0a}, .hsync_pol = {0x00}, .vsync_pol = {0x00}, .int_pro_mode = {0x01}, .v_blank_f0 = {0x49, 0x02}, .v_blank_f1 = {0x65, 0x04}, .h_sync_start = {0x0e, 0x02}, .h_sync_end = {0x3a, 0x02}, .v_sync_line_bef_2 = {0x07, 0x00}, .v_sync_line_bef_1 = {0x02, 0x00}, .v_sync_line_aft_2 = {0x39, 0x02}, .v_sync_line_aft_1 = {0x34, 0x02}, .v_sync_line_aft_pxl_2 = {0x38, 0x07}, .v_sync_line_aft_pxl_1 = {0x38, 0x07}, .v_blank_f2 = {0xff, 0xff}, .v_blank_f3 = {0xff, 0xff}, .v_blank_f4 = {0xff, 0xff}, .v_blank_f5 = {0xff, 0xff}, .v_sync_line_aft_3 = {0xff, 0xff}, .v_sync_line_aft_4 = {0xff, 0xff}, .v_sync_line_aft_5 = {0xff, 0xff}, .v_sync_line_aft_6 = {0xff, 0xff}, .v_sync_line_aft_pxl_3 = {0xff, 0xff}, .v_sync_line_aft_pxl_4 = {0xff, 0xff}, .v_sync_line_aft_pxl_5 = {0xff, 0xff}, .v_sync_line_aft_pxl_6 = {0xff, 0xff}, .vact_space_1 = {0xff, 0xff}, .vact_space_2 = {0xff, 0xff}, .vact_space_3 = {0xff, 0xff}, .vact_space_4 = {0xff, 0xff}, .vact_space_5 = {0xff, 0xff}, .vact_space_6 = {0xff, 0xff}, /* other don't care */ }, .tg = { 0x00, /* cmd */ 0x50, 0x0a, /* h_fsz */ 0xd0, 0x02, 0x80, 0x07, /* hact */ 0x65, 0x04, /* v_fsz */ 0x01, 0x00, 0x33, 0x02, /* vsync */ 0x16, 0x00, 0x1c, 0x02, /* vact */ 0x33, 0x02, /* field_chg */ 0x49, 0x02, /* vact_st2 */ 0x00, 0x00, /* vact_st3 */ 0x00, 0x00, /* vact_st4 */ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */ 0x01, 0x00, 0x33, 0x02, /* field top/bot */ 0x00, /* 3d FP */ }, }; static const struct hdmi_preset_conf hdmi_conf_1080i60 = { .core = { .h_blank = {0x18, 0x01}, .v2_blank = {0x32, 0x02}, .v1_blank = {0x16, 0x00}, .v_line = {0x65, 0x04}, .h_line = {0x98, 0x08}, .hsync_pol = {0x00}, .vsync_pol = {0x00}, 
.int_pro_mode = {0x01}, .v_blank_f0 = {0x49, 0x02}, .v_blank_f1 = {0x65, 0x04}, .h_sync_start = {0x56, 0x00}, .h_sync_end = {0x82, 0x00}, .v_sync_line_bef_2 = {0x07, 0x00}, .v_sync_line_bef_1 = {0x02, 0x00}, .v_sync_line_aft_2 = {0x39, 0x02}, .v_sync_line_aft_1 = {0x34, 0x02}, .v_sync_line_aft_pxl_2 = {0xa4, 0x04}, .v_sync_line_aft_pxl_1 = {0xa4, 0x04}, .v_blank_f2 = {0xff, 0xff}, .v_blank_f3 = {0xff, 0xff}, .v_blank_f4 = {0xff, 0xff}, .v_blank_f5 = {0xff, 0xff}, .v_sync_line_aft_3 = {0xff, 0xff}, .v_sync_line_aft_4 = {0xff, 0xff}, .v_sync_line_aft_5 = {0xff, 0xff}, .v_sync_line_aft_6 = {0xff, 0xff}, .v_sync_line_aft_pxl_3 = {0xff, 0xff}, .v_sync_line_aft_pxl_4 = {0xff, 0xff}, .v_sync_line_aft_pxl_5 = {0xff, 0xff}, .v_sync_line_aft_pxl_6 = {0xff, 0xff}, .vact_space_1 = {0xff, 0xff}, .vact_space_2 = {0xff, 0xff}, .vact_space_3 = {0xff, 0xff}, .vact_space_4 = {0xff, 0xff}, .vact_space_5 = {0xff, 0xff}, .vact_space_6 = {0xff, 0xff}, /* other don't care */ }, .tg = { 0x00, /* cmd */ 0x98, 0x08, /* h_fsz */ 0x18, 0x01, 0x80, 0x07, /* hact */ 0x65, 0x04, /* v_fsz */ 0x01, 0x00, 0x33, 0x02, /* vsync */ 0x16, 0x00, 0x1c, 0x02, /* vact */ 0x33, 0x02, /* field_chg */ 0x49, 0x02, /* vact_st2 */ 0x00, 0x00, /* vact_st3 */ 0x00, 0x00, /* vact_st4 */ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */ 0x01, 0x00, 0x33, 0x02, /* field top/bot */ 0x00, /* 3d FP */ }, }; static const struct hdmi_preset_conf hdmi_conf_1080p50 = { .core = { .h_blank = {0xd0, 0x02}, .v2_blank = {0x65, 0x04}, .v1_blank = {0x2d, 0x00}, .v_line = {0x65, 0x04}, .h_line = {0x50, 0x0a}, .hsync_pol = {0x00}, .vsync_pol = {0x00}, .int_pro_mode = {0x00}, .v_blank_f0 = {0xff, 0xff}, .v_blank_f1 = {0xff, 0xff}, .h_sync_start = {0x0e, 0x02}, .h_sync_end = {0x3a, 0x02}, .v_sync_line_bef_2 = {0x09, 0x00}, .v_sync_line_bef_1 = {0x04, 0x00}, .v_sync_line_aft_2 = {0xff, 0xff}, .v_sync_line_aft_1 = {0xff, 0xff}, .v_sync_line_aft_pxl_2 = {0xff, 0xff}, .v_sync_line_aft_pxl_1 = {0xff, 0xff}, .v_blank_f2 = {0xff, 0xff}, 
.v_blank_f3 = {0xff, 0xff}, .v_blank_f4 = {0xff, 0xff}, .v_blank_f5 = {0xff, 0xff}, .v_sync_line_aft_3 = {0xff, 0xff}, .v_sync_line_aft_4 = {0xff, 0xff}, .v_sync_line_aft_5 = {0xff, 0xff}, .v_sync_line_aft_6 = {0xff, 0xff}, .v_sync_line_aft_pxl_3 = {0xff, 0xff}, .v_sync_line_aft_pxl_4 = {0xff, 0xff}, .v_sync_line_aft_pxl_5 = {0xff, 0xff}, .v_sync_line_aft_pxl_6 = {0xff, 0xff}, .vact_space_1 = {0xff, 0xff}, .vact_space_2 = {0xff, 0xff}, .vact_space_3 = {0xff, 0xff}, .vact_space_4 = {0xff, 0xff}, .vact_space_5 = {0xff, 0xff}, .vact_space_6 = {0xff, 0xff}, /* other don't care */ }, .tg = { 0x00, /* cmd */ 0x50, 0x0a, /* h_fsz */ 0xd0, 0x02, 0x80, 0x07, /* hact */ 0x65, 0x04, /* v_fsz */ 0x01, 0x00, 0x33, 0x02, /* vsync */ 0x2d, 0x00, 0x38, 0x04, /* vact */ 0x33, 0x02, /* field_chg */ 0x48, 0x02, /* vact_st2 */ 0x00, 0x00, /* vact_st3 */ 0x00, 0x00, /* vact_st4 */ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */ 0x01, 0x00, 0x33, 0x02, /* field top/bot */ 0x00, /* 3d FP */ }, }; static const struct hdmi_preset_conf hdmi_conf_1080p60 = { .core = { .h_blank = {0x18, 0x01}, .v2_blank = {0x65, 0x04}, .v1_blank = {0x2d, 0x00}, .v_line = {0x65, 0x04}, .h_line = {0x98, 0x08}, .hsync_pol = {0x00}, .vsync_pol = {0x00}, .int_pro_mode = {0x00}, .v_blank_f0 = {0xff, 0xff}, .v_blank_f1 = {0xff, 0xff}, .h_sync_start = {0x56, 0x00}, .h_sync_end = {0x82, 0x00}, .v_sync_line_bef_2 = {0x09, 0x00}, .v_sync_line_bef_1 = {0x04, 0x00}, .v_sync_line_aft_2 = {0xff, 0xff}, .v_sync_line_aft_1 = {0xff, 0xff}, .v_sync_line_aft_pxl_2 = {0xff, 0xff}, .v_sync_line_aft_pxl_1 = {0xff, 0xff}, .v_blank_f2 = {0xff, 0xff}, .v_blank_f3 = {0xff, 0xff}, .v_blank_f4 = {0xff, 0xff}, .v_blank_f5 = {0xff, 0xff}, .v_sync_line_aft_3 = {0xff, 0xff}, .v_sync_line_aft_4 = {0xff, 0xff}, .v_sync_line_aft_5 = {0xff, 0xff}, .v_sync_line_aft_6 = {0xff, 0xff}, .v_sync_line_aft_pxl_3 = {0xff, 0xff}, .v_sync_line_aft_pxl_4 = {0xff, 0xff}, .v_sync_line_aft_pxl_5 = {0xff, 0xff}, .v_sync_line_aft_pxl_6 = {0xff, 0xff}, /* other don't 
care */ }, .tg = { 0x00, /* cmd */ 0x98, 0x08, /* h_fsz */ 0x18, 0x01, 0x80, 0x07, /* hact */ 0x65, 0x04, /* v_fsz */ 0x01, 0x00, 0x33, 0x02, /* vsync */ 0x2d, 0x00, 0x38, 0x04, /* vact */ 0x33, 0x02, /* field_chg */ 0x48, 0x02, /* vact_st2 */ 0x00, 0x00, /* vact_st3 */ 0x00, 0x00, /* vact_st4 */ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */ 0x01, 0x00, 0x33, 0x02, /* field top/bot */ 0x00, /* 3d FP */ }, }; static const struct hdmi_conf hdmi_confs[] = { { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 }, { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 }, { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 }, { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 }, { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 }, { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 }, { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 }, }; static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id) { return readl(hdata->regs + reg_id); } static inline void hdmi_reg_writeb(struct hdmi_context *hdata, u32 reg_id, u8 value) { writeb(value, hdata->regs + reg_id); } static inline void hdmi_reg_writemask(struct hdmi_context *hdata, u32 reg_id, u32 value, u32 mask) { u32 old = readl(hdata->regs + reg_id); value = (value & mask) | (old & ~mask); writel(value, hdata->regs + reg_id); } static void hdmi_v13_regs_dump(struct hdmi_context *hdata, char *prefix) { #define DUMPREG(reg_id) \ DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \ readl(hdata->regs + reg_id)) DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix); DUMPREG(HDMI_INTC_FLAG); DUMPREG(HDMI_INTC_CON); DUMPREG(HDMI_HPD_STATUS); DUMPREG(HDMI_V13_PHY_RSTOUT); DUMPREG(HDMI_V13_PHY_VPLL); DUMPREG(HDMI_V13_PHY_CMU); DUMPREG(HDMI_V13_CORE_RSTOUT); DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix); DUMPREG(HDMI_CON_0); DUMPREG(HDMI_CON_1); DUMPREG(HDMI_CON_2); DUMPREG(HDMI_SYS_STATUS); DUMPREG(HDMI_V13_PHY_STATUS); 
DUMPREG(HDMI_STATUS_EN); DUMPREG(HDMI_HPD); DUMPREG(HDMI_MODE_SEL); DUMPREG(HDMI_V13_HPD_GEN); DUMPREG(HDMI_V13_DC_CONTROL); DUMPREG(HDMI_V13_VIDEO_PATTERN_GEN); DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix); DUMPREG(HDMI_H_BLANK_0); DUMPREG(HDMI_H_BLANK_1); DUMPREG(HDMI_V13_V_BLANK_0); DUMPREG(HDMI_V13_V_BLANK_1); DUMPREG(HDMI_V13_V_BLANK_2); DUMPREG(HDMI_V13_H_V_LINE_0); DUMPREG(HDMI_V13_H_V_LINE_1); DUMPREG(HDMI_V13_H_V_LINE_2); DUMPREG(HDMI_VSYNC_POL); DUMPREG(HDMI_INT_PRO_MODE); DUMPREG(HDMI_V13_V_BLANK_F_0); DUMPREG(HDMI_V13_V_BLANK_F_1); DUMPREG(HDMI_V13_V_BLANK_F_2); DUMPREG(HDMI_V13_H_SYNC_GEN_0); DUMPREG(HDMI_V13_H_SYNC_GEN_1); DUMPREG(HDMI_V13_H_SYNC_GEN_2); DUMPREG(HDMI_V13_V_SYNC_GEN_1_0); DUMPREG(HDMI_V13_V_SYNC_GEN_1_1); DUMPREG(HDMI_V13_V_SYNC_GEN_1_2); DUMPREG(HDMI_V13_V_SYNC_GEN_2_0); DUMPREG(HDMI_V13_V_SYNC_GEN_2_1); DUMPREG(HDMI_V13_V_SYNC_GEN_2_2); DUMPREG(HDMI_V13_V_SYNC_GEN_3_0); DUMPREG(HDMI_V13_V_SYNC_GEN_3_1); DUMPREG(HDMI_V13_V_SYNC_GEN_3_2); DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix); DUMPREG(HDMI_TG_CMD); DUMPREG(HDMI_TG_H_FSZ_L); DUMPREG(HDMI_TG_H_FSZ_H); DUMPREG(HDMI_TG_HACT_ST_L); DUMPREG(HDMI_TG_HACT_ST_H); DUMPREG(HDMI_TG_HACT_SZ_L); DUMPREG(HDMI_TG_HACT_SZ_H); DUMPREG(HDMI_TG_V_FSZ_L); DUMPREG(HDMI_TG_V_FSZ_H); DUMPREG(HDMI_TG_VSYNC_L); DUMPREG(HDMI_TG_VSYNC_H); DUMPREG(HDMI_TG_VSYNC2_L); DUMPREG(HDMI_TG_VSYNC2_H); DUMPREG(HDMI_TG_VACT_ST_L); DUMPREG(HDMI_TG_VACT_ST_H); DUMPREG(HDMI_TG_VACT_SZ_L); DUMPREG(HDMI_TG_VACT_SZ_H); DUMPREG(HDMI_TG_FIELD_CHG_L); DUMPREG(HDMI_TG_FIELD_CHG_H); DUMPREG(HDMI_TG_VACT_ST2_L); DUMPREG(HDMI_TG_VACT_ST2_H); DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L); DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H); DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L); DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H); DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L); DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H); DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L); DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H); #undef DUMPREG } static void hdmi_v14_regs_dump(struct hdmi_context *hdata, char *prefix) { 
int i; #define DUMPREG(reg_id) \ DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \ readl(hdata->regs + reg_id)) DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix); DUMPREG(HDMI_INTC_CON); DUMPREG(HDMI_INTC_FLAG); DUMPREG(HDMI_HPD_STATUS); DUMPREG(HDMI_INTC_CON_1); DUMPREG(HDMI_INTC_FLAG_1); DUMPREG(HDMI_PHY_STATUS_0); DUMPREG(HDMI_PHY_STATUS_PLL); DUMPREG(HDMI_PHY_CON_0); DUMPREG(HDMI_PHY_RSTOUT); DUMPREG(HDMI_PHY_VPLL); DUMPREG(HDMI_PHY_CMU); DUMPREG(HDMI_CORE_RSTOUT); DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix); DUMPREG(HDMI_CON_0); DUMPREG(HDMI_CON_1); DUMPREG(HDMI_CON_2); DUMPREG(HDMI_SYS_STATUS); DUMPREG(HDMI_PHY_STATUS_0); DUMPREG(HDMI_STATUS_EN); DUMPREG(HDMI_HPD); DUMPREG(HDMI_MODE_SEL); DUMPREG(HDMI_ENC_EN); DUMPREG(HDMI_DC_CONTROL); DUMPREG(HDMI_VIDEO_PATTERN_GEN); DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix); DUMPREG(HDMI_H_BLANK_0); DUMPREG(HDMI_H_BLANK_1); DUMPREG(HDMI_V2_BLANK_0); DUMPREG(HDMI_V2_BLANK_1); DUMPREG(HDMI_V1_BLANK_0); DUMPREG(HDMI_V1_BLANK_1); DUMPREG(HDMI_V_LINE_0); DUMPREG(HDMI_V_LINE_1); DUMPREG(HDMI_H_LINE_0); DUMPREG(HDMI_H_LINE_1); DUMPREG(HDMI_HSYNC_POL); DUMPREG(HDMI_VSYNC_POL); DUMPREG(HDMI_INT_PRO_MODE); DUMPREG(HDMI_V_BLANK_F0_0); DUMPREG(HDMI_V_BLANK_F0_1); DUMPREG(HDMI_V_BLANK_F1_0); DUMPREG(HDMI_V_BLANK_F1_1); DUMPREG(HDMI_H_SYNC_START_0); DUMPREG(HDMI_H_SYNC_START_1); DUMPREG(HDMI_H_SYNC_END_0); DUMPREG(HDMI_H_SYNC_END_1); DUMPREG(HDMI_V_SYNC_LINE_BEF_2_0); DUMPREG(HDMI_V_SYNC_LINE_BEF_2_1); DUMPREG(HDMI_V_SYNC_LINE_BEF_1_0); DUMPREG(HDMI_V_SYNC_LINE_BEF_1_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_2_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_2_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_1_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_1_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_1); DUMPREG(HDMI_V_BLANK_F2_0); DUMPREG(HDMI_V_BLANK_F2_1); DUMPREG(HDMI_V_BLANK_F3_0); DUMPREG(HDMI_V_BLANK_F3_1); 
DUMPREG(HDMI_V_BLANK_F4_0); DUMPREG(HDMI_V_BLANK_F4_1); DUMPREG(HDMI_V_BLANK_F5_0); DUMPREG(HDMI_V_BLANK_F5_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_3_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_3_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_4_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_4_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_5_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_5_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_6_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_6_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_1); DUMPREG(HDMI_VACT_SPACE_1_0); DUMPREG(HDMI_VACT_SPACE_1_1); DUMPREG(HDMI_VACT_SPACE_2_0); DUMPREG(HDMI_VACT_SPACE_2_1); DUMPREG(HDMI_VACT_SPACE_3_0); DUMPREG(HDMI_VACT_SPACE_3_1); DUMPREG(HDMI_VACT_SPACE_4_0); DUMPREG(HDMI_VACT_SPACE_4_1); DUMPREG(HDMI_VACT_SPACE_5_0); DUMPREG(HDMI_VACT_SPACE_5_1); DUMPREG(HDMI_VACT_SPACE_6_0); DUMPREG(HDMI_VACT_SPACE_6_1); DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix); DUMPREG(HDMI_TG_CMD); DUMPREG(HDMI_TG_H_FSZ_L); DUMPREG(HDMI_TG_H_FSZ_H); DUMPREG(HDMI_TG_HACT_ST_L); DUMPREG(HDMI_TG_HACT_ST_H); DUMPREG(HDMI_TG_HACT_SZ_L); DUMPREG(HDMI_TG_HACT_SZ_H); DUMPREG(HDMI_TG_V_FSZ_L); DUMPREG(HDMI_TG_V_FSZ_H); DUMPREG(HDMI_TG_VSYNC_L); DUMPREG(HDMI_TG_VSYNC_H); DUMPREG(HDMI_TG_VSYNC2_L); DUMPREG(HDMI_TG_VSYNC2_H); DUMPREG(HDMI_TG_VACT_ST_L); DUMPREG(HDMI_TG_VACT_ST_H); DUMPREG(HDMI_TG_VACT_SZ_L); DUMPREG(HDMI_TG_VACT_SZ_H); DUMPREG(HDMI_TG_FIELD_CHG_L); DUMPREG(HDMI_TG_FIELD_CHG_H); DUMPREG(HDMI_TG_VACT_ST2_L); DUMPREG(HDMI_TG_VACT_ST2_H); DUMPREG(HDMI_TG_VACT_ST3_L); DUMPREG(HDMI_TG_VACT_ST3_H); DUMPREG(HDMI_TG_VACT_ST4_L); DUMPREG(HDMI_TG_VACT_ST4_H); DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L); DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H); DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L); DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H); DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L); 
DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H); DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L); DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H); DUMPREG(HDMI_TG_3D); DRM_DEBUG_KMS("%s: ---- PACKET REGISTERS ----\n", prefix); DUMPREG(HDMI_AVI_CON); DUMPREG(HDMI_AVI_HEADER0); DUMPREG(HDMI_AVI_HEADER1); DUMPREG(HDMI_AVI_HEADER2); DUMPREG(HDMI_AVI_CHECK_SUM); DUMPREG(HDMI_VSI_CON); DUMPREG(HDMI_VSI_HEADER0); DUMPREG(HDMI_VSI_HEADER1); DUMPREG(HDMI_VSI_HEADER2); for (i = 0; i < 7; ++i) DUMPREG(HDMI_VSI_DATA(i)); #undef DUMPREG } static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix) { if (hdata->is_v13) hdmi_v13_regs_dump(hdata, prefix); else hdmi_v14_regs_dump(hdata, prefix); } static int hdmi_v13_conf_index(struct drm_display_mode *mode) { int i; for (i = 0; i < ARRAY_SIZE(hdmi_v13_confs); ++i) if (hdmi_v13_confs[i].width == mode->hdisplay && hdmi_v13_confs[i].height == mode->vdisplay && hdmi_v13_confs[i].vrefresh == mode->vrefresh && hdmi_v13_confs[i].interlace == ((mode->flags & DRM_MODE_FLAG_INTERLACE) ? true : false)) return i; return -EINVAL; } static int hdmi_v14_conf_index(struct drm_display_mode *mode) { int i; for (i = 0; i < ARRAY_SIZE(hdmi_confs); ++i) if (hdmi_confs[i].width == mode->hdisplay && hdmi_confs[i].height == mode->vdisplay && hdmi_confs[i].vrefresh == mode->vrefresh && hdmi_confs[i].interlace == ((mode->flags & DRM_MODE_FLAG_INTERLACE) ? 
true : false)) return i; return -EINVAL; } static int hdmi_conf_index(struct hdmi_context *hdata, struct drm_display_mode *mode) { if (hdata->is_v13) return hdmi_v13_conf_index(mode); return hdmi_v14_conf_index(mode); } static bool hdmi_is_connected(void *ctx) { struct hdmi_context *hdata = ctx; u32 val = hdmi_reg_read(hdata, HDMI_HPD_STATUS); if (val) return true; return false; } static int hdmi_get_edid(void *ctx, struct drm_connector *connector, u8 *edid, int len) { struct edid *raw_edid; struct hdmi_context *hdata = ctx; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); if (!hdata->ddc_port) return -ENODEV; raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter); if (raw_edid) { memcpy(edid, raw_edid, min((1 + raw_edid->extensions) * EDID_LENGTH, len)); DRM_DEBUG_KMS("width[%d] x height[%d]\n", raw_edid->width_cm, raw_edid->height_cm); } else { return -ENODEV; } return 0; } static int hdmi_v13_check_timing(struct fb_videomode *check_timing) { int i; DRM_DEBUG_KMS("valid mode : xres=%d, yres=%d, refresh=%d, intl=%d\n", check_timing->xres, check_timing->yres, check_timing->refresh, (check_timing->vmode & FB_VMODE_INTERLACED) ? true : false); for (i = 0; i < ARRAY_SIZE(hdmi_v13_confs); ++i) if (hdmi_v13_confs[i].width == check_timing->xres && hdmi_v13_confs[i].height == check_timing->yres && hdmi_v13_confs[i].vrefresh == check_timing->refresh && hdmi_v13_confs[i].interlace == ((check_timing->vmode & FB_VMODE_INTERLACED) ? true : false)) return 0; /* TODO */ return -EINVAL; } static int hdmi_v14_check_timing(struct fb_videomode *check_timing) { int i; DRM_DEBUG_KMS("valid mode : xres=%d, yres=%d, refresh=%d, intl=%d\n", check_timing->xres, check_timing->yres, check_timing->refresh, (check_timing->vmode & FB_VMODE_INTERLACED) ? 
true : false); for (i = 0; i < ARRAY_SIZE(hdmi_confs); i++) if (hdmi_confs[i].width == check_timing->xres && hdmi_confs[i].height == check_timing->yres && hdmi_confs[i].vrefresh == check_timing->refresh && hdmi_confs[i].interlace == ((check_timing->vmode & FB_VMODE_INTERLACED) ? true : false)) return 0; /* TODO */ return -EINVAL; } static int hdmi_check_timing(void *ctx, void *timing) { struct hdmi_context *hdata = ctx; struct fb_videomode *check_timing = timing; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", check_timing->xres, check_timing->yres, check_timing->refresh, check_timing->vmode); if (hdata->is_v13) return hdmi_v13_check_timing(check_timing); else return hdmi_v14_check_timing(check_timing); } static int hdmi_display_power_on(void *ctx, int mode) { DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); switch (mode) { case DRM_MODE_DPMS_ON: DRM_DEBUG_KMS("hdmi [on]\n"); break; case DRM_MODE_DPMS_STANDBY: break; case DRM_MODE_DPMS_SUSPEND: break; case DRM_MODE_DPMS_OFF: DRM_DEBUG_KMS("hdmi [off]\n"); break; default: break; } return 0; } static void hdmi_set_acr(u32 freq, u8 *acr) { u32 n, cts; switch (freq) { case 32000: n = 4096; cts = 27000; break; case 44100: n = 6272; cts = 30000; break; case 88200: n = 12544; cts = 30000; break; case 176400: n = 25088; cts = 30000; break; case 48000: n = 6144; cts = 27000; break; case 96000: n = 12288; cts = 27000; break; case 192000: n = 24576; cts = 27000; break; default: n = 0; cts = 0; break; } acr[1] = cts >> 16; acr[2] = cts >> 8 & 0xff; acr[3] = cts & 0xff; acr[4] = n >> 16; acr[5] = n >> 8 & 0xff; acr[6] = n & 0xff; } static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr) { hdmi_reg_writeb(hdata, HDMI_ACR_N0, acr[6]); hdmi_reg_writeb(hdata, HDMI_ACR_N1, acr[5]); hdmi_reg_writeb(hdata, HDMI_ACR_N2, acr[4]); hdmi_reg_writeb(hdata, HDMI_ACR_MCTS0, acr[3]); hdmi_reg_writeb(hdata, HDMI_ACR_MCTS1, acr[2]); hdmi_reg_writeb(hdata, HDMI_ACR_MCTS2, acr[1]); 
	hdmi_reg_writeb(hdata, HDMI_ACR_CTS0, acr[3]);
	hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]);
	hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]);

	/* value 4 selects the ACR mode; register offset differs per IP rev */
	if (hdata->is_v13)
		hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4);
	else
		hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4);
}

/*
 * Initialise the HDMI audio path for a fixed 44.1 kHz / 16-bit I2S
 * stream: program ACR (N/CTS), the I2S mux/pin routing, the serial
 * format, and the IEC-60958 channel-status bytes.
 * NOTE(review): sample_rate/bits_per_sample are hard-coded here —
 * presumably a platform limitation; confirm before generalising.
 */
static void hdmi_audio_init(struct hdmi_context *hdata)
{
	u32 sample_rate, bits_per_sample, frame_size_code;
	u32 data_num, bit_ch, sample_frq;
	u32 val;
	u8 acr[7];

	sample_rate = 44100;
	bits_per_sample = 16;
	frame_size_code = 0;

	/* map the sample width onto the I2S data/bit-channel fields */
	switch (bits_per_sample) {
	case 20:
		data_num = 2;
		bit_ch = 1;
		break;
	case 24:
		data_num = 3;
		bit_ch = 1;
		break;
	default:
		data_num = 1;
		bit_ch = 0;
		break;
	}

	hdmi_set_acr(sample_rate, acr);
	hdmi_reg_acr(hdata, acr);

	hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CON, HDMI_I2S_IN_DISABLE
		| HDMI_I2S_AUD_I2S | HDMI_I2S_CUV_I2S_ENABLE
		| HDMI_I2S_MUX_ENABLE);

	hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CH, HDMI_I2S_CH0_EN
		| HDMI_I2S_CH1_EN | HDMI_I2S_CH2_EN);

	hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CUV, HDMI_I2S_CUV_RL_EN);

	/* IEC-60958 sampling-frequency code for the channel status */
	sample_frq = (sample_rate == 44100) ? 0 :
			(sample_rate == 48000) ? 2 :
			(sample_rate == 32000) ? 3 :
			(sample_rate == 96000) ? 0xa : 0x0;

	/* pulse the I2S clock to latch the mux configuration */
	hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_DIS);
	hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_EN);

	val = hdmi_reg_read(hdata, HDMI_I2S_DSD_CON) | 0x01;
	hdmi_reg_writeb(hdata, HDMI_I2S_DSD_CON, val);

	/* Configuration I2S input ports. Configure I2S_PIN_SEL_0~4 */
	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5)
			| HDMI_I2S_SEL_LRCK(6));
	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(1)
			| HDMI_I2S_SEL_SDATA2(4));
	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1)
			| HDMI_I2S_SEL_SDATA2(2));
	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0));

	/* I2S_CON_1 & 2 */
	hdmi_reg_writeb(hdata, HDMI_I2S_CON_1, HDMI_I2S_SCLK_FALLING_EDGE
			| HDMI_I2S_L_CH_LOW_POL);
	hdmi_reg_writeb(hdata, HDMI_I2S_CON_2, HDMI_I2S_MSB_FIRST_MODE
			| HDMI_I2S_SET_BIT_CH(bit_ch)
			| HDMI_I2S_SET_SDATA_BIT(data_num)
			| HDMI_I2S_BASIC_FORMAT);

	/* Configure register related to CUV information */
	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_0, HDMI_I2S_CH_STATUS_MODE_0
			| HDMI_I2S_2AUD_CH_WITHOUT_PREEMPH
			| HDMI_I2S_COPYRIGHT
			| HDMI_I2S_LINEAR_PCM
			| HDMI_I2S_CONSUMER_FORMAT);
	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_1, HDMI_I2S_CD_PLAYER);
	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_2, HDMI_I2S_SET_SOURCE_NUM(0));
	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_3, HDMI_I2S_CLK_ACCUR_LEVEL_2
			| HDMI_I2S_SET_SMP_FREQ(sample_frq));
	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_4,
			HDMI_I2S_ORG_SMP_FREQ_44_1
			| HDMI_I2S_WORD_LEN_MAX24_24BITS
			| HDMI_I2S_WORD_LEN_MAX_24BITS);

	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_CON, HDMI_I2S_CH_STATUS_RELOAD);
}

/*
 * Gate the Audio Sample Packet and AUI infoframe on/off.  A no-op in
 * DVI mode, which carries no audio.  (Body continues on the next
 * chunk line.)
 */
static void hdmi_audio_control(struct hdmi_context *hdata, bool onoff)
{
	u32 mod;

	mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
	if (mod & HDMI_DVI_MODE_EN)
		return;

	hdmi_reg_writeb(hdata, HDMI_AUI_CON, onoff ? 2 : 0);
	hdmi_reg_writemask(hdata, HDMI_CON_0, onoff ?
			HDMI_ASP_EN : HDMI_ASP_DIS, HDMI_ASP_MASK);
}

/*
 * Pulse the HDMI core soft-reset line (assert low, release high, 10 ms
 * settle each way).  HPD handling is suppressed for the duration so the
 * reset-induced HPD glitches do not reach DRM.
 */
static void hdmi_conf_reset(struct hdmi_context *hdata)
{
	u32 reg;

	/* disable hpd handle for drm */
	hdata->hpd_handle = false;

	if (hdata->is_v13)
		reg = HDMI_V13_CORE_RSTOUT;
	else
		reg = HDMI_CORE_RSTOUT;

	/* resetting HDMI core */
	hdmi_reg_writemask(hdata, reg, 0, HDMI_CORE_SW_RSTOUT);
	mdelay(10);
	hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT);
	mdelay(10);

	/* enable hpd handle for drm */
	hdata->hpd_handle = true;
}

/*
 * Bring the freshly-reset core to a sane baseline: re-arm HPD
 * interrupts, select HDMI (not DVI) mode, disable the bluescreen, and
 * enable per-vsync AVI/SPD/AUI/ACR packet transmission, with register
 * layouts differing between the v1.3 and v1.4 IP.
 */
static void hdmi_conf_init(struct hdmi_context *hdata)
{
	/* disable hpd handle for drm */
	hdata->hpd_handle = false;

	/* enable HPD interrupts */
	hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
		HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
	mdelay(10);
	hdmi_reg_writemask(hdata, HDMI_INTC_CON, ~0, HDMI_INTC_EN_GLOBAL |
		HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);

	/* choose HDMI mode */
	hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
		HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
	/* disable bluescreen */
	hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);

	if (hdata->is_v13) {
		/* choose bluescreen (fecal) color */
		hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12);
		hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34);
		hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_2, 0x56);

		/* enable AVI packet every vsync, fixes purple line problem */
		hdmi_reg_writeb(hdata, HDMI_V13_AVI_CON, 0x02);
		/* force RGB, look to CEA-861-D, table 7 for more detail */
		hdmi_reg_writeb(hdata, HDMI_V13_AVI_BYTE(0), 0 << 5);
		hdmi_reg_writemask(hdata, HDMI_CON_1, 0x10 << 5, 0x11 << 5);

		hdmi_reg_writeb(hdata, HDMI_V13_SPD_CON, 0x02);
		hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
		hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
	} else {
		/* enable AVI packet every vsync, fixes purple line problem */
		hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
		hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
	}

	/* enable hpd handle for drm */
	hdata->hpd_handle = true;
}

static void
hdmi_v13_timing_apply(struct hdmi_context *hdata) { const struct hdmi_v13_preset_conf *conf = hdmi_v13_confs[hdata->cur_conf].conf; const struct hdmi_v13_core_regs *core = &conf->core; const struct hdmi_v13_tg_regs *tg = &conf->tg; int tries; /* setting core registers */ hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]); hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]); hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_0, core->v_blank[0]); hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_1, core->v_blank[1]); hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_2, core->v_blank[2]); hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_0, core->h_v_line[0]); hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_1, core->h_v_line[1]); hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_2, core->h_v_line[2]); hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]); hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]); hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_0, core->v_blank_f[0]); hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_1, core->v_blank_f[1]); hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_2, core->v_blank_f[2]); hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_0, core->h_sync_gen[0]); hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_1, core->h_sync_gen[1]); hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_2, core->h_sync_gen[2]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_0, core->v_sync_gen1[0]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_1, core->v_sync_gen1[1]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_2, core->v_sync_gen1[2]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_0, core->v_sync_gen2[0]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_1, core->v_sync_gen2[1]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_2, core->v_sync_gen2[2]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_0, core->v_sync_gen3[0]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]); /* Timing generator registers */ hdmi_reg_writeb(hdata, 
HDMI_TG_H_FSZ_L, tg->h_fsz_l); hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h); hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st_l); hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st_h); hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz_l); hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz_h); hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz_l); hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz_h); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync_l); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync_h); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2_l); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2_h); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st_l); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st_h); hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz_l); hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz_h); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg_l); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h); /* waiting for HDMIPHY's PLL to get to steady state */ for (tries = 100; tries; --tries) { u32 val = hdmi_reg_read(hdata, HDMI_V13_PHY_STATUS); if (val & HDMI_PHY_STATUS_READY) break; mdelay(1); } /* steady state not achieved */ if (tries == 0) { DRM_ERROR("hdmiphy's pll could not reach steady 
state.\n"); hdmi_regs_dump(hdata, "timing apply"); } clk_disable(hdata->res.sclk_hdmi); clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_hdmiphy); clk_enable(hdata->res.sclk_hdmi); /* enable HDMI and timing generator */ hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN); if (core->int_pro_mode[0]) hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN | HDMI_FIELD_EN); else hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN); } static void hdmi_v14_timing_apply(struct hdmi_context *hdata) { const struct hdmi_preset_conf *conf = hdmi_confs[hdata->cur_conf].conf; const struct hdmi_core_regs *core = &conf->core; const struct hdmi_tg_regs *tg = &conf->tg; int tries; /* setting core registers */ hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]); hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]); hdmi_reg_writeb(hdata, HDMI_V2_BLANK_0, core->v2_blank[0]); hdmi_reg_writeb(hdata, HDMI_V2_BLANK_1, core->v2_blank[1]); hdmi_reg_writeb(hdata, HDMI_V1_BLANK_0, core->v1_blank[0]); hdmi_reg_writeb(hdata, HDMI_V1_BLANK_1, core->v1_blank[1]); hdmi_reg_writeb(hdata, HDMI_V_LINE_0, core->v_line[0]); hdmi_reg_writeb(hdata, HDMI_V_LINE_1, core->v_line[1]); hdmi_reg_writeb(hdata, HDMI_H_LINE_0, core->h_line[0]); hdmi_reg_writeb(hdata, HDMI_H_LINE_1, core->h_line[1]); hdmi_reg_writeb(hdata, HDMI_HSYNC_POL, core->hsync_pol[0]); hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]); hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_0, core->v_blank_f0[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_1, core->v_blank_f0[1]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_0, core->v_blank_f1[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_1, core->v_blank_f1[1]); hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_0, core->h_sync_start[0]); hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_1, core->h_sync_start[1]); hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_0, core->h_sync_end[0]); hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_1, core->h_sync_end[1]); 
hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_0, core->v_sync_line_bef_2[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_1, core->v_sync_line_bef_2[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_0, core->v_sync_line_bef_1[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_1, core->v_sync_line_bef_1[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_0, core->v_sync_line_aft_2[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_1, core->v_sync_line_aft_2[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_0, core->v_sync_line_aft_1[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_1, core->v_sync_line_aft_1[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, core->v_sync_line_aft_pxl_2[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_1, core->v_sync_line_aft_pxl_2[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, core->v_sync_line_aft_pxl_1[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_1, core->v_sync_line_aft_pxl_1[1]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_0, core->v_blank_f2[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_1, core->v_blank_f2[1]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_0, core->v_blank_f3[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_1, core->v_blank_f3[1]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_0, core->v_blank_f4[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_1, core->v_blank_f4[1]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_0, core->v_blank_f5[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_1, core->v_blank_f5[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_0, core->v_sync_line_aft_3[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_1, core->v_sync_line_aft_3[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_0, core->v_sync_line_aft_4[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_1, core->v_sync_line_aft_4[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_0, core->v_sync_line_aft_5[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_1, core->v_sync_line_aft_5[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_0, 
core->v_sync_line_aft_6[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_1, core->v_sync_line_aft_6[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0, core->v_sync_line_aft_pxl_3[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_1, core->v_sync_line_aft_pxl_3[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0, core->v_sync_line_aft_pxl_4[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_1, core->v_sync_line_aft_pxl_4[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0, core->v_sync_line_aft_pxl_5[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_1, core->v_sync_line_aft_pxl_5[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, core->v_sync_line_aft_pxl_6[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_1, core->v_sync_line_aft_pxl_6[1]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_0, core->vact_space_1[0]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_1, core->vact_space_1[1]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_0, core->vact_space_2[0]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_1, core->vact_space_2[1]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_0, core->vact_space_3[0]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_1, core->vact_space_3[1]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_0, core->vact_space_4[0]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_1, core->vact_space_4[1]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_0, core->vact_space_5[0]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_1, core->vact_space_5[1]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_0, core->vact_space_6[0]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]); /* Timing generator registers */ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l); hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h); hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st_l); hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st_h); hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz_l); hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz_h); 
hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz_l); hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz_h); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync_l); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync_h); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2_l); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2_h); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st_l); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st_h); hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz_l); hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz_h); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg_l); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3_l); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3_h); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4_l); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4_h); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h); hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d); /* waiting for HDMIPHY's PLL to get to steady state */ for (tries = 100; tries; --tries) { u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS_0); if (val & HDMI_PHY_STATUS_READY) break; mdelay(1); } /* steady state not achieved */ if (tries == 0) { DRM_ERROR("hdmiphy's pll could not reach steady state.\n"); hdmi_regs_dump(hdata, 
"timing apply"); } clk_disable(hdata->res.sclk_hdmi); clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_hdmiphy); clk_enable(hdata->res.sclk_hdmi); /* enable HDMI and timing generator */ hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN); if (core->int_pro_mode[0]) hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN | HDMI_FIELD_EN); else hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN); } static void hdmi_timing_apply(struct hdmi_context *hdata) { if (hdata->is_v13) hdmi_v13_timing_apply(hdata); else hdmi_v14_timing_apply(hdata); } static void hdmiphy_conf_reset(struct hdmi_context *hdata) { u8 buffer[2]; u32 reg; clk_disable(hdata->res.sclk_hdmi); clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_pixel); clk_enable(hdata->res.sclk_hdmi); /* operation mode */ buffer[0] = 0x1f; buffer[1] = 0x00; if (hdata->hdmiphy_port) i2c_master_send(hdata->hdmiphy_port, buffer, 2); if (hdata->is_v13) reg = HDMI_V13_PHY_RSTOUT; else reg = HDMI_PHY_RSTOUT; /* reset hdmiphy */ hdmi_reg_writemask(hdata, reg, ~0, HDMI_PHY_SW_RSTOUT); mdelay(10); hdmi_reg_writemask(hdata, reg, 0, HDMI_PHY_SW_RSTOUT); mdelay(10); } static void hdmiphy_conf_apply(struct hdmi_context *hdata) { const u8 *hdmiphy_data; u8 buffer[32]; u8 operation[2]; u8 read_buffer[32] = {0, }; int ret; int i; if (!hdata->hdmiphy_port) { DRM_ERROR("hdmiphy is not attached\n"); return; } /* pixel clock */ if (hdata->is_v13) hdmiphy_data = hdmi_v13_confs[hdata->cur_conf].hdmiphy_data; else hdmiphy_data = hdmi_confs[hdata->cur_conf].hdmiphy_data; memcpy(buffer, hdmiphy_data, 32); ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32); if (ret != 32) { DRM_ERROR("failed to configure HDMIPHY via I2C\n"); return; } mdelay(10); /* operation mode */ operation[0] = 0x1f; operation[1] = 0x80; ret = i2c_master_send(hdata->hdmiphy_port, operation, 2); if (ret != 2) { DRM_ERROR("failed to enable hdmiphy\n"); return; } ret = i2c_master_recv(hdata->hdmiphy_port, read_buffer, 32); if (ret < 0) { DRM_ERROR("failed to read 
hdmiphy config\n"); return; } for (i = 0; i < ret; i++) DRM_DEBUG_KMS("hdmiphy[0x%02x] write[0x%02x] - " "recv [0x%02x]\n", i, buffer[i], read_buffer[i]); } static void hdmi_conf_apply(struct hdmi_context *hdata) { DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); hdmiphy_conf_reset(hdata); hdmiphy_conf_apply(hdata); hdmi_conf_reset(hdata); hdmi_conf_init(hdata); hdmi_audio_init(hdata); /* setting core registers */ hdmi_timing_apply(hdata); hdmi_audio_control(hdata, true); hdmi_regs_dump(hdata, "start"); } static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_display_mode *m; struct hdmi_context *hdata = ctx; int index; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); drm_mode_set_crtcinfo(adjusted_mode, 0); if (hdata->is_v13) index = hdmi_v13_conf_index(adjusted_mode); else index = hdmi_v14_conf_index(adjusted_mode); /* just return if user desired mode exists. */ if (index >= 0) return; /* * otherwise, find the most suitable mode among modes and change it * to adjusted_mode. 
 */
	list_for_each_entry(m, &connector->modes, head) {
		if (hdata->is_v13)
			index = hdmi_v13_conf_index(m);
		else
			index = hdmi_v14_conf_index(m);

		/* first probed mode with a preset entry wins */
		if (index >= 0) {
			DRM_INFO("desired mode doesn't exist so\n");
			DRM_INFO("use the most suitable mode among modes.\n");
			memcpy(adjusted_mode, m, sizeof(*m));
			break;
		}
	}
}

/*
 * Latch the preset-table index for the given mode into hdata->cur_conf;
 * unsupported modes leave the previous configuration in place (only a
 * debug message is emitted).
 */
static void hdmi_mode_set(void *ctx, void *mode)
{
	struct hdmi_context *hdata = ctx;
	int conf_idx;

	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);

	conf_idx = hdmi_conf_index(hdata, mode);
	if (conf_idx >= 0)
		hdata->cur_conf = conf_idx;
	else
		DRM_DEBUG_KMS("not supported mode\n");
}

/* Report the driver's maximum supported resolution to the DRM layer. */
static void hdmi_get_max_resol(void *ctx, unsigned int *width,
					unsigned int *height)
{
	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);

	*width = MAX_WIDTH;
	*height = MAX_HEIGHT;
}

/* Push the currently selected configuration to hardware and mark it live. */
static void hdmi_commit(void *ctx)
{
	struct hdmi_context *hdata = ctx;

	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);

	hdmi_conf_apply(hdata);

	hdata->enabled = true;
}

/*
 * Quiesce the output if it was enabled: stop audio, then reset the PHY
 * and the core.  Note: 'enabled' is not cleared here — presumably
 * intentional so a later commit re-applies; verify against callers.
 */
static void hdmi_disable(void *ctx)
{
	struct hdmi_context *hdata = ctx;

	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);

	if (hdata->enabled) {
		hdmi_audio_control(hdata, false);
		hdmiphy_conf_reset(hdata);
		hdmi_conf_reset(hdata);
	}
}

/* Callback table handed to the common exynos_drm_hdmi glue layer. */
static struct exynos_hdmi_ops hdmi_ops = {
	/* display */
	.is_connected = hdmi_is_connected,
	.get_edid = hdmi_get_edid,
	.check_timing = hdmi_check_timing,
	.power_on = hdmi_display_power_on,

	/* manager */
	.mode_fixup = hdmi_mode_fixup,
	.mode_set = hdmi_mode_set,
	.get_max_resol = hdmi_get_max_resol,
	.commit = hdmi_commit,
	.disable = hdmi_disable,
};

/*
 * Handle hotplug events outside the interrupt handler proper.
*/ static void hdmi_hotplug_func(struct work_struct *work) { struct hdmi_context *hdata = container_of(work, struct hdmi_context, hotplug_work); struct exynos_drm_hdmi_context *ctx = (struct exynos_drm_hdmi_context *)hdata->parent_ctx; drm_helper_hpd_irq_event(ctx->drm_dev); } static irqreturn_t hdmi_irq_handler(int irq, void *arg) { struct exynos_drm_hdmi_context *ctx = arg; struct hdmi_context *hdata = ctx->ctx; u32 intc_flag; intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG); /* clearing flags for HPD plug/unplug */ if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) { DRM_DEBUG_KMS("unplugged, handling:%d\n", hdata->hpd_handle); hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0, HDMI_INTC_FLAG_HPD_UNPLUG); } if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) { DRM_DEBUG_KMS("plugged, handling:%d\n", hdata->hpd_handle); hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0, HDMI_INTC_FLAG_HPD_PLUG); } if (ctx->drm_dev && hdata->hpd_handle) queue_work(hdata->wq, &hdata->hotplug_work); return IRQ_HANDLED; } static int __devinit hdmi_resources_init(struct hdmi_context *hdata) { struct device *dev = hdata->dev; struct hdmi_resources *res = &hdata->res; static char *supply[] = { "hdmi-en", "vdd", "vdd_osc", "vdd_pll", }; int i, ret; DRM_DEBUG_KMS("HDMI resource init\n"); memset(res, 0, sizeof *res); /* get clocks, power */ res->hdmi = clk_get(dev, "hdmi"); if (IS_ERR_OR_NULL(res->hdmi)) { DRM_ERROR("failed to get clock 'hdmi'\n"); goto fail; } res->sclk_hdmi = clk_get(dev, "sclk_hdmi"); if (IS_ERR_OR_NULL(res->sclk_hdmi)) { DRM_ERROR("failed to get clock 'sclk_hdmi'\n"); goto fail; } res->sclk_pixel = clk_get(dev, "sclk_pixel"); if (IS_ERR_OR_NULL(res->sclk_pixel)) { DRM_ERROR("failed to get clock 'sclk_pixel'\n"); goto fail; } res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy"); if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) { DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n"); goto fail; } res->hdmiphy = clk_get(dev, "hdmiphy"); if (IS_ERR_OR_NULL(res->hdmiphy)) { DRM_ERROR("failed to get clock 'hdmiphy'\n"); 
goto fail; } clk_set_parent(res->sclk_hdmi, res->sclk_pixel); res->regul_bulk = kzalloc(ARRAY_SIZE(supply) * sizeof res->regul_bulk[0], GFP_KERNEL); if (!res->regul_bulk) { DRM_ERROR("failed to get memory for regulators\n"); goto fail; } for (i = 0; i < ARRAY_SIZE(supply); ++i) { res->regul_bulk[i].supply = supply[i]; res->regul_bulk[i].consumer = NULL; } ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk); if (ret) { DRM_ERROR("failed to get regulators\n"); goto fail; } res->regul_count = ARRAY_SIZE(supply); return 0; fail: DRM_ERROR("HDMI resource init - failed\n"); return -ENODEV; } static int hdmi_resources_cleanup(struct hdmi_context *hdata) { struct hdmi_resources *res = &hdata->res; regulator_bulk_free(res->regul_count, res->regul_bulk); /* kfree is NULL-safe */ kfree(res->regul_bulk); if (!IS_ERR_OR_NULL(res->hdmiphy)) clk_put(res->hdmiphy); if (!IS_ERR_OR_NULL(res->sclk_hdmiphy)) clk_put(res->sclk_hdmiphy); if (!IS_ERR_OR_NULL(res->sclk_pixel)) clk_put(res->sclk_pixel); if (!IS_ERR_OR_NULL(res->sclk_hdmi)) clk_put(res->sclk_hdmi); if (!IS_ERR_OR_NULL(res->hdmi)) clk_put(res->hdmi); memset(res, 0, sizeof *res); return 0; } static void hdmi_resource_poweron(struct hdmi_context *hdata) { struct hdmi_resources *res = &hdata->res; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); /* turn HDMI power on */ regulator_bulk_enable(res->regul_count, res->regul_bulk); /* power-on hdmi physical interface */ clk_enable(res->hdmiphy); /* turn clocks on */ clk_enable(res->hdmi); clk_enable(res->sclk_hdmi); hdmiphy_conf_reset(hdata); hdmi_conf_reset(hdata); hdmi_conf_init(hdata); hdmi_audio_init(hdata); } static void hdmi_resource_poweroff(struct hdmi_context *hdata) { struct hdmi_resources *res = &hdata->res; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); /* turn clocks off */ clk_disable(res->sclk_hdmi); clk_disable(res->hdmi); /* power-off hdmiphy */ clk_disable(res->hdmiphy); /* turn HDMI power off */ regulator_bulk_disable(res->regul_count, 
res->regul_bulk); } static int hdmi_runtime_suspend(struct device *dev) { struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev); DRM_DEBUG_KMS("%s\n", __func__); hdmi_resource_poweroff(ctx->ctx); return 0; } static int hdmi_runtime_resume(struct device *dev) { struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev); DRM_DEBUG_KMS("%s\n", __func__); hdmi_resource_poweron(ctx->ctx); return 0; } static const struct dev_pm_ops hdmi_pm_ops = { .runtime_suspend = hdmi_runtime_suspend, .runtime_resume = hdmi_runtime_resume, }; static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy; void hdmi_attach_ddc_client(struct i2c_client *ddc) { if (ddc) hdmi_ddc = ddc; } void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy) { if (hdmiphy) hdmi_hdmiphy = hdmiphy; } static int __devinit hdmi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct exynos_drm_hdmi_context *drm_hdmi_ctx; struct hdmi_context *hdata; struct exynos_drm_hdmi_pdata *pdata; struct resource *res; int ret; DRM_DEBUG_KMS("[%d]\n", __LINE__); pdata = pdev->dev.platform_data; if (!pdata) { DRM_ERROR("no platform data specified\n"); return -EINVAL; } drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL); if (!drm_hdmi_ctx) { DRM_ERROR("failed to allocate common hdmi context.\n"); return -ENOMEM; } hdata = kzalloc(sizeof(struct hdmi_context), GFP_KERNEL); if (!hdata) { DRM_ERROR("out of memory\n"); kfree(drm_hdmi_ctx); return -ENOMEM; } drm_hdmi_ctx->ctx = (void *)hdata; hdata->parent_ctx = (void *)drm_hdmi_ctx; platform_set_drvdata(pdev, drm_hdmi_ctx); hdata->is_v13 = pdata->is_v13; hdata->default_win = pdata->default_win; hdata->default_timing = &pdata->timing; hdata->default_bpp = pdata->bpp; hdata->dev = dev; ret = hdmi_resources_init(hdata); if (ret) { ret = -EINVAL; goto err_data; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { DRM_ERROR("failed to find registers\n"); ret = -ENOENT; goto err_resource; } hdata->regs_res = request_mem_region(res->start, 
resource_size(res), dev_name(dev)); if (!hdata->regs_res) { DRM_ERROR("failed to claim register region\n"); ret = -ENOENT; goto err_resource; } hdata->regs = ioremap(res->start, resource_size(res)); if (!hdata->regs) { DRM_ERROR("failed to map registers\n"); ret = -ENXIO; goto err_req_region; } /* DDC i2c driver */ if (i2c_add_driver(&ddc_driver)) { DRM_ERROR("failed to register ddc i2c driver\n"); ret = -ENOENT; goto err_iomap; } hdata->ddc_port = hdmi_ddc; /* hdmiphy i2c driver */ if (i2c_add_driver(&hdmiphy_driver)) { DRM_ERROR("failed to register hdmiphy i2c driver\n"); ret = -ENOENT; goto err_ddc; } hdata->hdmiphy_port = hdmi_hdmiphy; res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (res == NULL) { DRM_ERROR("get interrupt resource failed.\n"); ret = -ENXIO; goto err_hdmiphy; } /* create workqueue and hotplug work */ hdata->wq = alloc_workqueue("exynos-drm-hdmi", WQ_UNBOUND | WQ_NON_REENTRANT, 1); if (hdata->wq == NULL) { DRM_ERROR("Failed to create workqueue.\n"); ret = -ENOMEM; goto err_hdmiphy; } INIT_WORK(&hdata->hotplug_work, hdmi_hotplug_func); /* register hpd interrupt */ ret = request_irq(res->start, hdmi_irq_handler, 0, "drm_hdmi", drm_hdmi_ctx); if (ret) { DRM_ERROR("request interrupt failed.\n"); goto err_workqueue; } hdata->irq = res->start; /* register specific callbacks to common hdmi. 
*/ exynos_hdmi_ops_register(&hdmi_ops); hdmi_resource_poweron(hdata); return 0; err_workqueue: destroy_workqueue(hdata->wq); err_hdmiphy: i2c_del_driver(&hdmiphy_driver); err_ddc: i2c_del_driver(&ddc_driver); err_iomap: iounmap(hdata->regs); err_req_region: release_mem_region(hdata->regs_res->start, resource_size(hdata->regs_res)); err_resource: hdmi_resources_cleanup(hdata); err_data: kfree(hdata); kfree(drm_hdmi_ctx); return ret; } static int __devexit hdmi_remove(struct platform_device *pdev) { struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev); struct hdmi_context *hdata = ctx->ctx; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); hdmi_resource_poweroff(hdata); disable_irq(hdata->irq); free_irq(hdata->irq, hdata); cancel_work_sync(&hdata->hotplug_work); destroy_workqueue(hdata->wq); hdmi_resources_cleanup(hdata); iounmap(hdata->regs); release_mem_region(hdata->regs_res->start, resource_size(hdata->regs_res)); /* hdmiphy i2c driver */ i2c_del_driver(&hdmiphy_driver); /* DDC i2c driver */ i2c_del_driver(&ddc_driver); kfree(hdata); return 0; } struct platform_driver hdmi_driver = { .probe = hdmi_probe, .remove = __devexit_p(hdmi_remove), .driver = { .name = "exynos4-hdmi", .owner = THIS_MODULE, .pm = &hdmi_pm_ops, }, };
gpl-2.0
IonKiwi/android_kernel_samsung_kccat6
drivers/net/ethernet/cisco/enic/enic_res.c
7860
9562
/* * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/netdevice.h> #include "wq_enet_desc.h" #include "rq_enet_desc.h" #include "cq_enet_desc.h" #include "vnic_resource.h" #include "vnic_enet.h" #include "vnic_dev.h" #include "vnic_wq.h" #include "vnic_rq.h" #include "vnic_cq.h" #include "vnic_intr.h" #include "vnic_stats.h" #include "vnic_nic.h" #include "vnic_rss.h" #include "enic_res.h" #include "enic.h" int enic_get_vnic_config(struct enic *enic) { struct vnic_enet_config *c = &enic->config; int err; err = vnic_dev_get_mac_addr(enic->vdev, enic->mac_addr); if (err) { dev_err(enic_get_dev(enic), "Error getting MAC addr, %d\n", err); return err; } #define GET_CONFIG(m) \ do { \ err = vnic_dev_spec(enic->vdev, \ offsetof(struct vnic_enet_config, m), \ sizeof(c->m), &c->m); \ if (err) { \ dev_err(enic_get_dev(enic), \ "Error getting %s, %d\n", #m, err); \ return err; \ } \ } while (0) GET_CONFIG(flags); GET_CONFIG(wq_desc_count); GET_CONFIG(rq_desc_count); GET_CONFIG(mtu); GET_CONFIG(intr_timer_type); GET_CONFIG(intr_mode); GET_CONFIG(intr_timer_usec); GET_CONFIG(loop_tag); c->wq_desc_count = min_t(u32, 
ENIC_MAX_WQ_DESCS, max_t(u32, ENIC_MIN_WQ_DESCS, c->wq_desc_count)); c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */ c->rq_desc_count = min_t(u32, ENIC_MAX_RQ_DESCS, max_t(u32, ENIC_MIN_RQ_DESCS, c->rq_desc_count)); c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */ if (c->mtu == 0) c->mtu = 1500; c->mtu = min_t(u16, ENIC_MAX_MTU, max_t(u16, ENIC_MIN_MTU, c->mtu)); c->intr_timer_usec = min_t(u32, c->intr_timer_usec, vnic_dev_get_intr_coal_timer_max(enic->vdev)); dev_info(enic_get_dev(enic), "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n", enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu); dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s " "tso/lro %s/%s rss %s intr mode %s type %s timer %d usec " "loopback tag 0x%04x\n", ENIC_SETTING(enic, TXCSUM) ? "yes" : "no", ENIC_SETTING(enic, RXCSUM) ? "yes" : "no", ENIC_SETTING(enic, TSO) ? "yes" : "no", ENIC_SETTING(enic, LRO) ? "yes" : "no", ENIC_SETTING(enic, RSS) ? "yes" : "no", c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" : c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" : c->intr_mode == VENET_INTR_MODE_ANY ? "any" : "unknown", c->intr_timer_type == VENET_INTR_TYPE_MIN ? "min" : c->intr_timer_type == VENET_INTR_TYPE_IDLE ? 
"idle" : "unknown", c->intr_timer_usec, c->loop_tag); return 0; } int enic_add_vlan(struct enic *enic, u16 vlanid) { u64 a0 = vlanid, a1 = 0; int wait = 1000; int err; err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait); if (err) dev_err(enic_get_dev(enic), "Can't add vlan id, %d\n", err); return err; } int enic_del_vlan(struct enic *enic, u16 vlanid) { u64 a0 = vlanid, a1 = 0; int wait = 1000; int err; err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait); if (err) dev_err(enic_get_dev(enic), "Can't delete vlan id, %d\n", err); return err; } int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en, u8 ig_vlan_strip_en) { u64 a0, a1; u32 nic_cfg; int wait = 1000; vnic_set_nic_cfg(&nic_cfg, rss_default_cpu, rss_hash_type, rss_hash_bits, rss_base_cpu, rss_enable, tso_ipid_split_en, ig_vlan_strip_en); a0 = nic_cfg; a1 = 0; return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait); } int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len) { u64 a0 = (u64)key_pa, a1 = len; int wait = 1000; return vnic_dev_cmd(enic->vdev, CMD_RSS_KEY, &a0, &a1, wait); } int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len) { u64 a0 = (u64)cpu_pa, a1 = len; int wait = 1000; return vnic_dev_cmd(enic->vdev, CMD_RSS_CPU, &a0, &a1, wait); } void enic_free_vnic_resources(struct enic *enic) { unsigned int i; for (i = 0; i < enic->wq_count; i++) vnic_wq_free(&enic->wq[i]); for (i = 0; i < enic->rq_count; i++) vnic_rq_free(&enic->rq[i]); for (i = 0; i < enic->cq_count; i++) vnic_cq_free(&enic->cq[i]); for (i = 0; i < enic->intr_count; i++) vnic_intr_free(&enic->intr[i]); } void enic_get_res_counts(struct enic *enic) { enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ); enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ); enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ); enic->intr_count = 
vnic_dev_get_res_count(enic->vdev, RES_TYPE_INTR_CTRL); dev_info(enic_get_dev(enic), "vNIC resources avail: wq %d rq %d cq %d intr %d\n", enic->wq_count, enic->rq_count, enic->cq_count, enic->intr_count); } void enic_init_vnic_resources(struct enic *enic) { enum vnic_dev_intr_mode intr_mode; unsigned int mask_on_assertion; unsigned int interrupt_offset; unsigned int error_interrupt_enable; unsigned int error_interrupt_offset; unsigned int cq_index; unsigned int i; intr_mode = vnic_dev_get_intr_mode(enic->vdev); /* Init RQ/WQ resources. * * RQ[0 - n-1] point to CQ[0 - n-1] * WQ[0 - m-1] point to CQ[n - n+m-1] * * Error interrupt is not enabled for MSI. */ switch (intr_mode) { case VNIC_DEV_INTR_MODE_INTX: case VNIC_DEV_INTR_MODE_MSIX: error_interrupt_enable = 1; error_interrupt_offset = enic->intr_count - 2; break; default: error_interrupt_enable = 0; error_interrupt_offset = 0; break; } for (i = 0; i < enic->rq_count; i++) { cq_index = i; vnic_rq_init(&enic->rq[i], cq_index, error_interrupt_enable, error_interrupt_offset); } for (i = 0; i < enic->wq_count; i++) { cq_index = enic->rq_count + i; vnic_wq_init(&enic->wq[i], cq_index, error_interrupt_enable, error_interrupt_offset); } /* Init CQ resources * * CQ[0 - n+m-1] point to INTR[0] for INTx, MSI * CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X */ for (i = 0; i < enic->cq_count; i++) { switch (intr_mode) { case VNIC_DEV_INTR_MODE_MSIX: interrupt_offset = i; break; default: interrupt_offset = 0; break; } vnic_cq_init(&enic->cq[i], 0 /* flow_control_enable */, 1 /* color_enable */, 0 /* cq_head */, 0 /* cq_tail */, 1 /* cq_tail_color */, 1 /* interrupt_enable */, 1 /* cq_entry_enable */, 0 /* cq_message_enable */, interrupt_offset, 0 /* cq_message_addr */); } /* Init INTR resources * * mask_on_assertion is not used for INTx due to the level- * triggered nature of INTx */ switch (intr_mode) { case VNIC_DEV_INTR_MODE_MSI: case VNIC_DEV_INTR_MODE_MSIX: mask_on_assertion = 1; break; default: mask_on_assertion = 0; 
break; } for (i = 0; i < enic->intr_count; i++) { vnic_intr_init(&enic->intr[i], enic->config.intr_timer_usec, enic->config.intr_timer_type, mask_on_assertion); } } int enic_alloc_vnic_resources(struct enic *enic) { enum vnic_dev_intr_mode intr_mode; unsigned int i; int err; intr_mode = vnic_dev_get_intr_mode(enic->vdev); dev_info(enic_get_dev(enic), "vNIC resources used: " "wq %d rq %d cq %d intr %d intr mode %s\n", enic->wq_count, enic->rq_count, enic->cq_count, enic->intr_count, intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" : intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" : intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" : "unknown"); /* Allocate queue resources */ for (i = 0; i < enic->wq_count; i++) { err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i, enic->config.wq_desc_count, sizeof(struct wq_enet_desc)); if (err) goto err_out_cleanup; } for (i = 0; i < enic->rq_count; i++) { err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i, enic->config.rq_desc_count, sizeof(struct rq_enet_desc)); if (err) goto err_out_cleanup; } for (i = 0; i < enic->cq_count; i++) { if (i < enic->rq_count) err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i, enic->config.rq_desc_count, sizeof(struct cq_enet_rq_desc)); else err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i, enic->config.wq_desc_count, sizeof(struct cq_enet_wq_desc)); if (err) goto err_out_cleanup; } for (i = 0; i < enic->intr_count; i++) { err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i); if (err) goto err_out_cleanup; } /* Hook remaining resource */ enic->legacy_pba = vnic_dev_get_res(enic->vdev, RES_TYPE_INTR_PBA_LEGACY, 0); if (!enic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) { dev_err(enic_get_dev(enic), "Failed to hook legacy pba resource\n"); err = -ENODEV; goto err_out_cleanup; } return 0; err_out_cleanup: enic_free_vnic_resources(enic); return err; }
gpl-2.0
andr7e/rk3188_tablet_jb
kernel/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
8372
4380
/* kcomedilib/kcomedilib.c a comedlib interface for kernel modules COMEDI - Linux Control and Measurement Device Interface Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define __NO_VERSION__ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/fcntl.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/mm.h> #include <linux/io.h> #include "../comedi.h" #include "../comedilib.h" #include "../comedidev.h" MODULE_AUTHOR("David Schleef <ds@schleef.org>"); MODULE_DESCRIPTION("Comedi kernel library"); MODULE_LICENSE("GPL"); struct comedi_device *comedi_open(const char *filename) { struct comedi_device_file_info *dev_file_info; struct comedi_device *dev; unsigned int minor; if (strncmp(filename, "/dev/comedi", 11) != 0) return NULL; minor = simple_strtoul(filename + 11, NULL, 0); if (minor >= COMEDI_NUM_BOARD_MINORS) return NULL; dev_file_info = comedi_get_device_file_info(minor); if (dev_file_info == NULL) return NULL; dev = dev_file_info->device; if (dev == NULL || !dev->attached) return NULL; if (!try_module_get(dev->driver->module)) return NULL; return dev; } EXPORT_SYMBOL(comedi_open); int comedi_close(struct comedi_device *d) { struct comedi_device *dev = (struct comedi_device *)d; module_put(dev->driver->module); return 0; } 
EXPORT_SYMBOL(comedi_close); static int comedi_do_insn(struct comedi_device *dev, struct comedi_insn *insn) { struct comedi_subdevice *s; int ret = 0; /* a subdevice instruction */ if (insn->subdev >= dev->n_subdevices) { ret = -EINVAL; goto error; } s = dev->subdevices + insn->subdev; if (s->type == COMEDI_SUBD_UNUSED) { printk(KERN_ERR "%d not useable subdevice\n", insn->subdev); ret = -EIO; goto error; } /* XXX check lock */ ret = comedi_check_chanlist(s, 1, &insn->chanspec); if (ret < 0) { printk(KERN_ERR "bad chanspec\n"); ret = -EINVAL; goto error; } if (s->busy) { ret = -EBUSY; goto error; } s->busy = dev; switch (insn->insn) { case INSN_BITS: ret = s->insn_bits(dev, s, insn, insn->data); break; case INSN_CONFIG: /* XXX should check instruction length */ ret = s->insn_config(dev, s, insn, insn->data); break; default: ret = -EINVAL; break; } s->busy = NULL; error: return ret; } int comedi_dio_config(struct comedi_device *dev, unsigned int subdev, unsigned int chan, unsigned int io) { struct comedi_insn insn; memset(&insn, 0, sizeof(insn)); insn.insn = INSN_CONFIG; insn.n = 1; insn.data = &io; insn.subdev = subdev; insn.chanspec = CR_PACK(chan, 0, 0); return comedi_do_insn(dev, &insn); } EXPORT_SYMBOL(comedi_dio_config); int comedi_dio_bitfield(struct comedi_device *dev, unsigned int subdev, unsigned int mask, unsigned int *bits) { struct comedi_insn insn; unsigned int data[2]; int ret; memset(&insn, 0, sizeof(insn)); insn.insn = INSN_BITS; insn.n = 2; insn.data = data; insn.subdev = subdev; data[0] = mask; data[1] = *bits; ret = comedi_do_insn(dev, &insn); *bits = data[1]; return ret; } EXPORT_SYMBOL(comedi_dio_bitfield); int comedi_find_subdevice_by_type(struct comedi_device *dev, int type, unsigned int subd) { if (subd > dev->n_subdevices) return -ENODEV; for (; subd < dev->n_subdevices; subd++) { if (dev->subdevices[subd].type == type) return subd; } return -1; } EXPORT_SYMBOL(comedi_find_subdevice_by_type); int comedi_get_n_channels(struct comedi_device 
*dev, unsigned int subdevice) { struct comedi_subdevice *s = dev->subdevices + subdevice; return s->n_chan; } EXPORT_SYMBOL(comedi_get_n_channels);
gpl-2.0
KyleCo76/FIK
drivers/infiniband/hw/mthca/mthca_memfree.c
9908
18110
/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/mm.h> #include <linux/scatterlist.h> #include <linux/sched.h> #include <linux/slab.h> #include <asm/page.h> #include "mthca_memfree.h" #include "mthca_dev.h" #include "mthca_cmd.h" /* * We allocate in as big chunks as we can, up to a maximum of 256 KB * per chunk. 
*/ enum { MTHCA_ICM_ALLOC_SIZE = 1 << 18, MTHCA_TABLE_CHUNK_SIZE = 1 << 18 }; struct mthca_user_db_table { struct mutex mutex; struct { u64 uvirt; struct scatterlist mem; int refcount; } page[0]; }; static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk) { int i; if (chunk->nsg > 0) pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages, PCI_DMA_BIDIRECTIONAL); for (i = 0; i < chunk->npages; ++i) __free_pages(sg_page(&chunk->mem[i]), get_order(chunk->mem[i].length)); } static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk) { int i; for (i = 0; i < chunk->npages; ++i) { dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length, lowmem_page_address(sg_page(&chunk->mem[i])), sg_dma_address(&chunk->mem[i])); } } void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent) { struct mthca_icm_chunk *chunk, *tmp; if (!icm) return; list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) { if (coherent) mthca_free_icm_coherent(dev, chunk); else mthca_free_icm_pages(dev, chunk); kfree(chunk); } kfree(icm); } static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask) { struct page *page; /* * Use __GFP_ZERO because buggy firmware assumes ICM pages are * cleared, and subtle failures are seen if they aren't. 
*/ page = alloc_pages(gfp_mask | __GFP_ZERO, order); if (!page) return -ENOMEM; sg_set_page(mem, page, PAGE_SIZE << order, 0); return 0; } static int mthca_alloc_icm_coherent(struct device *dev, struct scatterlist *mem, int order, gfp_t gfp_mask) { void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, &sg_dma_address(mem), gfp_mask); if (!buf) return -ENOMEM; sg_set_buf(mem, buf, PAGE_SIZE << order); BUG_ON(mem->offset); sg_dma_len(mem) = PAGE_SIZE << order; return 0; } struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, gfp_t gfp_mask, int coherent) { struct mthca_icm *icm; struct mthca_icm_chunk *chunk = NULL; int cur_order; int ret; /* We use sg_set_buf for coherent allocs, which assumes low memory */ BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM)); icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); if (!icm) return icm; icm->refcount = 0; INIT_LIST_HEAD(&icm->chunk_list); cur_order = get_order(MTHCA_ICM_ALLOC_SIZE); while (npages > 0) { if (!chunk) { chunk = kmalloc(sizeof *chunk, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); if (!chunk) goto fail; sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN); chunk->npages = 0; chunk->nsg = 0; list_add_tail(&chunk->list, &icm->chunk_list); } while (1 << cur_order > npages) --cur_order; if (coherent) ret = mthca_alloc_icm_coherent(&dev->pdev->dev, &chunk->mem[chunk->npages], cur_order, gfp_mask); else ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages], cur_order, gfp_mask); if (!ret) { ++chunk->npages; if (coherent) ++chunk->nsg; else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) { chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, chunk->npages, PCI_DMA_BIDIRECTIONAL); if (chunk->nsg <= 0) goto fail; } if (chunk->npages == MTHCA_ICM_CHUNK_LEN) chunk = NULL; npages -= 1 << cur_order; } else { --cur_order; if (cur_order < 0) goto fail; } } if (!coherent && chunk) { chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, chunk->npages, PCI_DMA_BIDIRECTIONAL); if (chunk->nsg <= 0) goto fail; } return 
icm; fail: mthca_free_icm(dev, icm, coherent); return NULL; } int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj) { int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE; int ret = 0; mutex_lock(&table->mutex); if (table->icm[i]) { ++table->icm[i]->refcount; goto out; } table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT, (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN, table->coherent); if (!table->icm[i]) { ret = -ENOMEM; goto out; } if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE)) { mthca_free_icm(dev, table->icm[i], table->coherent); table->icm[i] = NULL; ret = -ENOMEM; goto out; } ++table->icm[i]->refcount; out: mutex_unlock(&table->mutex); return ret; } void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj) { int i; if (!mthca_is_memfree(dev)) return; i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE; mutex_lock(&table->mutex); if (--table->icm[i]->refcount == 0) { mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE, MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE); mthca_free_icm(dev, table->icm[i], table->coherent); table->icm[i] = NULL; } mutex_unlock(&table->mutex); } void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle) { int idx, offset, dma_offset, i; struct mthca_icm_chunk *chunk; struct mthca_icm *icm; struct page *page = NULL; if (!table->lowmem) return NULL; mutex_lock(&table->mutex); idx = (obj & (table->num_obj - 1)) * table->obj_size; icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE]; dma_offset = offset = idx % MTHCA_TABLE_CHUNK_SIZE; if (!icm) goto out; list_for_each_entry(chunk, &icm->chunk_list, list) { for (i = 0; i < chunk->npages; ++i) { if (dma_handle && dma_offset >= 0) { if (sg_dma_len(&chunk->mem[i]) > dma_offset) *dma_handle = sg_dma_address(&chunk->mem[i]) + dma_offset; dma_offset -= sg_dma_len(&chunk->mem[i]); } /* DMA 
mapping can merge pages but not split them, * so if we found the page, dma_handle has already * been assigned to. */ if (chunk->mem[i].length > offset) { page = sg_page(&chunk->mem[i]); goto out; } offset -= chunk->mem[i].length; } } out: mutex_unlock(&table->mutex); return page ? lowmem_page_address(page) + offset : NULL; } int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table, int start, int end) { int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size; int i, err; for (i = start; i <= end; i += inc) { err = mthca_table_get(dev, table, i); if (err) goto fail; } return 0; fail: while (i > start) { i -= inc; mthca_table_put(dev, table, i); } return err; } void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table, int start, int end) { int i; if (!mthca_is_memfree(dev)) return; for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size) mthca_table_put(dev, table, i); } struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev, u64 virt, int obj_size, int nobj, int reserved, int use_lowmem, int use_coherent) { struct mthca_icm_table *table; int obj_per_chunk; int num_icm; unsigned chunk_size; int i; obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size; num_icm = DIV_ROUND_UP(nobj, obj_per_chunk); table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL); if (!table) return NULL; table->virt = virt; table->num_icm = num_icm; table->num_obj = nobj; table->obj_size = obj_size; table->lowmem = use_lowmem; table->coherent = use_coherent; mutex_init(&table->mutex); for (i = 0; i < num_icm; ++i) table->icm[i] = NULL; for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) { chunk_size = MTHCA_TABLE_CHUNK_SIZE; if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size) chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE; table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT, (use_lowmem ? 
GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN, use_coherent); if (!table->icm[i]) goto err; if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE)) { mthca_free_icm(dev, table->icm[i], table->coherent); table->icm[i] = NULL; goto err; } /* * Add a reference to this ICM chunk so that it never * gets freed (since it contains reserved firmware objects). */ ++table->icm[i]->refcount; } return table; err: for (i = 0; i < num_icm; ++i) if (table->icm[i]) { mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE, MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE); mthca_free_icm(dev, table->icm[i], table->coherent); } kfree(table); return NULL; } void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table) { int i; for (i = 0; i < table->num_icm; ++i) if (table->icm[i]) { mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE, MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE); mthca_free_icm(dev, table->icm[i], table->coherent); } kfree(table); } static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page) { return dev->uar_table.uarc_base + uar->index * dev->uar_table.uarc_size + page * MTHCA_ICM_PAGE_SIZE; } int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar, struct mthca_user_db_table *db_tab, int index, u64 uaddr) { struct page *pages[1]; int ret = 0; int i; if (!mthca_is_memfree(dev)) return 0; if (index < 0 || index > dev->uar_table.uarc_size / 8) return -EINVAL; mutex_lock(&db_tab->mutex); i = index / MTHCA_DB_REC_PER_PAGE; if ((db_tab->page[i].refcount >= MTHCA_DB_REC_PER_PAGE) || (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) || (uaddr & 4095)) { ret = -EINVAL; goto out; } if (db_tab->page[i].refcount) { ++db_tab->page[i].refcount; goto out; } ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0, pages, NULL); if (ret < 0) goto out; sg_set_page(&db_tab->page[i].mem, pages[0], MTHCA_ICM_PAGE_SIZE, uaddr & ~PAGE_MASK); ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, 
PCI_DMA_TODEVICE); if (ret < 0) { put_page(pages[0]); goto out; } ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem), mthca_uarc_virt(dev, uar, i)); if (ret) { pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); put_page(sg_page(&db_tab->page[i].mem)); goto out; } db_tab->page[i].uvirt = uaddr; db_tab->page[i].refcount = 1; out: mutex_unlock(&db_tab->mutex); return ret; } void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar, struct mthca_user_db_table *db_tab, int index) { if (!mthca_is_memfree(dev)) return; /* * To make our bookkeeping simpler, we don't unmap DB * pages until we clean up the whole db table. */ mutex_lock(&db_tab->mutex); --db_tab->page[index / MTHCA_DB_REC_PER_PAGE].refcount; mutex_unlock(&db_tab->mutex); } struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev) { struct mthca_user_db_table *db_tab; int npages; int i; if (!mthca_is_memfree(dev)) return NULL; npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL); if (!db_tab) return ERR_PTR(-ENOMEM); mutex_init(&db_tab->mutex); for (i = 0; i < npages; ++i) { db_tab->page[i].refcount = 0; db_tab->page[i].uvirt = 0; sg_init_table(&db_tab->page[i].mem, 1); } return db_tab; } void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar, struct mthca_user_db_table *db_tab) { int i; if (!mthca_is_memfree(dev)) return; for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) { if (db_tab->page[i].uvirt) { mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1); pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); put_page(sg_page(&db_tab->page[i].mem)); } } kfree(db_tab); } int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type, u32 qn, __be32 **db) { int group; int start, end, dir; int i, j; struct mthca_db_page *page; int ret = 0; mutex_lock(&dev->db_tab->mutex); switch (type) { case MTHCA_DB_TYPE_CQ_ARM: 
case MTHCA_DB_TYPE_SQ: group = 0; start = 0; end = dev->db_tab->max_group1; dir = 1; break; case MTHCA_DB_TYPE_CQ_SET_CI: case MTHCA_DB_TYPE_RQ: case MTHCA_DB_TYPE_SRQ: group = 1; start = dev->db_tab->npages - 1; end = dev->db_tab->min_group2; dir = -1; break; default: ret = -EINVAL; goto out; } for (i = start; i != end; i += dir) if (dev->db_tab->page[i].db_rec && !bitmap_full(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE)) { page = dev->db_tab->page + i; goto found; } for (i = start; i != end; i += dir) if (!dev->db_tab->page[i].db_rec) { page = dev->db_tab->page + i; goto alloc; } if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) { ret = -ENOMEM; goto out; } if (group == 0) ++dev->db_tab->max_group1; else --dev->db_tab->min_group2; page = dev->db_tab->page + end; alloc: page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE, &page->mapping, GFP_KERNEL); if (!page->db_rec) { ret = -ENOMEM; goto out; } memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE); ret = mthca_MAP_ICM_page(dev, page->mapping, mthca_uarc_virt(dev, &dev->driver_uar, i)); if (ret) { dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE, page->db_rec, page->mapping); goto out; } bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE); found: j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE); set_bit(j, page->used); if (group == 1) j = MTHCA_DB_REC_PER_PAGE - 1 - j; ret = i * MTHCA_DB_REC_PER_PAGE + j; page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5)); *db = (__be32 *) &page->db_rec[j]; out: mutex_unlock(&dev->db_tab->mutex); return ret; } void mthca_free_db(struct mthca_dev *dev, int type, int db_index) { int i, j; struct mthca_db_page *page; i = db_index / MTHCA_DB_REC_PER_PAGE; j = db_index % MTHCA_DB_REC_PER_PAGE; page = dev->db_tab->page + i; mutex_lock(&dev->db_tab->mutex); page->db_rec[j] = 0; if (i >= dev->db_tab->min_group2) j = MTHCA_DB_REC_PER_PAGE - 1 - j; clear_bit(j, page->used); if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) && i >= 
dev->db_tab->max_group1 - 1) { mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1); dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE, page->db_rec, page->mapping); page->db_rec = NULL; if (i == dev->db_tab->max_group1) { --dev->db_tab->max_group1; /* XXX may be able to unmap more pages now */ } if (i == dev->db_tab->min_group2) ++dev->db_tab->min_group2; } mutex_unlock(&dev->db_tab->mutex); } int mthca_init_db_tab(struct mthca_dev *dev) { int i; if (!mthca_is_memfree(dev)) return 0; dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL); if (!dev->db_tab) return -ENOMEM; mutex_init(&dev->db_tab->mutex); dev->db_tab->npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; dev->db_tab->max_group1 = 0; dev->db_tab->min_group2 = dev->db_tab->npages - 1; dev->db_tab->page = kmalloc(dev->db_tab->npages * sizeof *dev->db_tab->page, GFP_KERNEL); if (!dev->db_tab->page) { kfree(dev->db_tab); return -ENOMEM; } for (i = 0; i < dev->db_tab->npages; ++i) dev->db_tab->page[i].db_rec = NULL; return 0; } void mthca_cleanup_db_tab(struct mthca_dev *dev) { int i; if (!mthca_is_memfree(dev)) return; /* * Because we don't always free our UARC pages when they * become empty to make mthca_free_db() simpler we need to * make a sweep through the doorbell pages and free any * leftover pages now. */ for (i = 0; i < dev->db_tab->npages; ++i) { if (!dev->db_tab->page[i].db_rec) continue; if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE)) mthca_warn(dev, "Kernel UARC page %d not empty\n", i); mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1); dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE, dev->db_tab->page[i].db_rec, dev->db_tab->page[i].mapping); } kfree(dev->db_tab->page); kfree(dev->db_tab); }
gpl-2.0
Trinityhaxxor/Xperia_S_T-Core_Kernel
arch/score/kernel/time.c
11956
2829
/* * arch/score/kernel/time.c * * Score Processor version. * * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. * Chen Liqin <liqin.chen@sunplusct.com> * Lennox Wu <lennox.wu@sunplusct.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/clockchips.h> #include <linux/interrupt.h> #include <asm/scoreregs.h> static irqreturn_t timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evdev = dev_id; /* clear timer interrupt flag */ outl(1, P_TIMER0_CPP_REG); evdev->event_handler(evdev); return IRQ_HANDLED; } static struct irqaction timer_irq = { .handler = timer_interrupt, .flags = IRQF_DISABLED | IRQF_TIMER, .name = "timer", }; static int score_timer_set_next_event(unsigned long delta, struct clock_event_device *evdev) { outl((TMR_M_PERIODIC | TMR_IE_ENABLE), P_TIMER0_CTRL); outl(delta, P_TIMER0_PRELOAD); outl(inl(P_TIMER0_CTRL) | TMR_ENABLE, P_TIMER0_CTRL); return 0; } static void score_timer_set_mode(enum clock_event_mode mode, struct clock_event_device *evdev) { switch (mode) { case CLOCK_EVT_MODE_PERIODIC: outl((TMR_M_PERIODIC | TMR_IE_ENABLE), P_TIMER0_CTRL); outl(SYSTEM_CLOCK/HZ, P_TIMER0_PRELOAD); outl(inl(P_TIMER0_CTRL) | TMR_ENABLE, P_TIMER0_CTRL); break; case CLOCK_EVT_MODE_ONESHOT: case CLOCK_EVT_MODE_SHUTDOWN: case CLOCK_EVT_MODE_RESUME: case 
CLOCK_EVT_MODE_UNUSED: break; default: BUG(); } } static struct clock_event_device score_clockevent = { .name = "score_clockevent", .features = CLOCK_EVT_FEAT_PERIODIC, .shift = 16, .set_next_event = score_timer_set_next_event, .set_mode = score_timer_set_mode, }; void __init time_init(void) { timer_irq.dev_id = &score_clockevent; setup_irq(IRQ_TIMER , &timer_irq); /* setup COMPARE clockevent */ score_clockevent.mult = div_sc(SYSTEM_CLOCK, NSEC_PER_SEC, score_clockevent.shift); score_clockevent.max_delta_ns = clockevent_delta2ns((u32)~0, &score_clockevent); score_clockevent.min_delta_ns = clockevent_delta2ns(50, &score_clockevent) + 1; score_clockevent.cpumask = cpumask_of(0); clockevents_register_device(&score_clockevent); }
gpl-2.0
somya-anand/y2038
drivers/infiniband/hw/ehca/ehca_pd.c
13492
3392
/* * IBM eServer eHCA Infiniband device driver for Linux on POWER * * PD functions * * Authors: Christoph Raisch <raisch@de.ibm.com> * * Copyright (c) 2005 IBM Corporation * * All rights reserved. * * This source code is distributed under a dual license of GPL v2.0 and OpenIB * BSD. * * OpenIB BSD License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/slab.h> #include "ehca_tools.h" #include "ehca_iverbs.h" static struct kmem_cache *pd_cache; struct ib_pd *ehca_alloc_pd(struct ib_device *device, struct ib_ucontext *context, struct ib_udata *udata) { struct ehca_pd *pd; int i; pd = kmem_cache_zalloc(pd_cache, GFP_KERNEL); if (!pd) { ehca_err(device, "device=%p context=%p out of memory", device, context); return ERR_PTR(-ENOMEM); } for (i = 0; i < 2; i++) { INIT_LIST_HEAD(&pd->free[i]); INIT_LIST_HEAD(&pd->full[i]); } mutex_init(&pd->lock); /* * Kernel PD: when device = -1, 0 * User PD: when context != -1 */ if (!context) { /* * Kernel PDs after init reuses always * the one created in ehca_shca_reopen() */ struct ehca_shca *shca = container_of(device, struct ehca_shca, ib_device); pd->fw_pd.value = shca->pd->fw_pd.value; } else pd->fw_pd.value = (u64)pd; return &pd->ib_pd; } int ehca_dealloc_pd(struct ib_pd *pd) { struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd); int i, leftovers = 0; struct ipz_small_queue_page *page, *tmp; for (i = 0; i < 2; i++) { list_splice(&my_pd->full[i], &my_pd->free[i]); list_for_each_entry_safe(page, tmp, &my_pd->free[i], list) { leftovers = 1; free_page(page->page); kmem_cache_free(small_qp_cache, page); } } if (leftovers) ehca_warn(pd->device, "Some small queue pages were not freed"); kmem_cache_free(pd_cache, my_pd); return 0; } int ehca_init_pd_cache(void) { pd_cache = kmem_cache_create("ehca_cache_pd", sizeof(struct ehca_pd), 0, SLAB_HWCACHE_ALIGN, NULL); if (!pd_cache) return -ENOMEM; return 0; } void ehca_cleanup_pd_cache(void) { if (pd_cache) kmem_cache_destroy(pd_cache); }
gpl-2.0
cyj1988jyc/jz2440-android-kernel-2.6.27
fs/jfs/acl.c
181
5703
/* * Copyright (C) International Business Machines Corp., 2002-2004 * Copyright (C) Andreas Gruenbacher, 2001 * Copyright (C) Linus Torvalds, 1991, 1992 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/sched.h> #include <linux/fs.h> #include <linux/quotaops.h> #include <linux/posix_acl_xattr.h> #include "jfs_incore.h" #include "jfs_txnmgr.h" #include "jfs_xattr.h" #include "jfs_acl.h" static struct posix_acl *jfs_get_acl(struct inode *inode, int type) { struct posix_acl *acl; char *ea_name; struct jfs_inode_info *ji = JFS_IP(inode); struct posix_acl **p_acl; int size; char *value = NULL; switch(type) { case ACL_TYPE_ACCESS: ea_name = POSIX_ACL_XATTR_ACCESS; p_acl = &ji->i_acl; break; case ACL_TYPE_DEFAULT: ea_name = POSIX_ACL_XATTR_DEFAULT; p_acl = &ji->i_default_acl; break; default: return ERR_PTR(-EINVAL); } if (*p_acl != JFS_ACL_NOT_CACHED) return posix_acl_dup(*p_acl); size = __jfs_getxattr(inode, ea_name, NULL, 0); if (size > 0) { value = kmalloc(size, GFP_KERNEL); if (!value) return ERR_PTR(-ENOMEM); size = __jfs_getxattr(inode, ea_name, value, size); } if (size < 0) { if (size == -ENODATA) { *p_acl = NULL; acl = NULL; } else acl = ERR_PTR(size); } else { acl = posix_acl_from_xattr(value, size); if (!IS_ERR(acl)) *p_acl = posix_acl_dup(acl); } kfree(value); return acl; } static int jfs_set_acl(tid_t 
tid, struct inode *inode, int type, struct posix_acl *acl) { char *ea_name; struct jfs_inode_info *ji = JFS_IP(inode); struct posix_acl **p_acl; int rc; int size = 0; char *value = NULL; if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; switch(type) { case ACL_TYPE_ACCESS: ea_name = POSIX_ACL_XATTR_ACCESS; p_acl = &ji->i_acl; break; case ACL_TYPE_DEFAULT: ea_name = POSIX_ACL_XATTR_DEFAULT; p_acl = &ji->i_default_acl; if (!S_ISDIR(inode->i_mode)) return acl ? -EACCES : 0; break; default: return -EINVAL; } if (acl) { size = posix_acl_xattr_size(acl->a_count); value = kmalloc(size, GFP_KERNEL); if (!value) return -ENOMEM; rc = posix_acl_to_xattr(acl, value, size); if (rc < 0) goto out; } rc = __jfs_setxattr(tid, inode, ea_name, value, size, 0); out: kfree(value); if (!rc) { if (*p_acl && (*p_acl != JFS_ACL_NOT_CACHED)) posix_acl_release(*p_acl); *p_acl = posix_acl_dup(acl); } return rc; } static int jfs_check_acl(struct inode *inode, int mask) { struct jfs_inode_info *ji = JFS_IP(inode); if (ji->i_acl == JFS_ACL_NOT_CACHED) { struct posix_acl *acl = jfs_get_acl(inode, ACL_TYPE_ACCESS); if (IS_ERR(acl)) return PTR_ERR(acl); posix_acl_release(acl); } if (ji->i_acl) return posix_acl_permission(inode, ji->i_acl, mask); return -EAGAIN; } int jfs_permission(struct inode *inode, int mask) { return generic_permission(inode, mask, jfs_check_acl); } int jfs_init_acl(tid_t tid, struct inode *inode, struct inode *dir) { struct posix_acl *acl = NULL; struct posix_acl *clone; mode_t mode; int rc = 0; if (S_ISLNK(inode->i_mode)) return 0; acl = jfs_get_acl(dir, ACL_TYPE_DEFAULT); if (IS_ERR(acl)) return PTR_ERR(acl); if (acl) { if (S_ISDIR(inode->i_mode)) { rc = jfs_set_acl(tid, inode, ACL_TYPE_DEFAULT, acl); if (rc) goto cleanup; } clone = posix_acl_clone(acl, GFP_KERNEL); if (!clone) { rc = -ENOMEM; goto cleanup; } mode = inode->i_mode; rc = posix_acl_create_masq(clone, &mode); if (rc >= 0) { inode->i_mode = mode; if (rc > 0) rc = jfs_set_acl(tid, inode, ACL_TYPE_ACCESS, clone); } 
posix_acl_release(clone); cleanup: posix_acl_release(acl); } else inode->i_mode &= ~current->fs->umask; JFS_IP(inode)->mode2 = (JFS_IP(inode)->mode2 & 0xffff0000) | inode->i_mode; return rc; } static int jfs_acl_chmod(struct inode *inode) { struct posix_acl *acl, *clone; int rc; if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; acl = jfs_get_acl(inode, ACL_TYPE_ACCESS); if (IS_ERR(acl) || !acl) return PTR_ERR(acl); clone = posix_acl_clone(acl, GFP_KERNEL); posix_acl_release(acl); if (!clone) return -ENOMEM; rc = posix_acl_chmod_masq(clone, inode->i_mode); if (!rc) { tid_t tid = txBegin(inode->i_sb, 0); mutex_lock(&JFS_IP(inode)->commit_mutex); rc = jfs_set_acl(tid, inode, ACL_TYPE_ACCESS, clone); if (!rc) rc = txCommit(tid, 1, &inode, 0); txEnd(tid); mutex_unlock(&JFS_IP(inode)->commit_mutex); } posix_acl_release(clone); return rc; } int jfs_setattr(struct dentry *dentry, struct iattr *iattr) { struct inode *inode = dentry->d_inode; int rc; rc = inode_change_ok(inode, iattr); if (rc) return rc; if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { if (DQUOT_TRANSFER(inode, iattr)) return -EDQUOT; } rc = inode_setattr(inode, iattr); if (!rc && (iattr->ia_valid & ATTR_MODE)) rc = jfs_acl_chmod(inode); return rc; }
gpl-2.0
pengshp/linux
fs/xfs/xfs_rtalloc.c
437
36292
/* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_bit.h" #include "xfs_mount.h" #include "xfs_inode.h" #include "xfs_bmap.h" #include "xfs_bmap_util.h" #include "xfs_bmap_btree.h" #include "xfs_alloc.h" #include "xfs_error.h" #include "xfs_trans.h" #include "xfs_trans_space.h" #include "xfs_trace.h" #include "xfs_buf.h" #include "xfs_icache.h" #include "xfs_rtalloc.h" /* * Read and return the summary information for a given extent size, * bitmap block combination. * Keeps track of a current summary block, so we don't keep reading * it from the buffer cache. */ static int xfs_rtget_summary( xfs_mount_t *mp, /* file system mount structure */ xfs_trans_t *tp, /* transaction pointer */ int log, /* log2 of extent size */ xfs_rtblock_t bbno, /* bitmap block number */ xfs_buf_t **rbpp, /* in/out: summary block buffer */ xfs_fsblock_t *rsb, /* in/out: summary block number */ xfs_suminfo_t *sum) /* out: summary info for this block */ { return xfs_rtmodify_summary_int(mp, tp, log, bbno, 0, rbpp, rsb, sum); } /* * Return whether there are any free extents in the size range given * by low and high, for the bitmap block bbno. 
*/ STATIC int /* error */ xfs_rtany_summary( xfs_mount_t *mp, /* file system mount structure */ xfs_trans_t *tp, /* transaction pointer */ int low, /* low log2 extent size */ int high, /* high log2 extent size */ xfs_rtblock_t bbno, /* bitmap block number */ xfs_buf_t **rbpp, /* in/out: summary block buffer */ xfs_fsblock_t *rsb, /* in/out: summary block number */ int *stat) /* out: any good extents here? */ { int error; /* error value */ int log; /* loop counter, log2 of ext. size */ xfs_suminfo_t sum; /* summary data */ /* * Loop over logs of extent sizes. Order is irrelevant. */ for (log = low; log <= high; log++) { /* * Get one summary datum. */ error = xfs_rtget_summary(mp, tp, log, bbno, rbpp, rsb, &sum); if (error) { return error; } /* * If there are any, return success. */ if (sum) { *stat = 1; return 0; } } /* * Found nothing, return failure. */ *stat = 0; return 0; } /* * Copy and transform the summary file, given the old and new * parameters in the mount structures. */ STATIC int /* error */ xfs_rtcopy_summary( xfs_mount_t *omp, /* old file system mount point */ xfs_mount_t *nmp, /* new file system mount point */ xfs_trans_t *tp) /* transaction pointer */ { xfs_rtblock_t bbno; /* bitmap block number */ xfs_buf_t *bp; /* summary buffer */ int error; /* error return value */ int log; /* summary level number (log length) */ xfs_suminfo_t sum; /* summary data */ xfs_fsblock_t sumbno; /* summary block number */ bp = NULL; for (log = omp->m_rsumlevels - 1; log >= 0; log--) { for (bbno = omp->m_sb.sb_rbmblocks - 1; (xfs_srtblock_t)bbno >= 0; bbno--) { error = xfs_rtget_summary(omp, tp, log, bbno, &bp, &sumbno, &sum); if (error) return error; if (sum == 0) continue; error = xfs_rtmodify_summary(omp, tp, log, bbno, -sum, &bp, &sumbno); if (error) return error; error = xfs_rtmodify_summary(nmp, tp, log, bbno, sum, &bp, &sumbno); if (error) return error; ASSERT(sum > 0); } } return 0; } /* * Mark an extent specified by start and len allocated. 
* Updates all the summary information as well as the bitmap. */ STATIC int /* error */ xfs_rtallocate_range( xfs_mount_t *mp, /* file system mount point */ xfs_trans_t *tp, /* transaction pointer */ xfs_rtblock_t start, /* start block to allocate */ xfs_extlen_t len, /* length to allocate */ xfs_buf_t **rbpp, /* in/out: summary block buffer */ xfs_fsblock_t *rsb) /* in/out: summary block number */ { xfs_rtblock_t end; /* end of the allocated extent */ int error; /* error value */ xfs_rtblock_t postblock = 0; /* first block allocated > end */ xfs_rtblock_t preblock = 0; /* first block allocated < start */ end = start + len - 1; /* * Assume we're allocating out of the middle of a free extent. * We need to find the beginning and end of the extent so we can * properly update the summary. */ error = xfs_rtfind_back(mp, tp, start, 0, &preblock); if (error) { return error; } /* * Find the next allocated block (end of free extent). */ error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1, &postblock); if (error) { return error; } /* * Decrement the summary information corresponding to the entire * (old) free extent. */ error = xfs_rtmodify_summary(mp, tp, XFS_RTBLOCKLOG(postblock + 1 - preblock), XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb); if (error) { return error; } /* * If there are blocks not being allocated at the front of the * old extent, add summary data for them to be free. */ if (preblock < start) { error = xfs_rtmodify_summary(mp, tp, XFS_RTBLOCKLOG(start - preblock), XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb); if (error) { return error; } } /* * If there are blocks not being allocated at the end of the * old extent, add summary data for them to be free. */ if (postblock > end) { error = xfs_rtmodify_summary(mp, tp, XFS_RTBLOCKLOG(postblock - end), XFS_BITTOBLOCK(mp, end + 1), 1, rbpp, rsb); if (error) { return error; } } /* * Modify the bitmap to mark this extent allocated. 
*/ error = xfs_rtmodify_range(mp, tp, start, len, 0); return error; } /* * Attempt to allocate an extent minlen<=len<=maxlen starting from * bitmap block bbno. If we don't get maxlen then use prod to trim * the length, if given. Returns error; returns starting block in *rtblock. * The lengths are all in rtextents. */ STATIC int /* error */ xfs_rtallocate_extent_block( xfs_mount_t *mp, /* file system mount point */ xfs_trans_t *tp, /* transaction pointer */ xfs_rtblock_t bbno, /* bitmap block number */ xfs_extlen_t minlen, /* minimum length to allocate */ xfs_extlen_t maxlen, /* maximum length to allocate */ xfs_extlen_t *len, /* out: actual length allocated */ xfs_rtblock_t *nextp, /* out: next block to try */ xfs_buf_t **rbpp, /* in/out: summary block buffer */ xfs_fsblock_t *rsb, /* in/out: summary block number */ xfs_extlen_t prod, /* extent product factor */ xfs_rtblock_t *rtblock) /* out: start block allocated */ { xfs_rtblock_t besti; /* best rtblock found so far */ xfs_rtblock_t bestlen; /* best length found so far */ xfs_rtblock_t end; /* last rtblock in chunk */ int error; /* error value */ xfs_rtblock_t i; /* current rtblock trying */ xfs_rtblock_t next; /* next rtblock to try */ int stat; /* status from internal calls */ /* * Loop over all the extents starting in this bitmap block, * looking for one that's long enough. */ for (i = XFS_BLOCKTOBIT(mp, bbno), besti = -1, bestlen = 0, end = XFS_BLOCKTOBIT(mp, bbno + 1) - 1; i <= end; i++) { /* * See if there's a free extent of maxlen starting at i. * If it's not so then next will contain the first non-free. */ error = xfs_rtcheck_range(mp, tp, i, maxlen, 1, &next, &stat); if (error) { return error; } if (stat) { /* * i for maxlen is all free, allocate and return that. 
*/ error = xfs_rtallocate_range(mp, tp, i, maxlen, rbpp, rsb); if (error) { return error; } *len = maxlen; *rtblock = i; return 0; } /* * In the case where we have a variable-sized allocation * request, figure out how big this free piece is, * and if it's big enough for the minimum, and the best * so far, remember it. */ if (minlen < maxlen) { xfs_rtblock_t thislen; /* this extent size */ thislen = next - i; if (thislen >= minlen && thislen > bestlen) { besti = i; bestlen = thislen; } } /* * If not done yet, find the start of the next free space. */ if (next < end) { error = xfs_rtfind_forw(mp, tp, next, end, &i); if (error) { return error; } } else break; } /* * Searched the whole thing & didn't find a maxlen free extent. */ if (minlen < maxlen && besti != -1) { xfs_extlen_t p; /* amount to trim length by */ /* * If size should be a multiple of prod, make that so. */ if (prod > 1 && (p = do_mod(bestlen, prod))) bestlen -= p; /* * Allocate besti for bestlen & return that. */ error = xfs_rtallocate_range(mp, tp, besti, bestlen, rbpp, rsb); if (error) { return error; } *len = bestlen; *rtblock = besti; return 0; } /* * Allocation failed. Set *nextp to the next block to try. */ *nextp = next; *rtblock = NULLRTBLOCK; return 0; } /* * Allocate an extent of length minlen<=len<=maxlen, starting at block * bno. If we don't get maxlen then use prod to trim the length, if given. * Returns error; returns starting block in *rtblock. * The lengths are all in rtextents. 
*/ STATIC int /* error */ xfs_rtallocate_extent_exact( xfs_mount_t *mp, /* file system mount point */ xfs_trans_t *tp, /* transaction pointer */ xfs_rtblock_t bno, /* starting block number to allocate */ xfs_extlen_t minlen, /* minimum length to allocate */ xfs_extlen_t maxlen, /* maximum length to allocate */ xfs_extlen_t *len, /* out: actual length allocated */ xfs_buf_t **rbpp, /* in/out: summary block buffer */ xfs_fsblock_t *rsb, /* in/out: summary block number */ xfs_extlen_t prod, /* extent product factor */ xfs_rtblock_t *rtblock) /* out: start block allocated */ { int error; /* error value */ xfs_extlen_t i; /* extent length trimmed due to prod */ int isfree; /* extent is free */ xfs_rtblock_t next; /* next block to try (dummy) */ ASSERT(minlen % prod == 0 && maxlen % prod == 0); /* * Check if the range in question (for maxlen) is free. */ error = xfs_rtcheck_range(mp, tp, bno, maxlen, 1, &next, &isfree); if (error) { return error; } if (isfree) { /* * If it is, allocate it and return success. */ error = xfs_rtallocate_range(mp, tp, bno, maxlen, rbpp, rsb); if (error) { return error; } *len = maxlen; *rtblock = bno; return 0; } /* * If not, allocate what there is, if it's at least minlen. */ maxlen = next - bno; if (maxlen < minlen) { /* * Failed, return failure status. */ *rtblock = NULLRTBLOCK; return 0; } /* * Trim off tail of extent, if prod is specified. */ if (prod > 1 && (i = maxlen % prod)) { maxlen -= i; if (maxlen < minlen) { /* * Now we can't do it, return failure status. */ *rtblock = NULLRTBLOCK; return 0; } } /* * Allocate what we can and return it. */ error = xfs_rtallocate_range(mp, tp, bno, maxlen, rbpp, rsb); if (error) { return error; } *len = maxlen; *rtblock = bno; return 0; } /* * Allocate an extent of length minlen<=len<=maxlen, starting as near * to bno as possible. If we don't get maxlen then use prod to trim * the length, if given. The lengths are all in rtextents. 
*/ STATIC int /* error */ xfs_rtallocate_extent_near( xfs_mount_t *mp, /* file system mount point */ xfs_trans_t *tp, /* transaction pointer */ xfs_rtblock_t bno, /* starting block number to allocate */ xfs_extlen_t minlen, /* minimum length to allocate */ xfs_extlen_t maxlen, /* maximum length to allocate */ xfs_extlen_t *len, /* out: actual length allocated */ xfs_buf_t **rbpp, /* in/out: summary block buffer */ xfs_fsblock_t *rsb, /* in/out: summary block number */ xfs_extlen_t prod, /* extent product factor */ xfs_rtblock_t *rtblock) /* out: start block allocated */ { int any; /* any useful extents from summary */ xfs_rtblock_t bbno; /* bitmap block number */ int error; /* error value */ int i; /* bitmap block offset (loop control) */ int j; /* secondary loop control */ int log2len; /* log2 of minlen */ xfs_rtblock_t n; /* next block to try */ xfs_rtblock_t r; /* result block */ ASSERT(minlen % prod == 0 && maxlen % prod == 0); /* * If the block number given is off the end, silently set it to * the last block. */ if (bno >= mp->m_sb.sb_rextents) bno = mp->m_sb.sb_rextents - 1; /* * Try the exact allocation first. */ error = xfs_rtallocate_extent_exact(mp, tp, bno, minlen, maxlen, len, rbpp, rsb, prod, &r); if (error) { return error; } /* * If the exact allocation worked, return that. */ if (r != NULLRTBLOCK) { *rtblock = r; return 0; } bbno = XFS_BITTOBLOCK(mp, bno); i = 0; ASSERT(minlen != 0); log2len = xfs_highbit32(minlen); /* * Loop over all bitmap blocks (bbno + i is current block). */ for (;;) { /* * Get summary information of extents of all useful levels * starting in this bitmap block. */ error = xfs_rtany_summary(mp, tp, log2len, mp->m_rsumlevels - 1, bbno + i, rbpp, rsb, &any); if (error) { return error; } /* * If there are any useful extents starting here, try * allocating one. */ if (any) { /* * On the positive side of the starting location. */ if (i >= 0) { /* * Try to allocate an extent starting in * this block. 
*/ error = xfs_rtallocate_extent_block(mp, tp, bbno + i, minlen, maxlen, len, &n, rbpp, rsb, prod, &r); if (error) { return error; } /* * If it worked, return it. */ if (r != NULLRTBLOCK) { *rtblock = r; return 0; } } /* * On the negative side of the starting location. */ else { /* i < 0 */ /* * Loop backwards through the bitmap blocks from * the starting point-1 up to where we are now. * There should be an extent which ends in this * bitmap block and is long enough. */ for (j = -1; j > i; j--) { /* * Grab the summary information for * this bitmap block. */ error = xfs_rtany_summary(mp, tp, log2len, mp->m_rsumlevels - 1, bbno + j, rbpp, rsb, &any); if (error) { return error; } /* * If there's no extent given in the * summary that means the extent we * found must carry over from an * earlier block. If there is an * extent given, we've already tried * that allocation, don't do it again. */ if (any) continue; error = xfs_rtallocate_extent_block(mp, tp, bbno + j, minlen, maxlen, len, &n, rbpp, rsb, prod, &r); if (error) { return error; } /* * If it works, return the extent. */ if (r != NULLRTBLOCK) { *rtblock = r; return 0; } } /* * There weren't intervening bitmap blocks * with a long enough extent, or the * allocation didn't work for some reason * (i.e. it's a little * too short). * Try to allocate from the summary block * that we found. */ error = xfs_rtallocate_extent_block(mp, tp, bbno + i, minlen, maxlen, len, &n, rbpp, rsb, prod, &r); if (error) { return error; } /* * If it works, return the extent. */ if (r != NULLRTBLOCK) { *rtblock = r; return 0; } } } /* * Loop control. If we were on the positive side, and there's * still more blocks on the negative side, go there. */ if (i > 0 && (int)bbno - i >= 0) i = -i; /* * If positive, and no more negative, but there are more * positive, go there. */ else if (i > 0 && (int)bbno + i < mp->m_sb.sb_rbmblocks - 1) i++; /* * If negative or 0 (just started), and there are positive * blocks to go, go there. 
The 0 case moves to block 1. */ else if (i <= 0 && (int)bbno - i < mp->m_sb.sb_rbmblocks - 1) i = 1 - i; /* * If negative or 0 and there are more negative blocks, * go there. */ else if (i <= 0 && (int)bbno + i > 0) i--; /* * Must be done. Return failure. */ else break; } *rtblock = NULLRTBLOCK; return 0; } /* * Allocate an extent of length minlen<=len<=maxlen, with no position * specified. If we don't get maxlen then use prod to trim * the length, if given. The lengths are all in rtextents. */ STATIC int /* error */ xfs_rtallocate_extent_size( xfs_mount_t *mp, /* file system mount point */ xfs_trans_t *tp, /* transaction pointer */ xfs_extlen_t minlen, /* minimum length to allocate */ xfs_extlen_t maxlen, /* maximum length to allocate */ xfs_extlen_t *len, /* out: actual length allocated */ xfs_buf_t **rbpp, /* in/out: summary block buffer */ xfs_fsblock_t *rsb, /* in/out: summary block number */ xfs_extlen_t prod, /* extent product factor */ xfs_rtblock_t *rtblock) /* out: start block allocated */ { int error; /* error value */ int i; /* bitmap block number */ int l; /* level number (loop control) */ xfs_rtblock_t n; /* next block to be tried */ xfs_rtblock_t r; /* result block number */ xfs_suminfo_t sum; /* summary information for extents */ ASSERT(minlen % prod == 0 && maxlen % prod == 0); ASSERT(maxlen != 0); /* * Loop over all the levels starting with maxlen. * At each level, look at all the bitmap blocks, to see if there * are extents starting there that are long enough (>= maxlen). * Note, only on the initial level can the allocation fail if * the summary says there's an extent. */ for (l = xfs_highbit32(maxlen); l < mp->m_rsumlevels; l++) { /* * Loop over all the bitmap blocks. */ for (i = 0; i < mp->m_sb.sb_rbmblocks; i++) { /* * Get the summary for this level/block. */ error = xfs_rtget_summary(mp, tp, l, i, rbpp, rsb, &sum); if (error) { return error; } /* * Nothing there, on to the next block. */ if (!sum) continue; /* * Try allocating the extent. 
*/ error = xfs_rtallocate_extent_block(mp, tp, i, maxlen, maxlen, len, &n, rbpp, rsb, prod, &r); if (error) { return error; } /* * If it worked, return that. */ if (r != NULLRTBLOCK) { *rtblock = r; return 0; } /* * If the "next block to try" returned from the * allocator is beyond the next bitmap block, * skip to that bitmap block. */ if (XFS_BITTOBLOCK(mp, n) > i + 1) i = XFS_BITTOBLOCK(mp, n) - 1; } } /* * Didn't find any maxlen blocks. Try smaller ones, unless * we're asking for a fixed size extent. */ if (minlen > --maxlen) { *rtblock = NULLRTBLOCK; return 0; } ASSERT(minlen != 0); ASSERT(maxlen != 0); /* * Loop over sizes, from maxlen down to minlen. * This time, when we do the allocations, allow smaller ones * to succeed. */ for (l = xfs_highbit32(maxlen); l >= xfs_highbit32(minlen); l--) { /* * Loop over all the bitmap blocks, try an allocation * starting in that block. */ for (i = 0; i < mp->m_sb.sb_rbmblocks; i++) { /* * Get the summary information for this level/block. */ error = xfs_rtget_summary(mp, tp, l, i, rbpp, rsb, &sum); if (error) { return error; } /* * If nothing there, go on to next. */ if (!sum) continue; /* * Try the allocation. Make sure the specified * minlen/maxlen are in the possible range for * this summary level. */ error = xfs_rtallocate_extent_block(mp, tp, i, XFS_RTMAX(minlen, 1 << l), XFS_RTMIN(maxlen, (1 << (l + 1)) - 1), len, &n, rbpp, rsb, prod, &r); if (error) { return error; } /* * If it worked, return that extent. */ if (r != NULLRTBLOCK) { *rtblock = r; return 0; } /* * If the "next block to try" returned from the * allocator is beyond the next bitmap block, * skip to that bitmap block. */ if (XFS_BITTOBLOCK(mp, n) > i + 1) i = XFS_BITTOBLOCK(mp, n) - 1; } } /* * Got nothing, return failure. */ *rtblock = NULLRTBLOCK; return 0; } /* * Allocate space to the bitmap or summary file, and zero it, for growfs. 
*/ STATIC int /* error */ xfs_growfs_rt_alloc( xfs_mount_t *mp, /* file system mount point */ xfs_extlen_t oblocks, /* old count of blocks */ xfs_extlen_t nblocks, /* new count of blocks */ xfs_inode_t *ip) /* inode (bitmap/summary) */ { xfs_fileoff_t bno; /* block number in file */ xfs_buf_t *bp; /* temporary buffer for zeroing */ int committed; /* transaction committed flag */ xfs_daddr_t d; /* disk block address */ int error; /* error return value */ xfs_fsblock_t firstblock; /* first block allocated in xaction */ xfs_bmap_free_t flist; /* list of freed blocks */ xfs_fsblock_t fsbno; /* filesystem block for bno */ xfs_bmbt_irec_t map; /* block map output */ int nmap; /* number of block maps */ int resblks; /* space reservation */ /* * Allocate space to the file, as necessary. */ while (oblocks < nblocks) { int cancelflags = 0; xfs_trans_t *tp; tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ALLOC); resblks = XFS_GROWFSRT_SPACE_RES(mp, nblocks - oblocks); /* * Reserve space & log for one extent added to the file. */ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtalloc, resblks, 0); if (error) goto error_cancel; cancelflags = XFS_TRANS_RELEASE_LOG_RES; /* * Lock the inode. */ xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); xfs_bmap_init(&flist, &firstblock); /* * Allocate blocks to the bitmap file. */ nmap = 1; cancelflags |= XFS_TRANS_ABORT; error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks, XFS_BMAPI_METADATA, &firstblock, resblks, &map, &nmap, &flist); if (!error && nmap < 1) error = -ENOSPC; if (error) goto error_cancel; /* * Free any blocks freed up in the transaction, then commit. */ error = xfs_bmap_finish(&tp, &flist, &committed); if (error) goto error_cancel; error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); if (error) goto error; /* * Now we need to clear the allocated blocks. * Do this one block per transaction, to keep it simple. 
*/ cancelflags = 0; for (bno = map.br_startoff, fsbno = map.br_startblock; bno < map.br_startoff + map.br_blockcount; bno++, fsbno++) { tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ZERO); /* * Reserve log for one block zeroing. */ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtzero, 0, 0); if (error) goto error_cancel; /* * Lock the bitmap inode. */ xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); /* * Get a buffer for the block. */ d = XFS_FSB_TO_DADDR(mp, fsbno); bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, 0); if (bp == NULL) { error = -EIO; error_cancel: xfs_trans_cancel(tp, cancelflags); goto error; } memset(bp->b_addr, 0, mp->m_sb.sb_blocksize); xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1); /* * Commit the transaction. */ error = xfs_trans_commit(tp, 0); if (error) goto error; } /* * Go on to the next extent, if any. */ oblocks = map.br_startoff + map.br_blockcount; } return 0; error: return error; } /* * Visible (exported) functions. */ /* * Grow the realtime area of the filesystem. 
*/ int xfs_growfs_rt( xfs_mount_t *mp, /* mount point for filesystem */ xfs_growfs_rt_t *in) /* growfs rt input struct */ { xfs_rtblock_t bmbno; /* bitmap block number */ xfs_buf_t *bp; /* temporary buffer */ int error; /* error return value */ xfs_mount_t *nmp; /* new (fake) mount structure */ xfs_rfsblock_t nrblocks; /* new number of realtime blocks */ xfs_extlen_t nrbmblocks; /* new number of rt bitmap blocks */ xfs_rtblock_t nrextents; /* new number of realtime extents */ uint8_t nrextslog; /* new log2 of sb_rextents */ xfs_extlen_t nrsumblocks; /* new number of summary blocks */ uint nrsumlevels; /* new rt summary levels */ uint nrsumsize; /* new size of rt summary, bytes */ xfs_sb_t *nsbp; /* new superblock */ xfs_extlen_t rbmblocks; /* current number of rt bitmap blocks */ xfs_extlen_t rsumblocks; /* current number of rt summary blks */ xfs_sb_t *sbp; /* old superblock */ xfs_fsblock_t sumbno; /* summary block number */ sbp = &mp->m_sb; /* * Initial error checking. */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (mp->m_rtdev_targp == NULL || mp->m_rbmip == NULL || (nrblocks = in->newblocks) <= sbp->sb_rblocks || (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize))) return -EINVAL; if ((error = xfs_sb_validate_fsb_count(sbp, nrblocks))) return error; /* * Read in the last block of the device, make sure it exists. */ error = xfs_buf_read_uncached(mp->m_rtdev_targp, XFS_FSB_TO_BB(mp, nrblocks - 1), XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL); if (error) return error; xfs_buf_relse(bp); /* * Calculate new parameters. These are the final values to be reached. */ nrextents = nrblocks; do_div(nrextents, in->extsize); nrbmblocks = howmany_64(nrextents, NBBY * sbp->sb_blocksize); nrextslog = xfs_highbit32(nrextents); nrsumlevels = nrextslog + 1; nrsumsize = (uint)sizeof(xfs_suminfo_t) * nrsumlevels * nrbmblocks; nrsumblocks = XFS_B_TO_FSB(mp, nrsumsize); nrsumsize = XFS_FSB_TO_B(mp, nrsumblocks); /* * New summary size can't be more than half the size of * the log. 
This prevents us from getting a log overflow, * since we'll log basically the whole summary file at once. */ if (nrsumblocks > (mp->m_sb.sb_logblocks >> 1)) return -EINVAL; /* * Get the old block counts for bitmap and summary inodes. * These can't change since other growfs callers are locked out. */ rbmblocks = XFS_B_TO_FSB(mp, mp->m_rbmip->i_d.di_size); rsumblocks = XFS_B_TO_FSB(mp, mp->m_rsumip->i_d.di_size); /* * Allocate space to the bitmap and summary files, as necessary. */ error = xfs_growfs_rt_alloc(mp, rbmblocks, nrbmblocks, mp->m_rbmip); if (error) return error; error = xfs_growfs_rt_alloc(mp, rsumblocks, nrsumblocks, mp->m_rsumip); if (error) return error; /* * Allocate a new (fake) mount/sb. */ nmp = kmem_alloc(sizeof(*nmp), KM_SLEEP); /* * Loop over the bitmap blocks. * We will do everything one bitmap block at a time. * Skip the current block if it is exactly full. * This also deals with the case where there were no rtextents before. */ for (bmbno = sbp->sb_rbmblocks - ((sbp->sb_rextents & ((1 << mp->m_blkbit_log) - 1)) != 0); bmbno < nrbmblocks; bmbno++) { xfs_trans_t *tp; int cancelflags = 0; *nmp = *mp; nsbp = &nmp->m_sb; /* * Calculate new sb and mount fields for this round. */ nsbp->sb_rextsize = in->extsize; nsbp->sb_rbmblocks = bmbno + 1; nsbp->sb_rblocks = XFS_RTMIN(nrblocks, nsbp->sb_rbmblocks * NBBY * nsbp->sb_blocksize * nsbp->sb_rextsize); nsbp->sb_rextents = nsbp->sb_rblocks; do_div(nsbp->sb_rextents, nsbp->sb_rextsize); ASSERT(nsbp->sb_rextents != 0); nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents); nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1; nrsumsize = (uint)sizeof(xfs_suminfo_t) * nrsumlevels * nsbp->sb_rbmblocks; nrsumblocks = XFS_B_TO_FSB(mp, nrsumsize); nmp->m_rsumsize = nrsumsize = XFS_FSB_TO_B(mp, nrsumblocks); /* * Start a transaction, get the log reservation. 
*/ tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_FREE); error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtfree, 0, 0); if (error) goto error_cancel; /* * Lock out other callers by grabbing the bitmap inode lock. */ xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL); /* * Update the bitmap inode's size. */ mp->m_rbmip->i_d.di_size = nsbp->sb_rbmblocks * nsbp->sb_blocksize; xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE); cancelflags |= XFS_TRANS_ABORT; /* * Get the summary inode into the transaction. */ xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL); /* * Update the summary inode's size. */ mp->m_rsumip->i_d.di_size = nmp->m_rsumsize; xfs_trans_log_inode(tp, mp->m_rsumip, XFS_ILOG_CORE); /* * Copy summary data from old to new sizes. * Do this when the real size (not block-aligned) changes. */ if (sbp->sb_rbmblocks != nsbp->sb_rbmblocks || mp->m_rsumlevels != nmp->m_rsumlevels) { error = xfs_rtcopy_summary(mp, nmp, tp); if (error) goto error_cancel; } /* * Update superblock fields. */ if (nsbp->sb_rextsize != sbp->sb_rextsize) xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTSIZE, nsbp->sb_rextsize - sbp->sb_rextsize); if (nsbp->sb_rbmblocks != sbp->sb_rbmblocks) xfs_trans_mod_sb(tp, XFS_TRANS_SB_RBMBLOCKS, nsbp->sb_rbmblocks - sbp->sb_rbmblocks); if (nsbp->sb_rblocks != sbp->sb_rblocks) xfs_trans_mod_sb(tp, XFS_TRANS_SB_RBLOCKS, nsbp->sb_rblocks - sbp->sb_rblocks); if (nsbp->sb_rextents != sbp->sb_rextents) xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTENTS, nsbp->sb_rextents - sbp->sb_rextents); if (nsbp->sb_rextslog != sbp->sb_rextslog) xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTSLOG, nsbp->sb_rextslog - sbp->sb_rextslog); /* * Free new extent. */ bp = NULL; error = xfs_rtfree_range(nmp, tp, sbp->sb_rextents, nsbp->sb_rextents - sbp->sb_rextents, &bp, &sumbno); if (error) { error_cancel: xfs_trans_cancel(tp, cancelflags); break; } /* * Mark more blocks free in the superblock. 
*/ xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, nsbp->sb_rextents - sbp->sb_rextents); /* * Update mp values into the real mp structure. */ mp->m_rsumlevels = nrsumlevels; mp->m_rsumsize = nrsumsize; error = xfs_trans_commit(tp, 0); if (error) break; } /* * Free the fake mp structure. */ kmem_free(nmp); return error; } /* * Allocate an extent in the realtime subvolume, with the usual allocation * parameters. The length units are all in realtime extents, as is the * result block number. */ int /* error */ xfs_rtallocate_extent( xfs_trans_t *tp, /* transaction pointer */ xfs_rtblock_t bno, /* starting block number to allocate */ xfs_extlen_t minlen, /* minimum length to allocate */ xfs_extlen_t maxlen, /* maximum length to allocate */ xfs_extlen_t *len, /* out: actual length allocated */ xfs_alloctype_t type, /* allocation type XFS_ALLOCTYPE... */ int wasdel, /* was a delayed allocation extent */ xfs_extlen_t prod, /* extent product factor */ xfs_rtblock_t *rtblock) /* out: start block allocated */ { xfs_mount_t *mp = tp->t_mountp; int error; /* error value */ xfs_rtblock_t r; /* result allocated block */ xfs_fsblock_t sb; /* summary file block number */ xfs_buf_t *sumbp; /* summary file block buffer */ ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL)); ASSERT(minlen > 0 && minlen <= maxlen); /* * If prod is set then figure out what to do to minlen and maxlen. */ if (prod > 1) { xfs_extlen_t i; if ((i = maxlen % prod)) maxlen -= i; if ((i = minlen % prod)) minlen += prod - i; if (maxlen < minlen) { *rtblock = NULLRTBLOCK; return 0; } } sumbp = NULL; /* * Allocate by size, or near another block, or exactly at some block. 
*/ switch (type) { case XFS_ALLOCTYPE_ANY_AG: error = xfs_rtallocate_extent_size(mp, tp, minlen, maxlen, len, &sumbp, &sb, prod, &r); break; case XFS_ALLOCTYPE_NEAR_BNO: error = xfs_rtallocate_extent_near(mp, tp, bno, minlen, maxlen, len, &sumbp, &sb, prod, &r); break; case XFS_ALLOCTYPE_THIS_BNO: error = xfs_rtallocate_extent_exact(mp, tp, bno, minlen, maxlen, len, &sumbp, &sb, prod, &r); break; default: error = -EIO; ASSERT(0); } if (error) return error; /* * If it worked, update the superblock. */ if (r != NULLRTBLOCK) { long slen = (long)*len; ASSERT(*len >= minlen && *len <= maxlen); if (wasdel) xfs_trans_mod_sb(tp, XFS_TRANS_SB_RES_FREXTENTS, -slen); else xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, -slen); } *rtblock = r; return 0; } /* * Initialize realtime fields in the mount structure. */ int /* error */ xfs_rtmount_init( struct xfs_mount *mp) /* file system mount structure */ { struct xfs_buf *bp; /* buffer for last block of subvolume */ struct xfs_sb *sbp; /* filesystem superblock copy in mount */ xfs_daddr_t d; /* address of last block of subvolume */ int error; sbp = &mp->m_sb; if (sbp->sb_rblocks == 0) return 0; if (mp->m_rtdev_targp == NULL) { xfs_warn(mp, "Filesystem has a realtime volume, use rtdev=device option"); return -ENODEV; } mp->m_rsumlevels = sbp->sb_rextslog + 1; mp->m_rsumsize = (uint)sizeof(xfs_suminfo_t) * mp->m_rsumlevels * sbp->sb_rbmblocks; mp->m_rsumsize = roundup(mp->m_rsumsize, sbp->sb_blocksize); mp->m_rbmip = mp->m_rsumip = NULL; /* * Check that the realtime section is an ok size. 
*/ d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks); if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_rblocks) { xfs_warn(mp, "realtime mount -- %llu != %llu", (unsigned long long) XFS_BB_TO_FSB(mp, d), (unsigned long long) mp->m_sb.sb_rblocks); return -EFBIG; } error = xfs_buf_read_uncached(mp->m_rtdev_targp, d - XFS_FSB_TO_BB(mp, 1), XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL); if (error) { xfs_warn(mp, "realtime device size check failed"); return error; } xfs_buf_relse(bp); return 0; } /* * Get the bitmap and summary inodes into the mount structure * at mount time. */ int /* error */ xfs_rtmount_inodes( xfs_mount_t *mp) /* file system mount structure */ { int error; /* error return value */ xfs_sb_t *sbp; sbp = &mp->m_sb; if (sbp->sb_rbmino == NULLFSINO) return 0; error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip); if (error) return error; ASSERT(mp->m_rbmip != NULL); ASSERT(sbp->sb_rsumino != NULLFSINO); error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip); if (error) { IRELE(mp->m_rbmip); return error; } ASSERT(mp->m_rsumip != NULL); return 0; } void xfs_rtunmount_inodes( struct xfs_mount *mp) { if (mp->m_rbmip) IRELE(mp->m_rbmip); if (mp->m_rsumip) IRELE(mp->m_rsumip); } /* * Pick an extent for allocation at the start of a new realtime file. * Use the sequence number stored in the atime field of the bitmap inode. * Translate this to a fraction of the rtextents, and return the product * of rtextents and the fraction. * The fraction sequence is 0, 1/2, 1/4, 3/4, 1/8, ..., 7/8, 1/16, ... 
*/
int					/* error */
xfs_rtpick_extent(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_extlen_t	len,		/* allocation length (rtextents) */
	xfs_rtblock_t	*pick)		/* result rt extent */
{
	xfs_rtblock_t	b;		/* result block */
	int		log2;		/* log of sequence number */
	__uint64_t	resid;		/* residual after log removed */
	__uint64_t	seq;		/* sequence number of file creation */
	__uint64_t	*seqp;		/* pointer to seqno in inode */

	/* Caller must already hold the realtime bitmap inode lock. */
	ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));

	/*
	 * The per-filesystem pick sequence number is stored in the atime
	 * field of the realtime bitmap inode.  The first time through,
	 * reset it to 0 and set XFS_DIFLAG_NEWRTBM so it is never
	 * re-initialized afterwards.
	 */
	seqp = (__uint64_t *)&mp->m_rbmip->i_d.di_atime;
	if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM)) {
		mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
		*seqp = 0;
	}
	seq = *seqp;
	if ((log2 = xfs_highbit64(seq)) == -1)
		b = 0;			/* seq == 0: start at extent 0 */
	else {
		/*
		 * Decompose seq into its top bit (log2) and remainder
		 * (resid), then take the fraction (2*resid + 1) / 2^(log2+1)
		 * of sb_rextents as the starting extent.  This walks the
		 * fraction sequence 0, 1/2, 1/4, 3/4, 1/8, ..., spreading
		 * successive picks across the realtime volume.
		 */
		resid = seq - (1ULL << log2);
		b = (mp->m_sb.sb_rextents * ((resid << 1) + 1ULL)) >>
		    (log2 + 1);
		if (b >= mp->m_sb.sb_rextents)
			b = do_mod(b, mp->m_sb.sb_rextents);
		/* Pull back so the whole length fits inside the volume. */
		if (b + len > mp->m_sb.sb_rextents)
			b = mp->m_sb.sb_rextents - len;
	}
	/* Advance the sequence number and log the inode core change. */
	*seqp = seq + 1;
	xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
	*pick = b;
	return 0;
}
gpl-2.0
tiagovignatti/drm-intel
drivers/media/usb/gspca/ov534.c
693
37409
/* * ov534-ov7xxx gspca driver * * Copyright (C) 2008 Antonio Ospite <ospite@studenti.unina.it> * Copyright (C) 2008 Jim Paris <jim@jtan.com> * Copyright (C) 2009 Jean-Francois Moine http://moinejf.free.fr * * Based on a prototype written by Mark Ferrell <majortrips@gmail.com> * USB protocol reverse engineered by Jim Paris <jim@jtan.com> * https://jim.sh/svn/jim/devl/playstation/ps3/eye/test/ * * PS3 Eye camera enhanced by Richard Kaswy http://kaswy.free.fr * PS3 Eye camera - brightness, contrast, awb, agc, aec controls * added by Max Thrun <bear24rw@gmail.com> * PS3 Eye camera - FPS range extended by Joseph Howse * <josephhowse@nummist.com> http://nummist.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "ov534" #include "gspca.h" #include <linux/fixp-arith.h> #include <media/v4l2-ctrls.h> #define OV534_REG_ADDRESS 0xf1 /* sensor address */ #define OV534_REG_SUBADDR 0xf2 #define OV534_REG_WRITE 0xf3 #define OV534_REG_READ 0xf4 #define OV534_REG_OPERATION 0xf5 #define OV534_REG_STATUS 0xf6 #define OV534_OP_WRITE_3 0x37 #define OV534_OP_WRITE_2 0x33 #define OV534_OP_READ_2 0xf9 #define CTRL_TIMEOUT 500 MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>"); MODULE_DESCRIPTION("GSPCA/OV534 USB Camera Driver"); MODULE_LICENSE("GPL"); /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! must be the first item */ struct v4l2_ctrl_handler ctrl_handler; struct v4l2_ctrl *hue; struct v4l2_ctrl *saturation; struct v4l2_ctrl *brightness; struct v4l2_ctrl *contrast; struct { /* gain control cluster */ struct v4l2_ctrl *autogain; struct v4l2_ctrl *gain; }; struct v4l2_ctrl *autowhitebalance; struct { /* exposure control cluster */ struct v4l2_ctrl *autoexposure; struct v4l2_ctrl *exposure; }; struct v4l2_ctrl *sharpness; struct v4l2_ctrl *hflip; struct v4l2_ctrl *vflip; struct v4l2_ctrl *plfreq; __u32 last_pts; u16 last_fid; u8 frame_rate; u8 sensor; }; enum sensors { SENSOR_OV767x, SENSOR_OV772x, NSENSORS }; static int sd_start(struct gspca_dev *gspca_dev); static void sd_stopN(struct gspca_dev *gspca_dev); static const struct v4l2_pix_format ov772x_mode[] = { {320, 240, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE, .bytesperline = 320 * 2, .sizeimage = 320 * 240 * 2, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1}, {640, 480, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE, .bytesperline = 640 * 2, .sizeimage = 640 * 480 * 2, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, }; static const struct 
v4l2_pix_format ov767x_mode[] = { {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG}, {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG}, }; static const u8 qvga_rates[] = {187, 150, 137, 125, 100, 75, 60, 50, 37, 30}; static const u8 vga_rates[] = {60, 50, 40, 30, 15}; static const struct framerates ov772x_framerates[] = { { /* 320x240 */ .rates = qvga_rates, .nrates = ARRAY_SIZE(qvga_rates), }, { /* 640x480 */ .rates = vga_rates, .nrates = ARRAY_SIZE(vga_rates), }, }; struct reg_array { const u8 (*val)[2]; int len; }; static const u8 bridge_init_767x[][2] = { /* comments from the ms-win file apollo7670.set */ /* str1 */ {0xf1, 0x42}, {0x88, 0xf8}, {0x89, 0xff}, {0x76, 0x03}, {0x92, 0x03}, {0x95, 0x10}, {0xe2, 0x00}, {0xe7, 0x3e}, {0x8d, 0x1c}, {0x8e, 0x00}, {0x8f, 0x00}, {0x1f, 0x00}, {0xc3, 0xf9}, {0x89, 0xff}, {0x88, 0xf8}, {0x76, 0x03}, {0x92, 0x01}, {0x93, 0x18}, {0x1c, 0x00}, {0x1d, 0x48}, {0x1d, 0x00}, {0x1d, 0xff}, {0x1d, 0x02}, {0x1d, 0x58}, {0x1d, 0x00}, {0x1c, 0x0a}, {0x1d, 0x0a}, {0x1d, 0x0e}, {0xc0, 0x50}, /* HSize 640 */ {0xc1, 0x3c}, /* VSize 480 */ {0x34, 0x05}, /* enable Audio Suspend mode */ {0xc2, 0x0c}, /* Input YUV */ {0xc3, 0xf9}, /* enable PRE */ {0x34, 0x05}, /* enable Audio Suspend mode */ {0xe7, 0x2e}, /* this solves failure of "SuspendResumeTest" */ {0x31, 0xf9}, /* enable 1.8V Suspend */ {0x35, 0x02}, /* turn on JPEG */ {0xd9, 0x10}, {0x25, 0x42}, /* GPIO[8]:Input */ {0x94, 0x11}, /* If the default setting is loaded when * system boots up, this flag is closed here */ }; static const u8 sensor_init_767x[][2] = { {0x12, 0x80}, {0x11, 0x03}, {0x3a, 0x04}, {0x12, 0x00}, {0x17, 0x13}, {0x18, 0x01}, {0x32, 0xb6}, {0x19, 0x02}, {0x1a, 0x7a}, {0x03, 0x0a}, {0x0c, 0x00}, {0x3e, 0x00}, {0x70, 0x3a}, {0x71, 0x35}, {0x72, 0x11}, {0x73, 0xf0}, {0xa2, 0x02}, {0x7a, 0x2a}, 
/* set Gamma=1.6 below */ {0x7b, 0x12}, {0x7c, 0x1d}, {0x7d, 0x2d}, {0x7e, 0x45}, {0x7f, 0x50}, {0x80, 0x59}, {0x81, 0x62}, {0x82, 0x6b}, {0x83, 0x73}, {0x84, 0x7b}, {0x85, 0x8a}, {0x86, 0x98}, {0x87, 0xb2}, {0x88, 0xca}, {0x89, 0xe0}, {0x13, 0xe0}, {0x00, 0x00}, {0x10, 0x00}, {0x0d, 0x40}, {0x14, 0x38}, /* gain max 16x */ {0xa5, 0x05}, {0xab, 0x07}, {0x24, 0x95}, {0x25, 0x33}, {0x26, 0xe3}, {0x9f, 0x78}, {0xa0, 0x68}, {0xa1, 0x03}, {0xa6, 0xd8}, {0xa7, 0xd8}, {0xa8, 0xf0}, {0xa9, 0x90}, {0xaa, 0x94}, {0x13, 0xe5}, {0x0e, 0x61}, {0x0f, 0x4b}, {0x16, 0x02}, {0x21, 0x02}, {0x22, 0x91}, {0x29, 0x07}, {0x33, 0x0b}, {0x35, 0x0b}, {0x37, 0x1d}, {0x38, 0x71}, {0x39, 0x2a}, {0x3c, 0x78}, {0x4d, 0x40}, {0x4e, 0x20}, {0x69, 0x00}, {0x6b, 0x4a}, {0x74, 0x10}, {0x8d, 0x4f}, {0x8e, 0x00}, {0x8f, 0x00}, {0x90, 0x00}, {0x91, 0x00}, {0x96, 0x00}, {0x9a, 0x80}, {0xb0, 0x84}, {0xb1, 0x0c}, {0xb2, 0x0e}, {0xb3, 0x82}, {0xb8, 0x0a}, {0x43, 0x0a}, {0x44, 0xf0}, {0x45, 0x34}, {0x46, 0x58}, {0x47, 0x28}, {0x48, 0x3a}, {0x59, 0x88}, {0x5a, 0x88}, {0x5b, 0x44}, {0x5c, 0x67}, {0x5d, 0x49}, {0x5e, 0x0e}, {0x6c, 0x0a}, {0x6d, 0x55}, {0x6e, 0x11}, {0x6f, 0x9f}, {0x6a, 0x40}, {0x01, 0x40}, {0x02, 0x40}, {0x13, 0xe7}, {0x4f, 0x80}, {0x50, 0x80}, {0x51, 0x00}, {0x52, 0x22}, {0x53, 0x5e}, {0x54, 0x80}, {0x58, 0x9e}, {0x41, 0x08}, {0x3f, 0x00}, {0x75, 0x04}, {0x76, 0xe1}, {0x4c, 0x00}, {0x77, 0x01}, {0x3d, 0xc2}, {0x4b, 0x09}, {0xc9, 0x60}, {0x41, 0x38}, /* jfm: auto sharpness + auto de-noise */ {0x56, 0x40}, {0x34, 0x11}, {0x3b, 0xc2}, {0xa4, 0x8a}, /* Night mode trigger point */ {0x96, 0x00}, {0x97, 0x30}, {0x98, 0x20}, {0x99, 0x20}, {0x9a, 0x84}, {0x9b, 0x29}, {0x9c, 0x03}, {0x9d, 0x4c}, {0x9e, 0x3f}, {0x78, 0x04}, {0x79, 0x01}, {0xc8, 0xf0}, {0x79, 0x0f}, {0xc8, 0x00}, {0x79, 0x10}, {0xc8, 0x7e}, {0x79, 0x0a}, {0xc8, 0x80}, {0x79, 0x0b}, {0xc8, 0x01}, {0x79, 0x0c}, {0xc8, 0x0f}, {0x79, 0x0d}, {0xc8, 0x20}, {0x79, 0x09}, {0xc8, 0x80}, {0x79, 0x02}, {0xc8, 0xc0}, {0x79, 0x03}, {0xc8, 0x20}, 
{0x79, 0x26}, }; static const u8 bridge_start_vga_767x[][2] = { /* str59 JPG */ {0x94, 0xaa}, {0xf1, 0x42}, {0xe5, 0x04}, {0xc0, 0x50}, {0xc1, 0x3c}, {0xc2, 0x0c}, {0x35, 0x02}, /* turn on JPEG */ {0xd9, 0x10}, {0xda, 0x00}, /* for higher clock rate(30fps) */ {0x34, 0x05}, /* enable Audio Suspend mode */ {0xc3, 0xf9}, /* enable PRE */ {0x8c, 0x00}, /* CIF VSize LSB[2:0] */ {0x8d, 0x1c}, /* output YUV */ /* {0x34, 0x05}, * enable Audio Suspend mode (?) */ {0x50, 0x00}, /* H/V divider=0 */ {0x51, 0xa0}, /* input H=640/4 */ {0x52, 0x3c}, /* input V=480/4 */ {0x53, 0x00}, /* offset X=0 */ {0x54, 0x00}, /* offset Y=0 */ {0x55, 0x00}, /* H/V size[8]=0 */ {0x57, 0x00}, /* H-size[9]=0 */ {0x5c, 0x00}, /* output size[9:8]=0 */ {0x5a, 0xa0}, /* output H=640/4 */ {0x5b, 0x78}, /* output V=480/4 */ {0x1c, 0x0a}, {0x1d, 0x0a}, {0x94, 0x11}, }; static const u8 sensor_start_vga_767x[][2] = { {0x11, 0x01}, {0x1e, 0x04}, {0x19, 0x02}, {0x1a, 0x7a}, }; static const u8 bridge_start_qvga_767x[][2] = { /* str86 JPG */ {0x94, 0xaa}, {0xf1, 0x42}, {0xe5, 0x04}, {0xc0, 0x80}, {0xc1, 0x60}, {0xc2, 0x0c}, {0x35, 0x02}, /* turn on JPEG */ {0xd9, 0x10}, {0xc0, 0x50}, /* CIF HSize 640 */ {0xc1, 0x3c}, /* CIF VSize 480 */ {0x8c, 0x00}, /* CIF VSize LSB[2:0] */ {0x8d, 0x1c}, /* output YUV */ {0x34, 0x05}, /* enable Audio Suspend mode */ {0xc2, 0x4c}, /* output YUV and Enable DCW */ {0xc3, 0xf9}, /* enable PRE */ {0x1c, 0x00}, /* indirect addressing */ {0x1d, 0x48}, /* output YUV422 */ {0x50, 0x89}, /* H/V divider=/2; plus DCW AVG */ {0x51, 0xa0}, /* DCW input H=640/4 */ {0x52, 0x78}, /* DCW input V=480/4 */ {0x53, 0x00}, /* offset X=0 */ {0x54, 0x00}, /* offset Y=0 */ {0x55, 0x00}, /* H/V size[8]=0 */ {0x57, 0x00}, /* H-size[9]=0 */ {0x5c, 0x00}, /* DCW output size[9:8]=0 */ {0x5a, 0x50}, /* DCW output H=320/4 */ {0x5b, 0x3c}, /* DCW output V=240/4 */ {0x1c, 0x0a}, {0x1d, 0x0a}, {0x94, 0x11}, }; static const u8 sensor_start_qvga_767x[][2] = { {0x11, 0x01}, {0x1e, 0x04}, {0x19, 0x02}, {0x1a, 
0x7a}, }; static const u8 bridge_init_772x[][2] = { { 0xc2, 0x0c }, { 0x88, 0xf8 }, { 0xc3, 0x69 }, { 0x89, 0xff }, { 0x76, 0x03 }, { 0x92, 0x01 }, { 0x93, 0x18 }, { 0x94, 0x10 }, { 0x95, 0x10 }, { 0xe2, 0x00 }, { 0xe7, 0x3e }, { 0x96, 0x00 }, { 0x97, 0x20 }, { 0x97, 0x20 }, { 0x97, 0x20 }, { 0x97, 0x0a }, { 0x97, 0x3f }, { 0x97, 0x4a }, { 0x97, 0x20 }, { 0x97, 0x15 }, { 0x97, 0x0b }, { 0x8e, 0x40 }, { 0x1f, 0x81 }, { 0x34, 0x05 }, { 0xe3, 0x04 }, { 0x88, 0x00 }, { 0x89, 0x00 }, { 0x76, 0x00 }, { 0xe7, 0x2e }, { 0x31, 0xf9 }, { 0x25, 0x42 }, { 0x21, 0xf0 }, { 0x1c, 0x00 }, { 0x1d, 0x40 }, { 0x1d, 0x02 }, /* payload size 0x0200 * 4 = 2048 bytes */ { 0x1d, 0x00 }, /* payload size */ { 0x1d, 0x02 }, /* frame size 0x025800 * 4 = 614400 */ { 0x1d, 0x58 }, /* frame size */ { 0x1d, 0x00 }, /* frame size */ { 0x1c, 0x0a }, { 0x1d, 0x08 }, /* turn on UVC header */ { 0x1d, 0x0e }, /* .. */ { 0x8d, 0x1c }, { 0x8e, 0x80 }, { 0xe5, 0x04 }, { 0xc0, 0x50 }, { 0xc1, 0x3c }, { 0xc2, 0x0c }, }; static const u8 sensor_init_772x[][2] = { { 0x12, 0x80 }, { 0x11, 0x01 }, /*fixme: better have a delay?*/ { 0x11, 0x01 }, { 0x11, 0x01 }, { 0x11, 0x01 }, { 0x11, 0x01 }, { 0x11, 0x01 }, { 0x11, 0x01 }, { 0x11, 0x01 }, { 0x11, 0x01 }, { 0x11, 0x01 }, { 0x11, 0x01 }, { 0x3d, 0x03 }, { 0x17, 0x26 }, { 0x18, 0xa0 }, { 0x19, 0x07 }, { 0x1a, 0xf0 }, { 0x32, 0x00 }, { 0x29, 0xa0 }, { 0x2c, 0xf0 }, { 0x65, 0x20 }, { 0x11, 0x01 }, { 0x42, 0x7f }, { 0x63, 0xaa }, /* AWB - was e0 */ { 0x64, 0xff }, { 0x66, 0x00 }, { 0x13, 0xf0 }, /* com8 */ { 0x0d, 0x41 }, { 0x0f, 0xc5 }, { 0x14, 0x11 }, { 0x22, 0x7f }, { 0x23, 0x03 }, { 0x24, 0x40 }, { 0x25, 0x30 }, { 0x26, 0xa1 }, { 0x2a, 0x00 }, { 0x2b, 0x00 }, { 0x6b, 0xaa }, { 0x13, 0xff }, /* AWB */ { 0x90, 0x05 }, { 0x91, 0x01 }, { 0x92, 0x03 }, { 0x93, 0x00 }, { 0x94, 0x60 }, { 0x95, 0x3c }, { 0x96, 0x24 }, { 0x97, 0x1e }, { 0x98, 0x62 }, { 0x99, 0x80 }, { 0x9a, 0x1e }, { 0x9b, 0x08 }, { 0x9c, 0x20 }, { 0x9e, 0x81 }, { 0xa6, 0x07 }, { 0x7e, 0x0c }, { 0x7f, 0x16 
}, { 0x80, 0x2a }, { 0x81, 0x4e }, { 0x82, 0x61 }, { 0x83, 0x6f }, { 0x84, 0x7b }, { 0x85, 0x86 }, { 0x86, 0x8e }, { 0x87, 0x97 }, { 0x88, 0xa4 }, { 0x89, 0xaf }, { 0x8a, 0xc5 }, { 0x8b, 0xd7 }, { 0x8c, 0xe8 }, { 0x8d, 0x20 }, { 0x0c, 0x90 }, { 0x2b, 0x00 }, { 0x22, 0x7f }, { 0x23, 0x03 }, { 0x11, 0x01 }, { 0x0c, 0xd0 }, { 0x64, 0xff }, { 0x0d, 0x41 }, { 0x14, 0x41 }, { 0x0e, 0xcd }, { 0xac, 0xbf }, { 0x8e, 0x00 }, /* De-noise threshold */ { 0x0c, 0xd0 } }; static const u8 bridge_start_vga_772x[][2] = { {0x1c, 0x00}, {0x1d, 0x40}, {0x1d, 0x02}, {0x1d, 0x00}, {0x1d, 0x02}, {0x1d, 0x58}, {0x1d, 0x00}, {0xc0, 0x50}, {0xc1, 0x3c}, }; static const u8 sensor_start_vga_772x[][2] = { {0x12, 0x00}, {0x17, 0x26}, {0x18, 0xa0}, {0x19, 0x07}, {0x1a, 0xf0}, {0x29, 0xa0}, {0x2c, 0xf0}, {0x65, 0x20}, }; static const u8 bridge_start_qvga_772x[][2] = { {0x1c, 0x00}, {0x1d, 0x40}, {0x1d, 0x02}, {0x1d, 0x00}, {0x1d, 0x01}, {0x1d, 0x4b}, {0x1d, 0x00}, {0xc0, 0x28}, {0xc1, 0x1e}, }; static const u8 sensor_start_qvga_772x[][2] = { {0x12, 0x40}, {0x17, 0x3f}, {0x18, 0x50}, {0x19, 0x03}, {0x1a, 0x78}, {0x29, 0x50}, {0x2c, 0x78}, {0x65, 0x2f}, }; static void ov534_reg_write(struct gspca_dev *gspca_dev, u16 reg, u8 val) { struct usb_device *udev = gspca_dev->dev; int ret; if (gspca_dev->usb_err < 0) return; PDEBUG(D_USBO, "SET 01 0000 %04x %02x", reg, val); gspca_dev->usb_buf[0] = val; ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x01, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT); if (ret < 0) { pr_err("write failed %d\n", ret); gspca_dev->usb_err = ret; } } static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg) { struct usb_device *udev = gspca_dev->dev; int ret; if (gspca_dev->usb_err < 0) return 0; ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x01, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT); PDEBUG(D_USBI, "GET 01 0000 %04x %02x", reg, 
gspca_dev->usb_buf[0]); if (ret < 0) { pr_err("read failed %d\n", ret); gspca_dev->usb_err = ret; } return gspca_dev->usb_buf[0]; } /* Two bits control LED: 0x21 bit 7 and 0x23 bit 7. * (direction and output)? */ static void ov534_set_led(struct gspca_dev *gspca_dev, int status) { u8 data; PDEBUG(D_CONF, "led status: %d", status); data = ov534_reg_read(gspca_dev, 0x21); data |= 0x80; ov534_reg_write(gspca_dev, 0x21, data); data = ov534_reg_read(gspca_dev, 0x23); if (status) data |= 0x80; else data &= ~0x80; ov534_reg_write(gspca_dev, 0x23, data); if (!status) { data = ov534_reg_read(gspca_dev, 0x21); data &= ~0x80; ov534_reg_write(gspca_dev, 0x21, data); } } static int sccb_check_status(struct gspca_dev *gspca_dev) { u8 data; int i; for (i = 0; i < 5; i++) { msleep(10); data = ov534_reg_read(gspca_dev, OV534_REG_STATUS); switch (data) { case 0x00: return 1; case 0x04: return 0; case 0x03: break; default: PERR("sccb status 0x%02x, attempt %d/5", data, i + 1); } } return 0; } static void sccb_reg_write(struct gspca_dev *gspca_dev, u8 reg, u8 val) { PDEBUG(D_USBO, "sccb write: %02x %02x", reg, val); ov534_reg_write(gspca_dev, OV534_REG_SUBADDR, reg); ov534_reg_write(gspca_dev, OV534_REG_WRITE, val); ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_3); if (!sccb_check_status(gspca_dev)) { pr_err("sccb_reg_write failed\n"); gspca_dev->usb_err = -EIO; } } static u8 sccb_reg_read(struct gspca_dev *gspca_dev, u16 reg) { ov534_reg_write(gspca_dev, OV534_REG_SUBADDR, reg); ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_2); if (!sccb_check_status(gspca_dev)) pr_err("sccb_reg_read failed 1\n"); ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_READ_2); if (!sccb_check_status(gspca_dev)) pr_err("sccb_reg_read failed 2\n"); return ov534_reg_read(gspca_dev, OV534_REG_READ); } /* output a bridge sequence (reg - val) */ static void reg_w_array(struct gspca_dev *gspca_dev, const u8 (*data)[2], int len) { while (--len >= 0) { 
ov534_reg_write(gspca_dev, (*data)[0], (*data)[1]); data++; } } /* output a sensor sequence (reg - val) */ static void sccb_w_array(struct gspca_dev *gspca_dev, const u8 (*data)[2], int len) { while (--len >= 0) { if ((*data)[0] != 0xff) { sccb_reg_write(gspca_dev, (*data)[0], (*data)[1]); } else { sccb_reg_read(gspca_dev, (*data)[1]); sccb_reg_write(gspca_dev, 0xff, 0x00); } data++; } } /* ov772x specific controls */ static void set_frame_rate(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i; struct rate_s { u8 fps; u8 r11; u8 r0d; u8 re5; }; const struct rate_s *r; static const struct rate_s rate_0[] = { /* 640x480 */ {60, 0x01, 0xc1, 0x04}, {50, 0x01, 0x41, 0x02}, {40, 0x02, 0xc1, 0x04}, {30, 0x04, 0x81, 0x02}, {15, 0x03, 0x41, 0x04}, }; static const struct rate_s rate_1[] = { /* 320x240 */ /* {205, 0x01, 0xc1, 0x02}, * 205 FPS: video is partly corrupt */ {187, 0x01, 0x81, 0x02}, /* 187 FPS or below: video is valid */ {150, 0x01, 0xc1, 0x04}, {137, 0x02, 0xc1, 0x02}, {125, 0x02, 0x81, 0x02}, {100, 0x02, 0xc1, 0x04}, {75, 0x03, 0xc1, 0x04}, {60, 0x04, 0xc1, 0x04}, {50, 0x02, 0x41, 0x04}, {37, 0x03, 0x41, 0x04}, {30, 0x04, 0x41, 0x04}, }; if (sd->sensor != SENSOR_OV772x) return; if (gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv == 0) { r = rate_0; i = ARRAY_SIZE(rate_0); } else { r = rate_1; i = ARRAY_SIZE(rate_1); } while (--i > 0) { if (sd->frame_rate >= r->fps) break; r++; } sccb_reg_write(gspca_dev, 0x11, r->r11); sccb_reg_write(gspca_dev, 0x0d, r->r0d); ov534_reg_write(gspca_dev, 0xe5, r->re5); PDEBUG(D_PROBE, "frame_rate: %d", r->fps); } static void sethue(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_OV767x) { /* TBD */ } else { s16 huesin; s16 huecos; /* According to the datasheet the registers expect HUESIN and * HUECOS to be the result of the trigonometric functions, * scaled by 0x80. 
* * The 0x7fff here represents the maximum absolute value * returned byt fixp_sin and fixp_cos, so the scaling will * consider the result like in the interval [-1.0, 1.0]. */ huesin = fixp_sin16(val) * 0x80 / 0x7fff; huecos = fixp_cos16(val) * 0x80 / 0x7fff; if (huesin < 0) { sccb_reg_write(gspca_dev, 0xab, sccb_reg_read(gspca_dev, 0xab) | 0x2); huesin = -huesin; } else { sccb_reg_write(gspca_dev, 0xab, sccb_reg_read(gspca_dev, 0xab) & ~0x2); } sccb_reg_write(gspca_dev, 0xa9, (u8)huecos); sccb_reg_write(gspca_dev, 0xaa, (u8)huesin); } } static void setsaturation(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_OV767x) { int i; static u8 color_tb[][6] = { {0x42, 0x42, 0x00, 0x11, 0x30, 0x41}, {0x52, 0x52, 0x00, 0x16, 0x3c, 0x52}, {0x66, 0x66, 0x00, 0x1b, 0x4b, 0x66}, {0x80, 0x80, 0x00, 0x22, 0x5e, 0x80}, {0x9a, 0x9a, 0x00, 0x29, 0x71, 0x9a}, {0xb8, 0xb8, 0x00, 0x31, 0x87, 0xb8}, {0xdd, 0xdd, 0x00, 0x3b, 0xa2, 0xdd}, }; for (i = 0; i < ARRAY_SIZE(color_tb[0]); i++) sccb_reg_write(gspca_dev, 0x4f + i, color_tb[val][i]); } else { sccb_reg_write(gspca_dev, 0xa7, val); /* U saturation */ sccb_reg_write(gspca_dev, 0xa8, val); /* V saturation */ } } static void setbrightness(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_OV767x) { if (val < 0) val = 0x80 - val; sccb_reg_write(gspca_dev, 0x55, val); /* bright */ } else { sccb_reg_write(gspca_dev, 0x9b, val); } } static void setcontrast(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_OV767x) sccb_reg_write(gspca_dev, 0x56, val); /* contras */ else sccb_reg_write(gspca_dev, 0x9c, val); } static void setgain(struct gspca_dev *gspca_dev, s32 val) { switch (val & 0x30) { case 0x00: val &= 0x0f; break; case 0x10: val &= 0x0f; val |= 0x30; break; case 0x20: val &= 0x0f; val |= 0x70; break; default: /* case 0x30: */ val &= 0x0f; val |= 0xf0; break; } 
sccb_reg_write(gspca_dev, 0x00, val); } static s32 getgain(struct gspca_dev *gspca_dev) { return sccb_reg_read(gspca_dev, 0x00); } static void setexposure(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_OV767x) { /* set only aec[9:2] */ sccb_reg_write(gspca_dev, 0x10, val); /* aech */ } else { /* 'val' is one byte and represents half of the exposure value * we are going to set into registers, a two bytes value: * * MSB: ((u16) val << 1) >> 8 == val >> 7 * LSB: ((u16) val << 1) & 0xff == val << 1 */ sccb_reg_write(gspca_dev, 0x08, val >> 7); sccb_reg_write(gspca_dev, 0x10, val << 1); } } static s32 getexposure(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_OV767x) { /* get only aec[9:2] */ return sccb_reg_read(gspca_dev, 0x10); /* aech */ } else { u8 hi = sccb_reg_read(gspca_dev, 0x08); u8 lo = sccb_reg_read(gspca_dev, 0x10); return (hi << 8 | lo) >> 1; } } static void setagc(struct gspca_dev *gspca_dev, s32 val) { if (val) { sccb_reg_write(gspca_dev, 0x13, sccb_reg_read(gspca_dev, 0x13) | 0x04); sccb_reg_write(gspca_dev, 0x64, sccb_reg_read(gspca_dev, 0x64) | 0x03); } else { sccb_reg_write(gspca_dev, 0x13, sccb_reg_read(gspca_dev, 0x13) & ~0x04); sccb_reg_write(gspca_dev, 0x64, sccb_reg_read(gspca_dev, 0x64) & ~0x03); } } static void setawb(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; if (val) { sccb_reg_write(gspca_dev, 0x13, sccb_reg_read(gspca_dev, 0x13) | 0x02); if (sd->sensor == SENSOR_OV772x) sccb_reg_write(gspca_dev, 0x63, sccb_reg_read(gspca_dev, 0x63) | 0xc0); } else { sccb_reg_write(gspca_dev, 0x13, sccb_reg_read(gspca_dev, 0x13) & ~0x02); if (sd->sensor == SENSOR_OV772x) sccb_reg_write(gspca_dev, 0x63, sccb_reg_read(gspca_dev, 0x63) & ~0xc0); } } static void setaec(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; u8 data; data = sd->sensor == SENSOR_OV767x ? 
0x05 : /* agc + aec */ 0x01; /* agc */ switch (val) { case V4L2_EXPOSURE_AUTO: sccb_reg_write(gspca_dev, 0x13, sccb_reg_read(gspca_dev, 0x13) | data); break; case V4L2_EXPOSURE_MANUAL: sccb_reg_write(gspca_dev, 0x13, sccb_reg_read(gspca_dev, 0x13) & ~data); break; } } static void setsharpness(struct gspca_dev *gspca_dev, s32 val) { sccb_reg_write(gspca_dev, 0x91, val); /* Auto de-noise threshold */ sccb_reg_write(gspca_dev, 0x8e, val); /* De-noise threshold */ } static void sethvflip(struct gspca_dev *gspca_dev, s32 hflip, s32 vflip) { struct sd *sd = (struct sd *) gspca_dev; u8 val; if (sd->sensor == SENSOR_OV767x) { val = sccb_reg_read(gspca_dev, 0x1e); /* mvfp */ val &= ~0x30; if (hflip) val |= 0x20; if (vflip) val |= 0x10; sccb_reg_write(gspca_dev, 0x1e, val); } else { val = sccb_reg_read(gspca_dev, 0x0c); val &= ~0xc0; if (hflip == 0) val |= 0x40; if (vflip == 0) val |= 0x80; sccb_reg_write(gspca_dev, 0x0c, val); } } static void setlightfreq(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; val = val ? 
0x9e : 0x00; if (sd->sensor == SENSOR_OV767x) { sccb_reg_write(gspca_dev, 0x2a, 0x00); if (val) val = 0x9d; /* insert dummy to 25fps for 50Hz */ } sccb_reg_write(gspca_dev, 0x2b, val); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; cam = &gspca_dev->cam; cam->cam_mode = ov772x_mode; cam->nmodes = ARRAY_SIZE(ov772x_mode); sd->frame_rate = 30; return 0; } static int ov534_g_volatile_ctrl(struct v4l2_ctrl *ctrl) { struct sd *sd = container_of(ctrl->handler, struct sd, ctrl_handler); struct gspca_dev *gspca_dev = &sd->gspca_dev; switch (ctrl->id) { case V4L2_CID_AUTOGAIN: gspca_dev->usb_err = 0; if (ctrl->val && sd->gain && gspca_dev->streaming) sd->gain->val = getgain(gspca_dev); return gspca_dev->usb_err; case V4L2_CID_EXPOSURE_AUTO: gspca_dev->usb_err = 0; if (ctrl->val == V4L2_EXPOSURE_AUTO && sd->exposure && gspca_dev->streaming) sd->exposure->val = getexposure(gspca_dev); return gspca_dev->usb_err; } return -EINVAL; } static int ov534_s_ctrl(struct v4l2_ctrl *ctrl) { struct sd *sd = container_of(ctrl->handler, struct sd, ctrl_handler); struct gspca_dev *gspca_dev = &sd->gspca_dev; gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_HUE: sethue(gspca_dev, ctrl->val); break; case V4L2_CID_SATURATION: setsaturation(gspca_dev, ctrl->val); break; case V4L2_CID_BRIGHTNESS: setbrightness(gspca_dev, ctrl->val); break; case V4L2_CID_CONTRAST: setcontrast(gspca_dev, ctrl->val); break; case V4L2_CID_AUTOGAIN: /* case V4L2_CID_GAIN: */ setagc(gspca_dev, ctrl->val); if (!gspca_dev->usb_err && !ctrl->val && sd->gain) setgain(gspca_dev, sd->gain->val); break; case V4L2_CID_AUTO_WHITE_BALANCE: setawb(gspca_dev, ctrl->val); break; case V4L2_CID_EXPOSURE_AUTO: /* case V4L2_CID_EXPOSURE: */ setaec(gspca_dev, ctrl->val); if (!gspca_dev->usb_err && ctrl->val == V4L2_EXPOSURE_MANUAL && sd->exposure) 
setexposure(gspca_dev, sd->exposure->val); break; case V4L2_CID_SHARPNESS: setsharpness(gspca_dev, ctrl->val); break; case V4L2_CID_HFLIP: sethvflip(gspca_dev, ctrl->val, sd->vflip->val); break; case V4L2_CID_VFLIP: sethvflip(gspca_dev, sd->hflip->val, ctrl->val); break; case V4L2_CID_POWER_LINE_FREQUENCY: setlightfreq(gspca_dev, ctrl->val); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops ov534_ctrl_ops = { .g_volatile_ctrl = ov534_g_volatile_ctrl, .s_ctrl = ov534_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; struct v4l2_ctrl_handler *hdl = &sd->ctrl_handler; /* parameters with different values between the supported sensors */ int saturation_min; int saturation_max; int saturation_def; int brightness_min; int brightness_max; int brightness_def; int contrast_max; int contrast_def; int exposure_min; int exposure_max; int exposure_def; int hflip_def; if (sd->sensor == SENSOR_OV767x) { saturation_min = 0, saturation_max = 6, saturation_def = 3, brightness_min = -127; brightness_max = 127; brightness_def = 0; contrast_max = 0x80; contrast_def = 0x40; exposure_min = 0x08; exposure_max = 0x60; exposure_def = 0x13; hflip_def = 1; } else { saturation_min = 0, saturation_max = 255, saturation_def = 64, brightness_min = 0; brightness_max = 255; brightness_def = 0; contrast_max = 255; contrast_def = 32; exposure_min = 0; exposure_max = 255; exposure_def = 120; hflip_def = 0; } gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 13); if (sd->sensor == SENSOR_OV772x) sd->hue = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops, V4L2_CID_HUE, -90, 90, 1, 0); sd->saturation = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops, V4L2_CID_SATURATION, saturation_min, saturation_max, 1, saturation_def); sd->brightness = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops, V4L2_CID_BRIGHTNESS, brightness_min, brightness_max, 1, brightness_def); sd->contrast = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops, V4L2_CID_CONTRAST, 0, 
contrast_max, 1, contrast_def); if (sd->sensor == SENSOR_OV772x) { sd->autogain = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); sd->gain = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops, V4L2_CID_GAIN, 0, 63, 1, 20); } sd->autoexposure = v4l2_ctrl_new_std_menu(hdl, &ov534_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, V4L2_EXPOSURE_MANUAL, 0, V4L2_EXPOSURE_AUTO); sd->exposure = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops, V4L2_CID_EXPOSURE, exposure_min, exposure_max, 1, exposure_def); sd->autowhitebalance = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops, V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1); if (sd->sensor == SENSOR_OV772x) sd->sharpness = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops, V4L2_CID_SHARPNESS, 0, 63, 1, 0); sd->hflip = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, hflip_def); sd->vflip = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); sd->plfreq = v4l2_ctrl_new_std_menu(hdl, &ov534_ctrl_ops, V4L2_CID_POWER_LINE_FREQUENCY, V4L2_CID_POWER_LINE_FREQUENCY_50HZ, 0, V4L2_CID_POWER_LINE_FREQUENCY_DISABLED); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } if (sd->sensor == SENSOR_OV772x) v4l2_ctrl_auto_cluster(2, &sd->autogain, 0, true); v4l2_ctrl_auto_cluster(2, &sd->autoexposure, V4L2_EXPOSURE_MANUAL, true); return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u16 sensor_id; static const struct reg_array bridge_init[NSENSORS] = { [SENSOR_OV767x] = {bridge_init_767x, ARRAY_SIZE(bridge_init_767x)}, [SENSOR_OV772x] = {bridge_init_772x, ARRAY_SIZE(bridge_init_772x)}, }; static const struct reg_array sensor_init[NSENSORS] = { [SENSOR_OV767x] = {sensor_init_767x, ARRAY_SIZE(sensor_init_767x)}, [SENSOR_OV772x] = {sensor_init_772x, ARRAY_SIZE(sensor_init_772x)}, }; /* reset bridge */ ov534_reg_write(gspca_dev, 0xe7, 0x3a); ov534_reg_write(gspca_dev, 0xe0, 0x08); msleep(100); /* initialize the sensor 
address */ ov534_reg_write(gspca_dev, OV534_REG_ADDRESS, 0x42); /* reset sensor */ sccb_reg_write(gspca_dev, 0x12, 0x80); msleep(10); /* probe the sensor */ sccb_reg_read(gspca_dev, 0x0a); sensor_id = sccb_reg_read(gspca_dev, 0x0a) << 8; sccb_reg_read(gspca_dev, 0x0b); sensor_id |= sccb_reg_read(gspca_dev, 0x0b); PDEBUG(D_PROBE, "Sensor ID: %04x", sensor_id); if ((sensor_id & 0xfff0) == 0x7670) { sd->sensor = SENSOR_OV767x; gspca_dev->cam.cam_mode = ov767x_mode; gspca_dev->cam.nmodes = ARRAY_SIZE(ov767x_mode); } else { sd->sensor = SENSOR_OV772x; gspca_dev->cam.bulk = 1; gspca_dev->cam.bulk_size = 16384; gspca_dev->cam.bulk_nurbs = 2; gspca_dev->cam.mode_framerates = ov772x_framerates; } /* initialize */ reg_w_array(gspca_dev, bridge_init[sd->sensor].val, bridge_init[sd->sensor].len); ov534_set_led(gspca_dev, 1); sccb_w_array(gspca_dev, sensor_init[sd->sensor].val, sensor_init[sd->sensor].len); sd_stopN(gspca_dev); /* set_frame_rate(gspca_dev); */ return gspca_dev->usb_err; } static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int mode; static const struct reg_array bridge_start[NSENSORS][2] = { [SENSOR_OV767x] = {{bridge_start_qvga_767x, ARRAY_SIZE(bridge_start_qvga_767x)}, {bridge_start_vga_767x, ARRAY_SIZE(bridge_start_vga_767x)}}, [SENSOR_OV772x] = {{bridge_start_qvga_772x, ARRAY_SIZE(bridge_start_qvga_772x)}, {bridge_start_vga_772x, ARRAY_SIZE(bridge_start_vga_772x)}}, }; static const struct reg_array sensor_start[NSENSORS][2] = { [SENSOR_OV767x] = {{sensor_start_qvga_767x, ARRAY_SIZE(sensor_start_qvga_767x)}, {sensor_start_vga_767x, ARRAY_SIZE(sensor_start_vga_767x)}}, [SENSOR_OV772x] = {{sensor_start_qvga_772x, ARRAY_SIZE(sensor_start_qvga_772x)}, {sensor_start_vga_772x, ARRAY_SIZE(sensor_start_vga_772x)}}, }; /* (from ms-win trace) */ if (sd->sensor == SENSOR_OV767x) sccb_reg_write(gspca_dev, 0x1e, 0x04); /* black sun enable ? 
*/ mode = gspca_dev->curr_mode; /* 0: 320x240, 1: 640x480 */ reg_w_array(gspca_dev, bridge_start[sd->sensor][mode].val, bridge_start[sd->sensor][mode].len); sccb_w_array(gspca_dev, sensor_start[sd->sensor][mode].val, sensor_start[sd->sensor][mode].len); set_frame_rate(gspca_dev); if (sd->hue) sethue(gspca_dev, v4l2_ctrl_g_ctrl(sd->hue)); setsaturation(gspca_dev, v4l2_ctrl_g_ctrl(sd->saturation)); if (sd->autogain) setagc(gspca_dev, v4l2_ctrl_g_ctrl(sd->autogain)); setawb(gspca_dev, v4l2_ctrl_g_ctrl(sd->autowhitebalance)); setaec(gspca_dev, v4l2_ctrl_g_ctrl(sd->autoexposure)); if (sd->gain) setgain(gspca_dev, v4l2_ctrl_g_ctrl(sd->gain)); setexposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure)); setbrightness(gspca_dev, v4l2_ctrl_g_ctrl(sd->brightness)); setcontrast(gspca_dev, v4l2_ctrl_g_ctrl(sd->contrast)); if (sd->sharpness) setsharpness(gspca_dev, v4l2_ctrl_g_ctrl(sd->sharpness)); sethvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip), v4l2_ctrl_g_ctrl(sd->vflip)); setlightfreq(gspca_dev, v4l2_ctrl_g_ctrl(sd->plfreq)); ov534_set_led(gspca_dev, 1); ov534_reg_write(gspca_dev, 0xe0, 0x00); return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { ov534_reg_write(gspca_dev, 0xe0, 0x09); ov534_set_led(gspca_dev, 0); } /* Values for bmHeaderInfo (Video and Still Image Payload Headers, 2.4.3.3) */ #define UVC_STREAM_EOH (1 << 7) #define UVC_STREAM_ERR (1 << 6) #define UVC_STREAM_STI (1 << 5) #define UVC_STREAM_RES (1 << 4) #define UVC_STREAM_SCR (1 << 3) #define UVC_STREAM_PTS (1 << 2) #define UVC_STREAM_EOF (1 << 1) #define UVC_STREAM_FID (1 << 0) static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *) gspca_dev; __u32 this_pts; u16 this_fid; int remaining_len = len; int payload_len; payload_len = gspca_dev->cam.bulk ? 2048 : 2040; do { len = min(remaining_len, payload_len); /* Payloads are prefixed with a UVC-style header. We consider a frame to start when the FID toggles, or the PTS changes. 
A frame ends when EOF is set, and we've received the correct number of bytes. */ /* Verify UVC header. Header length is always 12 */ if (data[0] != 12 || len < 12) { PDEBUG(D_PACK, "bad header"); goto discard; } /* Check errors */ if (data[1] & UVC_STREAM_ERR) { PDEBUG(D_PACK, "payload error"); goto discard; } /* Extract PTS and FID */ if (!(data[1] & UVC_STREAM_PTS)) { PDEBUG(D_PACK, "PTS not present"); goto discard; } this_pts = (data[5] << 24) | (data[4] << 16) | (data[3] << 8) | data[2]; this_fid = (data[1] & UVC_STREAM_FID) ? 1 : 0; /* If PTS or FID has changed, start a new frame. */ if (this_pts != sd->last_pts || this_fid != sd->last_fid) { if (gspca_dev->last_packet_type == INTER_PACKET) gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); sd->last_pts = this_pts; sd->last_fid = this_fid; gspca_frame_add(gspca_dev, FIRST_PACKET, data + 12, len - 12); /* If this packet is marked as EOF, end the frame */ } else if (data[1] & UVC_STREAM_EOF) { sd->last_pts = 0; if (gspca_dev->pixfmt.pixelformat == V4L2_PIX_FMT_YUYV && gspca_dev->image_len + len - 12 != gspca_dev->pixfmt.width * gspca_dev->pixfmt.height * 2) { PDEBUG(D_PACK, "wrong sized frame"); goto discard; } gspca_frame_add(gspca_dev, LAST_PACKET, data + 12, len - 12); } else { /* Add the data from this payload */ gspca_frame_add(gspca_dev, INTER_PACKET, data + 12, len - 12); } /* Done this payload */ goto scan_next; discard: /* Discard data until a new frame starts. 
*/ gspca_dev->last_packet_type = DISCARD_PACKET; scan_next: remaining_len -= len; data += len; } while (remaining_len > 0); } /* get stream parameters (framerate) */ static void sd_get_streamparm(struct gspca_dev *gspca_dev, struct v4l2_streamparm *parm) { struct v4l2_captureparm *cp = &parm->parm.capture; struct v4l2_fract *tpf = &cp->timeperframe; struct sd *sd = (struct sd *) gspca_dev; cp->capability |= V4L2_CAP_TIMEPERFRAME; tpf->numerator = 1; tpf->denominator = sd->frame_rate; } /* set stream parameters (framerate) */ static void sd_set_streamparm(struct gspca_dev *gspca_dev, struct v4l2_streamparm *parm) { struct v4l2_captureparm *cp = &parm->parm.capture; struct v4l2_fract *tpf = &cp->timeperframe; struct sd *sd = (struct sd *) gspca_dev; /* Set requested framerate */ sd->frame_rate = tpf->denominator / tpf->numerator; if (gspca_dev->streaming) set_frame_rate(gspca_dev); /* Return the actual framerate */ tpf->numerator = 1; tpf->denominator = sd->frame_rate; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .get_streamparm = sd_get_streamparm, .set_streamparm = sd_set_streamparm, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x1415, 0x2000)}, {USB_DEVICE(0x06f8, 0x3002)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
gpl-2.0
htc-mirror/jewel-ics-crc-3.0.8-3fd0422
arch/sh/oprofile/common.c
949
1474
/* * arch/sh/oprofile/init.c * * Copyright (C) 2003 - 2010 Paul Mundt * * Based on arch/mips/oprofile/common.c: * * Copyright (C) 2004, 2005 Ralf Baechle * Copyright (C) 2005 MIPS Technologies, Inc. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/oprofile.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/smp.h> #include <linux/perf_event.h> #include <linux/slab.h> #include <asm/processor.h> extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth); #ifdef CONFIG_HW_PERF_EVENTS /* * This will need to be reworked when multiple PMUs are supported. */ static char *sh_pmu_op_name; char *op_name_from_perf_id(void) { return sh_pmu_op_name; } int __init oprofile_arch_init(struct oprofile_operations *ops) { ops->backtrace = sh_backtrace; if (perf_num_counters() == 0) return -ENODEV; sh_pmu_op_name = kasprintf(GFP_KERNEL, "%s/%s", UTS_MACHINE, perf_pmu_name()); if (unlikely(!sh_pmu_op_name)) return -ENOMEM; return oprofile_perf_init(ops); } void __exit oprofile_arch_exit(void) { oprofile_perf_exit(); kfree(sh_pmu_op_name); } #else int __init oprofile_arch_init(struct oprofile_operations *ops) { ops->backtrace = sh_backtrace; return -ENODEV; } void __exit oprofile_arch_exit(void) {} #endif /* CONFIG_HW_PERF_EVENTS */
gpl-2.0
ExPeacer/CAF_android-msm-2.6.32
sound/pci/ctxfi/ctdaio.c
1205
17793
/** * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved. * * This source file is released under GPL v2 license (no other versions). * See the COPYING file included in the main directory of this source * distribution for the license terms and conditions. * * @File ctdaio.c * * @Brief * This file contains the implementation of Digital Audio Input Output * resource management object. * * @Author Liu Chun * @Date May 23 2008 * */ #include "ctdaio.h" #include "cthardware.h" #include "ctimap.h" #include <linux/slab.h> #include <linux/kernel.h> #define DAIO_RESOURCE_NUM NUM_DAIOTYP #define DAIO_OUT_MAX SPDIFOO union daio_usage { struct { unsigned short lineo1:1; unsigned short lineo2:1; unsigned short lineo3:1; unsigned short lineo4:1; unsigned short spdifoo:1; unsigned short lineim:1; unsigned short spdifio:1; unsigned short spdifi1:1; } bf; unsigned short data; }; struct daio_rsc_idx { unsigned short left; unsigned short right; }; struct daio_rsc_idx idx_20k1[NUM_DAIOTYP] = { [LINEO1] = {.left = 0x00, .right = 0x01}, [LINEO2] = {.left = 0x18, .right = 0x19}, [LINEO3] = {.left = 0x08, .right = 0x09}, [LINEO4] = {.left = 0x10, .right = 0x11}, [LINEIM] = {.left = 0x1b5, .right = 0x1bd}, [SPDIFOO] = {.left = 0x20, .right = 0x21}, [SPDIFIO] = {.left = 0x15, .right = 0x1d}, [SPDIFI1] = {.left = 0x95, .right = 0x9d}, }; struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = { [LINEO1] = {.left = 0x40, .right = 0x41}, [LINEO2] = {.left = 0x60, .right = 0x61}, [LINEO3] = {.left = 0x50, .right = 0x51}, [LINEO4] = {.left = 0x70, .right = 0x71}, [LINEIM] = {.left = 0x45, .right = 0xc5}, [SPDIFOO] = {.left = 0x00, .right = 0x01}, [SPDIFIO] = {.left = 0x05, .right = 0x85}, }; static int daio_master(struct rsc *rsc) { /* Actually, this is not the resource index of DAIO. * For DAO, it is the input mapper index. And, for DAI, * it is the output time-slot index. 
*/ return rsc->conj = rsc->idx; } static int daio_index(const struct rsc *rsc) { return rsc->conj; } static int daio_out_next_conj(struct rsc *rsc) { return rsc->conj += 2; } static int daio_in_next_conj_20k1(struct rsc *rsc) { return rsc->conj += 0x200; } static int daio_in_next_conj_20k2(struct rsc *rsc) { return rsc->conj += 0x100; } static struct rsc_ops daio_out_rsc_ops = { .master = daio_master, .next_conj = daio_out_next_conj, .index = daio_index, .output_slot = NULL, }; static struct rsc_ops daio_in_rsc_ops_20k1 = { .master = daio_master, .next_conj = daio_in_next_conj_20k1, .index = NULL, .output_slot = daio_index, }; static struct rsc_ops daio_in_rsc_ops_20k2 = { .master = daio_master, .next_conj = daio_in_next_conj_20k2, .index = NULL, .output_slot = daio_index, }; static unsigned int daio_device_index(enum DAIOTYP type, struct hw *hw) { switch (hw->chip_type) { case ATC20K1: switch (type) { case SPDIFOO: return 0; case SPDIFIO: return 0; case SPDIFI1: return 1; case LINEO1: return 4; case LINEO2: return 7; case LINEO3: return 5; case LINEO4: return 6; case LINEIM: return 7; default: return -EINVAL; } case ATC20K2: switch (type) { case SPDIFOO: return 0; case SPDIFIO: return 0; case LINEO1: return 4; case LINEO2: return 7; case LINEO3: return 5; case LINEO4: return 6; case LINEIM: return 4; default: return -EINVAL; } default: return -EINVAL; } } static int dao_rsc_reinit(struct dao *dao, const struct dao_desc *desc); static int dao_spdif_get_spos(struct dao *dao, unsigned int *spos) { ((struct hw *)dao->hw)->dao_get_spos(dao->ctrl_blk, spos); return 0; } static int dao_spdif_set_spos(struct dao *dao, unsigned int spos) { ((struct hw *)dao->hw)->dao_set_spos(dao->ctrl_blk, spos); return 0; } static int dao_commit_write(struct dao *dao) { ((struct hw *)dao->hw)->dao_commit_write(dao->hw, daio_device_index(dao->daio.type, dao->hw), dao->ctrl_blk); return 0; } static int dao_set_left_input(struct dao *dao, struct rsc *input) { struct imapper *entry; struct 
daio *daio = &dao->daio; int i; entry = kzalloc((sizeof(*entry) * daio->rscl.msr), GFP_KERNEL); if (!entry) return -ENOMEM; /* Program master and conjugate resources */ input->ops->master(input); daio->rscl.ops->master(&daio->rscl); for (i = 0; i < daio->rscl.msr; i++, entry++) { entry->slot = input->ops->output_slot(input); entry->user = entry->addr = daio->rscl.ops->index(&daio->rscl); dao->mgr->imap_add(dao->mgr, entry); dao->imappers[i] = entry; input->ops->next_conj(input); daio->rscl.ops->next_conj(&daio->rscl); } input->ops->master(input); daio->rscl.ops->master(&daio->rscl); return 0; } static int dao_set_right_input(struct dao *dao, struct rsc *input) { struct imapper *entry; struct daio *daio = &dao->daio; int i; entry = kzalloc((sizeof(*entry) * daio->rscr.msr), GFP_KERNEL); if (!entry) return -ENOMEM; /* Program master and conjugate resources */ input->ops->master(input); daio->rscr.ops->master(&daio->rscr); for (i = 0; i < daio->rscr.msr; i++, entry++) { entry->slot = input->ops->output_slot(input); entry->user = entry->addr = daio->rscr.ops->index(&daio->rscr); dao->mgr->imap_add(dao->mgr, entry); dao->imappers[daio->rscl.msr + i] = entry; input->ops->next_conj(input); daio->rscr.ops->next_conj(&daio->rscr); } input->ops->master(input); daio->rscr.ops->master(&daio->rscr); return 0; } static int dao_clear_left_input(struct dao *dao) { struct imapper *entry; struct daio *daio = &dao->daio; int i; if (!dao->imappers[0]) return 0; entry = dao->imappers[0]; dao->mgr->imap_delete(dao->mgr, entry); /* Program conjugate resources */ for (i = 1; i < daio->rscl.msr; i++) { entry = dao->imappers[i]; dao->mgr->imap_delete(dao->mgr, entry); dao->imappers[i] = NULL; } kfree(dao->imappers[0]); dao->imappers[0] = NULL; return 0; } static int dao_clear_right_input(struct dao *dao) { struct imapper *entry; struct daio *daio = &dao->daio; int i; if (!dao->imappers[daio->rscl.msr]) return 0; entry = dao->imappers[daio->rscl.msr]; dao->mgr->imap_delete(dao->mgr, entry); 
/* Program conjugate resources */ for (i = 1; i < daio->rscr.msr; i++) { entry = dao->imappers[daio->rscl.msr + i]; dao->mgr->imap_delete(dao->mgr, entry); dao->imappers[daio->rscl.msr + i] = NULL; } kfree(dao->imappers[daio->rscl.msr]); dao->imappers[daio->rscl.msr] = NULL; return 0; } static struct dao_rsc_ops dao_ops = { .set_spos = dao_spdif_set_spos, .commit_write = dao_commit_write, .get_spos = dao_spdif_get_spos, .reinit = dao_rsc_reinit, .set_left_input = dao_set_left_input, .set_right_input = dao_set_right_input, .clear_left_input = dao_clear_left_input, .clear_right_input = dao_clear_right_input, }; static int dai_set_srt_srcl(struct dai *dai, struct rsc *src) { src->ops->master(src); ((struct hw *)dai->hw)->dai_srt_set_srcm(dai->ctrl_blk, src->ops->index(src)); return 0; } static int dai_set_srt_srcr(struct dai *dai, struct rsc *src) { src->ops->master(src); ((struct hw *)dai->hw)->dai_srt_set_srco(dai->ctrl_blk, src->ops->index(src)); return 0; } static int dai_set_srt_msr(struct dai *dai, unsigned int msr) { unsigned int rsr; for (rsr = 0; msr > 1; msr >>= 1) rsr++; ((struct hw *)dai->hw)->dai_srt_set_rsr(dai->ctrl_blk, rsr); return 0; } static int dai_set_enb_src(struct dai *dai, unsigned int enb) { ((struct hw *)dai->hw)->dai_srt_set_ec(dai->ctrl_blk, enb); return 0; } static int dai_set_enb_srt(struct dai *dai, unsigned int enb) { ((struct hw *)dai->hw)->dai_srt_set_et(dai->ctrl_blk, enb); return 0; } static int dai_commit_write(struct dai *dai) { ((struct hw *)dai->hw)->dai_commit_write(dai->hw, daio_device_index(dai->daio.type, dai->hw), dai->ctrl_blk); return 0; } static struct dai_rsc_ops dai_ops = { .set_srt_srcl = dai_set_srt_srcl, .set_srt_srcr = dai_set_srt_srcr, .set_srt_msr = dai_set_srt_msr, .set_enb_src = dai_set_enb_src, .set_enb_srt = dai_set_enb_srt, .commit_write = dai_commit_write, }; static int daio_rsc_init(struct daio *daio, const struct daio_desc *desc, void *hw) { int err; unsigned int idx_l, idx_r; switch (((struct hw 
*)hw)->chip_type) { case ATC20K1: idx_l = idx_20k1[desc->type].left; idx_r = idx_20k1[desc->type].right; break; case ATC20K2: idx_l = idx_20k2[desc->type].left; idx_r = idx_20k2[desc->type].right; break; default: return -EINVAL; } err = rsc_init(&daio->rscl, idx_l, DAIO, desc->msr, hw); if (err) return err; err = rsc_init(&daio->rscr, idx_r, DAIO, desc->msr, hw); if (err) goto error1; /* Set daio->rscl/r->ops to daio specific ones */ if (desc->type <= DAIO_OUT_MAX) { daio->rscl.ops = daio->rscr.ops = &daio_out_rsc_ops; } else { switch (((struct hw *)hw)->chip_type) { case ATC20K1: daio->rscl.ops = daio->rscr.ops = &daio_in_rsc_ops_20k1; break; case ATC20K2: daio->rscl.ops = daio->rscr.ops = &daio_in_rsc_ops_20k2; break; default: break; } } daio->type = desc->type; return 0; error1: rsc_uninit(&daio->rscl); return err; } static int daio_rsc_uninit(struct daio *daio) { rsc_uninit(&daio->rscl); rsc_uninit(&daio->rscr); return 0; } static int dao_rsc_init(struct dao *dao, const struct daio_desc *desc, struct daio_mgr *mgr) { struct hw *hw = mgr->mgr.hw; unsigned int conf; int err; err = daio_rsc_init(&dao->daio, desc, mgr->mgr.hw); if (err) return err; dao->imappers = kzalloc(sizeof(void *)*desc->msr*2, GFP_KERNEL); if (!dao->imappers) { err = -ENOMEM; goto error1; } dao->ops = &dao_ops; dao->mgr = mgr; dao->hw = hw; err = hw->dao_get_ctrl_blk(&dao->ctrl_blk); if (err) goto error2; hw->daio_mgr_dsb_dao(mgr->mgr.ctrl_blk, daio_device_index(dao->daio.type, hw)); hw->daio_mgr_commit_write(hw, mgr->mgr.ctrl_blk); conf = (desc->msr & 0x7) | (desc->passthru << 3); hw->daio_mgr_dao_init(mgr->mgr.ctrl_blk, daio_device_index(dao->daio.type, hw), conf); hw->daio_mgr_enb_dao(mgr->mgr.ctrl_blk, daio_device_index(dao->daio.type, hw)); hw->daio_mgr_commit_write(hw, mgr->mgr.ctrl_blk); return 0; error2: kfree(dao->imappers); dao->imappers = NULL; error1: daio_rsc_uninit(&dao->daio); return err; } static int dao_rsc_uninit(struct dao *dao) { if (dao->imappers) { if (dao->imappers[0]) 
dao_clear_left_input(dao); if (dao->imappers[dao->daio.rscl.msr]) dao_clear_right_input(dao); kfree(dao->imappers); dao->imappers = NULL; } ((struct hw *)dao->hw)->dao_put_ctrl_blk(dao->ctrl_blk); dao->hw = dao->ctrl_blk = NULL; daio_rsc_uninit(&dao->daio); return 0; } static int dao_rsc_reinit(struct dao *dao, const struct dao_desc *desc) { struct daio_mgr *mgr = dao->mgr; struct daio_desc dsc = {0}; dsc.type = dao->daio.type; dsc.msr = desc->msr; dsc.passthru = desc->passthru; dao_rsc_uninit(dao); return dao_rsc_init(dao, &dsc, mgr); } static int dai_rsc_init(struct dai *dai, const struct daio_desc *desc, struct daio_mgr *mgr) { int err; struct hw *hw = mgr->mgr.hw; unsigned int rsr, msr; err = daio_rsc_init(&dai->daio, desc, mgr->mgr.hw); if (err) return err; dai->ops = &dai_ops; dai->hw = mgr->mgr.hw; err = hw->dai_get_ctrl_blk(&dai->ctrl_blk); if (err) goto error1; for (rsr = 0, msr = desc->msr; msr > 1; msr >>= 1) rsr++; hw->dai_srt_set_rsr(dai->ctrl_blk, rsr); hw->dai_srt_set_drat(dai->ctrl_blk, 0); /* default to disabling control of a SRC */ hw->dai_srt_set_ec(dai->ctrl_blk, 0); hw->dai_srt_set_et(dai->ctrl_blk, 0); /* default to disabling SRT */ hw->dai_commit_write(hw, daio_device_index(dai->daio.type, dai->hw), dai->ctrl_blk); return 0; error1: daio_rsc_uninit(&dai->daio); return err; } static int dai_rsc_uninit(struct dai *dai) { ((struct hw *)dai->hw)->dai_put_ctrl_blk(dai->ctrl_blk); dai->hw = dai->ctrl_blk = NULL; daio_rsc_uninit(&dai->daio); return 0; } static int daio_mgr_get_rsc(struct rsc_mgr *mgr, enum DAIOTYP type) { if (((union daio_usage *)mgr->rscs)->data & (0x1 << type)) return -ENOENT; ((union daio_usage *)mgr->rscs)->data |= (0x1 << type); return 0; } static int daio_mgr_put_rsc(struct rsc_mgr *mgr, enum DAIOTYP type) { ((union daio_usage *)mgr->rscs)->data &= ~(0x1 << type); return 0; } static int get_daio_rsc(struct daio_mgr *mgr, const struct daio_desc *desc, struct daio **rdaio) { int err; struct dai *dai = NULL; struct dao *dao = 
NULL; unsigned long flags; *rdaio = NULL; /* Check whether there are sufficient daio resources to meet request. */ spin_lock_irqsave(&mgr->mgr_lock, flags); err = daio_mgr_get_rsc(&mgr->mgr, desc->type); spin_unlock_irqrestore(&mgr->mgr_lock, flags); if (err) { printk(KERN_ERR "Can't meet DAIO resource request!\n"); return err; } /* Allocate mem for daio resource */ if (desc->type <= DAIO_OUT_MAX) { dao = kzalloc(sizeof(*dao), GFP_KERNEL); if (!dao) { err = -ENOMEM; goto error; } err = dao_rsc_init(dao, desc, mgr); if (err) goto error; *rdaio = &dao->daio; } else { dai = kzalloc(sizeof(*dai), GFP_KERNEL); if (!dai) { err = -ENOMEM; goto error; } err = dai_rsc_init(dai, desc, mgr); if (err) goto error; *rdaio = &dai->daio; } mgr->daio_enable(mgr, *rdaio); mgr->commit_write(mgr); return 0; error: if (dao) kfree(dao); else if (dai) kfree(dai); spin_lock_irqsave(&mgr->mgr_lock, flags); daio_mgr_put_rsc(&mgr->mgr, desc->type); spin_unlock_irqrestore(&mgr->mgr_lock, flags); return err; } static int put_daio_rsc(struct daio_mgr *mgr, struct daio *daio) { unsigned long flags; mgr->daio_disable(mgr, daio); mgr->commit_write(mgr); spin_lock_irqsave(&mgr->mgr_lock, flags); daio_mgr_put_rsc(&mgr->mgr, daio->type); spin_unlock_irqrestore(&mgr->mgr_lock, flags); if (daio->type <= DAIO_OUT_MAX) { dao_rsc_uninit(container_of(daio, struct dao, daio)); kfree(container_of(daio, struct dao, daio)); } else { dai_rsc_uninit(container_of(daio, struct dai, daio)); kfree(container_of(daio, struct dai, daio)); } return 0; } static int daio_mgr_enb_daio(struct daio_mgr *mgr, struct daio *daio) { struct hw *hw = mgr->mgr.hw; if (DAIO_OUT_MAX >= daio->type) { hw->daio_mgr_enb_dao(mgr->mgr.ctrl_blk, daio_device_index(daio->type, hw)); } else { hw->daio_mgr_enb_dai(mgr->mgr.ctrl_blk, daio_device_index(daio->type, hw)); } return 0; } static int daio_mgr_dsb_daio(struct daio_mgr *mgr, struct daio *daio) { struct hw *hw = mgr->mgr.hw; if (DAIO_OUT_MAX >= daio->type) { 
hw->daio_mgr_dsb_dao(mgr->mgr.ctrl_blk, daio_device_index(daio->type, hw)); } else { hw->daio_mgr_dsb_dai(mgr->mgr.ctrl_blk, daio_device_index(daio->type, hw)); } return 0; } static int daio_map_op(void *data, struct imapper *entry) { struct rsc_mgr *mgr = &((struct daio_mgr *)data)->mgr; struct hw *hw = mgr->hw; hw->daio_mgr_set_imaparc(mgr->ctrl_blk, entry->slot); hw->daio_mgr_set_imapnxt(mgr->ctrl_blk, entry->next); hw->daio_mgr_set_imapaddr(mgr->ctrl_blk, entry->addr); hw->daio_mgr_commit_write(mgr->hw, mgr->ctrl_blk); return 0; } static int daio_imap_add(struct daio_mgr *mgr, struct imapper *entry) { unsigned long flags; int err; spin_lock_irqsave(&mgr->imap_lock, flags); if (!entry->addr && mgr->init_imap_added) { input_mapper_delete(&mgr->imappers, mgr->init_imap, daio_map_op, mgr); mgr->init_imap_added = 0; } err = input_mapper_add(&mgr->imappers, entry, daio_map_op, mgr); spin_unlock_irqrestore(&mgr->imap_lock, flags); return err; } static int daio_imap_delete(struct daio_mgr *mgr, struct imapper *entry) { unsigned long flags; int err; spin_lock_irqsave(&mgr->imap_lock, flags); err = input_mapper_delete(&mgr->imappers, entry, daio_map_op, mgr); if (list_empty(&mgr->imappers)) { input_mapper_add(&mgr->imappers, mgr->init_imap, daio_map_op, mgr); mgr->init_imap_added = 1; } spin_unlock_irqrestore(&mgr->imap_lock, flags); return err; } static int daio_mgr_commit_write(struct daio_mgr *mgr) { struct hw *hw = mgr->mgr.hw; hw->daio_mgr_commit_write(hw, mgr->mgr.ctrl_blk); return 0; } int daio_mgr_create(void *hw, struct daio_mgr **rdaio_mgr) { int err, i; struct daio_mgr *daio_mgr; struct imapper *entry; *rdaio_mgr = NULL; daio_mgr = kzalloc(sizeof(*daio_mgr), GFP_KERNEL); if (!daio_mgr) return -ENOMEM; err = rsc_mgr_init(&daio_mgr->mgr, DAIO, DAIO_RESOURCE_NUM, hw); if (err) goto error1; spin_lock_init(&daio_mgr->mgr_lock); spin_lock_init(&daio_mgr->imap_lock); INIT_LIST_HEAD(&daio_mgr->imappers); entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { err = 
-ENOMEM; goto error2; } entry->slot = entry->addr = entry->next = entry->user = 0; list_add(&entry->list, &daio_mgr->imappers); daio_mgr->init_imap = entry; daio_mgr->init_imap_added = 1; daio_mgr->get_daio = get_daio_rsc; daio_mgr->put_daio = put_daio_rsc; daio_mgr->daio_enable = daio_mgr_enb_daio; daio_mgr->daio_disable = daio_mgr_dsb_daio; daio_mgr->imap_add = daio_imap_add; daio_mgr->imap_delete = daio_imap_delete; daio_mgr->commit_write = daio_mgr_commit_write; for (i = 0; i < 8; i++) { ((struct hw *)hw)->daio_mgr_dsb_dao(daio_mgr->mgr.ctrl_blk, i); ((struct hw *)hw)->daio_mgr_dsb_dai(daio_mgr->mgr.ctrl_blk, i); } ((struct hw *)hw)->daio_mgr_commit_write(hw, daio_mgr->mgr.ctrl_blk); *rdaio_mgr = daio_mgr; return 0; error2: rsc_mgr_uninit(&daio_mgr->mgr); error1: kfree(daio_mgr); return err; } int daio_mgr_destroy(struct daio_mgr *daio_mgr) { unsigned long flags; /* free daio input mapper list */ spin_lock_irqsave(&daio_mgr->imap_lock, flags); free_input_mapper_list(&daio_mgr->imappers); spin_unlock_irqrestore(&daio_mgr->imap_lock, flags); rsc_mgr_uninit(&daio_mgr->mgr); kfree(daio_mgr); return 0; }
gpl-2.0
kevinzyuan/ok6410
arch/powerpc/oprofile/op_model_fsl_emb.c
1461
6543
/* * Freescale Embedded oprofile support, based on ppc64 oprofile support * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM * * Copyright (c) 2004 Freescale Semiconductor, Inc * * Author: Andy Fleming * Maintainer: Kumar Gala <galak@kernel.crashing.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/oprofile.h> #include <linux/init.h> #include <linux/smp.h> #include <asm/ptrace.h> #include <asm/system.h> #include <asm/processor.h> #include <asm/cputable.h> #include <asm/reg_fsl_emb.h> #include <asm/page.h> #include <asm/pmc.h> #include <asm/oprofile_impl.h> static unsigned long reset_value[OP_MAX_COUNTER]; static int num_counters; static int oprofile_running; static inline u32 get_pmlca(int ctr) { u32 pmlca; switch (ctr) { case 0: pmlca = mfpmr(PMRN_PMLCA0); break; case 1: pmlca = mfpmr(PMRN_PMLCA1); break; case 2: pmlca = mfpmr(PMRN_PMLCA2); break; case 3: pmlca = mfpmr(PMRN_PMLCA3); break; default: panic("Bad ctr number\n"); } return pmlca; } static inline void set_pmlca(int ctr, u32 pmlca) { switch (ctr) { case 0: mtpmr(PMRN_PMLCA0, pmlca); break; case 1: mtpmr(PMRN_PMLCA1, pmlca); break; case 2: mtpmr(PMRN_PMLCA2, pmlca); break; case 3: mtpmr(PMRN_PMLCA3, pmlca); break; default: panic("Bad ctr number\n"); } } static inline unsigned int ctr_read(unsigned int i) { switch(i) { case 0: return mfpmr(PMRN_PMC0); case 1: return mfpmr(PMRN_PMC1); case 2: return mfpmr(PMRN_PMC2); case 3: return mfpmr(PMRN_PMC3); default: return 0; } } static inline void ctr_write(unsigned int i, unsigned int val) { switch(i) { case 0: mtpmr(PMRN_PMC0, val); break; case 1: mtpmr(PMRN_PMC1, val); break; case 2: mtpmr(PMRN_PMC2, val); break; case 3: mtpmr(PMRN_PMC3, val); break; default: break; } } static void init_pmc_stop(int ctr) { u32 pmlca = (PMLCA_FC | 
PMLCA_FCS | PMLCA_FCU | PMLCA_FCM1 | PMLCA_FCM0); u32 pmlcb = 0; switch (ctr) { case 0: mtpmr(PMRN_PMLCA0, pmlca); mtpmr(PMRN_PMLCB0, pmlcb); break; case 1: mtpmr(PMRN_PMLCA1, pmlca); mtpmr(PMRN_PMLCB1, pmlcb); break; case 2: mtpmr(PMRN_PMLCA2, pmlca); mtpmr(PMRN_PMLCB2, pmlcb); break; case 3: mtpmr(PMRN_PMLCA3, pmlca); mtpmr(PMRN_PMLCB3, pmlcb); break; default: panic("Bad ctr number!\n"); } } static void set_pmc_event(int ctr, int event) { u32 pmlca; pmlca = get_pmlca(ctr); pmlca = (pmlca & ~PMLCA_EVENT_MASK) | ((event << PMLCA_EVENT_SHIFT) & PMLCA_EVENT_MASK); set_pmlca(ctr, pmlca); } static void set_pmc_user_kernel(int ctr, int user, int kernel) { u32 pmlca; pmlca = get_pmlca(ctr); if(user) pmlca &= ~PMLCA_FCU; else pmlca |= PMLCA_FCU; if(kernel) pmlca &= ~PMLCA_FCS; else pmlca |= PMLCA_FCS; set_pmlca(ctr, pmlca); } static void set_pmc_marked(int ctr, int mark0, int mark1) { u32 pmlca = get_pmlca(ctr); if(mark0) pmlca &= ~PMLCA_FCM0; else pmlca |= PMLCA_FCM0; if(mark1) pmlca &= ~PMLCA_FCM1; else pmlca |= PMLCA_FCM1; set_pmlca(ctr, pmlca); } static void pmc_start_ctr(int ctr, int enable) { u32 pmlca = get_pmlca(ctr); pmlca &= ~PMLCA_FC; if (enable) pmlca |= PMLCA_CE; else pmlca &= ~PMLCA_CE; set_pmlca(ctr, pmlca); } static void pmc_start_ctrs(int enable) { u32 pmgc0 = mfpmr(PMRN_PMGC0); pmgc0 &= ~PMGC0_FAC; pmgc0 |= PMGC0_FCECE; if (enable) pmgc0 |= PMGC0_PMIE; else pmgc0 &= ~PMGC0_PMIE; mtpmr(PMRN_PMGC0, pmgc0); } static void pmc_stop_ctrs(void) { u32 pmgc0 = mfpmr(PMRN_PMGC0); pmgc0 |= PMGC0_FAC; pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE); mtpmr(PMRN_PMGC0, pmgc0); } static int fsl_emb_cpu_setup(struct op_counter_config *ctr) { int i; /* freeze all counters */ pmc_stop_ctrs(); for (i = 0;i < num_counters;i++) { init_pmc_stop(i); set_pmc_event(i, ctr[i].event); set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel); } return 0; } static int fsl_emb_reg_setup(struct op_counter_config *ctr, struct op_system_config *sys, int num_ctrs) { int i; num_counters = num_ctrs; /* Our 
counters count up, and "count" refers to * how much before the next interrupt, and we interrupt * on overflow. So we calculate the starting value * which will give us "count" until overflow. * Then we set the events on the enabled counters */ for (i = 0; i < num_counters; ++i) reset_value[i] = 0x80000000UL - ctr[i].count; return 0; } static int fsl_emb_start(struct op_counter_config *ctr) { int i; mtmsr(mfmsr() | MSR_PMM); for (i = 0; i < num_counters; ++i) { if (ctr[i].enabled) { ctr_write(i, reset_value[i]); /* Set each enabled counter to only * count when the Mark bit is *not* set */ set_pmc_marked(i, 1, 0); pmc_start_ctr(i, 1); } else { ctr_write(i, 0); /* Set the ctr to be stopped */ pmc_start_ctr(i, 0); } } /* Clear the freeze bit, and enable the interrupt. * The counters won't actually start until the rfi clears * the PMM bit */ pmc_start_ctrs(1); oprofile_running = 1; pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(), mfpmr(PMRN_PMGC0)); return 0; } static void fsl_emb_stop(void) { /* freeze counters */ pmc_stop_ctrs(); oprofile_running = 0; pr_debug("stop on cpu %d, pmgc0 %x\n", smp_processor_id(), mfpmr(PMRN_PMGC0)); mb(); } static void fsl_emb_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr) { unsigned long pc; int is_kernel; int val; int i; /* set the PMM bit (see comment below) */ mtmsr(mfmsr() | MSR_PMM); pc = regs->nip; is_kernel = is_kernel_addr(pc); for (i = 0; i < num_counters; ++i) { val = ctr_read(i); if (val < 0) { if (oprofile_running && ctr[i].enabled) { oprofile_add_ext_sample(pc, regs, i, is_kernel); ctr_write(i, reset_value[i]); } else { ctr_write(i, 0); } } } /* The freeze bit was set by the interrupt. */ /* Clear the freeze bit, and reenable the interrupt. 
* The counters won't actually start until the rfi clears * the PMM bit */ pmc_start_ctrs(1); } struct op_powerpc_model op_model_fsl_emb = { .reg_setup = fsl_emb_reg_setup, .cpu_setup = fsl_emb_cpu_setup, .start = fsl_emb_start, .stop = fsl_emb_stop, .handle_interrupt = fsl_emb_handle_interrupt, };
gpl-2.0
k2wl/i9105Sammy
drivers/net/wireless/ath/ath9k/htc_drv_init.c
1717
27119
/* * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "htc.h" MODULE_AUTHOR("Atheros Communications"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("Atheros driver 802.11n HTC based wireless devices"); static unsigned int ath9k_debug = ATH_DBG_DEFAULT; module_param_named(debug, ath9k_debug, uint, 0); MODULE_PARM_DESC(debug, "Debugging mask"); int htc_modparam_nohwcrypt; module_param_named(nohwcrypt, htc_modparam_nohwcrypt, int, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption"); #define CHAN2G(_freq, _idx) { \ .center_freq = (_freq), \ .hw_value = (_idx), \ .max_power = 20, \ } #define CHAN5G(_freq, _idx) { \ .band = IEEE80211_BAND_5GHZ, \ .center_freq = (_freq), \ .hw_value = (_idx), \ .max_power = 20, \ } #define ATH_HTC_BTCOEX_PRODUCT_ID "wb193" static struct ieee80211_channel ath9k_2ghz_channels[] = { CHAN2G(2412, 0), /* Channel 1 */ CHAN2G(2417, 1), /* Channel 2 */ CHAN2G(2422, 2), /* Channel 3 */ CHAN2G(2427, 3), /* Channel 4 */ CHAN2G(2432, 4), /* Channel 5 */ CHAN2G(2437, 5), /* Channel 6 */ CHAN2G(2442, 6), /* Channel 7 */ CHAN2G(2447, 7), /* Channel 8 */ CHAN2G(2452, 8), /* Channel 9 */ CHAN2G(2457, 9), /* Channel 10 */ CHAN2G(2462, 10), /* Channel 11 */ CHAN2G(2467, 11), /* Channel 12 */ CHAN2G(2472, 12), /* 
Channel 13 */ CHAN2G(2484, 13), /* Channel 14 */ }; static struct ieee80211_channel ath9k_5ghz_channels[] = { /* _We_ call this UNII 1 */ CHAN5G(5180, 14), /* Channel 36 */ CHAN5G(5200, 15), /* Channel 40 */ CHAN5G(5220, 16), /* Channel 44 */ CHAN5G(5240, 17), /* Channel 48 */ /* _We_ call this UNII 2 */ CHAN5G(5260, 18), /* Channel 52 */ CHAN5G(5280, 19), /* Channel 56 */ CHAN5G(5300, 20), /* Channel 60 */ CHAN5G(5320, 21), /* Channel 64 */ /* _We_ call this "Middle band" */ CHAN5G(5500, 22), /* Channel 100 */ CHAN5G(5520, 23), /* Channel 104 */ CHAN5G(5540, 24), /* Channel 108 */ CHAN5G(5560, 25), /* Channel 112 */ CHAN5G(5580, 26), /* Channel 116 */ CHAN5G(5600, 27), /* Channel 120 */ CHAN5G(5620, 28), /* Channel 124 */ CHAN5G(5640, 29), /* Channel 128 */ CHAN5G(5660, 30), /* Channel 132 */ CHAN5G(5680, 31), /* Channel 136 */ CHAN5G(5700, 32), /* Channel 140 */ /* _We_ call this UNII 3 */ CHAN5G(5745, 33), /* Channel 149 */ CHAN5G(5765, 34), /* Channel 153 */ CHAN5G(5785, 35), /* Channel 157 */ CHAN5G(5805, 36), /* Channel 161 */ CHAN5G(5825, 37), /* Channel 165 */ }; /* Atheros hardware rate code addition for short premble */ #define SHPCHECK(__hw_rate, __flags) \ ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? 
(__hw_rate | 0x04) : 0) #define RATE(_bitrate, _hw_rate, _flags) { \ .bitrate = (_bitrate), \ .flags = (_flags), \ .hw_value = (_hw_rate), \ .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \ } static struct ieee80211_rate ath9k_legacy_rates[] = { RATE(10, 0x1b, 0), RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE), /* shortp : 0x1e */ RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE), /* shortp: 0x1d */ RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE), /* short: 0x1c */ RATE(60, 0x0b, 0), RATE(90, 0x0f, 0), RATE(120, 0x0a, 0), RATE(180, 0x0e, 0), RATE(240, 0x09, 0), RATE(360, 0x0d, 0), RATE(480, 0x08, 0), RATE(540, 0x0c, 0), }; #ifdef CONFIG_MAC80211_LEDS static const struct ieee80211_tpt_blink ath9k_htc_tpt_blink[] = { { .throughput = 0 * 1024, .blink_time = 334 }, { .throughput = 1 * 1024, .blink_time = 260 }, { .throughput = 5 * 1024, .blink_time = 220 }, { .throughput = 10 * 1024, .blink_time = 190 }, { .throughput = 20 * 1024, .blink_time = 170 }, { .throughput = 50 * 1024, .blink_time = 150 }, { .throughput = 70 * 1024, .blink_time = 130 }, { .throughput = 100 * 1024, .blink_time = 110 }, { .throughput = 200 * 1024, .blink_time = 80 }, { .throughput = 300 * 1024, .blink_time = 50 }, }; #endif static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv) { int time_left; if (atomic_read(&priv->htc->tgt_ready) > 0) { atomic_dec(&priv->htc->tgt_ready); return 0; } /* Firmware can take up to 50ms to get ready, to be safe use 1 second */ time_left = wait_for_completion_timeout(&priv->htc->target_wait, HZ); if (!time_left) { dev_err(priv->dev, "ath9k_htc: Target is unresponsive\n"); return -ETIMEDOUT; } atomic_dec(&priv->htc->tgt_ready); return 0; } static void ath9k_deinit_priv(struct ath9k_htc_priv *priv) { ath9k_hw_deinit(priv->ah); kfree(priv->ah); priv->ah = NULL; } static void ath9k_deinit_device(struct ath9k_htc_priv *priv) { struct ieee80211_hw *hw = priv->hw; wiphy_rfkill_stop_polling(hw->wiphy); ath9k_deinit_leds(priv); ieee80211_unregister_hw(hw); 
ath9k_rx_cleanup(priv); ath9k_tx_cleanup(priv); ath9k_deinit_priv(priv); } static inline int ath9k_htc_connect_svc(struct ath9k_htc_priv *priv, u16 service_id, void (*tx) (void *, struct sk_buff *, enum htc_endpoint_id, bool txok), enum htc_endpoint_id *ep_id) { struct htc_service_connreq req; memset(&req, 0, sizeof(struct htc_service_connreq)); req.service_id = service_id; req.ep_callbacks.priv = priv; req.ep_callbacks.rx = ath9k_htc_rxep; req.ep_callbacks.tx = tx; return htc_connect_service(priv->htc, &req, ep_id); } static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid, u32 drv_info) { int ret; /* WMI CMD*/ ret = ath9k_wmi_connect(priv->htc, priv->wmi, &priv->wmi_cmd_ep); if (ret) goto err; /* Beacon */ ret = ath9k_htc_connect_svc(priv, WMI_BEACON_SVC, ath9k_htc_beaconep, &priv->beacon_ep); if (ret) goto err; /* CAB */ ret = ath9k_htc_connect_svc(priv, WMI_CAB_SVC, ath9k_htc_txep, &priv->cab_ep); if (ret) goto err; /* UAPSD */ ret = ath9k_htc_connect_svc(priv, WMI_UAPSD_SVC, ath9k_htc_txep, &priv->uapsd_ep); if (ret) goto err; /* MGMT */ ret = ath9k_htc_connect_svc(priv, WMI_MGMT_SVC, ath9k_htc_txep, &priv->mgmt_ep); if (ret) goto err; /* DATA BE */ ret = ath9k_htc_connect_svc(priv, WMI_DATA_BE_SVC, ath9k_htc_txep, &priv->data_be_ep); if (ret) goto err; /* DATA BK */ ret = ath9k_htc_connect_svc(priv, WMI_DATA_BK_SVC, ath9k_htc_txep, &priv->data_bk_ep); if (ret) goto err; /* DATA VI */ ret = ath9k_htc_connect_svc(priv, WMI_DATA_VI_SVC, ath9k_htc_txep, &priv->data_vi_ep); if (ret) goto err; /* DATA VO */ ret = ath9k_htc_connect_svc(priv, WMI_DATA_VO_SVC, ath9k_htc_txep, &priv->data_vo_ep); if (ret) goto err; /* * Setup required credits before initializing HTC. * This is a bit hacky, but, since queuing is done in * the HIF layer, shouldn't matter much. 
*/ if (IS_AR7010_DEVICE(drv_info)) priv->htc->credits = 45; else priv->htc->credits = 33; ret = htc_init(priv->htc); if (ret) goto err; dev_info(priv->dev, "ath9k_htc: HTC initialized with %d credits\n", priv->htc->credits); return 0; err: dev_err(priv->dev, "ath9k_htc: Unable to initialize HTC services\n"); return ret; } static int ath9k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct ath9k_htc_priv *priv = hw->priv; return ath_reg_notifier_apply(wiphy, request, ath9k_hw_regulatory(priv->ah)); } static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset) { struct ath_hw *ah = (struct ath_hw *) hw_priv; struct ath_common *common = ath9k_hw_common(ah); struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv; __be32 val, reg = cpu_to_be32(reg_offset); int r; r = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID, (u8 *) &reg, sizeof(reg), (u8 *) &val, sizeof(val), 100); if (unlikely(r)) { ath_dbg(common, ATH_DBG_WMI, "REGISTER READ FAILED: (0x%04x, %d)\n", reg_offset, r); return -EIO; } return be32_to_cpu(val); } static void ath9k_multi_regread(void *hw_priv, u32 *addr, u32 *val, u16 count) { struct ath_hw *ah = (struct ath_hw *) hw_priv; struct ath_common *common = ath9k_hw_common(ah); struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv; __be32 tmpaddr[8]; __be32 tmpval[8]; int i, ret; for (i = 0; i < count; i++) { tmpaddr[i] = cpu_to_be32(addr[i]); } ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID, (u8 *)tmpaddr , sizeof(u32) * count, (u8 *)tmpval, sizeof(u32) * count, 100); if (unlikely(ret)) { ath_dbg(common, ATH_DBG_WMI, "Multiple REGISTER READ FAILED (count: %d)\n", count); } for (i = 0; i < count; i++) { val[i] = be32_to_cpu(tmpval[i]); } } static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset) { struct ath_hw *ah = (struct ath_hw *) hw_priv; struct ath_common *common = ath9k_hw_common(ah); struct ath9k_htc_priv *priv = 
(struct ath9k_htc_priv *) common->priv; const __be32 buf[2] = { cpu_to_be32(reg_offset), cpu_to_be32(val), }; int r; r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID, (u8 *) &buf, sizeof(buf), (u8 *) &val, sizeof(val), 100); if (unlikely(r)) { ath_dbg(common, ATH_DBG_WMI, "REGISTER WRITE FAILED:(0x%04x, %d)\n", reg_offset, r); } } static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset) { struct ath_hw *ah = (struct ath_hw *) hw_priv; struct ath_common *common = ath9k_hw_common(ah); struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv; u32 rsp_status; int r; mutex_lock(&priv->wmi->multi_write_mutex); /* Store the register/value */ priv->wmi->multi_write[priv->wmi->multi_write_idx].reg = cpu_to_be32(reg_offset); priv->wmi->multi_write[priv->wmi->multi_write_idx].val = cpu_to_be32(val); priv->wmi->multi_write_idx++; /* If the buffer is full, send it out. */ if (priv->wmi->multi_write_idx == MAX_CMD_NUMBER) { r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID, (u8 *) &priv->wmi->multi_write, sizeof(struct register_write) * priv->wmi->multi_write_idx, (u8 *) &rsp_status, sizeof(rsp_status), 100); if (unlikely(r)) { ath_dbg(common, ATH_DBG_WMI, "REGISTER WRITE FAILED, multi len: %d\n", priv->wmi->multi_write_idx); } priv->wmi->multi_write_idx = 0; } mutex_unlock(&priv->wmi->multi_write_mutex); } static void ath9k_regwrite(void *hw_priv, u32 val, u32 reg_offset) { struct ath_hw *ah = (struct ath_hw *) hw_priv; struct ath_common *common = ath9k_hw_common(ah); struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv; if (atomic_read(&priv->wmi->mwrite_cnt)) ath9k_regwrite_buffer(hw_priv, val, reg_offset); else ath9k_regwrite_single(hw_priv, val, reg_offset); } static void ath9k_enable_regwrite_buffer(void *hw_priv) { struct ath_hw *ah = (struct ath_hw *) hw_priv; struct ath_common *common = ath9k_hw_common(ah); struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv; atomic_inc(&priv->wmi->mwrite_cnt); } static void 
ath9k_regwrite_flush(void *hw_priv) { struct ath_hw *ah = (struct ath_hw *) hw_priv; struct ath_common *common = ath9k_hw_common(ah); struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv; u32 rsp_status; int r; atomic_dec(&priv->wmi->mwrite_cnt); mutex_lock(&priv->wmi->multi_write_mutex); if (priv->wmi->multi_write_idx) { r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID, (u8 *) &priv->wmi->multi_write, sizeof(struct register_write) * priv->wmi->multi_write_idx, (u8 *) &rsp_status, sizeof(rsp_status), 100); if (unlikely(r)) { ath_dbg(common, ATH_DBG_WMI, "REGISTER WRITE FAILED, multi len: %d\n", priv->wmi->multi_write_idx); } priv->wmi->multi_write_idx = 0; } mutex_unlock(&priv->wmi->multi_write_mutex); } static u32 ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr) { u32 val; val = ath9k_regread(hw_priv, reg_offset); val &= ~clr; val |= set; ath9k_regwrite(hw_priv, val, reg_offset); return val; } static void ath_usb_read_cachesize(struct ath_common *common, int *csz) { *csz = L1_CACHE_BYTES >> 2; } static bool ath_usb_eeprom_read(struct ath_common *common, u32 off, u16 *data) { struct ath_hw *ah = (struct ath_hw *) common->ah; (void)REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S)); if (!ath9k_hw_wait(ah, AR_EEPROM_STATUS_DATA, AR_EEPROM_STATUS_DATA_BUSY | AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0, AH_WAIT_TIMEOUT)) return false; *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA), AR_EEPROM_STATUS_DATA_VAL); return true; } static const struct ath_bus_ops ath9k_usb_bus_ops = { .ath_bus_type = ATH_USB, .read_cachesize = ath_usb_read_cachesize, .eeprom_read = ath_usb_eeprom_read, }; static void setup_ht_cap(struct ath9k_htc_priv *priv, struct ieee80211_sta_ht_cap *ht_info) { struct ath_common *common = ath9k_hw_common(priv->ah); u8 tx_streams, rx_streams; int i; ht_info->ht_supported = true; ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_SM_PS | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_DSSSCCK40; if 
(priv->ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20) ht_info->cap |= IEEE80211_HT_CAP_SGI_20; ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); /* ath9k_htc supports only 1 or 2 stream devices */ tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, 2); rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, 2); ath_dbg(common, ATH_DBG_CONFIG, "TX streams %d, RX streams: %d\n", tx_streams, rx_streams); if (tx_streams != rx_streams) { ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; ht_info->mcs.tx_params |= ((tx_streams - 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); } for (i = 0; i < rx_streams; i++) ht_info->mcs.rx_mask[i] = 0xff; ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; } static int ath9k_init_queues(struct ath9k_htc_priv *priv) { struct ath_common *common = ath9k_hw_common(priv->ah); int i; for (i = 0; i < ARRAY_SIZE(priv->hwq_map); i++) priv->hwq_map[i] = -1; priv->beaconq = ath9k_hw_beaconq_setup(priv->ah); if (priv->beaconq == -1) { ath_err(common, "Unable to setup BEACON xmit queue\n"); goto err; } priv->cabq = ath9k_htc_cabq_setup(priv); if (priv->cabq == -1) { ath_err(common, "Unable to setup CAB xmit queue\n"); goto err; } if (!ath9k_htc_txq_setup(priv, WME_AC_BE)) { ath_err(common, "Unable to setup xmit queue for BE traffic\n"); goto err; } if (!ath9k_htc_txq_setup(priv, WME_AC_BK)) { ath_err(common, "Unable to setup xmit queue for BK traffic\n"); goto err; } if (!ath9k_htc_txq_setup(priv, WME_AC_VI)) { ath_err(common, "Unable to setup xmit queue for VI traffic\n"); goto err; } if (!ath9k_htc_txq_setup(priv, WME_AC_VO)) { ath_err(common, "Unable to setup xmit queue for VO traffic\n"); goto err; } return 0; err: return -EINVAL; } static void ath9k_init_crypto(struct ath9k_htc_priv *priv) { struct ath_common *common = ath9k_hw_common(priv->ah); int i = 0; /* Get the hardware 
key cache size. */ common->keymax = AR_KEYTABLE_SIZE; if (priv->ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED; /* * Reset the key cache since some parts do not * reset the contents on initial power up. */ for (i = 0; i < common->keymax; i++) ath_hw_keyreset(common, (u16) i); } static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv) { if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) { priv->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_channels; priv->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ; priv->sbands[IEEE80211_BAND_2GHZ].n_channels = ARRAY_SIZE(ath9k_2ghz_channels); priv->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates; priv->sbands[IEEE80211_BAND_2GHZ].n_bitrates = ARRAY_SIZE(ath9k_legacy_rates); } if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) { priv->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_channels; priv->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ; priv->sbands[IEEE80211_BAND_5GHZ].n_channels = ARRAY_SIZE(ath9k_5ghz_channels); priv->sbands[IEEE80211_BAND_5GHZ].bitrates = ath9k_legacy_rates + 4; priv->sbands[IEEE80211_BAND_5GHZ].n_bitrates = ARRAY_SIZE(ath9k_legacy_rates) - 4; } } static void ath9k_init_misc(struct ath9k_htc_priv *priv) { struct ath_common *common = ath9k_hw_common(priv->ah); common->tx_chainmask = priv->ah->caps.tx_chainmask; common->rx_chainmask = priv->ah->caps.rx_chainmask; memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN); priv->ah->opmode = NL80211_IFTYPE_STATION; } static void ath9k_init_btcoex(struct ath9k_htc_priv *priv) { int qnum; switch (priv->ah->btcoex_hw.scheme) { case ATH_BTCOEX_CFG_NONE: break; case ATH_BTCOEX_CFG_3WIRE: priv->ah->btcoex_hw.btactive_gpio = 7; priv->ah->btcoex_hw.btpriority_gpio = 6; priv->ah->btcoex_hw.wlanactive_gpio = 8; priv->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; ath9k_hw_btcoex_init_3wire(priv->ah); ath_htc_init_btcoex_work(priv); qnum = priv->hwq_map[WME_AC_BE]; 
ath9k_hw_init_btcoex_hw(priv->ah, qnum); break; default: WARN_ON(1); break; } } static int ath9k_init_priv(struct ath9k_htc_priv *priv, u16 devid, char *product, u32 drv_info) { struct ath_hw *ah = NULL; struct ath_common *common; int i, ret = 0, csz = 0; priv->op_flags |= OP_INVALID; ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL); if (!ah) return -ENOMEM; ah->hw_version.devid = devid; ah->hw_version.subsysid = 0; /* FIXME */ ah->hw_version.usbdev = drv_info; ah->ah_flags |= AH_USE_EEPROM; ah->reg_ops.read = ath9k_regread; ah->reg_ops.multi_read = ath9k_multi_regread; ah->reg_ops.write = ath9k_regwrite; ah->reg_ops.enable_write_buffer = ath9k_enable_regwrite_buffer; ah->reg_ops.write_flush = ath9k_regwrite_flush; ah->reg_ops.rmw = ath9k_reg_rmw; priv->ah = ah; common = ath9k_hw_common(ah); common->ops = &ah->reg_ops; common->bus_ops = &ath9k_usb_bus_ops; common->ah = ah; common->hw = priv->hw; common->priv = priv; common->debug_mask = ath9k_debug; spin_lock_init(&priv->beacon_lock); spin_lock_init(&priv->tx.tx_lock); mutex_init(&priv->mutex); mutex_init(&priv->htc_pm_lock); tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet, (unsigned long)priv); tasklet_init(&priv->tx_failed_tasklet, ath9k_tx_failed_tasklet, (unsigned long)priv); INIT_DELAYED_WORK(&priv->ani_work, ath9k_htc_ani_work); INIT_WORK(&priv->ps_work, ath9k_ps_work); INIT_WORK(&priv->fatal_work, ath9k_fatal_work); setup_timer(&priv->tx.cleanup_timer, ath9k_htc_tx_cleanup_timer, (unsigned long)priv); /* * Cache line size is used to size and align various * structures used to communicate with the hardware. 
*/ ath_read_cachesize(common, &csz); common->cachelsz = csz << 2; /* convert to bytes */ ret = ath9k_hw_init(ah); if (ret) { ath_err(common, "Unable to initialize hardware; initialization status: %d\n", ret); goto err_hw; } ret = ath9k_init_queues(priv); if (ret) goto err_queues; for (i = 0; i < ATH9K_HTC_MAX_BCN_VIF; i++) priv->cur_beacon_conf.bslot[i] = NULL; ath9k_init_crypto(priv); ath9k_init_channels_rates(priv); ath9k_init_misc(priv); if (product && strncmp(product, ATH_HTC_BTCOEX_PRODUCT_ID, 5) == 0) { ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_3WIRE; ath9k_init_btcoex(priv); } return 0; err_queues: ath9k_hw_deinit(ah); err_hw: kfree(ah); priv->ah = NULL; return ret; } static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv, struct ieee80211_hw *hw) { struct ath_common *common = ath9k_hw_common(priv->ah); hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION | IEEE80211_HW_SPECTRUM_MGMT | IEEE80211_HW_HAS_RATE_CONTROL | IEEE80211_HW_RX_INCLUDES_FCS | IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_PS_NULLFUNC_STACK | IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_CLIENT); hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; hw->queues = 4; hw->channel_change_time = 5000; hw->max_listen_interval = 10; hw->vif_data_size = sizeof(struct ath9k_htc_vif); hw->sta_data_size = sizeof(struct ath9k_htc_sta); /* tx_frame_hdr is larger than tx_mgmt_hdr anyway */ hw->extra_tx_headroom = sizeof(struct tx_frame_hdr) + sizeof(struct htc_frame_hdr) + 4; if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->sbands[IEEE80211_BAND_2GHZ]; if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->sbands[IEEE80211_BAND_5GHZ]; if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_HT) { if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) setup_ht_cap(priv, 
&priv->sbands[IEEE80211_BAND_2GHZ].ht_cap); if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) setup_ht_cap(priv, &priv->sbands[IEEE80211_BAND_5GHZ].ht_cap); } SET_IEEE80211_PERM_ADDR(hw, common->macaddr); } static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv) { struct ieee80211_hw *hw = priv->hw; struct wmi_fw_version cmd_rsp; int ret; memset(&cmd_rsp, 0, sizeof(cmd_rsp)); WMI_CMD(WMI_GET_FW_VERSION); if (ret) return -EINVAL; priv->fw_version_major = be16_to_cpu(cmd_rsp.major); priv->fw_version_minor = be16_to_cpu(cmd_rsp.minor); snprintf(hw->wiphy->fw_version, ETHTOOL_BUSINFO_LEN, "%d.%d", priv->fw_version_major, priv->fw_version_minor); dev_info(priv->dev, "ath9k_htc: FW Version: %d.%d\n", priv->fw_version_major, priv->fw_version_minor); /* * Check if the available FW matches the driver's * required version. */ if (priv->fw_version_major != MAJOR_VERSION_REQ || priv->fw_version_minor != MINOR_VERSION_REQ) { dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n", MAJOR_VERSION_REQ, MINOR_VERSION_REQ); return -EINVAL; } return 0; } static int ath9k_init_device(struct ath9k_htc_priv *priv, u16 devid, char *product, u32 drv_info) { struct ieee80211_hw *hw = priv->hw; struct ath_common *common; struct ath_hw *ah; int error = 0; struct ath_regulatory *reg; char hw_name[64]; /* Bring up device */ error = ath9k_init_priv(priv, devid, product, drv_info); if (error != 0) goto err_init; ah = priv->ah; common = ath9k_hw_common(ah); ath9k_set_hw_capab(priv, hw); error = ath9k_init_firmware_version(priv); if (error != 0) goto err_fw; /* Initialize regulatory */ error = ath_regd_init(&common->regulatory, priv->hw->wiphy, ath9k_reg_notifier); if (error) goto err_regd; reg = &common->regulatory; /* Setup TX */ error = ath9k_tx_init(priv); if (error != 0) goto err_tx; /* Setup RX */ error = ath9k_rx_init(priv); if (error != 0) goto err_rx; #ifdef CONFIG_MAC80211_LEDS /* must be initialized before ieee80211_register_hw */ priv->led_cdev.default_trigger = 
ieee80211_create_tpt_led_trigger(priv->hw, IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_htc_tpt_blink, ARRAY_SIZE(ath9k_htc_tpt_blink)); #endif /* Register with mac80211 */ error = ieee80211_register_hw(hw); if (error) goto err_register; /* Handle world regulatory */ if (!ath_is_world_regd(reg)) { error = regulatory_hint(hw->wiphy, reg->alpha2); if (error) goto err_world; } error = ath9k_htc_init_debug(priv->ah); if (error) { ath_err(common, "Unable to create debugfs files\n"); goto err_world; } ath_dbg(common, ATH_DBG_CONFIG, "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, " "BE:%d, BK:%d, VI:%d, VO:%d\n", priv->wmi_cmd_ep, priv->beacon_ep, priv->cab_ep, priv->uapsd_ep, priv->mgmt_ep, priv->data_be_ep, priv->data_bk_ep, priv->data_vi_ep, priv->data_vo_ep); ath9k_hw_name(priv->ah, hw_name, sizeof(hw_name)); wiphy_info(hw->wiphy, "%s\n", hw_name); ath9k_init_leds(priv); ath9k_start_rfkill_poll(priv); return 0; err_world: ieee80211_unregister_hw(hw); err_register: ath9k_rx_cleanup(priv); err_rx: ath9k_tx_cleanup(priv); err_tx: /* Nothing */ err_regd: /* Nothing */ err_fw: ath9k_deinit_priv(priv); err_init: return error; } int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, u16 devid, char *product, u32 drv_info) { struct ieee80211_hw *hw; struct ath9k_htc_priv *priv; int ret; hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops); if (!hw) return -ENOMEM; priv = hw->priv; priv->hw = hw; priv->htc = htc_handle; priv->dev = dev; htc_handle->drv_priv = priv; SET_IEEE80211_DEV(hw, priv->dev); ret = ath9k_htc_wait_for_target(priv); if (ret) goto err_free; priv->wmi = ath9k_init_wmi(priv); if (!priv->wmi) { ret = -EINVAL; goto err_free; } ret = ath9k_init_htc_services(priv, devid, drv_info); if (ret) goto err_init; ret = ath9k_init_device(priv, devid, product, drv_info); if (ret) goto err_init; return 0; err_init: ath9k_deinit_wmi(priv); err_free: ieee80211_free_hw(hw); return ret; } void ath9k_htc_disconnect_device(struct htc_target 
*htc_handle, bool hotunplug) { if (htc_handle->drv_priv) { /* Check if the device has been yanked out. */ if (hotunplug) htc_handle->drv_priv->ah->ah_flags |= AH_UNPLUGGED; ath9k_deinit_device(htc_handle->drv_priv); ath9k_deinit_wmi(htc_handle->drv_priv); ieee80211_free_hw(htc_handle->drv_priv->hw); } } #ifdef CONFIG_PM void ath9k_htc_suspend(struct htc_target *htc_handle) { ath9k_htc_setpower(htc_handle->drv_priv, ATH9K_PM_FULL_SLEEP); } int ath9k_htc_resume(struct htc_target *htc_handle) { struct ath9k_htc_priv *priv = htc_handle->drv_priv; int ret; ret = ath9k_htc_wait_for_target(priv); if (ret) return ret; ret = ath9k_init_htc_services(priv, priv->ah->hw_version.devid, priv->ah->hw_version.usbdev); return ret; } #endif static int __init ath9k_htc_init(void) { if (ath9k_hif_usb_init() < 0) { printk(KERN_ERR "ath9k_htc: No USB devices found," " driver not installed.\n"); return -ENODEV; } return 0; } module_init(ath9k_htc_init); static void __exit ath9k_htc_exit(void) { ath9k_hif_usb_exit(); printk(KERN_INFO "ath9k_htc: Driver unloaded\n"); } module_exit(ath9k_htc_exit);
gpl-2.0
blkredstarV/kernel_asus_moorefield
drivers/clocksource/vt8500_timer.c
1973
4902
/*
 * arch/arm/mach-vt8500/timer.c
 *
 * Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz>
 * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/*
 * This file is copied and modified from the original timer.c provided by
 * Alexey Charkov. Minor changes have been made for Device Tree Support.
 */

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <asm/mach/time.h>

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#define VT8500_TIMER_OFFSET	0x0100
#define VT8500_TIMER_HZ		3000000
#define TIMER_MATCH_VAL		0x0000
#define TIMER_COUNT_VAL		0x0010
#define TIMER_STATUS_VAL	0x0014
#define TIMER_IER_VAL		0x001c		/* interrupt enable */
#define TIMER_CTRL_VAL		0x0020
#define TIMER_AS_VAL		0x0024		/* access status */
#define TIMER_COUNT_R_ACTIVE	(1 << 5)	/* not ready for read */
#define TIMER_COUNT_W_ACTIVE	(1 << 4)	/* not ready for write */
#define TIMER_MATCH_W_ACTIVE	(1 << 0)	/* not ready for write */

/*
 * Minimum guaranteed distance (in timer cycles) between "now" and a
 * freshly-programmed match value.  A smaller gap risks programming a
 * match that the free-running counter has already passed by the time
 * the write completes, in which case no interrupt ever fires and the
 * system hangs.  16 proved too small in practice; 48 is the value
 * adopted upstream.
 */
#define MIN_OSCR_DELTA		48

#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)

static void __iomem *regbase;

/*
 * Read the free-running counter.
 *
 * The counter cannot be read directly: writing 3 to TIMER_CTRL_VAL
 * requests a latched snapshot, then we must poll the access-status
 * register until the read port is ready.  The poll is bounded
 * (~10 ms of busy loops) so a wedged timer cannot hang the caller;
 * if the bound expires we return whatever the count register holds.
 */
static cycle_t vt8500_timer_read(struct clocksource *cs)
{
	int loops = msecs_to_loops(10);
	writel(3, regbase + TIMER_CTRL_VAL);
	while ((readl((regbase + TIMER_AS_VAL)) & TIMER_COUNT_R_ACTIVE)
						&& --loops)
		cpu_relax();
	return readl(regbase + TIMER_COUNT_VAL);
}

static struct clocksource clocksource = {
	.name           = "vt8500_timer",
	.rating         = 200,
	.read           = vt8500_timer_read,
	.mask           = CLOCKSOURCE_MASK(32),
	.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
 * Program the match register to fire @cycles from now.
 *
 * Returns -ETIME (so the clockevents core retries with a larger delta)
 * if, after the write, the match is already within MIN_OSCR_DELTA of
 * the current count - i.e. we may have lost the race with the counter.
 * The interrupt is only enabled once we know the match is safely in
 * the future.
 */
static int vt8500_timer_set_next_event(unsigned long cycles,
				    struct clock_event_device *evt)
{
	int loops = msecs_to_loops(10);
	cycle_t alarm = clocksource.read(&clocksource) + cycles;
	while ((readl(regbase + TIMER_AS_VAL) & TIMER_MATCH_W_ACTIVE)
						&& --loops)
		cpu_relax();
	writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);

	if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA)
		return -ETIME;

	writel(1, regbase + TIMER_IER_VAL);

	return 0;
}

/*
 * Mode switching: this timer only supports one-shot operation, so the
 * periodic case is a no-op (the core emulates periodicity via
 * set_next_event).  On shutdown/unused we stop the counter and mask
 * the interrupt.
 */
static void vt8500_timer_set_mode(enum clock_event_mode mode,
			      struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_RESUME:
	case CLOCK_EVT_MODE_PERIODIC:
		break;
	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		writel(readl(regbase + TIMER_CTRL_VAL) | 1,
			regbase + TIMER_CTRL_VAL);
		writel(0, regbase + TIMER_IER_VAL);
		break;
	}
}

static struct clock_event_device clockevent = {
	.name           = "vt8500_timer",
	.features       = CLOCK_EVT_FEAT_ONESHOT,
	.rating         = 200,
	.set_next_event = vt8500_timer_set_next_event,
	.set_mode       = vt8500_timer_set_mode,
};

/* Ack the match interrupt (write-1-to-clear all four status bits)
 * and hand off to the clockevents core. */
static irqreturn_t vt8500_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	writel(0xf, regbase + TIMER_STATUS_VAL);
	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct irqaction irq = {
	.name    = "vt8500_timer",
	.flags   = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
	.handler = vt8500_timer_interrupt,
	.dev_id  = &clockevent,
};

/*
 * Device-tree init: map the register window, grab the interrupt,
 * reset the hardware (counter stopped, status cleared, match parked
 * at ~0 so it cannot fire spuriously), then register the clocksource
 * and clockevent with the cores.
 */
static void __init vt8500_timer_init(struct device_node *np)
{
	int timer_irq;

	regbase = of_iomap(np, 0);
	if (!regbase) {
		pr_err("%s: Missing iobase description in Device Tree\n",
								__func__);
		of_node_put(np);
		return;
	}
	timer_irq = irq_of_parse_and_map(np, 0);
	if (!timer_irq) {
		pr_err("%s: Missing irq description in Device Tree\n",
								__func__);
		of_node_put(np);
		return;
	}

	writel(1, regbase + TIMER_CTRL_VAL);
	writel(0xf, regbase + TIMER_STATUS_VAL);
	writel(~0, regbase + TIMER_MATCH_VAL);

	if (clocksource_register_hz(&clocksource, VT8500_TIMER_HZ))
		pr_err("%s: clocksource_register failed for %s\n",
					__func__, clocksource.name);

	clockevent.cpumask = cpumask_of(0);

	if (setup_irq(timer_irq, &irq))
		pr_err("%s: setup_irq failed for %s\n", __func__,
							clockevent.name);
	/* min_delta must leave room for the MIN_OSCR_DELTA race check
	 * in set_next_event, hence twice that value. */
	clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
					MIN_OSCR_DELTA * 2, 0xf0000000);
}

CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
gpl-2.0
SM-G920P/SM-N920
drivers/gpu/drm/exynos/exynos_drm_encoder.c
2229
14141
/* exynos_drm_encoder.c * * Copyright (c) 2011 Samsung Electronics Co., Ltd. * Authors: * Inki Dae <inki.dae@samsung.com> * Joonyoung Shim <jy0922.shim@samsung.com> * Seung-Woo Kim <sw0312.kim@samsung.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include "exynos_drm_drv.h" #include "exynos_drm_encoder.h" #include "exynos_drm_connector.h" #define to_exynos_encoder(x) container_of(x, struct exynos_drm_encoder,\ drm_encoder) /* * exynos specific encoder structure. * * @drm_encoder: encoder object. * @manager: specific encoder has its own manager to control a hardware * appropriately and we can access a hardware drawing on this manager. * @dpms: store the encoder dpms value. * @updated: indicate whether overlay data updating is needed or not. 
*/ struct exynos_drm_encoder { struct drm_crtc *old_crtc; struct drm_encoder drm_encoder; struct exynos_drm_manager *manager; int dpms; bool updated; }; static void exynos_drm_connector_power(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct drm_connector *connector; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (exynos_drm_best_encoder(connector) == encoder) { DRM_DEBUG_KMS("connector[%d] dpms[%d]\n", connector->base.id, mode); exynos_drm_display_power(connector, mode); } } } static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); struct exynos_drm_manager_ops *manager_ops = manager->ops; struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode); if (exynos_encoder->dpms == mode) { DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n"); return; } mutex_lock(&dev->struct_mutex); switch (mode) { case DRM_MODE_DPMS_ON: if (manager_ops && manager_ops->apply) if (!exynos_encoder->updated) manager_ops->apply(manager->dev); exynos_drm_connector_power(encoder, mode); exynos_encoder->dpms = mode; break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: exynos_drm_connector_power(encoder, mode); exynos_encoder->dpms = mode; exynos_encoder->updated = false; break; default: DRM_ERROR("unspecified mode %d\n", mode); break; } mutex_unlock(&dev->struct_mutex); } static bool exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_connector *connector; struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); struct exynos_drm_manager_ops *manager_ops = manager->ops; DRM_DEBUG_KMS("%s\n", __FILE__); list_for_each_entry(connector, 
&dev->mode_config.connector_list, head) { if (connector->encoder == encoder) if (manager_ops && manager_ops->mode_fixup) manager_ops->mode_fixup(manager->dev, connector, mode, adjusted_mode); } return true; } static void disable_plane_to_crtc(struct drm_device *dev, struct drm_crtc *old_crtc, struct drm_crtc *new_crtc) { struct drm_plane *plane; /* * if old_crtc isn't same as encoder->crtc then it means that * user changed crtc id to another one so the plane to old_crtc * should be disabled and plane->crtc should be set to new_crtc * (encoder->crtc) */ list_for_each_entry(plane, &dev->mode_config.plane_list, head) { if (plane->crtc == old_crtc) { /* * do not change below call order. * * plane->funcs->disable_plane call checks * if encoder->crtc is same as plane->crtc and if same * then overlay_ops->disable callback will be called * to diasble current hw overlay so plane->crtc should * have new_crtc because new_crtc was set to * encoder->crtc in advance. */ plane->crtc = new_crtc; plane->funcs->disable_plane(plane); } } } static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_connector *connector; struct exynos_drm_manager *manager; struct exynos_drm_manager_ops *manager_ops; DRM_DEBUG_KMS("%s\n", __FILE__); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (connector->encoder == encoder) { struct exynos_drm_encoder *exynos_encoder; exynos_encoder = to_exynos_encoder(encoder); if (exynos_encoder->old_crtc != encoder->crtc && exynos_encoder->old_crtc) { /* * disable a plane to old crtc and change * crtc of the plane to new one. 
*/ disable_plane_to_crtc(dev, exynos_encoder->old_crtc, encoder->crtc); } manager = exynos_drm_get_manager(encoder); manager_ops = manager->ops; if (manager_ops && manager_ops->mode_set) manager_ops->mode_set(manager->dev, adjusted_mode); exynos_encoder->old_crtc = encoder->crtc; } } } static void exynos_drm_encoder_prepare(struct drm_encoder *encoder) { DRM_DEBUG_KMS("%s\n", __FILE__); /* drm framework doesn't check NULL. */ } static void exynos_drm_encoder_commit(struct drm_encoder *encoder) { struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); struct exynos_drm_manager *manager = exynos_encoder->manager; struct exynos_drm_manager_ops *manager_ops = manager->ops; DRM_DEBUG_KMS("%s\n", __FILE__); if (manager_ops && manager_ops->commit) manager_ops->commit(manager->dev); /* * this will avoid one issue that overlay data is updated to * real hardware two times. * And this variable will be used to check if the data was * already updated or not by exynos_drm_encoder_dpms function. */ exynos_encoder->updated = true; /* * In case of setcrtc, there is no way to update encoder's dpms * so update it here. */ exynos_encoder->dpms = DRM_MODE_DPMS_ON; } void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb) { struct exynos_drm_encoder *exynos_encoder; struct exynos_drm_manager_ops *ops; struct drm_device *dev = fb->dev; struct drm_encoder *encoder; /* * make sure that overlay data are updated to real hardware * for all encoders. */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { exynos_encoder = to_exynos_encoder(encoder); ops = exynos_encoder->manager->ops; /* * wait for vblank interrupt * - this makes sure that overlay data are updated to * real hardware. 
*/ if (ops->wait_for_vblank) ops->wait_for_vblank(exynos_encoder->manager->dev); } } static void exynos_drm_encoder_disable(struct drm_encoder *encoder) { struct drm_plane *plane; struct drm_device *dev = encoder->dev; exynos_drm_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); /* all planes connected to this encoder should be also disabled. */ list_for_each_entry(plane, &dev->mode_config.plane_list, head) { if (plane->crtc == encoder->crtc) plane->funcs->disable_plane(plane); } } static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = { .dpms = exynos_drm_encoder_dpms, .mode_fixup = exynos_drm_encoder_mode_fixup, .mode_set = exynos_drm_encoder_mode_set, .prepare = exynos_drm_encoder_prepare, .commit = exynos_drm_encoder_commit, .disable = exynos_drm_encoder_disable, }; static void exynos_drm_encoder_destroy(struct drm_encoder *encoder) { struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); DRM_DEBUG_KMS("%s\n", __FILE__); exynos_encoder->manager->pipe = -1; drm_encoder_cleanup(encoder); kfree(exynos_encoder); } static struct drm_encoder_funcs exynos_encoder_funcs = { .destroy = exynos_drm_encoder_destroy, }; static unsigned int exynos_drm_encoder_clones(struct drm_encoder *encoder) { struct drm_encoder *clone; struct drm_device *dev = encoder->dev; struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); struct exynos_drm_display_ops *display_ops = exynos_encoder->manager->display_ops; unsigned int clone_mask = 0; int cnt = 0; list_for_each_entry(clone, &dev->mode_config.encoder_list, head) { switch (display_ops->type) { case EXYNOS_DISPLAY_TYPE_LCD: case EXYNOS_DISPLAY_TYPE_HDMI: case EXYNOS_DISPLAY_TYPE_VIDI: clone_mask |= (1 << (cnt++)); break; default: continue; } } return clone_mask; } void exynos_drm_encoder_setup(struct drm_device *dev) { struct drm_encoder *encoder; DRM_DEBUG_KMS("%s\n", __FILE__); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) encoder->possible_clones = 
exynos_drm_encoder_clones(encoder); } struct drm_encoder * exynos_drm_encoder_create(struct drm_device *dev, struct exynos_drm_manager *manager, unsigned int possible_crtcs) { struct drm_encoder *encoder; struct exynos_drm_encoder *exynos_encoder; DRM_DEBUG_KMS("%s\n", __FILE__); if (!manager || !possible_crtcs) return NULL; if (!manager->dev) return NULL; exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL); if (!exynos_encoder) { DRM_ERROR("failed to allocate encoder\n"); return NULL; } exynos_encoder->dpms = DRM_MODE_DPMS_OFF; exynos_encoder->manager = manager; encoder = &exynos_encoder->drm_encoder; encoder->possible_crtcs = possible_crtcs; DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); drm_encoder_init(dev, encoder, &exynos_encoder_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &exynos_encoder_helper_funcs); DRM_DEBUG_KMS("encoder has been created\n"); return encoder; } struct exynos_drm_manager *exynos_drm_get_manager(struct drm_encoder *encoder) { return to_exynos_encoder(encoder)->manager; } void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data, void (*fn)(struct drm_encoder *, void *)) { struct drm_device *dev = crtc->dev; struct drm_encoder *encoder; struct exynos_drm_private *private = dev->dev_private; struct exynos_drm_manager *manager; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { /* * if crtc is detached from encoder, check pipe, * otherwise check crtc attached to encoder */ if (!encoder->crtc) { manager = to_exynos_encoder(encoder)->manager; if (manager->pipe < 0 || private->crtc[manager->pipe] != crtc) continue; } else { if (encoder->crtc != crtc) continue; } fn(encoder, data); } } void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data) { struct exynos_drm_manager *manager = to_exynos_encoder(encoder)->manager; struct exynos_drm_manager_ops *manager_ops = manager->ops; int crtc = *(int *)data; if (manager->pipe != crtc) return; if (manager_ops->enable_vblank) 
manager_ops->enable_vblank(manager->dev); } void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data) { struct exynos_drm_manager *manager = to_exynos_encoder(encoder)->manager; struct exynos_drm_manager_ops *manager_ops = manager->ops; int crtc = *(int *)data; if (manager->pipe != crtc) return; if (manager_ops->disable_vblank) manager_ops->disable_vblank(manager->dev); } void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data) { struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); struct exynos_drm_manager *manager = exynos_encoder->manager; struct exynos_drm_manager_ops *manager_ops = manager->ops; int mode = *(int *)data; DRM_DEBUG_KMS("%s\n", __FILE__); if (manager_ops && manager_ops->dpms) manager_ops->dpms(manager->dev, mode); /* * if this condition is ok then it means that the crtc is already * detached from encoder and last function for detaching is properly * done, so clear pipe from manager to prevent repeated call. */ if (mode > DRM_MODE_DPMS_ON) { if (!encoder->crtc) manager->pipe = -1; } } void exynos_drm_encoder_crtc_pipe(struct drm_encoder *encoder, void *data) { struct exynos_drm_manager *manager = to_exynos_encoder(encoder)->manager; int pipe = *(int *)data; DRM_DEBUG_KMS("%s\n", __FILE__); /* * when crtc is detached from encoder, this pipe is used * to select manager operation */ manager->pipe = pipe; } void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data) { struct exynos_drm_manager *manager = to_exynos_encoder(encoder)->manager; struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; struct exynos_drm_overlay *overlay = data; DRM_DEBUG_KMS("%s\n", __FILE__); if (overlay_ops && overlay_ops->mode_set) overlay_ops->mode_set(manager->dev, overlay); } void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data) { struct exynos_drm_manager *manager = to_exynos_encoder(encoder)->manager; struct exynos_drm_overlay_ops *overlay_ops = 
manager->overlay_ops; int zpos = DEFAULT_ZPOS; DRM_DEBUG_KMS("%s\n", __FILE__); if (data) zpos = *(int *)data; if (overlay_ops && overlay_ops->commit) overlay_ops->commit(manager->dev, zpos); } void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data) { struct exynos_drm_manager *manager = to_exynos_encoder(encoder)->manager; struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; int zpos = DEFAULT_ZPOS; DRM_DEBUG_KMS("%s\n", __FILE__); if (data) zpos = *(int *)data; if (overlay_ops && overlay_ops->enable) overlay_ops->enable(manager->dev, zpos); } void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data) { struct exynos_drm_manager *manager = to_exynos_encoder(encoder)->manager; struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; int zpos = DEFAULT_ZPOS; DRM_DEBUG_KMS("%s\n", __FILE__); if (data) zpos = *(int *)data; if (overlay_ops && overlay_ops->disable) overlay_ops->disable(manager->dev, zpos); }
gpl-2.0
jakwu/linux-imx
drivers/usb/serial/omninet.c
2229
7090
/* * USB ZyXEL omni.net LCD PLUS driver * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * See Documentation/usb/usb-serial.txt for more information on using this * driver * * Please report both successes and troubles to the author at omninet@kroah.com */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> #define DRIVER_AUTHOR "Alessandro Zummo" #define DRIVER_DESC "USB ZyXEL omni.net LCD PLUS Driver" #define ZYXEL_VENDOR_ID 0x0586 #define ZYXEL_OMNINET_ID 0x1000 /* This one seems to be a re-branded ZyXEL device */ #define BT_IGNITIONPRO_ID 0x2000 /* function prototypes */ static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port); static void omninet_process_read_urb(struct urb *urb); static void omninet_write_bulk_callback(struct urb *urb); static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); static int omninet_write_room(struct tty_struct *tty); static void omninet_disconnect(struct usb_serial *serial); static int omninet_port_probe(struct usb_serial_port *port); static int omninet_port_remove(struct usb_serial_port *port); static const struct usb_device_id id_table[] = { { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) }, { USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); static struct usb_serial_driver zyxel_omninet_device = { .driver = { .owner = THIS_MODULE, .name = "omninet", }, .description = "ZyXEL - omni.net lcd plus usb", .id_table = id_table, .num_ports = 1, .port_probe = omninet_port_probe, .port_remove = omninet_port_remove, .open = 
omninet_open, .write = omninet_write, .write_room = omninet_write_room, .write_bulk_callback = omninet_write_bulk_callback, .process_read_urb = omninet_process_read_urb, .disconnect = omninet_disconnect, }; static struct usb_serial_driver * const serial_drivers[] = { &zyxel_omninet_device, NULL }; /* * The protocol. * * The omni.net always exchange 64 bytes of data with the host. The first * four bytes are the control header. * * oh_seq is a sequence number. Don't know if/how it's used. * oh_len is the length of the data bytes in the packet. * oh_xxx Bit-mapped, related to handshaking and status info. * I normally set it to 0x03 in transmitted frames. * 7: Active when the TA is in a CONNECTed state. * 6: unknown * 5: handshaking, unknown * 4: handshaking, unknown * 3: unknown, usually 0 * 2: unknown, usually 0 * 1: handshaking, unknown, usually set to 1 in transmitted frames * 0: handshaking, unknown, usually set to 1 in transmitted frames * oh_pad Probably a pad byte. * * After the header you will find data bytes if oh_len was greater than zero. 
*/ struct omninet_header { __u8 oh_seq; __u8 oh_len; __u8 oh_xxx; __u8 oh_pad; }; struct omninet_data { __u8 od_outseq; /* Sequence number for bulk_out URBs */ }; static int omninet_port_probe(struct usb_serial_port *port) { struct omninet_data *od; od = kzalloc(sizeof(*od), GFP_KERNEL); if (!od) return -ENOMEM; usb_set_serial_port_data(port, od); return 0; } static int omninet_port_remove(struct usb_serial_port *port) { struct omninet_data *od; od = usb_get_serial_port_data(port); kfree(od); return 0; } static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct usb_serial_port *wport; wport = serial->port[1]; tty_port_tty_set(&wport->port, tty); return usb_serial_generic_open(tty, port); } #define OMNINET_HEADERLEN 4 #define OMNINET_BULKOUTSIZE 64 #define OMNINET_PAYLOADSIZE (OMNINET_BULKOUTSIZE - OMNINET_HEADERLEN) static void omninet_process_read_urb(struct urb *urb) { struct usb_serial_port *port = urb->context; const struct omninet_header *hdr = urb->transfer_buffer; const unsigned char *data; size_t data_len; if (urb->actual_length <= OMNINET_HEADERLEN || !hdr->oh_len) return; data = (char *)urb->transfer_buffer + OMNINET_HEADERLEN; data_len = min_t(size_t, urb->actual_length - OMNINET_HEADERLEN, hdr->oh_len); tty_insert_flip_string(&port->port, data, data_len); tty_flip_buffer_push(&port->port); } static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count) { struct usb_serial *serial = port->serial; struct usb_serial_port *wport = serial->port[1]; struct omninet_data *od = usb_get_serial_port_data(port); struct omninet_header *header = (struct omninet_header *) wport->write_urb->transfer_buffer; int result; if (count == 0) { dev_dbg(&port->dev, "%s - write request of 0 bytes\n", __func__); return 0; } if (!test_and_clear_bit(0, &port->write_urbs_free)) { dev_dbg(&port->dev, "%s - already writing\n", __func__); return 0; } count = 
(count > OMNINET_PAYLOADSIZE) ? OMNINET_PAYLOADSIZE : count; memcpy(wport->write_urb->transfer_buffer + OMNINET_HEADERLEN, buf, count); usb_serial_debug_data(&port->dev, __func__, count, wport->write_urb->transfer_buffer); header->oh_seq = od->od_outseq++; header->oh_len = count; header->oh_xxx = 0x03; header->oh_pad = 0x00; /* send the data out the bulk port, always 64 bytes */ wport->write_urb->transfer_buffer_length = OMNINET_BULKOUTSIZE; result = usb_submit_urb(wport->write_urb, GFP_ATOMIC); if (result) { set_bit(0, &wport->write_urbs_free); dev_err_console(port, "%s - failed submitting write urb, error %d\n", __func__, result); } else result = count; return result; } static int omninet_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct usb_serial *serial = port->serial; struct usb_serial_port *wport = serial->port[1]; int room = 0; /* Default: no room */ if (test_bit(0, &wport->write_urbs_free)) room = wport->bulk_out_size - OMNINET_HEADERLEN; dev_dbg(&port->dev, "%s - returns %d\n", __func__, room); return room; } static void omninet_write_bulk_callback(struct urb *urb) { /* struct omninet_header *header = (struct omninet_header *) urb->transfer_buffer; */ struct usb_serial_port *port = urb->context; int status = urb->status; set_bit(0, &port->write_urbs_free); if (status) { dev_dbg(&port->dev, "%s - nonzero write bulk status received: %d\n", __func__, status); return; } usb_serial_port_softint(port); } static void omninet_disconnect(struct usb_serial *serial) { struct usb_serial_port *wport = serial->port[1]; usb_kill_urb(wport->write_urb); } module_usb_serial_driver(serial_drivers, id_table); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
faux123/private-tuna
drivers/input/keyboard/jornada720_kbd.c
4021
5675
/* * drivers/input/keyboard/jornada720_kbd.c * * HP Jornada 720 keyboard platform driver * * Copyright (C) 2006/2007 Kristoffer Ericson <Kristoffer.Ericson@Gmail.com> * * Copyright (C) 2006 jornada 720 kbd driver by Filip Zyzniewsk <Filip.Zyzniewski@tefnet.plX * based on (C) 2004 jornada 720 kbd driver by Alex Lange <chicken@handhelds.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/device.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/input.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <mach/jornada720.h> #include <mach/hardware.h> MODULE_AUTHOR("Kristoffer Ericson <Kristoffer.Ericson@gmail.com>"); MODULE_DESCRIPTION("HP Jornada 710/720/728 keyboard driver"); MODULE_LICENSE("GPL v2"); static unsigned short jornada_std_keymap[128] = { /* ROW */ 0, KEY_ESC, KEY_F1, KEY_F2, KEY_F3, KEY_F4, KEY_F5, KEY_F6, KEY_F7, /* #1 */ KEY_F8, KEY_F9, KEY_F10, KEY_F11, KEY_VOLUMEUP, KEY_VOLUMEDOWN, KEY_MUTE, /* -> */ 0, KEY_1, KEY_2, KEY_3, KEY_4, KEY_5, KEY_6, KEY_7, KEY_8, KEY_9, /* #2 */ KEY_0, KEY_MINUS, KEY_EQUAL,0, 0, 0, /* -> */ 0, KEY_Q, KEY_W, KEY_E, KEY_R, KEY_T, KEY_Y, KEY_U, KEY_I, KEY_O, /* #3 */ KEY_P, KEY_BACKSLASH, KEY_BACKSPACE, 0, 0, 0, /* -> */ 0, KEY_A, KEY_S, KEY_D, KEY_F, KEY_G, KEY_H, KEY_J, KEY_K, KEY_L, /* #4 */ KEY_SEMICOLON, KEY_LEFTBRACE, KEY_RIGHTBRACE, 0, 0, 0, /* -> */ 0, KEY_Z, KEY_X, KEY_C, KEY_V, KEY_B, KEY_N, KEY_M, KEY_COMMA, /* #5 */ KEY_DOT, KEY_KPMINUS, KEY_APOSTROPHE, KEY_ENTER, 0, 0,0, /* -> */ 0, KEY_TAB, 0, KEY_LEFTSHIFT, 0, KEY_APOSTROPHE, 0, 0, 0, 0, /* #6 */ KEY_UP, 0, KEY_RIGHTSHIFT, 0, 0, 0,0, 0, 0, 0, 0, KEY_LEFTALT, KEY_GRAVE, /* -> */ 0, 0, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0,0, KEY_KPASTERISK, /* -> */ KEY_LEFTCTRL, 0, KEY_SPACE, 0, 0, 0, 
KEY_SLASH, KEY_DELETE, 0, 0, /* -> */ 0, 0, 0, KEY_POWER, /* -> */ }; struct jornadakbd { unsigned short keymap[ARRAY_SIZE(jornada_std_keymap)]; struct input_dev *input; }; static irqreturn_t jornada720_kbd_interrupt(int irq, void *dev_id) { struct platform_device *pdev = dev_id; struct jornadakbd *jornadakbd = platform_get_drvdata(pdev); struct input_dev *input = jornadakbd->input; u8 count, kbd_data, scan_code; /* startup ssp with spinlock */ jornada_ssp_start(); if (jornada_ssp_inout(GETSCANKEYCODE) != TXDUMMY) { printk(KERN_DEBUG "jornada720_kbd: " "GetKeycode command failed with ETIMEDOUT, " "flushed bus\n"); } else { /* How many keycodes are waiting for us? */ count = jornada_ssp_byte(TXDUMMY); /* Lets drag them out one at a time */ while (count--) { /* Exchange TxDummy for location (keymap[kbddata]) */ kbd_data = jornada_ssp_byte(TXDUMMY); scan_code = kbd_data & 0x7f; input_event(input, EV_MSC, MSC_SCAN, scan_code); input_report_key(input, jornadakbd->keymap[scan_code], !(kbd_data & 0x80)); input_sync(input); } } /* release spinlock and turn off ssp */ jornada_ssp_end(); return IRQ_HANDLED; }; static int __devinit jornada720_kbd_probe(struct platform_device *pdev) { struct jornadakbd *jornadakbd; struct input_dev *input_dev; int i, err; jornadakbd = kzalloc(sizeof(struct jornadakbd), GFP_KERNEL); input_dev = input_allocate_device(); if (!jornadakbd || !input_dev) { err = -ENOMEM; goto fail1; } platform_set_drvdata(pdev, jornadakbd); memcpy(jornadakbd->keymap, jornada_std_keymap, sizeof(jornada_std_keymap)); jornadakbd->input = input_dev; input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP); input_dev->name = "HP Jornada 720 keyboard"; input_dev->phys = "jornadakbd/input0"; input_dev->keycode = jornadakbd->keymap; input_dev->keycodesize = sizeof(unsigned short); input_dev->keycodemax = ARRAY_SIZE(jornada_std_keymap); input_dev->id.bustype = BUS_HOST; input_dev->dev.parent = &pdev->dev; for (i = 0; i < ARRAY_SIZE(jornadakbd->keymap); i++) 
__set_bit(jornadakbd->keymap[i], input_dev->keybit); __clear_bit(KEY_RESERVED, input_dev->keybit); input_set_capability(input_dev, EV_MSC, MSC_SCAN); err = request_irq(IRQ_GPIO0, jornada720_kbd_interrupt, IRQF_DISABLED | IRQF_TRIGGER_FALLING, "jornadakbd", pdev); if (err) { printk(KERN_INFO "jornadakbd720_kbd: Unable to grab IRQ\n"); goto fail1; } err = input_register_device(jornadakbd->input); if (err) goto fail2; return 0; fail2: /* IRQ, DEVICE, MEMORY */ free_irq(IRQ_GPIO0, pdev); fail1: /* DEVICE, MEMORY */ platform_set_drvdata(pdev, NULL); input_free_device(input_dev); kfree(jornadakbd); return err; }; static int __devexit jornada720_kbd_remove(struct platform_device *pdev) { struct jornadakbd *jornadakbd = platform_get_drvdata(pdev); free_irq(IRQ_GPIO0, pdev); platform_set_drvdata(pdev, NULL); input_unregister_device(jornadakbd->input); kfree(jornadakbd); return 0; } /* work with hotplug and coldplug */ MODULE_ALIAS("platform:jornada720_kbd"); static struct platform_driver jornada720_kbd_driver = { .driver = { .name = "jornada720_kbd", .owner = THIS_MODULE, }, .probe = jornada720_kbd_probe, .remove = __devexit_p(jornada720_kbd_remove), }; static int __init jornada720_kbd_init(void) { return platform_driver_register(&jornada720_kbd_driver); } static void __exit jornada720_kbd_exit(void) { platform_driver_unregister(&jornada720_kbd_driver); } module_init(jornada720_kbd_init); module_exit(jornada720_kbd_exit);
gpl-2.0
lloydchang/ubuntu-oneiric
drivers/net/wan/hdlc_raw_eth.c
4277
3155
/* * Generic HDLC support routines for Linux * HDLC Ethernet emulation support * * Copyright (C) 2002-2006 Krzysztof Halasa <khc@pm.waw.pl> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. */ #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/gfp.h> #include <linux/hdlc.h> #include <linux/if_arp.h> #include <linux/inetdevice.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pkt_sched.h> #include <linux/poll.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr); static netdev_tx_t eth_tx(struct sk_buff *skb, struct net_device *dev) { int pad = ETH_ZLEN - skb->len; if (pad > 0) { /* Pad the frame with zeros */ int len = skb->len; if (skb_tailroom(skb) < pad) if (pskb_expand_head(skb, 0, pad, GFP_ATOMIC)) { dev->stats.tx_dropped++; dev_kfree_skb(skb); return 0; } skb_put(skb, pad); memset(skb->data + len, 0, pad); } return dev_to_hdlc(dev)->xmit(skb, dev); } static struct hdlc_proto proto = { .type_trans = eth_type_trans, .xmit = eth_tx, .ioctl = raw_eth_ioctl, .module = THIS_MODULE, }; static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr) { raw_hdlc_proto __user *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc; const size_t size = sizeof(raw_hdlc_proto); raw_hdlc_proto new_settings; hdlc_device *hdlc = dev_to_hdlc(dev); int result, old_qlen; switch (ifr->ifr_settings.type) { case IF_GET_PROTO: if (dev_to_hdlc(dev)->proto != &proto) return -EINVAL; ifr->ifr_settings.type = IF_PROTO_HDLC_ETH; if (ifr->ifr_settings.size < size) { ifr->ifr_settings.size = size; /* data size wanted */ return -ENOBUFS; } if (copy_to_user(raw_s, hdlc->state, size)) return -EFAULT; return 0; case IF_PROTO_HDLC_ETH: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (dev->flags & IFF_UP) return -EBUSY; if 
(copy_from_user(&new_settings, raw_s, size)) return -EFAULT; if (new_settings.encoding == ENCODING_DEFAULT) new_settings.encoding = ENCODING_NRZ; if (new_settings.parity == PARITY_DEFAULT) new_settings.parity = PARITY_CRC16_PR1_CCITT; result = hdlc->attach(dev, new_settings.encoding, new_settings.parity); if (result) return result; result = attach_hdlc_protocol(dev, &proto, sizeof(raw_hdlc_proto)); if (result) return result; memcpy(hdlc->state, &new_settings, size); old_qlen = dev->tx_queue_len; ether_setup(dev); dev->tx_queue_len = old_qlen; random_ether_addr(dev->dev_addr); netif_dormant_off(dev); return 0; } return -EINVAL; } static int __init mod_init(void) { register_hdlc_protocol(&proto); return 0; } static void __exit mod_exit(void) { unregister_hdlc_protocol(&proto); } module_init(mod_init); module_exit(mod_exit); MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); MODULE_DESCRIPTION("Ethernet encapsulation support for generic HDLC"); MODULE_LICENSE("GPL v2");
gpl-2.0
MikeC84/mac_kernel_moto_minnow
drivers/media/pci/zoran/zr36016.c
4533
14208
/* * Zoran ZR36016 basic configuration functions * * Copyright (C) 2001 Wolfgang Scherr <scherr@net4you.at> * * $Id: zr36016.c,v 1.1.2.14 2003/08/20 19:46:55 rbultje Exp $ * * ------------------------------------------------------------------------ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * ------------------------------------------------------------------------ */ #define ZR016_VERSION "v0.7" #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/wait.h> /* I/O commands, error codes */ #include <asm/io.h> /* v4l API */ /* headerfile of this module */ #include "zr36016.h" /* codec io API */ #include "videocodec.h" /* it doesn't make sense to have more than 20 or so, just to prevent some unwanted loops */ #define MAX_CODECS 20 /* amount of chips attached via this driver */ static int zr36016_codecs; /* debugging is available via module parameter */ static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-4)"); #define dprintk(num, format, args...) 
\ do { \ if (debug >= num) \ printk(format, ##args); \ } while (0) /* ========================================================================= Local hardware I/O functions: read/write via codec layer (registers are located in the master device) ========================================================================= */ /* read and write functions */ static u8 zr36016_read (struct zr36016 *ptr, u16 reg) { u8 value = 0; // just in case something is wrong... if (ptr->codec->master_data->readreg) value = (ptr->codec->master_data-> readreg(ptr->codec, reg)) & 0xFF; else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing read!\n", ptr->name); dprintk(4, "%s: reading from 0x%04x: %02x\n", ptr->name, reg, value); return value; } static void zr36016_write (struct zr36016 *ptr, u16 reg, u8 value) { dprintk(4, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, reg); // just in case something is wrong... if (ptr->codec->master_data->writereg) { ptr->codec->master_data->writereg(ptr->codec, reg, value); } else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing written!\n", ptr->name); } /* indirect read and write functions */ /* the 016 supports auto-addr-increment, but * writing it all time cost not much and is safer... */ static u8 zr36016_readi (struct zr36016 *ptr, u16 reg) { u8 value = 0; // just in case something is wrong... if ((ptr->codec->master_data->writereg) && (ptr->codec->master_data->readreg)) { ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F); // ADDR value = (ptr->codec->master_data->readreg(ptr->codec, ZR016_IDATA)) & 0xFF; // DATA } else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing read (i)!\n", ptr->name); dprintk(4, "%s: reading indirect from 0x%04x: %02x\n", ptr->name, reg, value); return value; } static void zr36016_writei (struct zr36016 *ptr, u16 reg, u8 value) { dprintk(4, "%s: writing indirect 0x%02x to 0x%04x\n", ptr->name, value, reg); // just in case something is wrong... 
if (ptr->codec->master_data->writereg) { ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F); // ADDR ptr->codec->master_data->writereg(ptr->codec, ZR016_IDATA, value & 0x0FF); // DATA } else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing written (i)!\n", ptr->name); } /* ========================================================================= Local helper function: version read ========================================================================= */ /* version kept in datastructure */ static u8 zr36016_read_version (struct zr36016 *ptr) { ptr->version = zr36016_read(ptr, 0) >> 4; return ptr->version; } /* ========================================================================= Local helper function: basic test of "connectivity", writes/reads to/from PAX-Lo register ========================================================================= */ static int zr36016_basic_test (struct zr36016 *ptr) { if (debug) { int i; zr36016_writei(ptr, ZR016I_PAX_LO, 0x55); dprintk(1, KERN_INFO "%s: registers: ", ptr->name); for (i = 0; i <= 0x0b; i++) dprintk(1, "%02x ", zr36016_readi(ptr, i)); dprintk(1, "\n"); } // for testing just write 0, then the default value to a register and read // it back in both cases zr36016_writei(ptr, ZR016I_PAX_LO, 0x00); if (zr36016_readi(ptr, ZR016I_PAX_LO) != 0x0) { dprintk(1, KERN_ERR "%s: attach failed, can't connect to vfe processor!\n", ptr->name); return -ENXIO; } zr36016_writei(ptr, ZR016I_PAX_LO, 0x0d0); if (zr36016_readi(ptr, ZR016I_PAX_LO) != 0x0d0) { dprintk(1, KERN_ERR "%s: attach failed, can't connect to vfe processor!\n", ptr->name); return -ENXIO; } // we allow version numbers from 0-3, should be enough, though zr36016_read_version(ptr); if (ptr->version & 0x0c) { dprintk(1, KERN_ERR "%s: attach failed, suspicious version %d found...\n", ptr->name, ptr->version); return -ENXIO; } return 0; /* looks good! 
*/ } /* ========================================================================= Local helper function: simple loop for pushing the init datasets - NO USE -- ========================================================================= */ #if 0 static int zr36016_pushit (struct zr36016 *ptr, u16 startreg, u16 len, const char *data) { int i=0; dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", ptr->name, startreg,len); while (i<len) { zr36016_writei(ptr, startreg++, data[i++]); } return i; } #endif /* ========================================================================= Basic datasets & init: //TODO// ========================================================================= */ // needed offset values PAL NTSC SECAM static const int zr016_xoff[] = { 20, 20, 20 }; static const int zr016_yoff[] = { 8, 9, 7 }; static void zr36016_init (struct zr36016 *ptr) { // stop any processing zr36016_write(ptr, ZR016_GOSTOP, 0); // mode setup (yuv422 in and out, compression/expansuon due to mode) zr36016_write(ptr, ZR016_MODE, ZR016_YUV422 | ZR016_YUV422_YUV422 | (ptr->mode == CODEC_DO_COMPRESSION ? ZR016_COMPRESSION : ZR016_EXPANSION)); // misc setup zr36016_writei(ptr, ZR016I_SETUP1, (ptr->xdec ? (ZR016_HRFL | ZR016_HORZ) : 0) | (ptr->ydec ? ZR016_VERT : 0) | ZR016_CNTI); zr36016_writei(ptr, ZR016I_SETUP2, ZR016_CCIR); // Window setup // (no extra offset for now, norm defines offset, default width height) zr36016_writei(ptr, ZR016I_PAX_HI, ptr->width >> 8); zr36016_writei(ptr, ZR016I_PAX_LO, ptr->width & 0xFF); zr36016_writei(ptr, ZR016I_PAY_HI, ptr->height >> 8); zr36016_writei(ptr, ZR016I_PAY_LO, ptr->height & 0xFF); zr36016_writei(ptr, ZR016I_NAX_HI, ptr->xoff >> 8); zr36016_writei(ptr, ZR016I_NAX_LO, ptr->xoff & 0xFF); zr36016_writei(ptr, ZR016I_NAY_HI, ptr->yoff >> 8); zr36016_writei(ptr, ZR016I_NAY_LO, ptr->yoff & 0xFF); /* shall we continue now, please? 
*/ zr36016_write(ptr, ZR016_GOSTOP, 1); } /* ========================================================================= CODEC API FUNCTIONS this functions are accessed by the master via the API structure ========================================================================= */ /* set compression/expansion mode and launches codec - this should be the last call from the master before starting processing */ static int zr36016_set_mode (struct videocodec *codec, int mode) { struct zr36016 *ptr = (struct zr36016 *) codec->data; dprintk(2, "%s: set_mode %d call\n", ptr->name, mode); if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION)) return -EINVAL; ptr->mode = mode; zr36016_init(ptr); return 0; } /* set picture size */ static int zr36016_set_video (struct videocodec *codec, struct tvnorm *norm, struct vfe_settings *cap, struct vfe_polarity *pol) { struct zr36016 *ptr = (struct zr36016 *) codec->data; dprintk(2, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) call\n", ptr->name, norm->HStart, norm->VStart, cap->x, cap->y, cap->width, cap->height, cap->decimation); /* if () return -EINVAL; * trust the master driver that it knows what it does - so * we allow invalid startx/y for now ... */ ptr->width = cap->width; ptr->height = cap->height; /* (Ronald) This is ugly. zoran_device.c, line 387 * already mentions what happens if HStart is even * (blue faces, etc., cr/cb inversed). There's probably * some good reason why HStart is 0 instead of 1, so I'm * leaving it to this for now, but really... This can be * done a lot simpler */ ptr->xoff = (norm->HStart ? norm->HStart : 1) + cap->x; /* Something to note here (I don't understand it), setting * VStart too high will cause the codec to 'not work'. I * really don't get it. values of 16 (VStart) already break * it here. Just '0' seems to work. More testing needed! */ ptr->yoff = norm->VStart + cap->y; /* (Ronald) dzjeeh, can't this thing do hor_decimation = 4? */ ptr->xdec = ((cap->decimation & 0xff) == 1) ? 
0 : 1; ptr->ydec = (((cap->decimation >> 8) & 0xff) == 1) ? 0 : 1; return 0; } /* additional control functions */ static int zr36016_control (struct videocodec *codec, int type, int size, void *data) { struct zr36016 *ptr = (struct zr36016 *) codec->data; int *ival = (int *) data; dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type, size); switch (type) { case CODEC_G_STATUS: /* get last status - we don't know it ... */ if (size != sizeof(int)) return -EFAULT; *ival = 0; break; case CODEC_G_CODEC_MODE: if (size != sizeof(int)) return -EFAULT; *ival = 0; break; case CODEC_S_CODEC_MODE: if (size != sizeof(int)) return -EFAULT; if (*ival != 0) return -EINVAL; /* not needed, do nothing */ return 0; case CODEC_G_VFE: case CODEC_S_VFE: return 0; case CODEC_S_MMAP: /* not available, give an error */ return -ENXIO; default: return -EINVAL; } return size; } /* ========================================================================= Exit and unregister function: Deinitializes Zoran's JPEG processor ========================================================================= */ static int zr36016_unset (struct videocodec *codec) { struct zr36016 *ptr = codec->data; if (ptr) { /* do wee need some codec deinit here, too ???? */ dprintk(1, "%s: finished codec #%d\n", ptr->name, ptr->num); kfree(ptr); codec->data = NULL; zr36016_codecs--; return 0; } return -EFAULT; } /* ========================================================================= Setup and registry function: Initializes Zoran's JPEG processor Also sets pixel size, average code size, mode (compr./decompr.) 
(the given size is determined by the processor with the video interface) ========================================================================= */ static int zr36016_setup (struct videocodec *codec) { struct zr36016 *ptr; int res; dprintk(2, "zr36016: initializing VFE subsystem #%d.\n", zr36016_codecs); if (zr36016_codecs == MAX_CODECS) { dprintk(1, KERN_ERR "zr36016: Can't attach more codecs!\n"); return -ENOSPC; } //mem structure init codec->data = ptr = kzalloc(sizeof(struct zr36016), GFP_KERNEL); if (NULL == ptr) { dprintk(1, KERN_ERR "zr36016: Can't get enough memory!\n"); return -ENOMEM; } snprintf(ptr->name, sizeof(ptr->name), "zr36016[%d]", zr36016_codecs); ptr->num = zr36016_codecs++; ptr->codec = codec; //testing res = zr36016_basic_test(ptr); if (res < 0) { zr36016_unset(codec); return res; } //final setup ptr->mode = CODEC_DO_COMPRESSION; ptr->width = 768; ptr->height = 288; ptr->xdec = 1; ptr->ydec = 0; zr36016_init(ptr); dprintk(1, KERN_INFO "%s: codec v%d attached and running\n", ptr->name, ptr->version); return 0; } static const struct videocodec zr36016_codec = { .owner = THIS_MODULE, .name = "zr36016", .magic = 0L, // magic not used .flags = CODEC_FLAG_HARDWARE | CODEC_FLAG_VFE | CODEC_FLAG_ENCODER | CODEC_FLAG_DECODER, .type = CODEC_TYPE_ZR36016, .setup = zr36016_setup, // functionality .unset = zr36016_unset, .set_mode = zr36016_set_mode, .set_video = zr36016_set_video, .control = zr36016_control, // others are not used }; /* ========================================================================= HOOK IN DRIVER AS KERNEL MODULE ========================================================================= */ static int __init zr36016_init_module (void) { //dprintk(1, "ZR36016 driver %s\n",ZR016_VERSION); zr36016_codecs = 0; return videocodec_register(&zr36016_codec); } static void __exit zr36016_cleanup_module (void) { if (zr36016_codecs) { dprintk(1, "zr36016: something's wrong - %d codecs left somehow.\n", zr36016_codecs); } 
videocodec_unregister(&zr36016_codec); } module_init(zr36016_init_module); module_exit(zr36016_cleanup_module); MODULE_AUTHOR("Wolfgang Scherr <scherr@net4you.at>"); MODULE_DESCRIPTION("Driver module for ZR36016 video frontends " ZR016_VERSION); MODULE_LICENSE("GPL");
gpl-2.0
opinsys/opinsys-linux
arch/arm/mach-omap2/clockdomains33xx_data.c
5045
5532
/* * AM33XX Clock Domain data. * * Copyright (C) 2011-2012 Texas Instruments Incorporated - http://www.ti.com/ * Vaibhav Hiremath <hvaibhav@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/io.h> #include "clockdomain.h" #include "cm.h" #include "cm33xx.h" #include "cm-regbits-33xx.h" static struct clockdomain l4ls_am33xx_clkdm = { .name = "l4ls_clkdm", .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_L4LS_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l3s_am33xx_clkdm = { .name = "l3s_clkdm", .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_L3S_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l4fw_am33xx_clkdm = { .name = "l4fw_clkdm", .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_L4FW_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l3_am33xx_clkdm = { .name = "l3_clkdm", .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_L3_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l4hs_am33xx_clkdm = { .name = "l4hs_clkdm", .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_L4HS_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain ocpwp_l3_am33xx_clkdm = { .name = "ocpwp_l3_clkdm", .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_OCPWP_L3_CLKSTCTRL_OFFSET, .flags = 
CLKDM_CAN_SWSUP, }; static struct clockdomain pruss_ocp_am33xx_clkdm = { .name = "pruss_ocp_clkdm", .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_PRUSS_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain cpsw_125mhz_am33xx_clkdm = { .name = "cpsw_125mhz_clkdm", .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_CPSW_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain lcdc_am33xx_clkdm = { .name = "lcdc_clkdm", .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_LCDC_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain clk_24mhz_am33xx_clkdm = { .name = "clk_24mhz_clkdm", .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_CLK_24MHZ_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l4_wkup_am33xx_clkdm = { .name = "l4_wkup_clkdm", .pwrdm = { .name = "wkup_pwrdm" }, .cm_inst = AM33XX_CM_WKUP_MOD, .clkdm_offs = AM33XX_CM_WKUP_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l3_aon_am33xx_clkdm = { .name = "l3_aon_clkdm", .pwrdm = { .name = "wkup_pwrdm" }, .cm_inst = AM33XX_CM_WKUP_MOD, .clkdm_offs = AM33XX_CM_L3_AON_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l4_wkup_aon_am33xx_clkdm = { .name = "l4_wkup_aon_clkdm", .pwrdm = { .name = "wkup_pwrdm" }, .cm_inst = AM33XX_CM_WKUP_MOD, .clkdm_offs = AM33XX_CM_L4_WKUP_AON_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain mpu_am33xx_clkdm = { .name = "mpu_clkdm", .pwrdm = { .name = "mpu_pwrdm" }, .cm_inst = AM33XX_CM_MPU_MOD, .clkdm_offs = AM33XX_CM_MPU_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l4_rtc_am33xx_clkdm = { .name = "l4_rtc_clkdm", .pwrdm = { .name = "rtc_pwrdm" }, .cm_inst = AM33XX_CM_RTC_MOD, .clkdm_offs = AM33XX_CM_RTC_CLKSTCTRL_OFFSET, .flags = 
CLKDM_CAN_SWSUP, }; static struct clockdomain gfx_l3_am33xx_clkdm = { .name = "gfx_l3_clkdm", .pwrdm = { .name = "gfx_pwrdm" }, .cm_inst = AM33XX_CM_GFX_MOD, .clkdm_offs = AM33XX_CM_GFX_L3_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain gfx_l4ls_gfx_am33xx_clkdm = { .name = "gfx_l4ls_gfx_clkdm", .pwrdm = { .name = "gfx_pwrdm" }, .cm_inst = AM33XX_CM_GFX_MOD, .clkdm_offs = AM33XX_CM_GFX_L4LS_GFX_CLKSTCTRL__1_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l4_cefuse_am33xx_clkdm = { .name = "l4_cefuse_clkdm", .pwrdm = { .name = "cefuse_pwrdm" }, .cm_inst = AM33XX_CM_CEFUSE_MOD, .clkdm_offs = AM33XX_CM_CEFUSE_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain *clockdomains_am33xx[] __initdata = { &l4ls_am33xx_clkdm, &l3s_am33xx_clkdm, &l4fw_am33xx_clkdm, &l3_am33xx_clkdm, &l4hs_am33xx_clkdm, &ocpwp_l3_am33xx_clkdm, &pruss_ocp_am33xx_clkdm, &cpsw_125mhz_am33xx_clkdm, &lcdc_am33xx_clkdm, &clk_24mhz_am33xx_clkdm, &l4_wkup_am33xx_clkdm, &l3_aon_am33xx_clkdm, &l4_wkup_aon_am33xx_clkdm, &mpu_am33xx_clkdm, &l4_rtc_am33xx_clkdm, &gfx_l3_am33xx_clkdm, &gfx_l4ls_gfx_am33xx_clkdm, &l4_cefuse_am33xx_clkdm, NULL, }; void __init am33xx_clockdomains_init(void) { clkdm_register_platform_funcs(&am33xx_clkdm_operations); clkdm_register_clkdms(clockdomains_am33xx); clkdm_complete_init(); }
gpl-2.0
DC07/spirit_msm8916
arch/arm/mach-omap2/clockdomains33xx_data.c
5045
5532
/* * AM33XX Clock Domain data. * * Copyright (C) 2011-2012 Texas Instruments Incorporated - http://www.ti.com/ * Vaibhav Hiremath <hvaibhav@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/io.h> #include "clockdomain.h" #include "cm.h" #include "cm33xx.h" #include "cm-regbits-33xx.h" static struct clockdomain l4ls_am33xx_clkdm = { .name = "l4ls_clkdm", .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_L4LS_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l3s_am33xx_clkdm = { .name = "l3s_clkdm", .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_L3S_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l4fw_am33xx_clkdm = { .name = "l4fw_clkdm", .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_L4FW_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l3_am33xx_clkdm = { .name = "l3_clkdm", .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_L3_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l4hs_am33xx_clkdm = { .name = "l4hs_clkdm", .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_L4HS_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain ocpwp_l3_am33xx_clkdm = { .name = "ocpwp_l3_clkdm", .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_OCPWP_L3_CLKSTCTRL_OFFSET, .flags = 
CLKDM_CAN_SWSUP, }; static struct clockdomain pruss_ocp_am33xx_clkdm = { .name = "pruss_ocp_clkdm", .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_PRUSS_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain cpsw_125mhz_am33xx_clkdm = { .name = "cpsw_125mhz_clkdm", .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_CPSW_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain lcdc_am33xx_clkdm = { .name = "lcdc_clkdm", .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_LCDC_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain clk_24mhz_am33xx_clkdm = { .name = "clk_24mhz_clkdm", .pwrdm = { .name = "per_pwrdm" }, .cm_inst = AM33XX_CM_PER_MOD, .clkdm_offs = AM33XX_CM_PER_CLK_24MHZ_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l4_wkup_am33xx_clkdm = { .name = "l4_wkup_clkdm", .pwrdm = { .name = "wkup_pwrdm" }, .cm_inst = AM33XX_CM_WKUP_MOD, .clkdm_offs = AM33XX_CM_WKUP_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l3_aon_am33xx_clkdm = { .name = "l3_aon_clkdm", .pwrdm = { .name = "wkup_pwrdm" }, .cm_inst = AM33XX_CM_WKUP_MOD, .clkdm_offs = AM33XX_CM_L3_AON_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l4_wkup_aon_am33xx_clkdm = { .name = "l4_wkup_aon_clkdm", .pwrdm = { .name = "wkup_pwrdm" }, .cm_inst = AM33XX_CM_WKUP_MOD, .clkdm_offs = AM33XX_CM_L4_WKUP_AON_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain mpu_am33xx_clkdm = { .name = "mpu_clkdm", .pwrdm = { .name = "mpu_pwrdm" }, .cm_inst = AM33XX_CM_MPU_MOD, .clkdm_offs = AM33XX_CM_MPU_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l4_rtc_am33xx_clkdm = { .name = "l4_rtc_clkdm", .pwrdm = { .name = "rtc_pwrdm" }, .cm_inst = AM33XX_CM_RTC_MOD, .clkdm_offs = AM33XX_CM_RTC_CLKSTCTRL_OFFSET, .flags = 
CLKDM_CAN_SWSUP, }; static struct clockdomain gfx_l3_am33xx_clkdm = { .name = "gfx_l3_clkdm", .pwrdm = { .name = "gfx_pwrdm" }, .cm_inst = AM33XX_CM_GFX_MOD, .clkdm_offs = AM33XX_CM_GFX_L3_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain gfx_l4ls_gfx_am33xx_clkdm = { .name = "gfx_l4ls_gfx_clkdm", .pwrdm = { .name = "gfx_pwrdm" }, .cm_inst = AM33XX_CM_GFX_MOD, .clkdm_offs = AM33XX_CM_GFX_L4LS_GFX_CLKSTCTRL__1_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l4_cefuse_am33xx_clkdm = { .name = "l4_cefuse_clkdm", .pwrdm = { .name = "cefuse_pwrdm" }, .cm_inst = AM33XX_CM_CEFUSE_MOD, .clkdm_offs = AM33XX_CM_CEFUSE_CLKSTCTRL_OFFSET, .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain *clockdomains_am33xx[] __initdata = { &l4ls_am33xx_clkdm, &l3s_am33xx_clkdm, &l4fw_am33xx_clkdm, &l3_am33xx_clkdm, &l4hs_am33xx_clkdm, &ocpwp_l3_am33xx_clkdm, &pruss_ocp_am33xx_clkdm, &cpsw_125mhz_am33xx_clkdm, &lcdc_am33xx_clkdm, &clk_24mhz_am33xx_clkdm, &l4_wkup_am33xx_clkdm, &l3_aon_am33xx_clkdm, &l4_wkup_aon_am33xx_clkdm, &mpu_am33xx_clkdm, &l4_rtc_am33xx_clkdm, &gfx_l3_am33xx_clkdm, &gfx_l4ls_gfx_am33xx_clkdm, &l4_cefuse_am33xx_clkdm, NULL, }; void __init am33xx_clockdomains_init(void) { clkdm_register_platform_funcs(&am33xx_clkdm_operations); clkdm_register_clkdms(clockdomains_am33xx); clkdm_complete_init(); }
gpl-2.0
TWRP-J5/android_kernel_samsung_j5lte
arch/powerpc/kernel/module.c
7093
2464
/* Kernel module help for powerpc. Copyright (C) 2001, 2003 Rusty Russell IBM Corporation. Copyright (C) 2008 Freescale Semiconductor, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/elf.h> #include <linux/moduleloader.h> #include <linux/err.h> #include <linux/vmalloc.h> #include <linux/bug.h> #include <asm/module.h> #include <asm/uaccess.h> #include <asm/firmware.h> #include <linux/sort.h> #include "setup.h" LIST_HEAD(module_bug_list); static const Elf_Shdr *find_section(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, const char *name) { char *secstrings; unsigned int i; secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; for (i = 1; i < hdr->e_shnum; i++) if (strcmp(secstrings+sechdrs[i].sh_name, name) == 0) return &sechdrs[i]; return NULL; } int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *me) { const Elf_Shdr *sect; /* Apply feature fixups */ sect = find_section(hdr, sechdrs, "__ftr_fixup"); if (sect != NULL) do_feature_fixups(cur_cpu_spec->cpu_features, (void *)sect->sh_addr, (void *)sect->sh_addr + sect->sh_size); sect = find_section(hdr, sechdrs, "__mmu_ftr_fixup"); if (sect != NULL) do_feature_fixups(cur_cpu_spec->mmu_features, (void *)sect->sh_addr, (void *)sect->sh_addr + sect->sh_size); #ifdef CONFIG_PPC64 sect = find_section(hdr, sechdrs, "__fw_ftr_fixup"); if (sect != NULL) 
do_feature_fixups(powerpc_firmware_features, (void *)sect->sh_addr, (void *)sect->sh_addr + sect->sh_size); #endif sect = find_section(hdr, sechdrs, "__lwsync_fixup"); if (sect != NULL) do_lwsync_fixups(cur_cpu_spec->cpu_features, (void *)sect->sh_addr, (void *)sect->sh_addr + sect->sh_size); return 0; }
gpl-2.0
kula85/perf-sqlite3
arch/cris/arch-v32/drivers/mach-a3/nandflash.c
8885
4205
/* * arch/cris/arch-v32/drivers/nandflash.c * * Copyright (c) 2007 * * Derived from drivers/mtd/nand/spia.c * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/slab.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <arch/memmap.h> #include <hwregs/reg_map.h> #include <hwregs/reg_rdwr.h> #include <hwregs/pio_defs.h> #include <pinmux.h> #include <asm/io.h> #define MANUAL_ALE_CLE_CONTROL 1 #define regf_ALE a0 #define regf_CLE a1 #define regf_NCE ce0_n #define CLE_BIT 10 #define ALE_BIT 11 #define CE_BIT 12 struct mtd_info_wrapper { struct mtd_info info; struct nand_chip chip; }; /* Bitmask for control pins */ #define PIN_BITMASK ((1 << CE_BIT) | (1 << CLE_BIT) | (1 << ALE_BIT)) static struct mtd_info *crisv32_mtd; /* * hardware specific access to control-lines */ static void crisv32_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) { unsigned long flags; reg_pio_rw_dout dout; struct nand_chip *this = mtd->priv; local_irq_save(flags); /* control bits change */ if (ctrl & NAND_CTRL_CHANGE) { dout = REG_RD(pio, regi_pio, rw_dout); dout.regf_NCE = (ctrl & NAND_NCE) ? 0 : 1; #if !MANUAL_ALE_CLE_CONTROL if (ctrl & NAND_ALE) { /* A0 = ALE high */ this->IO_ADDR_W = (void __iomem *)REG_ADDR(pio, regi_pio, rw_io_access1); } else if (ctrl & NAND_CLE) { /* A1 = CLE high */ this->IO_ADDR_W = (void __iomem *)REG_ADDR(pio, regi_pio, rw_io_access2); } else { /* A1 = CLE and A0 = ALE low */ this->IO_ADDR_W = (void __iomem *)REG_ADDR(pio, regi_pio, rw_io_access0); } #else dout.regf_CLE = (ctrl & NAND_CLE) ? 1 : 0; dout.regf_ALE = (ctrl & NAND_ALE) ? 
1 : 0; #endif REG_WR(pio, regi_pio, rw_dout, dout); } /* command to chip */ if (cmd != NAND_CMD_NONE) writeb(cmd, this->IO_ADDR_W); local_irq_restore(flags); } /* * read device ready pin */ static int crisv32_device_ready(struct mtd_info *mtd) { reg_pio_r_din din = REG_RD(pio, regi_pio, r_din); return din.rdy; } /* * Main initialization routine */ struct mtd_info *__init crisv32_nand_flash_probe(void) { void __iomem *read_cs; void __iomem *write_cs; struct mtd_info_wrapper *wrapper; struct nand_chip *this; int err = 0; reg_pio_rw_man_ctrl man_ctrl = { .regf_NCE = regk_pio_yes, #if MANUAL_ALE_CLE_CONTROL .regf_ALE = regk_pio_yes, .regf_CLE = regk_pio_yes #endif }; reg_pio_rw_oe oe = { .regf_NCE = regk_pio_yes, #if MANUAL_ALE_CLE_CONTROL .regf_ALE = regk_pio_yes, .regf_CLE = regk_pio_yes #endif }; reg_pio_rw_dout dout = { .regf_NCE = 1 }; /* Allocate pio pins to pio */ crisv32_pinmux_alloc_fixed(pinmux_pio); /* Set up CE, ALE, CLE (ce0_n, a0, a1) for manual control and output */ REG_WR(pio, regi_pio, rw_man_ctrl, man_ctrl); REG_WR(pio, regi_pio, rw_dout, dout); REG_WR(pio, regi_pio, rw_oe, oe); /* Allocate memory for MTD device structure and private data */ wrapper = kzalloc(sizeof(struct mtd_info_wrapper), GFP_KERNEL); if (!wrapper) { printk(KERN_ERR "Unable to allocate CRISv32 NAND MTD " "device structure.\n"); err = -ENOMEM; return NULL; } read_cs = write_cs = (void __iomem *)REG_ADDR(pio, regi_pio, rw_io_access0); /* Get pointer to private data */ this = &wrapper->chip; crisv32_mtd = &wrapper->info; /* Link the private data with the MTD structure */ crisv32_mtd->priv = this; /* Set address of NAND IO lines */ this->IO_ADDR_R = read_cs; this->IO_ADDR_W = write_cs; this->cmd_ctrl = crisv32_hwcontrol; this->dev_ready = crisv32_device_ready; /* 20 us command delay time */ this->chip_delay = 20; this->ecc.mode = NAND_ECC_SOFT; /* Enable the following for a flash based bad block table */ /* this->bbt_options = NAND_BBT_USE_FLASH; */ /* Scan to find existence of the 
device */ if (nand_scan(crisv32_mtd, 1)) { err = -ENXIO; goto out_mtd; } return crisv32_mtd; out_mtd: kfree(wrapper); return NULL; }
gpl-2.0
aapav01/android_kernel_samsung_ms013g_stock
arch/arm/mach-s3c24xx/setup-ts.c
8885
1085
/* linux/arch/arm/plat-s3c24xx/setup-ts.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * Based on S3C24XX setup for i2c device * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/gpio.h> struct platform_device; /* don't need the contents */ #include <mach/hardware.h> #include <mach/regs-gpio.h> /** * s3c24xx_ts_cfg_gpio - configure gpio for s3c2410 systems * * Configure the GPIO for the S3C2410 system, where we have external FETs * connected to the device (later systems such as the S3C2440 integrate * these into the device). */ void s3c24xx_ts_cfg_gpio(struct platform_device *dev) { s3c2410_gpio_cfgpin(S3C2410_GPG(12), S3C2410_GPG12_XMON); s3c2410_gpio_cfgpin(S3C2410_GPG(13), S3C2410_GPG13_nXPON); s3c2410_gpio_cfgpin(S3C2410_GPG(14), S3C2410_GPG14_YMON); s3c2410_gpio_cfgpin(S3C2410_GPG(15), S3C2410_GPG15_nYPON); }
gpl-2.0
vovanx500/ConceptKernel
scripts/selinux/genheaders/genheaders.c
12725
3517
#include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <string.h> #include <errno.h> #include <ctype.h> struct security_class_mapping { const char *name; const char *perms[sizeof(unsigned) * 8 + 1]; }; #include "classmap.h" #include "initial_sid_to_string.h" #define max(x, y) (((int)(x) > (int)(y)) ? x : y) const char *progname; static void usage(void) { printf("usage: %s flask.h av_permissions.h\n", progname); exit(1); } static char *stoupperx(const char *s) { char *s2 = strdup(s); char *p; if (!s2) { fprintf(stderr, "%s: out of memory\n", progname); exit(3); } for (p = s2; *p; p++) *p = toupper(*p); return s2; } int main(int argc, char *argv[]) { int i, j, k; int isids_len; FILE *fout; const char *needle = "SOCKET"; char *substr; progname = argv[0]; if (argc < 3) usage(); fout = fopen(argv[1], "w"); if (!fout) { fprintf(stderr, "Could not open %s for writing: %s\n", argv[1], strerror(errno)); exit(2); } for (i = 0; secclass_map[i].name; i++) { struct security_class_mapping *map = &secclass_map[i]; map->name = stoupperx(map->name); for (j = 0; map->perms[j]; j++) map->perms[j] = stoupperx(map->perms[j]); } isids_len = sizeof(initial_sid_to_string) / sizeof (char *); for (i = 1; i < isids_len; i++) initial_sid_to_string[i] = stoupperx(initial_sid_to_string[i]); fprintf(fout, "/* This file is automatically generated. Do not edit. 
*/\n"); fprintf(fout, "#ifndef _SELINUX_FLASK_H_\n#define _SELINUX_FLASK_H_\n\n"); for (i = 0; secclass_map[i].name; i++) { struct security_class_mapping *map = &secclass_map[i]; fprintf(fout, "#define SECCLASS_%s", map->name); for (j = 0; j < max(1, 40 - strlen(map->name)); j++) fprintf(fout, " "); fprintf(fout, "%2d\n", i+1); } fprintf(fout, "\n"); for (i = 1; i < isids_len; i++) { const char *s = initial_sid_to_string[i]; fprintf(fout, "#define SECINITSID_%s", s); for (j = 0; j < max(1, 40 - strlen(s)); j++) fprintf(fout, " "); fprintf(fout, "%2d\n", i); } fprintf(fout, "\n#define SECINITSID_NUM %d\n", i-1); fprintf(fout, "\nstatic inline bool security_is_socket_class(u16 kern_tclass)\n"); fprintf(fout, "{\n"); fprintf(fout, "\tbool sock = false;\n\n"); fprintf(fout, "\tswitch (kern_tclass) {\n"); for (i = 0; secclass_map[i].name; i++) { struct security_class_mapping *map = &secclass_map[i]; substr = strstr(map->name, needle); if (substr && strcmp(substr, needle) == 0) fprintf(fout, "\tcase SECCLASS_%s:\n", map->name); } fprintf(fout, "\t\tsock = true;\n"); fprintf(fout, "\t\tbreak;\n"); fprintf(fout, "\tdefault:\n"); fprintf(fout, "\t\tbreak;\n"); fprintf(fout, "\t}\n\n"); fprintf(fout, "\treturn sock;\n"); fprintf(fout, "}\n"); fprintf(fout, "\n#endif\n"); fclose(fout); fout = fopen(argv[2], "w"); if (!fout) { fprintf(stderr, "Could not open %s for writing: %s\n", argv[2], strerror(errno)); exit(4); } fprintf(fout, "/* This file is automatically generated. Do not edit. */\n"); fprintf(fout, "#ifndef _SELINUX_AV_PERMISSIONS_H_\n#define _SELINUX_AV_PERMISSIONS_H_\n\n"); for (i = 0; secclass_map[i].name; i++) { struct security_class_mapping *map = &secclass_map[i]; for (j = 0; map->perms[j]; j++) { fprintf(fout, "#define %s__%s", map->name, map->perms[j]); for (k = 0; k < max(1, 40 - strlen(map->name) - strlen(map->perms[j])); k++) fprintf(fout, " "); fprintf(fout, "0x%08xUL\n", (1<<j)); } } fprintf(fout, "\n#endif\n"); fclose(fout); exit(0); }
gpl-2.0
Kurre/kernel_exynos_KK
drivers/scsi/nsp32_debug.c
12725
7493
/* * Workbit NinjaSCSI-32Bi/UDE PCI/CardBus SCSI Host Bus Adapter driver * Debug routine * * This software may be used and distributed according to the terms of * the GNU General Public License. */ /* * Show the command data of a command */ static const char unknown[] = "UNKNOWN"; static const char * group_0_commands[] = { /* 00-03 */ "Test Unit Ready", "Rezero Unit", unknown, "Request Sense", /* 04-07 */ "Format Unit", "Read Block Limits", unknown, "Reassign Blocks", /* 08-0d */ "Read (6)", unknown, "Write (6)", "Seek (6)", unknown, unknown, /* 0e-12 */ unknown, "Read Reverse", "Write Filemarks", "Space", "Inquiry", /* 13-16 */ unknown, "Recover Buffered Data", "Mode Select", "Reserve", /* 17-1b */ "Release", "Copy", "Erase", "Mode Sense", "Start/Stop Unit", /* 1c-1d */ "Receive Diagnostic", "Send Diagnostic", /* 1e-1f */ "Prevent/Allow Medium Removal", unknown, }; static const char *group_1_commands[] = { /* 20-22 */ unknown, unknown, unknown, /* 23-28 */ unknown, unknown, "Read Capacity", unknown, unknown, "Read (10)", /* 29-2d */ unknown, "Write (10)", "Seek (10)", unknown, unknown, /* 2e-31 */ "Write Verify","Verify", "Search High", "Search Equal", /* 32-34 */ "Search Low", "Set Limits", "Prefetch or Read Position", /* 35-37 */ "Synchronize Cache","Lock/Unlock Cache", "Read Defect Data", /* 38-3c */ "Medium Scan", "Compare","Copy Verify", "Write Buffer", "Read Buffer", /* 3d-3f */ "Update Block", "Read Long", "Write Long", }; static const char *group_2_commands[] = { /* 40-41 */ "Change Definition", "Write Same", /* 42-48 */ "Read Sub-Ch(cd)", "Read TOC", "Read Header(cd)", "Play Audio(cd)", unknown, "Play Audio MSF(cd)", "Play Audio Track/Index(cd)", /* 49-4f */ "Play Track Relative(10)(cd)", unknown, "Pause/Resume(cd)", "Log Select", "Log Sense", unknown, unknown, /* 50-55 */ unknown, unknown, unknown, unknown, unknown, "Mode Select (10)", /* 56-5b */ unknown, unknown, unknown, unknown, "Mode Sense (10)", unknown, /* 5c-5f */ unknown, unknown, unknown, }; 
#define group(opcode) (((opcode) >> 5) & 7) #define RESERVED_GROUP 0 #define VENDOR_GROUP 1 #define NOTEXT_GROUP 2 static const char **commands[] = { group_0_commands, group_1_commands, group_2_commands, (const char **) RESERVED_GROUP, (const char **) RESERVED_GROUP, (const char **) NOTEXT_GROUP, (const char **) VENDOR_GROUP, (const char **) VENDOR_GROUP }; static const char reserved[] = "RESERVED"; static const char vendor[] = "VENDOR SPECIFIC"; static void print_opcodek(unsigned char opcode) { const char **table = commands[ group(opcode) ]; switch ((unsigned long) table) { case RESERVED_GROUP: printk("%s[%02x] ", reserved, opcode); break; case NOTEXT_GROUP: printk("%s(notext)[%02x] ", unknown, opcode); break; case VENDOR_GROUP: printk("%s[%02x] ", vendor, opcode); break; default: if (table[opcode & 0x1f] != unknown) printk("%s[%02x] ", table[opcode & 0x1f], opcode); else printk("%s[%02x] ", unknown, opcode); break; } } static void print_commandk (unsigned char *command) { int i,s; // printk(KERN_DEBUG); print_opcodek(command[0]); /*printk(KERN_DEBUG "%s ", __func__);*/ if ((command[0] >> 5) == 6 || (command[0] >> 5) == 7 ) { s = 12; /* vender specific */ } else { s = COMMAND_SIZE(command[0]); } for ( i = 1; i < s; ++i) { printk("%02x ", command[i]); } switch (s) { case 6: printk("LBA=%d len=%d", (((unsigned int)command[1] & 0x0f) << 16) | ( (unsigned int)command[2] << 8) | ( (unsigned int)command[3] ), (unsigned int)command[4] ); break; case 10: printk("LBA=%d len=%d", ((unsigned int)command[2] << 24) | ((unsigned int)command[3] << 16) | ((unsigned int)command[4] << 8) | ((unsigned int)command[5] ), ((unsigned int)command[7] << 8) | ((unsigned int)command[8] ) ); break; case 12: printk("LBA=%d len=%d", ((unsigned int)command[2] << 24) | ((unsigned int)command[3] << 16) | ((unsigned int)command[4] << 8) | ((unsigned int)command[5] ), ((unsigned int)command[6] << 24) | ((unsigned int)command[7] << 16) | ((unsigned int)command[8] << 8) | ((unsigned int)command[9] ) 
); break; default: break; } printk("\n"); } static void show_command(Scsi_Cmnd *SCpnt) { print_commandk(SCpnt->cmnd); } static void show_busphase(unsigned char stat) { switch(stat) { case BUSPHASE_COMMAND: printk( "BUSPHASE_COMMAND\n"); break; case BUSPHASE_MESSAGE_IN: printk( "BUSPHASE_MESSAGE_IN\n"); break; case BUSPHASE_MESSAGE_OUT: printk( "BUSPHASE_MESSAGE_OUT\n"); break; case BUSPHASE_DATA_IN: printk( "BUSPHASE_DATA_IN\n"); break; case BUSPHASE_DATA_OUT: printk( "BUSPHASE_DATA_OUT\n"); break; case BUSPHASE_STATUS: printk( "BUSPHASE_STATUS\n"); break; case BUSPHASE_SELECT: printk( "BUSPHASE_SELECT\n"); break; default: printk( "BUSPHASE_other: 0x%x\n", stat); break; } } static void show_autophase(unsigned short i) { printk("auto: 0x%x,", i); if(i & COMMAND_PHASE) { printk(" cmd"); } if(i & DATA_IN_PHASE) { printk(" din"); } if(i & DATA_OUT_PHASE) { printk(" dout"); } if(i & MSGOUT_PHASE) { printk(" mout"); } if(i & STATUS_PHASE) { printk(" stat"); } if(i & ILLEGAL_PHASE) { printk(" ill"); } if(i & BUS_FREE_OCCUER) { printk(" bfree-o"); } if(i & MSG_IN_OCCUER) { printk(" min-o"); } if(i & MSG_OUT_OCCUER) { printk(" mout-o"); } if(i & SELECTION_TIMEOUT) { printk(" sel"); } if(i & MSGIN_00_VALID) { printk(" m0"); } if(i & MSGIN_02_VALID) { printk(" m2"); } if(i & MSGIN_03_VALID) { printk(" m3"); } if(i & MSGIN_04_VALID) { printk(" m4"); } if(i & AUTOSCSI_BUSY) { printk(" busy"); } printk("\n"); } static void nsp32_print_register(int base) { if (!(NSP32_DEBUG_MASK & NSP32_SPECIAL_PRINT_REGISTER)) return; printk("Phase=0x%x, ", nsp32_read1(base, SCSI_BUS_MONITOR)); printk("OldPhase=0x%x, ", nsp32_index_read1(base, OLD_SCSI_PHASE)); printk("syncreg=0x%x, ", nsp32_read1(base, SYNC_REG)); printk("ackwidth=0x%x, ", nsp32_read1(base, ACK_WIDTH)); printk("sgtpaddr=0x%lx, ", nsp32_read4(base, SGT_ADR)); printk("scsioutlatch=0x%x, ", nsp32_read1(base, SCSI_OUT_LATCH_TARGET_ID)); printk("msgout=0x%lx, ", nsp32_read4(base, SCSI_MSG_OUT)); printk("miscrd=0x%x, ", 
nsp32_index_read2(base, MISC_WR)); printk("seltimeout=0x%x, ", nsp32_read2(base, SEL_TIME_OUT)); printk("sreqrate=0x%x, ", nsp32_read1(base, SREQ_SMPL_RATE)); printk("transStatus=0x%x, ", nsp32_read2(base, TRANSFER_STATUS)); printk("reselectid=0x%x, ", nsp32_read2(base, COMMAND_CONTROL)); printk("arbit=0x%x, ", nsp32_read1(base, ARBIT_STATUS)); printk("BmStart=0x%lx, ", nsp32_read4(base, BM_START_ADR)); printk("BmCount=0x%lx, ", nsp32_read4(base, BM_CNT)); printk("SackCnt=0x%lx, ", nsp32_read4(base, SACK_CNT)); printk("SReqCnt=0x%lx, ", nsp32_read4(base, SREQ_CNT)); printk("SavedSackCnt=0x%lx, ", nsp32_read4(base, SAVED_SACK_CNT)); printk("ScsiBusControl=0x%x, ", nsp32_read1(base, SCSI_BUS_CONTROL)); printk("FifoRestCnt=0x%x, ", nsp32_read2(base, FIFO_REST_CNT)); printk("CdbIn=0x%x, ", nsp32_read1(base, SCSI_CSB_IN)); printk("\n"); if (0) { printk("execph=0x%x, ", nsp32_read2(base, SCSI_EXECUTE_PHASE)); printk("IrqStatus=0x%x, ", nsp32_read2(base, IRQ_STATUS)); printk("\n"); } } /* end */
gpl-2.0
Koshu/thinkpad_tablet_ics_kernel
arch/arm/mach-at91/board-sam9rlek.c
950
7244
/* * Copyright (C) 2005 SAN People * Copyright (C) 2007 Atmel Corporation * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/types.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/fb.h> #include <linux/clk.h> #include <linux/input.h> #include <linux/gpio_keys.h> #include <video/atmel_lcdc.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/board.h> #include <mach/gpio.h> #include <mach/at91sam9_smc.h> #include <mach/at91_shdwc.h> #include "sam9_smc.h" #include "generic.h" static void __init ek_map_io(void) { /* Initialize processor: 12.000 MHz crystal */ at91sam9rl_initialize(12000000); /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART0 on ttyS1. (Rx, Tx, CTS, RTS) */ at91_register_uart(AT91SAM9RL_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS); /* set serial console to ttyS0 (ie, DBGU) */ at91_set_serial_console(0); } static void __init ek_init_irq(void) { at91sam9rl_init_interrupts(NULL); } /* * USB HS Device port */ static struct usba_platform_data __initdata ek_usba_udc_data = { .vbus_pin = AT91_PIN_PA8, }; /* * MCI (SD/MMC) */ static struct at91_mmc_data __initdata ek_mmc_data = { .wire4 = 1, .det_pin = AT91_PIN_PA15, // .wp_pin = ... not connected // .vcc_pin = ... 
not connected }; /* * NAND flash */ static struct mtd_partition __initdata ek_nand_partition[] = { { .name = "Partition 1", .offset = 0, .size = SZ_256K, }, { .name = "Partition 2", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL, }, }; static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) { *num_partitions = ARRAY_SIZE(ek_nand_partition); return ek_nand_partition; } static struct atmel_nand_data __initdata ek_nand_data = { .ale = 21, .cle = 22, // .det_pin = ... not connected .rdy_pin = AT91_PIN_PD17, .enable_pin = AT91_PIN_PB6, .partition_info = nand_partitions, }; static struct sam9_smc_config __initdata ek_nand_smc_config = { .ncs_read_setup = 0, .nrd_setup = 1, .ncs_write_setup = 0, .nwe_setup = 1, .ncs_read_pulse = 3, .nrd_pulse = 3, .ncs_write_pulse = 3, .nwe_pulse = 3, .read_cycle = 5, .write_cycle = 5, .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8, .tdf_cycles = 2, }; static void __init ek_add_device_nand(void) { /* configure chip-select 3 (NAND) */ sam9_smc_configure(3, &ek_nand_smc_config); at91_add_device_nand(&ek_nand_data); } /* * SPI devices */ static struct spi_board_info ek_spi_devices[] = { { /* DataFlash chip */ .modalias = "mtd_dataflash", .chip_select = 0, .max_speed_hz = 15 * 1000 * 1000, .bus_num = 0, }, }; /* * LCD Controller */ #if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE) static struct fb_videomode at91_tft_vga_modes[] = { { .name = "TX09D50VM1CCA @ 60", .refresh = 60, .xres = 240, .yres = 320, .pixclock = KHZ2PICOS(4965), .left_margin = 1, .right_margin = 33, .upper_margin = 1, .lower_margin = 0, .hsync_len = 5, .vsync_len = 1, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED, }, }; static struct fb_monspecs at91fb_default_monspecs = { .manufacturer = "HIT", .monitor = "TX09D50VM1CCA", .modedb = at91_tft_vga_modes, .modedb_len = ARRAY_SIZE(at91_tft_vga_modes), .hfmin = 15000, .hfmax = 64000, .vfmin = 
50, .vfmax = 150, }; #define AT91SAM9RL_DEFAULT_LCDCON2 (ATMEL_LCDC_MEMOR_LITTLE \ | ATMEL_LCDC_DISTYPE_TFT \ | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE) static void at91_lcdc_power_control(int on) { if (on) at91_set_gpio_value(AT91_PIN_PC1, 0); /* power up */ else at91_set_gpio_value(AT91_PIN_PC1, 1); /* power down */ } /* Driver datas */ static struct atmel_lcdfb_info __initdata ek_lcdc_data = { .lcdcon_is_backlight = true, .default_bpp = 16, .default_dmacon = ATMEL_LCDC_DMAEN, .default_lcdcon2 = AT91SAM9RL_DEFAULT_LCDCON2, .default_monspecs = &at91fb_default_monspecs, .atmel_lcdfb_power_control = at91_lcdc_power_control, .guard_time = 1, .lcd_wiring_mode = ATMEL_LCDC_WIRING_RGB, }; #else static struct atmel_lcdfb_info __initdata ek_lcdc_data; #endif /* * AC97 * reset_pin is not connected: NRST */ static struct ac97c_platform_data ek_ac97_data = { }; /* * LEDs */ static struct gpio_led ek_leds[] = { { /* "bottom" led, green, userled1 to be defined */ .name = "ds1", .gpio = AT91_PIN_PD15, .active_low = 1, .default_trigger = "none", }, { /* "bottom" led, green, userled2 to be defined */ .name = "ds2", .gpio = AT91_PIN_PD16, .active_low = 1, .default_trigger = "none", }, { /* "power" led, yellow */ .name = "ds3", .gpio = AT91_PIN_PD14, .default_trigger = "heartbeat", } }; /* * Touchscreen */ static struct at91_tsadcc_data ek_tsadcc_data = { .adc_clock = 1000000, .pendet_debounce = 0x0f, .ts_sample_hold_time = 0x03, }; /* * GPIO Buttons */ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) static struct gpio_keys_button ek_buttons[] = { { .gpio = AT91_PIN_PB0, .code = BTN_2, .desc = "Right Click", .active_low = 1, .wakeup = 1, }, { .gpio = AT91_PIN_PB1, .code = BTN_1, .desc = "Left Click", .active_low = 1, .wakeup = 1, } }; static struct gpio_keys_platform_data ek_button_data = { .buttons = ek_buttons, .nbuttons = ARRAY_SIZE(ek_buttons), }; static struct platform_device ek_button_device = { .name = "gpio-keys", .id = -1, .num_resources = 0, .dev = { 
.platform_data = &ek_button_data, } }; static void __init ek_add_device_buttons(void) { at91_set_gpio_input(AT91_PIN_PB1, 1); /* btn1 */ at91_set_deglitch(AT91_PIN_PB1, 1); at91_set_gpio_input(AT91_PIN_PB0, 1); /* btn2 */ at91_set_deglitch(AT91_PIN_PB0, 1); platform_device_register(&ek_button_device); } #else static void __init ek_add_device_buttons(void) {} #endif static void __init ek_board_init(void) { /* Serial */ at91_add_device_serial(); /* USB HS */ at91_add_device_usba(&ek_usba_udc_data); /* I2C */ at91_add_device_i2c(NULL, 0); /* NAND */ ek_add_device_nand(); /* SPI */ at91_add_device_spi(ek_spi_devices, ARRAY_SIZE(ek_spi_devices)); /* MMC */ at91_add_device_mmc(0, &ek_mmc_data); /* LCD Controller */ at91_add_device_lcdc(&ek_lcdc_data); /* AC97 */ at91_add_device_ac97(&ek_ac97_data); /* Touch Screen Controller */ at91_add_device_tsadcc(&ek_tsadcc_data); /* LEDs */ at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds)); /* Push Buttons */ ek_add_device_buttons(); } MACHINE_START(AT91SAM9RLEK, "Atmel AT91SAM9RL-EK") /* Maintainer: Atmel */ .phys_io = AT91_BASE_SYS, .io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc, .boot_params = AT91_SDRAM_BASE + 0x100, .timer = &at91sam926x_timer, .map_io = ek_map_io, .init_irq = ek_init_irq, .init_machine = ek_board_init, MACHINE_END
gpl-2.0
blackbox87/zte_skate_gb_kernel
drivers/media/video/cx23885/cimax2.c
950
12737
/* * cimax2.c * * CIMax2(R) SP2 driver in conjunction with NetUp Dual DVB-S2 CI card * * Copyright (C) 2009 NetUP Inc. * Copyright (C) 2009 Igor M. Liplianin <liplianin@netup.ru> * Copyright (C) 2009 Abylay Ospan <aospan@netup.ru> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "cx23885.h" #include "dvb_ca_en50221.h" /**** Bit definitions for MC417_RWD and MC417_OEN registers *** bits 31-16 +-----------+ | Reserved | +-----------+ bit 15 bit 14 bit 13 bit 12 bit 11 bit 10 bit 9 bit 8 +-------+-------+-------+-------+-------+-------+-------+-------+ | WR# | RD# | | ACK# | ADHI | ADLO | CS1# | CS0# | +-------+-------+-------+-------+-------+-------+-------+-------+ bit 7 bit 6 bit 5 bit 4 bit 3 bit 2 bit 1 bit 0 +-------+-------+-------+-------+-------+-------+-------+-------+ | DATA7| DATA6| DATA5| DATA4| DATA3| DATA2| DATA1| DATA0| +-------+-------+-------+-------+-------+-------+-------+-------+ ***/ /* MC417 */ #define NETUP_DATA 0x000000ff #define NETUP_WR 0x00008000 #define NETUP_RD 0x00004000 #define NETUP_ACK 0x00001000 #define NETUP_ADHI 0x00000800 #define NETUP_ADLO 0x00000400 #define NETUP_CS1 0x00000200 #define NETUP_CS0 0x00000100 #define NETUP_EN_ALL 0x00001000 #define NETUP_CTRL_OFF (NETUP_CS1 | NETUP_CS0 | NETUP_WR | NETUP_RD) #define NETUP_CI_CTL 0x04 #define NETUP_CI_RD 1 #define NETUP_IRQ_DETAM 0x1 #define 
NETUP_IRQ_IRQAM 0x4 static unsigned int ci_dbg; module_param(ci_dbg, int, 0644); MODULE_PARM_DESC(ci_dbg, "Enable CI debugging"); static unsigned int ci_irq_enable; module_param(ci_irq_enable, int, 0644); MODULE_PARM_DESC(ci_irq_enable, "Enable IRQ from CAM"); #define ci_dbg_print(args...) \ do { \ if (ci_dbg) \ printk(KERN_DEBUG args); \ } while (0) #define ci_irq_flags() (ci_irq_enable ? NETUP_IRQ_IRQAM : 0) /* stores all private variables for communication with CI */ struct netup_ci_state { struct dvb_ca_en50221 ca; struct mutex ca_mutex; struct i2c_adapter *i2c_adap; u8 ci_i2c_addr; int status; struct work_struct work; void *priv; u8 current_irq_mode; int current_ci_flag; unsigned long next_status_checked_time; }; int netup_read_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg, u8 *buf, int len) { int ret; struct i2c_msg msg[] = { { .addr = addr, .flags = 0, .buf = &reg, .len = 1 }, { .addr = addr, .flags = I2C_M_RD, .buf = buf, .len = len } }; ret = i2c_transfer(i2c_adap, msg, 2); if (ret != 2) { ci_dbg_print("%s: i2c read error, Reg = 0x%02x, Status = %d\n", __func__, reg, ret); return -1; } ci_dbg_print("%s: i2c read Addr=0x%04x, Reg = 0x%02x, data = %02x\n", __func__, addr, reg, buf[0]); return 0; } int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg, u8 *buf, int len) { int ret; u8 buffer[len + 1]; struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = &buffer[0], .len = len + 1 }; buffer[0] = reg; memcpy(&buffer[1], buf, len); ret = i2c_transfer(i2c_adap, &msg, 1); if (ret != 1) { ci_dbg_print("%s: i2c write error, Reg=[0x%02x], Status=%d\n", __func__, reg, ret); return -1; } return 0; } int netup_ci_get_mem(struct cx23885_dev *dev) { int mem; unsigned long timeout = jiffies + msecs_to_jiffies(1); for (;;) { mem = cx_read(MC417_RWD); if ((mem & NETUP_ACK) == 0) break; if (time_after(jiffies, timeout)) break; udelay(1); } cx_set(MC417_RWD, NETUP_CTRL_OFF); return mem & 0xff; } int netup_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot, u8 
flag, u8 read, int addr, u8 data) { struct netup_ci_state *state = en50221->data; struct cx23885_tsport *port = state->priv; struct cx23885_dev *dev = port->dev; u8 store; int mem; int ret; if (0 != slot) return -EINVAL; if (state->current_ci_flag != flag) { ret = netup_read_i2c(state->i2c_adap, state->ci_i2c_addr, 0, &store, 1); if (ret != 0) return ret; store &= ~0x0c; store |= flag; ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr, 0, &store, 1); if (ret != 0) return ret; }; state->current_ci_flag = flag; mutex_lock(&dev->gpio_lock); /* write addr */ cx_write(MC417_OEN, NETUP_EN_ALL); cx_write(MC417_RWD, NETUP_CTRL_OFF | NETUP_ADLO | (0xff & addr)); cx_clear(MC417_RWD, NETUP_ADLO); cx_write(MC417_RWD, NETUP_CTRL_OFF | NETUP_ADHI | (0xff & (addr >> 8))); cx_clear(MC417_RWD, NETUP_ADHI); if (read) { /* data in */ cx_write(MC417_OEN, NETUP_EN_ALL | NETUP_DATA); } else /* data out */ cx_write(MC417_RWD, NETUP_CTRL_OFF | data); /* choose chip */ cx_clear(MC417_RWD, (state->ci_i2c_addr == 0x40) ? NETUP_CS0 : NETUP_CS1); /* read/write */ cx_clear(MC417_RWD, (read) ? NETUP_RD : NETUP_WR); mem = netup_ci_get_mem(dev); mutex_unlock(&dev->gpio_lock); if (!read) if (mem < 0) return -EREMOTEIO; ci_dbg_print("%s: %s: chipaddr=[0x%x] addr=[0x%02x], %s=%x\n", __func__, (read) ? "read" : "write", state->ci_i2c_addr, addr, (flag == NETUP_CI_CTL) ? "ctl" : "mem", (read) ? 
mem : data); if (read) return mem; return 0; } int netup_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221, int slot, int addr) { return netup_ci_op_cam(en50221, slot, 0, NETUP_CI_RD, addr, 0); } int netup_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221, int slot, int addr, u8 data) { return netup_ci_op_cam(en50221, slot, 0, 0, addr, data); } int netup_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr) { return netup_ci_op_cam(en50221, slot, NETUP_CI_CTL, NETUP_CI_RD, addr, 0); } int netup_ci_write_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr, u8 data) { return netup_ci_op_cam(en50221, slot, NETUP_CI_CTL, 0, addr, data); } int netup_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot) { struct netup_ci_state *state = en50221->data; u8 buf = 0x80; int ret; if (0 != slot) return -EINVAL; udelay(500); ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr, 0, &buf, 1); if (ret != 0) return ret; udelay(500); buf = 0x00; ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr, 0, &buf, 1); msleep(1000); dvb_ca_en50221_camready_irq(&state->ca, 0); return 0; } int netup_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot) { /* not implemented */ return 0; } int netup_ci_set_irq(struct dvb_ca_en50221 *en50221, u8 irq_mode) { struct netup_ci_state *state = en50221->data; int ret; if (irq_mode == state->current_irq_mode) return 0; ci_dbg_print("%s: chipaddr=[0x%x] setting ci IRQ to [0x%x] \n", __func__, state->ci_i2c_addr, irq_mode); ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr, 0x1b, &irq_mode, 1); if (ret != 0) return ret; state->current_irq_mode = irq_mode; return 0; } int netup_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221, int slot) { struct netup_ci_state *state = en50221->data; u8 buf; if (0 != slot) return -EINVAL; netup_read_i2c(state->i2c_adap, state->ci_i2c_addr, 0, &buf, 1); buf |= 0x60; return netup_write_i2c(state->i2c_adap, state->ci_i2c_addr, 0, &buf, 1); } /* work handler */ static void 
netup_read_ci_status(struct work_struct *work) { struct netup_ci_state *state = container_of(work, struct netup_ci_state, work); u8 buf[33]; int ret; /* CAM module IRQ processing. fast operation */ dvb_ca_en50221_frda_irq(&state->ca, 0); /* CAM module INSERT/REMOVE processing. slow operation because of i2c * transfers */ if (time_after(jiffies, state->next_status_checked_time) || !state->status) { ret = netup_read_i2c(state->i2c_adap, state->ci_i2c_addr, 0, &buf[0], 33); state->next_status_checked_time = jiffies + msecs_to_jiffies(1000); if (ret != 0) return; ci_dbg_print("%s: Slot Status Addr=[0x%04x], " "Reg=[0x%02x], data=%02x, " "TS config = %02x\n", __func__, state->ci_i2c_addr, 0, buf[0], buf[0]); if (buf[0] & 1) state->status = DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY; else state->status = 0; }; } /* CI irq handler */ int netup_ci_slot_status(struct cx23885_dev *dev, u32 pci_status) { struct cx23885_tsport *port = NULL; struct netup_ci_state *state = NULL; if (pci_status & PCI_MSK_GPIO0) port = &dev->ts1; else if (pci_status & PCI_MSK_GPIO1) port = &dev->ts2; else /* who calls ? */ return 0; state = port->port_priv; schedule_work(&state->work); return 1; } int netup_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot, int open) { struct netup_ci_state *state = en50221->data; if (0 != slot) return -EINVAL; netup_ci_set_irq(en50221, open ? 
(NETUP_IRQ_DETAM | ci_irq_flags()) : NETUP_IRQ_DETAM); return state->status; } int netup_ci_init(struct cx23885_tsport *port) { struct netup_ci_state *state; u8 cimax_init[34] = { 0x00, /* module A control*/ 0x00, /* auto select mask high A */ 0x00, /* auto select mask low A */ 0x00, /* auto select pattern high A */ 0x00, /* auto select pattern low A */ 0x44, /* memory access time A */ 0x00, /* invert input A */ 0x00, /* RFU */ 0x00, /* RFU */ 0x00, /* module B control*/ 0x00, /* auto select mask high B */ 0x00, /* auto select mask low B */ 0x00, /* auto select pattern high B */ 0x00, /* auto select pattern low B */ 0x44, /* memory access time B */ 0x00, /* invert input B */ 0x00, /* RFU */ 0x00, /* RFU */ 0x00, /* auto select mask high Ext */ 0x00, /* auto select mask low Ext */ 0x00, /* auto select pattern high Ext */ 0x00, /* auto select pattern low Ext */ 0x00, /* RFU */ 0x02, /* destination - module A */ 0x01, /* power on (use it like store place) */ 0x00, /* RFU */ 0x00, /* int status read only */ ci_irq_flags() | NETUP_IRQ_DETAM, /* DETAM, IRQAM unmasked */ 0x05, /* EXTINT=active-high, INT=push-pull */ 0x00, /* USCG1 */ 0x04, /* ack active low */ 0x00, /* LOCK = 0 */ 0x33, /* serial mode, rising in, rising out, MSB first*/ 0x31, /* syncronization */ }; int ret; ci_dbg_print("%s\n", __func__); state = kzalloc(sizeof(struct netup_ci_state), GFP_KERNEL); if (!state) { ci_dbg_print("%s: Unable create CI structure!\n", __func__); ret = -ENOMEM; goto err; } port->port_priv = state; switch (port->nr) { case 1: state->ci_i2c_addr = 0x40; break; case 2: state->ci_i2c_addr = 0x41; break; } state->i2c_adap = &port->dev->i2c_bus[0].i2c_adap; state->ca.owner = THIS_MODULE; state->ca.read_attribute_mem = netup_ci_read_attribute_mem; state->ca.write_attribute_mem = netup_ci_write_attribute_mem; state->ca.read_cam_control = netup_ci_read_cam_ctl; state->ca.write_cam_control = netup_ci_write_cam_ctl; state->ca.slot_reset = netup_ci_slot_reset; state->ca.slot_shutdown = 
netup_ci_slot_shutdown; state->ca.slot_ts_enable = netup_ci_slot_ts_ctl; state->ca.poll_slot_status = netup_poll_ci_slot_status; state->ca.data = state; state->priv = port; state->current_irq_mode = ci_irq_flags() | NETUP_IRQ_DETAM; ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr, 0, &cimax_init[0], 34); /* lock registers */ ret |= netup_write_i2c(state->i2c_adap, state->ci_i2c_addr, 0x1f, &cimax_init[0x18], 1); /* power on slots */ ret |= netup_write_i2c(state->i2c_adap, state->ci_i2c_addr, 0x18, &cimax_init[0x18], 1); if (0 != ret) goto err; ret = dvb_ca_en50221_init(&port->frontends.adapter, &state->ca, /* flags */ 0, /* n_slots */ 1); if (0 != ret) goto err; INIT_WORK(&state->work, netup_read_ci_status); schedule_work(&state->work); ci_dbg_print("%s: CI initialized!\n", __func__); return 0; err: ci_dbg_print("%s: Cannot initialize CI: Error %d.\n", __func__, ret); kfree(state); return ret; } void netup_ci_exit(struct cx23885_tsport *port) { struct netup_ci_state *state; if (NULL == port) return; state = (struct netup_ci_state *)port->port_priv; if (NULL == state) return; if (NULL == state->ca.data) return; dvb_ca_en50221_release(&state->ca); kfree(state); }
gpl-2.0
Droid-Concepts/DC-Elite_kernel_jf
drivers/rpmsg/virtio_rpmsg_bus.c
1974
31503
/* * Virtio-based remote processor messaging bus * * Copyright (C) 2011 Texas Instruments, Inc. * Copyright (C) 2011 Google, Inc. * * Ohad Ben-Cohen <ohad@wizery.com> * Brian Swetland <swetland@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/kernel.h> #include <linux/module.h> #include <linux/virtio.h> #include <linux/virtio_ids.h> #include <linux/virtio_config.h> #include <linux/scatterlist.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/idr.h> #include <linux/jiffies.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/rpmsg.h> #include <linux/mutex.h> /** * struct virtproc_info - virtual remote processor state * @vdev: the virtio device * @rvq: rx virtqueue * @svq: tx virtqueue * @rbufs: kernel address of rx buffers * @sbufs: kernel address of tx buffers * @last_sbuf: index of last tx buffer used * @bufs_dma: dma base addr of the buffers * @tx_lock: protects svq, sbufs and sleepers, to allow concurrent senders. * sending a message might require waking up a dozing remote * processor, which involves sleeping, hence the mutex. 
* @endpoints: idr of local endpoints, allows fast retrieval * @endpoints_lock: lock of the endpoints set * @sendq: wait queue of sending contexts waiting for a tx buffers * @sleepers: number of senders that are waiting for a tx buffer * @ns_ept: the bus's name service endpoint * * This structure stores the rpmsg state of a given virtio remote processor * device (there might be several virtio proc devices for each physical * remote processor). */ struct virtproc_info { struct virtio_device *vdev; struct virtqueue *rvq, *svq; void *rbufs, *sbufs; int last_sbuf; dma_addr_t bufs_dma; struct mutex tx_lock; struct idr endpoints; struct mutex endpoints_lock; wait_queue_head_t sendq; atomic_t sleepers; struct rpmsg_endpoint *ns_ept; }; /** * struct rpmsg_channel_info - internal channel info representation * @name: name of service * @src: local address * @dst: destination address */ struct rpmsg_channel_info { char name[RPMSG_NAME_SIZE]; u32 src; u32 dst; }; #define to_rpmsg_channel(d) container_of(d, struct rpmsg_channel, dev) #define to_rpmsg_driver(d) container_of(d, struct rpmsg_driver, drv) /* * We're allocating 512 buffers of 512 bytes for communications, and then * using the first 256 buffers for RX, and the last 256 buffers for TX. * * Each buffer will have 16 bytes for the msg header and 496 bytes for * the payload. * * This will require a total space of 256KB for the buffers. * * We might also want to add support for user-provided buffers in time. * This will allow bigger buffer size flexibility, and can also be used * to achieve zero-copy messaging. * * Note that these numbers are purely a decision of this driver - we * can change this without changing anything in the firmware of the remote * processor. */ #define RPMSG_NUM_BUFS (512) #define RPMSG_BUF_SIZE (512) #define RPMSG_TOTAL_BUF_SPACE (RPMSG_NUM_BUFS * RPMSG_BUF_SIZE) /* * Local addresses are dynamically allocated on-demand. 
* We do not dynamically assign addresses from the low 1024 range, * in order to reserve that address range for predefined services. */ #define RPMSG_RESERVED_ADDRESSES (1024) /* Address 53 is reserved for advertising remote services */ #define RPMSG_NS_ADDR (53) /* sysfs show configuration fields */ #define rpmsg_show_attr(field, path, format_string) \ static ssize_t \ field##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct rpmsg_channel *rpdev = to_rpmsg_channel(dev); \ \ return sprintf(buf, format_string, rpdev->path); \ } /* for more info, see Documentation/ABI/testing/sysfs-bus-rpmsg */ rpmsg_show_attr(name, id.name, "%s\n"); rpmsg_show_attr(src, src, "0x%x\n"); rpmsg_show_attr(dst, dst, "0x%x\n"); rpmsg_show_attr(announce, announce ? "true" : "false", "%s\n"); /* * Unique (and free running) index for rpmsg devices. * * Yeah, we're not recycling those numbers (yet?). will be easy * to change if/when we want to. */ static unsigned int rpmsg_dev_index; static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rpmsg_channel *rpdev = to_rpmsg_channel(dev); return sprintf(buf, RPMSG_DEVICE_MODALIAS_FMT "\n", rpdev->id.name); } static struct device_attribute rpmsg_dev_attrs[] = { __ATTR_RO(name), __ATTR_RO(modalias), __ATTR_RO(dst), __ATTR_RO(src), __ATTR_RO(announce), __ATTR_NULL }; /* rpmsg devices and drivers are matched using the service name */ static inline int rpmsg_id_match(const struct rpmsg_channel *rpdev, const struct rpmsg_device_id *id) { return strncmp(id->name, rpdev->id.name, RPMSG_NAME_SIZE) == 0; } /* match rpmsg channel and rpmsg driver */ static int rpmsg_dev_match(struct device *dev, struct device_driver *drv) { struct rpmsg_channel *rpdev = to_rpmsg_channel(dev); struct rpmsg_driver *rpdrv = to_rpmsg_driver(drv); const struct rpmsg_device_id *ids = rpdrv->id_table; unsigned int i; for (i = 0; ids[i].name[0]; i++) if (rpmsg_id_match(rpdev, &ids[i])) return 1; return 0; } 
static int rpmsg_uevent(struct device *dev, struct kobj_uevent_env *env) { struct rpmsg_channel *rpdev = to_rpmsg_channel(dev); return add_uevent_var(env, "MODALIAS=" RPMSG_DEVICE_MODALIAS_FMT, rpdev->id.name); } /** * __ept_release() - deallocate an rpmsg endpoint * @kref: the ept's reference count * * This function deallocates an ept, and is invoked when its @kref refcount * drops to zero. * * Never invoke this function directly! */ static void __ept_release(struct kref *kref) { struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint, refcount); /* * At this point no one holds a reference to ept anymore, * so we can directly free it */ kfree(ept); } /* for more info, see below documentation of rpmsg_create_ept() */ static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp, struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb, void *priv, u32 addr) { int err, tmpaddr, request; struct rpmsg_endpoint *ept; struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev; if (!idr_pre_get(&vrp->endpoints, GFP_KERNEL)) return NULL; ept = kzalloc(sizeof(*ept), GFP_KERNEL); if (!ept) { dev_err(dev, "failed to kzalloc a new ept\n"); return NULL; } kref_init(&ept->refcount); mutex_init(&ept->cb_lock); ept->rpdev = rpdev; ept->cb = cb; ept->priv = priv; /* do we need to allocate a local address ? */ request = addr == RPMSG_ADDR_ANY ? 
RPMSG_RESERVED_ADDRESSES : addr; mutex_lock(&vrp->endpoints_lock); /* bind the endpoint to an rpmsg address (and allocate one if needed) */ err = idr_get_new_above(&vrp->endpoints, ept, request, &tmpaddr); if (err) { dev_err(dev, "idr_get_new_above failed: %d\n", err); goto free_ept; } /* make sure the user's address request is fulfilled, if relevant */ if (addr != RPMSG_ADDR_ANY && tmpaddr != addr) { dev_err(dev, "address 0x%x already in use\n", addr); goto rem_idr; } ept->addr = tmpaddr; mutex_unlock(&vrp->endpoints_lock); return ept; rem_idr: idr_remove(&vrp->endpoints, request); free_ept: mutex_unlock(&vrp->endpoints_lock); kref_put(&ept->refcount, __ept_release); return NULL; } /** * rpmsg_create_ept() - create a new rpmsg_endpoint * @rpdev: rpmsg channel device * @cb: rx callback handler * @priv: private data for the driver's use * @addr: local rpmsg address to bind with @cb * * Every rpmsg address in the system is bound to an rx callback (so when * inbound messages arrive, they are dispatched by the rpmsg bus using the * appropriate callback handler) by means of an rpmsg_endpoint struct. * * This function allows drivers to create such an endpoint, and by that, * bind a callback, and possibly some private data too, to an rpmsg address * (either one that is known in advance, or one that will be dynamically * assigned for them). * * Simple rpmsg drivers need not call rpmsg_create_ept, because an endpoint * is already created for them when they are probed by the rpmsg bus * (using the rx callback provided when they registered to the rpmsg bus). * * So things should just work for simple drivers: they already have an * endpoint, their rx callback is bound to their rpmsg address, and when * relevant inbound messages arrive (i.e. messages which their dst address * equals to the src address of their rpmsg channel), the driver's handler * is invoked to process it. 
* * That said, more complicated drivers might do need to allocate * additional rpmsg addresses, and bind them to different rx callbacks. * To accomplish that, those drivers need to call this function. * * Drivers should provide their @rpdev channel (so the new endpoint would belong * to the same remote processor their channel belongs to), an rx callback * function, an optional private data (which is provided back when the * rx callback is invoked), and an address they want to bind with the * callback. If @addr is RPMSG_ADDR_ANY, then rpmsg_create_ept will * dynamically assign them an available rpmsg address (drivers should have * a very good reason why not to always use RPMSG_ADDR_ANY here). * * Returns a pointer to the endpoint on success, or NULL on error. */ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb, void *priv, u32 addr) { return __rpmsg_create_ept(rpdev->vrp, rpdev, cb, priv, addr); } EXPORT_SYMBOL(rpmsg_create_ept); /** * __rpmsg_destroy_ept() - destroy an existing rpmsg endpoint * @vrp: virtproc which owns this ept * @ept: endpoing to destroy * * An internal function which destroy an ept without assuming it is * bound to an rpmsg channel. This is needed for handling the internal * name service endpoint, which isn't bound to an rpmsg channel. * See also __rpmsg_create_ept(). */ static void __rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept) { /* make sure new inbound messages can't find this ept anymore */ mutex_lock(&vrp->endpoints_lock); idr_remove(&vrp->endpoints, ept->addr); mutex_unlock(&vrp->endpoints_lock); /* make sure in-flight inbound messages won't invoke cb anymore */ mutex_lock(&ept->cb_lock); ept->cb = NULL; mutex_unlock(&ept->cb_lock); kref_put(&ept->refcount, __ept_release); } /** * rpmsg_destroy_ept() - destroy an existing rpmsg endpoint * @ept: endpoing to destroy * * Should be used by drivers to destroy an rpmsg endpoint previously * created with rpmsg_create_ept(). 
*/ void rpmsg_destroy_ept(struct rpmsg_endpoint *ept) { __rpmsg_destroy_ept(ept->rpdev->vrp, ept); } EXPORT_SYMBOL(rpmsg_destroy_ept); /* * when an rpmsg driver is probed with a channel, we seamlessly create * it an endpoint, binding its rx callback to a unique local rpmsg * address. * * if we need to, we also announce about this channel to the remote * processor (needed in case the driver is exposing an rpmsg service). */ static int rpmsg_dev_probe(struct device *dev) { struct rpmsg_channel *rpdev = to_rpmsg_channel(dev); struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver); struct virtproc_info *vrp = rpdev->vrp; struct rpmsg_endpoint *ept; int err; ept = rpmsg_create_ept(rpdev, rpdrv->callback, NULL, rpdev->src); if (!ept) { dev_err(dev, "failed to create endpoint\n"); err = -ENOMEM; goto out; } rpdev->ept = ept; rpdev->src = ept->addr; err = rpdrv->probe(rpdev); if (err) { dev_err(dev, "%s: failed: %d\n", __func__, err); rpmsg_destroy_ept(ept); goto out; } /* need to tell remote processor's name service about this channel ? 
*/ if (rpdev->announce && virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) { struct rpmsg_ns_msg nsm; strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE); nsm.addr = rpdev->src; nsm.flags = RPMSG_NS_CREATE; err = rpmsg_sendto(rpdev, &nsm, sizeof(nsm), RPMSG_NS_ADDR); if (err) dev_err(dev, "failed to announce service %d\n", err); } out: return err; } static int rpmsg_dev_remove(struct device *dev) { struct rpmsg_channel *rpdev = to_rpmsg_channel(dev); struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver); struct virtproc_info *vrp = rpdev->vrp; int err = 0; /* tell remote processor's name service we're removing this channel */ if (rpdev->announce && virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) { struct rpmsg_ns_msg nsm; strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE); nsm.addr = rpdev->src; nsm.flags = RPMSG_NS_DESTROY; err = rpmsg_sendto(rpdev, &nsm, sizeof(nsm), RPMSG_NS_ADDR); if (err) dev_err(dev, "failed to announce service %d\n", err); } rpdrv->remove(rpdev); rpmsg_destroy_ept(rpdev->ept); return err; } static struct bus_type rpmsg_bus = { .name = "rpmsg", .match = rpmsg_dev_match, .dev_attrs = rpmsg_dev_attrs, .uevent = rpmsg_uevent, .probe = rpmsg_dev_probe, .remove = rpmsg_dev_remove, }; /** * register_rpmsg_driver() - register an rpmsg driver with the rpmsg bus * @rpdrv: pointer to a struct rpmsg_driver * * Returns 0 on success, and an appropriate error value on failure. */ int register_rpmsg_driver(struct rpmsg_driver *rpdrv) { rpdrv->drv.bus = &rpmsg_bus; return driver_register(&rpdrv->drv); } EXPORT_SYMBOL(register_rpmsg_driver); /** * unregister_rpmsg_driver() - unregister an rpmsg driver from the rpmsg bus * @rpdrv: pointer to a struct rpmsg_driver * * Returns 0 on success, and an appropriate error value on failure. 
*/ void unregister_rpmsg_driver(struct rpmsg_driver *rpdrv) { driver_unregister(&rpdrv->drv); } EXPORT_SYMBOL(unregister_rpmsg_driver); static void rpmsg_release_device(struct device *dev) { struct rpmsg_channel *rpdev = to_rpmsg_channel(dev); kfree(rpdev); } /* * match an rpmsg channel with a channel info struct. * this is used to make sure we're not creating rpmsg devices for channels * that already exist. */ static int rpmsg_channel_match(struct device *dev, void *data) { struct rpmsg_channel_info *chinfo = data; struct rpmsg_channel *rpdev = to_rpmsg_channel(dev); if (chinfo->src != RPMSG_ADDR_ANY && chinfo->src != rpdev->src) return 0; if (chinfo->dst != RPMSG_ADDR_ANY && chinfo->dst != rpdev->dst) return 0; if (strncmp(chinfo->name, rpdev->id.name, RPMSG_NAME_SIZE)) return 0; /* found a match ! */ return 1; } /* * create an rpmsg channel using its name and address info. * this function will be used to create both static and dynamic * channels. */ static struct rpmsg_channel *rpmsg_create_channel(struct virtproc_info *vrp, struct rpmsg_channel_info *chinfo) { struct rpmsg_channel *rpdev; struct device *tmp, *dev = &vrp->vdev->dev; int ret; /* make sure a similar channel doesn't already exist */ tmp = device_find_child(dev, chinfo, rpmsg_channel_match); if (tmp) { /* decrement the matched device's refcount back */ put_device(tmp); dev_err(dev, "channel %s:%x:%x already exist\n", chinfo->name, chinfo->src, chinfo->dst); return NULL; } rpdev = kzalloc(sizeof(struct rpmsg_channel), GFP_KERNEL); if (!rpdev) { pr_err("kzalloc failed\n"); return NULL; } rpdev->vrp = vrp; rpdev->src = chinfo->src; rpdev->dst = chinfo->dst; /* * rpmsg server channels has predefined local address (for now), * and their existence needs to be announced remotely */ rpdev->announce = rpdev->src != RPMSG_ADDR_ANY ? 
true : false; strncpy(rpdev->id.name, chinfo->name, RPMSG_NAME_SIZE); /* very simple device indexing plumbing which is enough for now */ dev_set_name(&rpdev->dev, "rpmsg%d", rpmsg_dev_index++); rpdev->dev.parent = &vrp->vdev->dev; rpdev->dev.bus = &rpmsg_bus; rpdev->dev.release = rpmsg_release_device; ret = device_register(&rpdev->dev); if (ret) { dev_err(dev, "device_register failed: %d\n", ret); put_device(&rpdev->dev); return NULL; } return rpdev; } /* * find an existing channel using its name + address properties, * and destroy it */ static int rpmsg_destroy_channel(struct virtproc_info *vrp, struct rpmsg_channel_info *chinfo) { struct virtio_device *vdev = vrp->vdev; struct device *dev; dev = device_find_child(&vdev->dev, chinfo, rpmsg_channel_match); if (!dev) return -EINVAL; device_unregister(dev); put_device(dev); return 0; } /* super simple buffer "allocator" that is just enough for now */ static void *get_a_tx_buf(struct virtproc_info *vrp) { unsigned int len; void *ret; /* support multiple concurrent senders */ mutex_lock(&vrp->tx_lock); /* * either pick the next unused tx buffer * (half of our buffers are used for sending messages) */ if (vrp->last_sbuf < RPMSG_NUM_BUFS / 2) ret = vrp->sbufs + RPMSG_BUF_SIZE * vrp->last_sbuf++; /* or recycle a used one */ else ret = virtqueue_get_buf(vrp->svq, &len); mutex_unlock(&vrp->tx_lock); return ret; } /** * rpmsg_upref_sleepers() - enable "tx-complete" interrupts, if needed * @vrp: virtual remote processor state * * This function is called before a sender is blocked, waiting for * a tx buffer to become available. * * If we already have blocking senders, this function merely increases * the "sleepers" reference count, and exits. * * Otherwise, if this is the first sender to block, we also enable * virtio's tx callbacks, so we'd be immediately notified when a tx * buffer is consumed (we rely on virtio's tx callback in order * to wake up sleeping senders as soon as a tx buffer is used by the * remote processor). 
*/ static void rpmsg_upref_sleepers(struct virtproc_info *vrp) { /* support multiple concurrent senders */ mutex_lock(&vrp->tx_lock); /* are we the first sleeping context waiting for tx buffers ? */ if (atomic_inc_return(&vrp->sleepers) == 1) /* enable "tx-complete" interrupts before dozing off */ virtqueue_enable_cb(vrp->svq); mutex_unlock(&vrp->tx_lock); } /** * rpmsg_downref_sleepers() - disable "tx-complete" interrupts, if needed * @vrp: virtual remote processor state * * This function is called after a sender, that waited for a tx buffer * to become available, is unblocked. * * If we still have blocking senders, this function merely decreases * the "sleepers" reference count, and exits. * * Otherwise, if there are no more blocking senders, we also disable * virtio's tx callbacks, to avoid the overhead incurred with handling * those (now redundant) interrupts. */ static void rpmsg_downref_sleepers(struct virtproc_info *vrp) { /* support multiple concurrent senders */ mutex_lock(&vrp->tx_lock); /* are we the last sleeping context waiting for tx buffers ? */ if (atomic_dec_and_test(&vrp->sleepers)) /* disable "tx-complete" interrupts */ virtqueue_disable_cb(vrp->svq); mutex_unlock(&vrp->tx_lock); } /** * rpmsg_send_offchannel_raw() - send a message across to the remote processor * @rpdev: the rpmsg channel * @src: source address * @dst: destination address * @data: payload of message * @len: length of payload * @wait: indicates whether caller should block in case no TX buffers available * * This function is the base implementation for all of the rpmsg sending API. * * It will send @data of length @len to @dst, and say it's from @src. The * message will be sent to the remote processor which the @rpdev channel * belongs to. * * The message is sent using one of the TX buffers that are available for * communication with this remote processor. 
* * If @wait is true, the caller will be blocked until either a TX buffer is * available, or 15 seconds elapses (we don't want callers to * sleep indefinitely due to misbehaving remote processors), and in that * case -ERESTARTSYS is returned. The number '15' itself was picked * arbitrarily; there's little point in asking drivers to provide a timeout * value themselves. * * Otherwise, if @wait is false, and there are no TX buffers available, * the function will immediately fail, and -ENOMEM will be returned. * * Normally drivers shouldn't use this function directly; instead, drivers * should use the appropriate rpmsg_{try}send{to, _offchannel} API * (see include/linux/rpmsg.h). * * Returns 0 on success and an appropriate error value on failure. */ int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst, void *data, int len, bool wait) { struct virtproc_info *vrp = rpdev->vrp; struct device *dev = &rpdev->dev; struct scatterlist sg; struct rpmsg_hdr *msg; int err; /* bcasting isn't allowed */ if (src == RPMSG_ADDR_ANY || dst == RPMSG_ADDR_ANY) { dev_err(dev, "invalid addr (src 0x%x, dst 0x%x)\n", src, dst); return -EINVAL; } /* * We currently use fixed-sized buffers, and therefore the payload * length is limited. * * One of the possible improvements here is either to support * user-provided buffers (and then we can also support zero-copy * messaging), or to improve the buffer allocator, to support * variable-length buffer sizes. */ if (len > RPMSG_BUF_SIZE - sizeof(struct rpmsg_hdr)) { dev_err(dev, "message is too big (%d)\n", len); return -EMSGSIZE; } /* grab a buffer */ msg = get_a_tx_buf(vrp); if (!msg && !wait) return -ENOMEM; /* no free buffer ? wait for one (but bail after 15 seconds) */ while (!msg) { /* enable "tx-complete" interrupts, if not already enabled */ rpmsg_upref_sleepers(vrp); /* * sleep until a free buffer is available or 15 secs elapse. 
* the timeout period is not configurable because there's * little point in asking drivers to specify that. * if later this happens to be required, it'd be easy to add. */ err = wait_event_interruptible_timeout(vrp->sendq, (msg = get_a_tx_buf(vrp)), msecs_to_jiffies(15000)); /* disable "tx-complete" interrupts if we're the last sleeper */ rpmsg_downref_sleepers(vrp); /* timeout ? */ if (!err) { dev_err(dev, "timeout waiting for a tx buffer\n"); return -ERESTARTSYS; } } msg->len = len; msg->flags = 0; msg->src = src; msg->dst = dst; msg->reserved = 0; memcpy(msg->data, data, len); dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Reserved %d\n", msg->src, msg->dst, msg->len, msg->flags, msg->reserved); print_hex_dump(KERN_DEBUG, "rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1, msg, sizeof(*msg) + msg->len, true); sg_init_one(&sg, msg, sizeof(*msg) + len); mutex_lock(&vrp->tx_lock); /* add message to the remote processor's virtqueue */ err = virtqueue_add_buf(vrp->svq, &sg, 1, 0, msg, GFP_KERNEL); if (err < 0) { /* * need to reclaim the buffer here, otherwise it's lost * (memory won't leak, but rpmsg won't use it again for TX). * this will wait for a buffer management overhaul. 
*/ dev_err(dev, "virtqueue_add_buf failed: %d\n", err); goto out; } /* tell the remote processor it has a pending message to read */ virtqueue_kick(vrp->svq); err = 0; out: mutex_unlock(&vrp->tx_lock); return err; } EXPORT_SYMBOL(rpmsg_send_offchannel_raw); /* called when an rx buffer is used, and it's time to digest a message */ static void rpmsg_recv_done(struct virtqueue *rvq) { struct rpmsg_hdr *msg; unsigned int len; struct rpmsg_endpoint *ept; struct scatterlist sg; struct virtproc_info *vrp = rvq->vdev->priv; struct device *dev = &rvq->vdev->dev; int err; msg = virtqueue_get_buf(rvq, &len); if (!msg) { dev_err(dev, "uhm, incoming signal, but no used buffer ?\n"); return; } dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n", msg->src, msg->dst, msg->len, msg->flags, msg->reserved); print_hex_dump(KERN_DEBUG, "rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1, msg, sizeof(*msg) + msg->len, true); /* * We currently use fixed-sized buffers, so trivially sanitize * the reported payload length. 
*/ if (len > RPMSG_BUF_SIZE || msg->len > (len - sizeof(struct rpmsg_hdr))) { dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len); return; } /* use the dst addr to fetch the callback of the appropriate user */ mutex_lock(&vrp->endpoints_lock); ept = idr_find(&vrp->endpoints, msg->dst); /* let's make sure no one deallocates ept while we use it */ if (ept) kref_get(&ept->refcount); mutex_unlock(&vrp->endpoints_lock); if (ept) { /* make sure ept->cb doesn't go away while we use it */ mutex_lock(&ept->cb_lock); if (ept->cb) ept->cb(ept->rpdev, msg->data, msg->len, ept->priv, msg->src); mutex_unlock(&ept->cb_lock); /* farewell, ept, we don't need you anymore */ kref_put(&ept->refcount, __ept_release); } else dev_warn(dev, "msg received with no recepient\n"); /* publish the real size of the buffer */ sg_init_one(&sg, msg, RPMSG_BUF_SIZE); /* add the buffer back to the remote processor's virtqueue */ err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, msg, GFP_KERNEL); if (err < 0) { dev_err(dev, "failed to add a virtqueue buffer: %d\n", err); return; } /* tell the remote processor we added another available rx buffer */ virtqueue_kick(vrp->rvq); } /* * This is invoked whenever the remote processor completed processing * a TX msg we just sent it, and the buffer is put back to the used ring. * * Normally, though, we suppress this "tx complete" interrupt in order to * avoid the incurred overhead. 
*/ static void rpmsg_xmit_done(struct virtqueue *svq) { struct virtproc_info *vrp = svq->vdev->priv; dev_dbg(&svq->vdev->dev, "%s\n", __func__); /* wake up potential senders that are waiting for a tx buffer */ wake_up_interruptible(&vrp->sendq); } /* invoked when a name service announcement arrives */ static void rpmsg_ns_cb(struct rpmsg_channel *rpdev, void *data, int len, void *priv, u32 src) { struct rpmsg_ns_msg *msg = data; struct rpmsg_channel *newch; struct rpmsg_channel_info chinfo; struct virtproc_info *vrp = priv; struct device *dev = &vrp->vdev->dev; int ret; print_hex_dump(KERN_DEBUG, "NS announcement: ", DUMP_PREFIX_NONE, 16, 1, data, len, true); if (len != sizeof(*msg)) { dev_err(dev, "malformed ns msg (%d)\n", len); return; } /* * the name service ept does _not_ belong to a real rpmsg channel, * and is handled by the rpmsg bus itself. * for sanity reasons, make sure a valid rpdev has _not_ sneaked * in somehow. */ if (rpdev) { dev_err(dev, "anomaly: ns ept has an rpdev handle\n"); return; } /* don't trust the remote processor for null terminating the name */ msg->name[RPMSG_NAME_SIZE - 1] = '\0'; dev_info(dev, "%sing channel %s addr 0x%x\n", msg->flags & RPMSG_NS_DESTROY ? 
"destroy" : "creat", msg->name, msg->addr); strncpy(chinfo.name, msg->name, sizeof(chinfo.name)); chinfo.src = RPMSG_ADDR_ANY; chinfo.dst = msg->addr; if (msg->flags & RPMSG_NS_DESTROY) { ret = rpmsg_destroy_channel(vrp, &chinfo); if (ret) dev_err(dev, "rpmsg_destroy_channel failed: %d\n", ret); } else { newch = rpmsg_create_channel(vrp, &chinfo); if (!newch) dev_err(dev, "rpmsg_create_channel failed\n"); } } static int rpmsg_probe(struct virtio_device *vdev) { vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done }; const char *names[] = { "input", "output" }; struct virtqueue *vqs[2]; struct virtproc_info *vrp; void *bufs_va; int err = 0, i; vrp = kzalloc(sizeof(*vrp), GFP_KERNEL); if (!vrp) return -ENOMEM; vrp->vdev = vdev; idr_init(&vrp->endpoints); mutex_init(&vrp->endpoints_lock); mutex_init(&vrp->tx_lock); init_waitqueue_head(&vrp->sendq); /* We expect two virtqueues, rx and tx (and in this order) */ err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names); if (err) goto free_vrp; vrp->rvq = vqs[0]; vrp->svq = vqs[1]; /* allocate coherent memory for the buffers */ bufs_va = dma_alloc_coherent(vdev->dev.parent, RPMSG_TOTAL_BUF_SPACE, &vrp->bufs_dma, GFP_KERNEL); if (!bufs_va) goto vqs_del; dev_dbg(&vdev->dev, "buffers: va %p, dma 0x%llx\n", bufs_va, (unsigned long long)vrp->bufs_dma); /* half of the buffers is dedicated for RX */ vrp->rbufs = bufs_va; /* and half is dedicated for TX */ vrp->sbufs = bufs_va + RPMSG_TOTAL_BUF_SPACE / 2; /* set up the receive buffers */ for (i = 0; i < RPMSG_NUM_BUFS / 2; i++) { struct scatterlist sg; void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE; sg_init_one(&sg, cpu_addr, RPMSG_BUF_SIZE); err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, cpu_addr, GFP_KERNEL); WARN_ON(err < 0); /* sanity check; this can't really happen */ } /* suppress "tx-complete" interrupts */ virtqueue_disable_cb(vrp->svq); vdev->priv = vrp; /* if supported by the remote processor, enable the name service */ if (virtio_has_feature(vdev, 
VIRTIO_RPMSG_F_NS)) { /* a dedicated endpoint handles the name service msgs */ vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb, vrp, RPMSG_NS_ADDR); if (!vrp->ns_ept) { dev_err(&vdev->dev, "failed to create the ns ept\n"); err = -ENOMEM; goto free_coherent; } } /* tell the remote processor it can start sending messages */ virtqueue_kick(vrp->rvq); dev_info(&vdev->dev, "rpmsg host is online\n"); return 0; free_coherent: dma_free_coherent(vdev->dev.parent, RPMSG_TOTAL_BUF_SPACE, bufs_va, vrp->bufs_dma); vqs_del: vdev->config->del_vqs(vrp->vdev); free_vrp: kfree(vrp); return err; } static int rpmsg_remove_device(struct device *dev, void *data) { device_unregister(dev); return 0; } static void __devexit rpmsg_remove(struct virtio_device *vdev) { struct virtproc_info *vrp = vdev->priv; int ret; vdev->config->reset(vdev); ret = device_for_each_child(&vdev->dev, NULL, rpmsg_remove_device); if (ret) dev_warn(&vdev->dev, "can't remove rpmsg device: %d\n", ret); if (vrp->ns_ept) __rpmsg_destroy_ept(vrp, vrp->ns_ept); idr_remove_all(&vrp->endpoints); idr_destroy(&vrp->endpoints); vdev->config->del_vqs(vrp->vdev); dma_free_coherent(vdev->dev.parent, RPMSG_TOTAL_BUF_SPACE, vrp->rbufs, vrp->bufs_dma); kfree(vrp); } static struct virtio_device_id id_table[] = { { VIRTIO_ID_RPMSG, VIRTIO_DEV_ANY_ID }, { 0 }, }; static unsigned int features[] = { VIRTIO_RPMSG_F_NS, }; static struct virtio_driver virtio_ipc_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .probe = rpmsg_probe, .remove = __devexit_p(rpmsg_remove), }; static int __init rpmsg_init(void) { int ret; ret = bus_register(&rpmsg_bus); if (ret) { pr_err("failed to register rpmsg bus: %d\n", ret); return ret; } ret = register_virtio_driver(&virtio_ipc_driver); if (ret) { pr_err("failed to register virtio driver: %d\n", ret); bus_unregister(&rpmsg_bus); } return ret; } subsys_initcall(rpmsg_init); static 
void __exit rpmsg_fini(void) { unregister_virtio_driver(&virtio_ipc_driver); bus_unregister(&rpmsg_bus); } module_exit(rpmsg_fini); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio-based remote processor messaging bus"); MODULE_LICENSE("GPL v2");
gpl-2.0
xjljian/android_kernel_huawei_msm8916
drivers/regulator/fixed.c
1974
7555
/* * fixed.c * * Copyright 2008 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * Copyright (c) 2009 Nokia Corporation * Roger Quadros <ext-roger.quadros@nokia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This is useful for systems with mixed controllable and * non-controllable regulators, as well as for allowing testing on * systems with no controllable regulators. */ #include <linux/err.h> #include <linux/mutex.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/fixed.h> #include <linux/gpio.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/regulator/of_regulator.h> #include <linux/regulator/machine.h> struct fixed_voltage_data { struct regulator_desc desc; struct regulator_dev *dev; int microvolts; }; /** * of_get_fixed_voltage_config - extract fixed_voltage_config structure info * @dev: device requesting for fixed_voltage_config * * Populates fixed_voltage_config structure by extracting data from device * tree node, returns a pointer to the populated structure of NULL if memory * alloc fails. 
*/ static struct fixed_voltage_config * of_get_fixed_voltage_config(struct device *dev) { struct fixed_voltage_config *config; struct device_node *np = dev->of_node; const __be32 *delay; struct regulator_init_data *init_data; config = devm_kzalloc(dev, sizeof(struct fixed_voltage_config), GFP_KERNEL); if (!config) return ERR_PTR(-ENOMEM); config->init_data = of_get_regulator_init_data(dev, dev->of_node); if (!config->init_data) return ERR_PTR(-EINVAL); init_data = config->init_data; init_data->constraints.apply_uV = 0; config->supply_name = init_data->constraints.name; if (init_data->constraints.min_uV == init_data->constraints.max_uV) { config->microvolts = init_data->constraints.min_uV; } else { dev_err(dev, "Fixed regulator specified with variable voltages\n"); return ERR_PTR(-EINVAL); } if (init_data->constraints.boot_on) config->enabled_at_boot = true; config->gpio = of_get_named_gpio(np, "gpio", 0); /* * of_get_named_gpio() currently returns ENODEV rather than * EPROBE_DEFER. This code attempts to be compatible with both * for now; the ENODEV check can be removed once the API is fixed. * of_get_named_gpio() doesn't differentiate between a missing * property (which would be fine here, since the GPIO is optional) * and some other error. Patches have been posted for both issues. 
* Once they are check in, we should replace this with: * if (config->gpio < 0 && config->gpio != -ENOENT) */ if ((config->gpio == -ENODEV) || (config->gpio == -EPROBE_DEFER)) return ERR_PTR(-EPROBE_DEFER); delay = of_get_property(np, "startup-delay-us", NULL); if (delay) config->startup_delay = be32_to_cpu(*delay); if (of_find_property(np, "enable-active-high", NULL)) config->enable_high = true; if (of_find_property(np, "gpio-open-drain", NULL)) config->gpio_is_open_drain = true; if (of_find_property(np, "vin-supply", NULL)) config->input_supply = "vin"; return config; } static int fixed_voltage_get_voltage(struct regulator_dev *dev) { struct fixed_voltage_data *data = rdev_get_drvdata(dev); if (data->microvolts) return data->microvolts; else return -EINVAL; } static int fixed_voltage_list_voltage(struct regulator_dev *dev, unsigned selector) { struct fixed_voltage_data *data = rdev_get_drvdata(dev); if (selector != 0) return -EINVAL; return data->microvolts; } static struct regulator_ops fixed_voltage_ops = { .get_voltage = fixed_voltage_get_voltage, .list_voltage = fixed_voltage_list_voltage, }; static int reg_fixed_voltage_probe(struct platform_device *pdev) { struct fixed_voltage_config *config; struct fixed_voltage_data *drvdata; struct regulator_config cfg = { }; int ret; if (pdev->dev.of_node) { config = of_get_fixed_voltage_config(&pdev->dev); if (IS_ERR(config)) return PTR_ERR(config); } else { config = pdev->dev.platform_data; } if (!config) return -ENOMEM; drvdata = devm_kzalloc(&pdev->dev, sizeof(struct fixed_voltage_data), GFP_KERNEL); if (drvdata == NULL) { dev_err(&pdev->dev, "Failed to allocate device data\n"); ret = -ENOMEM; goto err; } drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL); if (drvdata->desc.name == NULL) { dev_err(&pdev->dev, "Failed to allocate supply name\n"); ret = -ENOMEM; goto err; } drvdata->desc.type = REGULATOR_VOLTAGE; drvdata->desc.owner = THIS_MODULE; drvdata->desc.ops = &fixed_voltage_ops; 
drvdata->desc.enable_time = config->startup_delay; if (config->input_supply) { drvdata->desc.supply_name = kstrdup(config->input_supply, GFP_KERNEL); if (!drvdata->desc.supply_name) { dev_err(&pdev->dev, "Failed to allocate input supply\n"); ret = -ENOMEM; goto err_name; } } if (config->microvolts) drvdata->desc.n_voltages = 1; drvdata->microvolts = config->microvolts; if (config->gpio >= 0) cfg.ena_gpio = config->gpio; cfg.ena_gpio_invert = !config->enable_high; if (config->enabled_at_boot) { if (config->enable_high) { cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH; } else { cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW; } } else { if (config->enable_high) { cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW; } else { cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH; } } if (config->gpio_is_open_drain) cfg.ena_gpio_flags |= GPIOF_OPEN_DRAIN; cfg.dev = &pdev->dev; cfg.init_data = config->init_data; cfg.driver_data = drvdata; cfg.of_node = pdev->dev.of_node; drvdata->dev = regulator_register(&drvdata->desc, &cfg); if (IS_ERR(drvdata->dev)) { ret = PTR_ERR(drvdata->dev); dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret); goto err_input; } platform_set_drvdata(pdev, drvdata); dev_dbg(&pdev->dev, "%s supplying %duV\n", drvdata->desc.name, drvdata->microvolts); return 0; err_input: kfree(drvdata->desc.supply_name); err_name: kfree(drvdata->desc.name); err: return ret; } static int reg_fixed_voltage_remove(struct platform_device *pdev) { struct fixed_voltage_data *drvdata = platform_get_drvdata(pdev); regulator_unregister(drvdata->dev); kfree(drvdata->desc.supply_name); kfree(drvdata->desc.name); return 0; } #if defined(CONFIG_OF) static const struct of_device_id fixed_of_match[] = { { .compatible = "regulator-fixed", }, {}, }; MODULE_DEVICE_TABLE(of, fixed_of_match); #endif static struct platform_driver regulator_fixed_voltage_driver = { .probe = reg_fixed_voltage_probe, .remove = reg_fixed_voltage_remove, .driver = { .name = "reg-fixed-voltage", .owner = THIS_MODULE, .of_match_table 
= of_match_ptr(fixed_of_match), }, }; static int __init regulator_fixed_voltage_init(void) { return platform_driver_register(&regulator_fixed_voltage_driver); } subsys_initcall(regulator_fixed_voltage_init); static void __exit regulator_fixed_voltage_exit(void) { platform_driver_unregister(&regulator_fixed_voltage_driver); } module_exit(regulator_fixed_voltage_exit); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_DESCRIPTION("Fixed voltage regulator"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:reg-fixed-voltage");
gpl-2.0
multipath-tcp/mptcp_3.0.x
arch/avr32/boards/mimc200/setup.c
2998
6060
/* * Board-specific setup code for the MIMC200 * * Copyright (C) 2008 Mercury IMC Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ extern struct atmel_lcdfb_info mimc200_lcdc_data; #include <linux/clk.h> #include <linux/etherdevice.h> #include <linux/i2c-gpio.h> #include <linux/init.h> #include <linux/linkage.h> #include <linux/platform_device.h> #include <linux/types.h> #include <linux/leds.h> #include <linux/spi/spi.h> #include <linux/spi/eeprom.h> #include <video/atmel_lcdc.h> #include <linux/fb.h> #include <linux/atmel-mci.h> #include <linux/io.h> #include <asm/setup.h> #include <mach/at32ap700x.h> #include <mach/board.h> #include <mach/init.h> #include <mach/portmux.h> /* Oscillator frequencies. These are board-specific */ unsigned long at32_board_osc_rates[3] = { [0] = 32768, /* 32.768 kHz on RTC osc */ [1] = 10000000, /* 10 MHz on osc0 */ [2] = 12000000, /* 12 MHz on osc1 */ }; /* Initialized by bootloader-specific startup code. 
*/ struct tag *bootloader_tags __initdata; static struct fb_videomode __initdata pt0434827_modes[] = { { .name = "480x272 @ 72", .refresh = 72, .xres = 480, .yres = 272, .pixclock = KHZ2PICOS(10000), .left_margin = 1, .right_margin = 1, .upper_margin = 12, .lower_margin = 1, .hsync_len = 42, .vsync_len = 1, .sync = 0, .vmode = FB_VMODE_NONINTERLACED, }, }; static struct fb_monspecs __initdata mimc200_default_monspecs = { .manufacturer = "PT", .monitor = "PT0434827-A401", .modedb = pt0434827_modes, .modedb_len = ARRAY_SIZE(pt0434827_modes), .hfmin = 14820, .hfmax = 22230, .vfmin = 60, .vfmax = 85, .dclkmax = 25200000, }; struct atmel_lcdfb_info __initdata mimc200_lcdc_data = { .default_bpp = 16, .default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN, .default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT | ATMEL_LCDC_INVCLK | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE | ATMEL_LCDC_MEMOR_BIG), .default_monspecs = &mimc200_default_monspecs, .guard_time = 2, }; struct eth_addr { u8 addr[6]; }; static struct eth_addr __initdata hw_addr[2]; static struct eth_platform_data __initdata eth_data[2]; static struct spi_eeprom eeprom_25lc010 = { .name = "25lc010", .byte_len = 128, .page_size = 16, .flags = EE_ADDR1, }; static struct spi_board_info spi0_board_info[] __initdata = { { .modalias = "rtc-ds1390", .max_speed_hz = 4000000, .chip_select = 2, }, { .modalias = "at25", .max_speed_hz = 1000000, .chip_select = 1, .mode = SPI_MODE_3, .platform_data = &eeprom_25lc010, }, }; static struct mci_platform_data __initdata mci0_data = { .slot[0] = { .bus_width = 4, .detect_pin = GPIO_PIN_PA(26), .wp_pin = GPIO_PIN_PA(27), }, }; /* * The next two functions should go away as the boot loader is * supposed to initialize the macb address registers with a valid * ethernet address. But we need to keep it around for a while until * we can be reasonably sure the boot loader does this. * * The phy_id is ignored as the driver will probe for it. 
*/ static int __init parse_tag_ethernet(struct tag *tag) { int i; i = tag->u.ethernet.mac_index; if (i < ARRAY_SIZE(hw_addr)) memcpy(hw_addr[i].addr, tag->u.ethernet.hw_address, sizeof(hw_addr[i].addr)); return 0; } __tagtable(ATAG_ETHERNET, parse_tag_ethernet); static void __init set_hw_addr(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); const u8 *addr; void __iomem *regs; struct clk *pclk; if (!res) return; if (pdev->id >= ARRAY_SIZE(hw_addr)) return; addr = hw_addr[pdev->id].addr; if (!is_valid_ether_addr(addr)) return; /* * Since this is board-specific code, we'll cheat and use the * physical address directly as we happen to know that it's * the same as the virtual address. */ regs = (void __iomem __force *)res->start; pclk = clk_get(&pdev->dev, "pclk"); if (IS_ERR(pclk)) return; clk_enable(pclk); __raw_writel((addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0], regs + 0x98); __raw_writel((addr[5] << 8) | addr[4], regs + 0x9c); clk_disable(pclk); clk_put(pclk); } void __init setup_board(void) { at32_map_usart(0, 0, 0); /* USART 0: /dev/ttyS0 (TTL --> Altera) */ at32_map_usart(1, 1, 0); /* USART 1: /dev/ttyS1 (RS232) */ at32_map_usart(2, 2, 0); /* USART 2: /dev/ttyS2 (RS485) */ at32_map_usart(3, 3, 0); /* USART 3: /dev/ttyS3 (RS422 Multidrop) */ } static struct i2c_gpio_platform_data i2c_gpio_data = { .sda_pin = GPIO_PIN_PA(6), .scl_pin = GPIO_PIN_PA(7), .sda_is_open_drain = 1, .scl_is_open_drain = 1, .udelay = 2, /* close to 100 kHz */ }; static struct platform_device i2c_gpio_device = { .name = "i2c-gpio", .id = 0, .dev = { .platform_data = &i2c_gpio_data, }, }; static struct i2c_board_info __initdata i2c_info[] = { }; static int __init mimc200_init(void) { /* * MIMC200 uses 16-bit SDRAM interface, so we don't need to * reserve any pins for it. 
*/ at32_add_device_usart(0); at32_add_device_usart(1); at32_add_device_usart(2); at32_add_device_usart(3); set_hw_addr(at32_add_device_eth(0, &eth_data[0])); set_hw_addr(at32_add_device_eth(1, &eth_data[1])); at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info)); at32_add_device_mci(0, &mci0_data); at32_add_device_usba(0, NULL); at32_select_periph(GPIO_PIOB_BASE, 1 << 28, 0, AT32_GPIOF_PULLUP); at32_select_gpio(i2c_gpio_data.sda_pin, AT32_GPIOF_MULTIDRV | AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH); at32_select_gpio(i2c_gpio_data.scl_pin, AT32_GPIOF_MULTIDRV | AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH); platform_device_register(&i2c_gpio_device); i2c_register_board_info(0, i2c_info, ARRAY_SIZE(i2c_info)); at32_add_device_lcdc(0, &mimc200_lcdc_data, fbmem_start, fbmem_size, ATMEL_LCDC_CONTROL | ATMEL_LCDC_ALT_CONTROL | ATMEL_LCDC_ALT_24B_DATA); return 0; } postcore_initcall(mimc200_init);
gpl-2.0
AKKP/lge-kernel-star
drivers/mmc/host/pxamci.c
2998
21119
/* * linux/drivers/mmc/host/pxa.c - PXA MMCI driver * * Copyright (C) 2003 Russell King, All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This hardware is really sick: * - No way to clear interrupts. * - Have to turn off the clock whenever we touch the device. * - Doesn't tell you how many data blocks were transferred. * Yuck! * * 1 and 3 byte data transfers not supported * max block length up to 1023 */ #include <linux/module.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/mmc/host.h> #include <linux/io.h> #include <linux/regulator/consumer.h> #include <linux/gpio.h> #include <linux/gfp.h> #include <asm/sizes.h> #include <mach/hardware.h> #include <mach/dma.h> #include <mach/mmc.h> #include "pxamci.h" #define DRIVER_NAME "pxa2xx-mci" #define NR_SG 1 #define CLKRT_OFF (~0) #define mmc_has_26MHz() (cpu_is_pxa300() || cpu_is_pxa310() \ || cpu_is_pxa935()) struct pxamci_host { struct mmc_host *mmc; spinlock_t lock; struct resource *res; void __iomem *base; struct clk *clk; unsigned long clkrate; int irq; int dma; unsigned int clkrt; unsigned int cmdat; unsigned int imask; unsigned int power_mode; struct pxamci_platform_data *pdata; struct mmc_request *mrq; struct mmc_command *cmd; struct mmc_data *data; dma_addr_t sg_dma; struct pxa_dma_desc *sg_cpu; unsigned int dma_len; unsigned int dma_dir; unsigned int dma_drcmrrx; unsigned int dma_drcmrtx; struct regulator *vcc; }; static inline void pxamci_init_ocr(struct pxamci_host *host) { #ifdef CONFIG_REGULATOR host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc"); if (IS_ERR(host->vcc)) host->vcc = NULL; else { host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc); if 
(host->pdata && host->pdata->ocr_mask) dev_warn(mmc_dev(host->mmc), "ocr_mask/setpower will not be used\n"); } #endif if (host->vcc == NULL) { /* fall-back to platform data */ host->mmc->ocr_avail = host->pdata ? host->pdata->ocr_mask : MMC_VDD_32_33 | MMC_VDD_33_34; } } static inline int pxamci_set_power(struct pxamci_host *host, unsigned char power_mode, unsigned int vdd) { int on; if (host->vcc) { int ret; if (power_mode == MMC_POWER_UP) { ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd); if (ret) return ret; } else if (power_mode == MMC_POWER_OFF) { ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0); if (ret) return ret; } } if (!host->vcc && host->pdata && gpio_is_valid(host->pdata->gpio_power)) { on = ((1 << vdd) & host->pdata->ocr_mask); gpio_set_value(host->pdata->gpio_power, !!on ^ host->pdata->gpio_power_invert); } if (!host->vcc && host->pdata && host->pdata->setpower) host->pdata->setpower(mmc_dev(host->mmc), vdd); return 0; } static void pxamci_stop_clock(struct pxamci_host *host) { if (readl(host->base + MMC_STAT) & STAT_CLK_EN) { unsigned long timeout = 10000; unsigned int v; writel(STOP_CLOCK, host->base + MMC_STRPCL); do { v = readl(host->base + MMC_STAT); if (!(v & STAT_CLK_EN)) break; udelay(1); } while (timeout--); if (v & STAT_CLK_EN) dev_err(mmc_dev(host->mmc), "unable to stop clock\n"); } } static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask) { unsigned long flags; spin_lock_irqsave(&host->lock, flags); host->imask &= ~mask; writel(host->imask, host->base + MMC_I_MASK); spin_unlock_irqrestore(&host->lock, flags); } static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask) { unsigned long flags; spin_lock_irqsave(&host->lock, flags); host->imask |= mask; writel(host->imask, host->base + MMC_I_MASK); spin_unlock_irqrestore(&host->lock, flags); } static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data) { unsigned int nob = data->blocks; unsigned long long clks; unsigned int 
timeout; bool dalgn = 0; u32 dcmd; int i; host->data = data; if (data->flags & MMC_DATA_STREAM) nob = 0xffff; writel(nob, host->base + MMC_NOB); writel(data->blksz, host->base + MMC_BLKLEN); clks = (unsigned long long)data->timeout_ns * host->clkrate; do_div(clks, 1000000000UL); timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt); writel((timeout + 255) / 256, host->base + MMC_RDTO); if (data->flags & MMC_DATA_READ) { host->dma_dir = DMA_FROM_DEVICE; dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; DRCMR(host->dma_drcmrtx) = 0; DRCMR(host->dma_drcmrrx) = host->dma | DRCMR_MAPVLD; } else { host->dma_dir = DMA_TO_DEVICE; dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; DRCMR(host->dma_drcmrrx) = 0; DRCMR(host->dma_drcmrtx) = host->dma | DRCMR_MAPVLD; } dcmd |= DCMD_BURST32 | DCMD_WIDTH1; host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); for (i = 0; i < host->dma_len; i++) { unsigned int length = sg_dma_len(&data->sg[i]); host->sg_cpu[i].dcmd = dcmd | length; if (length & 31 && !(data->flags & MMC_DATA_READ)) host->sg_cpu[i].dcmd |= DCMD_ENDIRQEN; /* Not aligned to 8-byte boundary? */ if (sg_dma_address(&data->sg[i]) & 0x7) dalgn = 1; if (data->flags & MMC_DATA_READ) { host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO; host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]); } else { host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]); host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO; } host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) * sizeof(struct pxa_dma_desc); } host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP; wmb(); /* * The PXA27x DMA controller encounters overhead when working with * unaligned (to 8-byte boundaries) data, so switch on byte alignment * mode only if we have unaligned data. 
*/ if (dalgn) DALGN |= (1 << host->dma); else DALGN &= ~(1 << host->dma); DDADR(host->dma) = host->sg_dma; /* * workaround for erratum #91: * only start DMA now if we are doing a read, * otherwise we wait until CMD/RESP has finished * before starting DMA. */ if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ) DCSR(host->dma) = DCSR_RUN; } static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat) { WARN_ON(host->cmd != NULL); host->cmd = cmd; if (cmd->flags & MMC_RSP_BUSY) cmdat |= CMDAT_BUSY; #define RSP_TYPE(x) ((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE)) switch (RSP_TYPE(mmc_resp_type(cmd))) { case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */ cmdat |= CMDAT_RESP_SHORT; break; case RSP_TYPE(MMC_RSP_R3): cmdat |= CMDAT_RESP_R3; break; case RSP_TYPE(MMC_RSP_R2): cmdat |= CMDAT_RESP_R2; break; default: break; } writel(cmd->opcode, host->base + MMC_CMD); writel(cmd->arg >> 16, host->base + MMC_ARGH); writel(cmd->arg & 0xffff, host->base + MMC_ARGL); writel(cmdat, host->base + MMC_CMDAT); writel(host->clkrt, host->base + MMC_CLKRT); writel(START_CLOCK, host->base + MMC_STRPCL); pxamci_enable_irq(host, END_CMD_RES); } static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq) { host->mrq = NULL; host->cmd = NULL; host->data = NULL; mmc_request_done(host->mmc, mrq); } static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat) { struct mmc_command *cmd = host->cmd; int i; u32 v; if (!cmd) return 0; host->cmd = NULL; /* * Did I mention this is Sick. We always need to * discard the upper 8 bits of the first 16-bit word. 
*/ v = readl(host->base + MMC_RES) & 0xffff; for (i = 0; i < 4; i++) { u32 w1 = readl(host->base + MMC_RES) & 0xffff; u32 w2 = readl(host->base + MMC_RES) & 0xffff; cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8; v = w2; } if (stat & STAT_TIME_OUT_RESPONSE) { cmd->error = -ETIMEDOUT; } else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) { /* * workaround for erratum #42: * Intel PXA27x Family Processor Specification Update Rev 001 * A bogus CRC error can appear if the msb of a 136 bit * response is a one. */ if (cpu_is_pxa27x() && (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000)) pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode); else cmd->error = -EILSEQ; } pxamci_disable_irq(host, END_CMD_RES); if (host->data && !cmd->error) { pxamci_enable_irq(host, DATA_TRAN_DONE); /* * workaround for erratum #91, if doing write * enable DMA late */ if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE) DCSR(host->dma) = DCSR_RUN; } else { pxamci_finish_request(host, host->mrq); } return 1; } static int pxamci_data_done(struct pxamci_host *host, unsigned int stat) { struct mmc_data *data = host->data; if (!data) return 0; DCSR(host->dma) = 0; dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); if (stat & STAT_READ_TIME_OUT) data->error = -ETIMEDOUT; else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR)) data->error = -EILSEQ; /* * There appears to be a hardware design bug here. There seems to * be no way to find out how much data was transferred to the card. * This means that if there was an error on any block, we mark all * data blocks as being in error. 
*/ if (!data->error) data->bytes_xfered = data->blocks * data->blksz; else data->bytes_xfered = 0; pxamci_disable_irq(host, DATA_TRAN_DONE); host->data = NULL; if (host->mrq->stop) { pxamci_stop_clock(host); pxamci_start_cmd(host, host->mrq->stop, host->cmdat); } else { pxamci_finish_request(host, host->mrq); } return 1; } static irqreturn_t pxamci_irq(int irq, void *devid) { struct pxamci_host *host = devid; unsigned int ireg; int handled = 0; ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK); if (ireg) { unsigned stat = readl(host->base + MMC_STAT); pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat); if (ireg & END_CMD_RES) handled |= pxamci_cmd_done(host, stat); if (ireg & DATA_TRAN_DONE) handled |= pxamci_data_done(host, stat); if (ireg & SDIO_INT) { mmc_signal_sdio_irq(host->mmc); handled = 1; } } return IRQ_RETVAL(handled); } static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct pxamci_host *host = mmc_priv(mmc); unsigned int cmdat; WARN_ON(host->mrq != NULL); host->mrq = mrq; pxamci_stop_clock(host); cmdat = host->cmdat; host->cmdat &= ~CMDAT_INIT; if (mrq->data) { pxamci_setup_data(host, mrq->data); cmdat &= ~CMDAT_BUSY; cmdat |= CMDAT_DATAEN | CMDAT_DMAEN; if (mrq->data->flags & MMC_DATA_WRITE) cmdat |= CMDAT_WRITE; if (mrq->data->flags & MMC_DATA_STREAM) cmdat |= CMDAT_STREAM; } pxamci_start_cmd(host, mrq->cmd, cmdat); } static int pxamci_get_ro(struct mmc_host *mmc) { struct pxamci_host *host = mmc_priv(mmc); if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) { if (host->pdata->gpio_card_ro_invert) return !gpio_get_value(host->pdata->gpio_card_ro); else return gpio_get_value(host->pdata->gpio_card_ro); } if (host->pdata && host->pdata->get_ro) return !!host->pdata->get_ro(mmc_dev(mmc)); /* * Board doesn't support read only detection; let the mmc core * decide what to do. 
*/ return -ENOSYS; } static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct pxamci_host *host = mmc_priv(mmc); if (ios->clock) { unsigned long rate = host->clkrate; unsigned int clk = rate / ios->clock; if (host->clkrt == CLKRT_OFF) clk_enable(host->clk); if (ios->clock == 26000000) { /* to support 26MHz */ host->clkrt = 7; } else { /* to handle (19.5MHz, 26MHz) */ if (!clk) clk = 1; /* * clk might result in a lower divisor than we * desire. check for that condition and adjust * as appropriate. */ if (rate / clk > ios->clock) clk <<= 1; host->clkrt = fls(clk) - 1; } /* * we write clkrt on the next command */ } else { pxamci_stop_clock(host); if (host->clkrt != CLKRT_OFF) { host->clkrt = CLKRT_OFF; clk_disable(host->clk); } } if (host->power_mode != ios->power_mode) { int ret; host->power_mode = ios->power_mode; ret = pxamci_set_power(host, ios->power_mode, ios->vdd); if (ret) { dev_err(mmc_dev(mmc), "unable to set power\n"); /* * The .set_ios() function in the mmc_host_ops * struct return void, and failing to set the * power should be rare so we print an error and * return here. 
*/ return; } if (ios->power_mode == MMC_POWER_ON) host->cmdat |= CMDAT_INIT; } if (ios->bus_width == MMC_BUS_WIDTH_4) host->cmdat |= CMDAT_SD_4DAT; else host->cmdat &= ~CMDAT_SD_4DAT; dev_dbg(mmc_dev(mmc), "PXAMCI: clkrt = %x cmdat = %x\n", host->clkrt, host->cmdat); } static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable) { struct pxamci_host *pxa_host = mmc_priv(host); if (enable) pxamci_enable_irq(pxa_host, SDIO_INT); else pxamci_disable_irq(pxa_host, SDIO_INT); } static const struct mmc_host_ops pxamci_ops = { .request = pxamci_request, .get_ro = pxamci_get_ro, .set_ios = pxamci_set_ios, .enable_sdio_irq = pxamci_enable_sdio_irq, }; static void pxamci_dma_irq(int dma, void *devid) { struct pxamci_host *host = devid; int dcsr = DCSR(dma); DCSR(dma) = dcsr & ~DCSR_STOPIRQEN; if (dcsr & DCSR_ENDINTR) { writel(BUF_PART_FULL, host->base + MMC_PRTBUF); } else { printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n", mmc_hostname(host->mmc), dma, dcsr); host->data->error = -EIO; pxamci_data_done(host, 0); } } static irqreturn_t pxamci_detect_irq(int irq, void *devid) { struct pxamci_host *host = mmc_priv(devid); mmc_detect_change(devid, msecs_to_jiffies(host->pdata->detect_delay_ms)); return IRQ_HANDLED; } static int pxamci_probe(struct platform_device *pdev) { struct mmc_host *mmc; struct pxamci_host *host = NULL; struct resource *r, *dmarx, *dmatx; int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!r || irq < 0) return -ENXIO; r = request_mem_region(r->start, SZ_4K, DRIVER_NAME); if (!r) return -EBUSY; mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; goto out; } mmc->ops = &pxamci_ops; /* * We can do SG-DMA, but we don't because we never know how much * data we successfully wrote to the card. */ mmc->max_segs = NR_SG; /* * Our hardware DMA can handle a maximum of one page per SG entry. 
*/ mmc->max_seg_size = PAGE_SIZE; /* * Block length register is only 10 bits before PXA27x. */ mmc->max_blk_size = cpu_is_pxa25x() ? 1023 : 2048; /* * Block count register is 16 bits. */ mmc->max_blk_count = 65535; host = mmc_priv(mmc); host->mmc = mmc; host->dma = -1; host->pdata = pdev->dev.platform_data; host->clkrt = CLKRT_OFF; host->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(host->clk)) { ret = PTR_ERR(host->clk); host->clk = NULL; goto out; } host->clkrate = clk_get_rate(host->clk); /* * Calculate minimum clock rate, rounding up. */ mmc->f_min = (host->clkrate + 63) / 64; mmc->f_max = (mmc_has_26MHz()) ? 26000000 : host->clkrate; pxamci_init_ocr(host); mmc->caps = 0; host->cmdat = 0; if (!cpu_is_pxa25x()) { mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; host->cmdat |= CMDAT_SDIO_INT_EN; if (mmc_has_26MHz()) mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED; } host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL); if (!host->sg_cpu) { ret = -ENOMEM; goto out; } spin_lock_init(&host->lock); host->res = r; host->irq = irq; host->imask = MMC_I_MASK_ALL; host->base = ioremap(r->start, SZ_4K); if (!host->base) { ret = -ENOMEM; goto out; } /* * Ensure that the host controller is shut down, and setup * with our defaults. 
*/ pxamci_stop_clock(host); writel(0, host->base + MMC_SPI); writel(64, host->base + MMC_RESTO); writel(host->imask, host->base + MMC_I_MASK); host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW, pxamci_dma_irq, host); if (host->dma < 0) { ret = -EBUSY; goto out; } ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host); if (ret) goto out; platform_set_drvdata(pdev, mmc); dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!dmarx) { ret = -ENXIO; goto out; } host->dma_drcmrrx = dmarx->start; dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1); if (!dmatx) { ret = -ENXIO; goto out; } host->dma_drcmrtx = dmatx->start; if (host->pdata) { gpio_cd = host->pdata->gpio_card_detect; gpio_ro = host->pdata->gpio_card_ro; gpio_power = host->pdata->gpio_power; } if (gpio_is_valid(gpio_power)) { ret = gpio_request(gpio_power, "mmc card power"); if (ret) { dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power); goto out; } gpio_direction_output(gpio_power, host->pdata->gpio_power_invert); } if (gpio_is_valid(gpio_ro)) { ret = gpio_request(gpio_ro, "mmc card read only"); if (ret) { dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); goto err_gpio_ro; } gpio_direction_input(gpio_ro); } if (gpio_is_valid(gpio_cd)) { ret = gpio_request(gpio_cd, "mmc card detect"); if (ret) { dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd); goto err_gpio_cd; } gpio_direction_input(gpio_cd); ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "mmc card detect", mmc); if (ret) { dev_err(&pdev->dev, "failed to request card detect IRQ\n"); goto err_request_irq; } } if (host->pdata && host->pdata->init) host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc); if (gpio_is_valid(gpio_power) && host->pdata->setpower) dev_warn(&pdev->dev, "gpio_power and setpower() both defined\n"); if (gpio_is_valid(gpio_ro) && host->pdata->get_ro) dev_warn(&pdev->dev, "gpio_ro and get_ro() both defined\n"); 
mmc_add_host(mmc); return 0; err_request_irq: gpio_free(gpio_cd); err_gpio_cd: gpio_free(gpio_ro); err_gpio_ro: gpio_free(gpio_power); out: if (host) { if (host->dma >= 0) pxa_free_dma(host->dma); if (host->base) iounmap(host->base); if (host->sg_cpu) dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); if (host->clk) clk_put(host->clk); } if (mmc) mmc_free_host(mmc); release_resource(r); return ret; } static int pxamci_remove(struct platform_device *pdev) { struct mmc_host *mmc = platform_get_drvdata(pdev); int gpio_cd = -1, gpio_ro = -1, gpio_power = -1; platform_set_drvdata(pdev, NULL); if (mmc) { struct pxamci_host *host = mmc_priv(mmc); mmc_remove_host(mmc); if (host->pdata) { gpio_cd = host->pdata->gpio_card_detect; gpio_ro = host->pdata->gpio_card_ro; gpio_power = host->pdata->gpio_power; } if (gpio_is_valid(gpio_cd)) { free_irq(gpio_to_irq(gpio_cd), mmc); gpio_free(gpio_cd); } if (gpio_is_valid(gpio_ro)) gpio_free(gpio_ro); if (gpio_is_valid(gpio_power)) gpio_free(gpio_power); if (host->vcc) regulator_put(host->vcc); if (host->pdata && host->pdata->exit) host->pdata->exit(&pdev->dev, mmc); pxamci_stop_clock(host); writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD| END_CMD_RES|PRG_DONE|DATA_TRAN_DONE, host->base + MMC_I_MASK); DRCMR(host->dma_drcmrrx) = 0; DRCMR(host->dma_drcmrtx) = 0; free_irq(host->irq, host); pxa_free_dma(host->dma); iounmap(host->base); dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); clk_put(host->clk); release_resource(host->res); mmc_free_host(mmc); } return 0; } #ifdef CONFIG_PM static int pxamci_suspend(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); int ret = 0; if (mmc) ret = mmc_suspend_host(mmc); return ret; } static int pxamci_resume(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); int ret = 0; if (mmc) ret = mmc_resume_host(mmc); return ret; } static const struct dev_pm_ops pxamci_pm_ops = { .suspend = pxamci_suspend, .resume = pxamci_resume, }; 
#endif static struct platform_driver pxamci_driver = { .probe = pxamci_probe, .remove = pxamci_remove, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &pxamci_pm_ops, #endif }, }; static int __init pxamci_init(void) { return platform_driver_register(&pxamci_driver); } static void __exit pxamci_exit(void) { platform_driver_unregister(&pxamci_driver); } module_init(pxamci_init); module_exit(pxamci_exit); MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pxa2xx-mci");
gpl-2.0
SlimRoms/kernel_htc_msm8960
fs/proc/root.c
4022
5747
/* * linux/fs/proc/root.c * * Copyright (C) 1991, 1992 Linus Torvalds * * proc root directory handling functions */ #include <asm/uaccess.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/mount.h> #include <linux/pid_namespace.h> #include <linux/parser.h> #include "internal.h" static int proc_test_super(struct super_block *sb, void *data) { return sb->s_fs_info == data; } static int proc_set_super(struct super_block *sb, void *data) { int err = set_anon_super(sb, NULL); if (!err) { struct pid_namespace *ns = (struct pid_namespace *)data; sb->s_fs_info = get_pid_ns(ns); } return err; } enum { Opt_gid, Opt_hidepid, Opt_err, }; static const match_table_t tokens = { {Opt_hidepid, "hidepid=%u"}, {Opt_gid, "gid=%u"}, {Opt_err, NULL}, }; static int proc_parse_options(char *options, struct pid_namespace *pid) { char *p; substring_t args[MAX_OPT_ARGS]; int option; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { int token; if (!*p) continue; args[0].to = args[0].from = 0; token = match_token(p, tokens, args); switch (token) { case Opt_gid: if (match_int(&args[0], &option)) return 0; pid->pid_gid = option; break; case Opt_hidepid: if (match_int(&args[0], &option)) return 0; if (option < 0 || option > 2) { pr_err("proc: hidepid value must be between 0 and 2.\n"); return 0; } pid->hide_pid = option; break; default: pr_err("proc: unrecognized mount option \"%s\" " "or missing value\n", p); return 0; } } return 1; } int proc_remount(struct super_block *sb, int *flags, char *data) { struct pid_namespace *pid = sb->s_fs_info; return !proc_parse_options(data, pid); } static struct dentry *proc_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { int err; struct super_block *sb; struct pid_namespace *ns; struct proc_inode *ei; char *options; if (flags & 
MS_KERNMOUNT) { ns = (struct pid_namespace *)data; options = NULL; } else { ns = current->nsproxy->pid_ns; options = data; } sb = sget(fs_type, proc_test_super, proc_set_super, ns); if (IS_ERR(sb)) return ERR_CAST(sb); if (!proc_parse_options(options, ns)) { deactivate_locked_super(sb); return ERR_PTR(-EINVAL); } if (!sb->s_root) { sb->s_flags = flags; err = proc_fill_super(sb); if (err) { deactivate_locked_super(sb); return ERR_PTR(err); } sb->s_flags |= MS_ACTIVE; } ei = PROC_I(sb->s_root->d_inode); if (!ei->pid) { rcu_read_lock(); ei->pid = get_pid(find_pid_ns(1, ns)); rcu_read_unlock(); } return dget(sb->s_root); } static void proc_kill_sb(struct super_block *sb) { struct pid_namespace *ns; ns = (struct pid_namespace *)sb->s_fs_info; kill_anon_super(sb); put_pid_ns(ns); } static struct file_system_type proc_fs_type = { .name = "proc", .mount = proc_mount, .kill_sb = proc_kill_sb, }; void __init proc_root_init(void) { int err; proc_init_inodecache(); err = register_filesystem(&proc_fs_type); if (err) return; err = pid_ns_prepare_proc(&init_pid_ns); if (err) { unregister_filesystem(&proc_fs_type); return; } proc_symlink("mounts", NULL, "self/mounts"); proc_net_init(); #ifdef CONFIG_SYSVIPC proc_mkdir("sysvipc", NULL); #endif proc_mkdir("fs", NULL); proc_mkdir("driver", NULL); proc_mkdir("fs/nfsd", NULL); /* somewhere for the nfsd filesystem to be mounted */ #if defined(CONFIG_SUN_OPENPROMFS) || defined(CONFIG_SUN_OPENPROMFS_MODULE) /* just give it a mountpoint */ proc_mkdir("openprom", NULL); #endif proc_tty_init(); #ifdef CONFIG_PROC_DEVICETREE proc_device_tree_init(); #endif proc_mkdir("bus", NULL); proc_sys_init(); } static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat ) { generic_fillattr(dentry->d_inode, stat); stat->nlink = proc_root.nlink + nr_processes(); return 0; } static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd) { if (!proc_lookup(dir, dentry, nd)) { return 
NULL; } return proc_pid_lookup(dir, dentry, nd); } static int proc_root_readdir(struct file * filp, void * dirent, filldir_t filldir) { unsigned int nr = filp->f_pos; int ret; if (nr < FIRST_PROCESS_ENTRY) { int error = proc_readdir(filp, dirent, filldir); if (error <= 0) return error; filp->f_pos = FIRST_PROCESS_ENTRY; } ret = proc_pid_readdir(filp, dirent, filldir); return ret; } /* * The root /proc directory is special, as it has the * <pid> directories. Thus we don't use the generic * directory handling functions for that.. */ static const struct file_operations proc_root_operations = { .read = generic_read_dir, .readdir = proc_root_readdir, .llseek = default_llseek, }; /* * proc root can do almost nothing.. */ static const struct inode_operations proc_root_inode_operations = { .lookup = proc_root_lookup, .getattr = proc_root_getattr, }; /* * This is the root "inode" in the /proc tree.. */ struct proc_dir_entry proc_root = { .low_ino = PROC_ROOT_INO, .namelen = 5, .mode = S_IFDIR | S_IRUGO | S_IXUGO, .nlink = 2, .count = ATOMIC_INIT(1), .proc_iops = &proc_root_inode_operations, .proc_fops = &proc_root_operations, .parent = &proc_root, .name = "/proc", }; int pid_ns_prepare_proc(struct pid_namespace *ns) { struct vfsmount *mnt; mnt = kern_mount_data(&proc_fs_type, ns); if (IS_ERR(mnt)) return PTR_ERR(mnt); ns->proc_mnt = mnt; return 0; } void pid_ns_release_proc(struct pid_namespace *ns) { kern_unmount(ns->proc_mnt); }
gpl-2.0
fnoji/android_kernel_htc_impj
fs/gfs2/glops.c
5046
15491
/* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License version 2. */ #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/gfs2_ondisk.h> #include <linux/bio.h> #include <linux/posix_acl.h> #include "gfs2.h" #include "incore.h" #include "bmap.h" #include "glock.h" #include "glops.h" #include "inode.h" #include "log.h" #include "meta_io.h" #include "recovery.h" #include "rgrp.h" #include "util.h" #include "trans.h" #include "dir.h" static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh) { fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n", bh, (unsigned long long)bh->b_blocknr, bh->b_state, bh->b_page->mapping, bh->b_page->flags); fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n", gl->gl_name.ln_type, gl->gl_name.ln_number, gfs2_glock2aspace(gl)); gfs2_lm_withdraw(gl->gl_sbd, "AIL error\n"); } /** * __gfs2_ail_flush - remove all buffers for a given lock from the AIL * @gl: the glock * @fsync: set when called from fsync (not all buffers will be clean) * * None of the buffers should be dirty, locked, or pinned. 
*/ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) { struct gfs2_sbd *sdp = gl->gl_sbd; struct list_head *head = &gl->gl_ail_list; struct gfs2_bufdata *bd, *tmp; struct buffer_head *bh; const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock); sector_t blocknr; gfs2_log_lock(sdp); spin_lock(&sdp->sd_ail_lock); list_for_each_entry_safe(bd, tmp, head, bd_ail_gl_list) { bh = bd->bd_bh; if (bh->b_state & b_state) { if (fsync) continue; gfs2_ail_error(gl, bh); } blocknr = bh->b_blocknr; bh->b_private = NULL; gfs2_remove_from_ail(bd); /* drops ref on bh */ bd->bd_bh = NULL; bd->bd_blkno = blocknr; gfs2_trans_add_revoke(sdp, bd); } BUG_ON(!fsync && atomic_read(&gl->gl_ail_count)); spin_unlock(&sdp->sd_ail_lock); gfs2_log_unlock(sdp); } static void gfs2_ail_empty_gl(struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_sbd; struct gfs2_trans tr; memset(&tr, 0, sizeof(tr)); tr.tr_revokes = atomic_read(&gl->gl_ail_count); if (!tr.tr_revokes) return; /* A shortened, inline version of gfs2_trans_begin() */ tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64)); tr.tr_ip = (unsigned long)__builtin_return_address(0); INIT_LIST_HEAD(&tr.tr_list_buf); gfs2_log_reserve(sdp, tr.tr_reserved); BUG_ON(current->journal_info); current->journal_info = &tr; __gfs2_ail_flush(gl, 0); gfs2_trans_end(sdp); gfs2_log_flush(sdp, NULL); } void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) { struct gfs2_sbd *sdp = gl->gl_sbd; unsigned int revokes = atomic_read(&gl->gl_ail_count); int ret; if (!revokes) return; ret = gfs2_trans_begin(sdp, 0, revokes); if (ret) return; __gfs2_ail_flush(gl, fsync); gfs2_trans_end(sdp); gfs2_log_flush(sdp, NULL); } /** * rgrp_go_sync - sync out the metadata for this glock * @gl: the glock * * Called when demoting or unlocking an EX glock. We must flush * to disk all dirty buffers/pages relating to this glock, and must not * not return to caller to demote/unlock the glock until I/O is complete. 
*/ static void rgrp_go_sync(struct gfs2_glock *gl) { struct address_space *metamapping = gfs2_glock2aspace(gl); struct gfs2_rgrpd *rgd; int error; if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) return; BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE); gfs2_log_flush(gl->gl_sbd, gl); filemap_fdatawrite(metamapping); error = filemap_fdatawait(metamapping); mapping_set_error(metamapping, error); gfs2_ail_empty_gl(gl); spin_lock(&gl->gl_spin); rgd = gl->gl_object; if (rgd) gfs2_free_clones(rgd); spin_unlock(&gl->gl_spin); } /** * rgrp_go_inval - invalidate the metadata for this glock * @gl: the glock * @flags: * * We never used LM_ST_DEFERRED with resource groups, so that we * should always see the metadata flag set here. * */ static void rgrp_go_inval(struct gfs2_glock *gl, int flags) { struct address_space *mapping = gfs2_glock2aspace(gl); BUG_ON(!(flags & DIO_METADATA)); gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count)); truncate_inode_pages(mapping, 0); if (gl->gl_object) { struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object; rgd->rd_flags &= ~GFS2_RDF_UPTODATE; } } /** * inode_go_sync - Sync the dirty data and/or metadata for an inode glock * @gl: the glock protecting the inode * */ static void inode_go_sync(struct gfs2_glock *gl) { struct gfs2_inode *ip = gl->gl_object; struct address_space *metamapping = gfs2_glock2aspace(gl); int error; if (ip && !S_ISREG(ip->i_inode.i_mode)) ip = NULL; if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags)) unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0); if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) return; BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE); gfs2_log_flush(gl->gl_sbd, gl); filemap_fdatawrite(metamapping); if (ip) { struct address_space *mapping = ip->i_inode.i_mapping; filemap_fdatawrite(mapping); error = filemap_fdatawait(mapping); mapping_set_error(mapping, error); } error = filemap_fdatawait(metamapping); mapping_set_error(metamapping, error); gfs2_ail_empty_gl(gl); /* * Writeback of 
the data mapping may cause the dirty flag to be set * so we have to clear it again here. */ smp_mb__before_clear_bit(); clear_bit(GLF_DIRTY, &gl->gl_flags); } /** * inode_go_inval - prepare a inode glock to be released * @gl: the glock * @flags: * * Normally we invlidate everything, but if we are moving into * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we * can keep hold of the metadata, since it won't have changed. * */ static void inode_go_inval(struct gfs2_glock *gl, int flags) { struct gfs2_inode *ip = gl->gl_object; gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count)); if (flags & DIO_METADATA) { struct address_space *mapping = gfs2_glock2aspace(gl); truncate_inode_pages(mapping, 0); if (ip) { set_bit(GIF_INVALID, &ip->i_flags); forget_all_cached_acls(&ip->i_inode); gfs2_dir_hash_inval(ip); } } if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) { gfs2_log_flush(gl->gl_sbd, NULL); gl->gl_sbd->sd_rindex_uptodate = 0; } if (ip && S_ISREG(ip->i_inode.i_mode)) truncate_inode_pages(ip->i_inode.i_mapping, 0); } /** * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock * @gl: the glock * * Returns: 1 if it's ok */ static int inode_go_demote_ok(const struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_sbd; struct gfs2_holder *gh; if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object) return 0; if (!list_empty(&gl->gl_holders)) { gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); if (gh->gh_list.next != &gl->gl_holders) return 0; } return 1; } /** * gfs2_set_nlink - Set the inode's link count based on on-disk info * @inode: The inode in question * @nlink: The link count * * If the link count has hit zero, it must never be raised, whatever the * on-disk inode might say. When new struct inodes are created the link * count is set to 1, so that we can safely use this test even when reading * in on disk information for the first time. 
*/ static void gfs2_set_nlink(struct inode *inode, u32 nlink) { /* * We will need to review setting the nlink count here in the * light of the forthcoming ro bind mount work. This is a reminder * to do that. */ if ((inode->i_nlink != nlink) && (inode->i_nlink != 0)) { if (nlink == 0) clear_nlink(inode); else set_nlink(inode, nlink); } } static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) { const struct gfs2_dinode *str = buf; struct timespec atime; u16 height, depth; if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) goto corrupt; ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino); ip->i_inode.i_mode = be32_to_cpu(str->di_mode); ip->i_inode.i_rdev = 0; switch (ip->i_inode.i_mode & S_IFMT) { case S_IFBLK: case S_IFCHR: ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major), be32_to_cpu(str->di_minor)); break; }; ip->i_inode.i_uid = be32_to_cpu(str->di_uid); ip->i_inode.i_gid = be32_to_cpu(str->di_gid); gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink)); i_size_write(&ip->i_inode, be64_to_cpu(str->di_size)); gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks)); atime.tv_sec = be64_to_cpu(str->di_atime); atime.tv_nsec = be32_to_cpu(str->di_atime_nsec); if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0) ip->i_inode.i_atime = atime; ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime); ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec); ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime); ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec); ip->i_goal = be64_to_cpu(str->di_goal_meta); ip->i_generation = be64_to_cpu(str->di_generation); ip->i_diskflags = be32_to_cpu(str->di_flags); ip->i_eattr = be64_to_cpu(str->di_eattr); /* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */ gfs2_set_inode_flags(&ip->i_inode); height = be16_to_cpu(str->di_height); if (unlikely(height > GFS2_MAX_META_HEIGHT)) goto corrupt; ip->i_height = (u8)height; depth = 
be16_to_cpu(str->di_depth); if (unlikely(depth > GFS2_DIR_MAX_DEPTH)) goto corrupt; ip->i_depth = (u8)depth; ip->i_entries = be32_to_cpu(str->di_entries); if (S_ISREG(ip->i_inode.i_mode)) gfs2_set_aops(&ip->i_inode); return 0; corrupt: gfs2_consist_inode(ip); return -EIO; } /** * gfs2_inode_refresh - Refresh the incore copy of the dinode * @ip: The GFS2 inode * * Returns: errno */ int gfs2_inode_refresh(struct gfs2_inode *ip) { struct buffer_head *dibh; int error; error = gfs2_meta_inode_buffer(ip, &dibh); if (error) return error; if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) { brelse(dibh); return -EIO; } error = gfs2_dinode_in(ip, dibh->b_data); brelse(dibh); clear_bit(GIF_INVALID, &ip->i_flags); return error; } /** * inode_go_lock - operation done after an inode lock is locked by a process * @gl: the glock * @flags: * * Returns: errno */ static int inode_go_lock(struct gfs2_holder *gh) { struct gfs2_glock *gl = gh->gh_gl; struct gfs2_sbd *sdp = gl->gl_sbd; struct gfs2_inode *ip = gl->gl_object; int error = 0; if (!ip || (gh->gh_flags & GL_SKIP)) return 0; if (test_bit(GIF_INVALID, &ip->i_flags)) { error = gfs2_inode_refresh(ip); if (error) return error; } if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) && (gl->gl_state == LM_ST_EXCLUSIVE) && (gh->gh_state == LM_ST_EXCLUSIVE)) { spin_lock(&sdp->sd_trunc_lock); if (list_empty(&ip->i_trunc_list)) list_add(&sdp->sd_trunc_list, &ip->i_trunc_list); spin_unlock(&sdp->sd_trunc_lock); wake_up(&sdp->sd_quota_wait); return 1; } return error; } /** * inode_go_dump - print information about an inode * @seq: The iterator * @ip: the inode * * Returns: 0 on success, -ENOBUFS when we run out of space */ static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl) { const struct gfs2_inode *ip = gl->gl_object; if (ip == NULL) return 0; gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n", (unsigned long long)ip->i_no_formal_ino, (unsigned long long)ip->i_no_addr, 
IF2DT(ip->i_inode.i_mode), ip->i_flags, (unsigned int)ip->i_diskflags, (unsigned long long)i_size_read(&ip->i_inode)); return 0; } /** * trans_go_sync - promote/demote the transaction glock * @gl: the glock * @state: the requested state * @flags: * */ static void trans_go_sync(struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_sbd; if (gl->gl_state != LM_ST_UNLOCKED && test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { gfs2_meta_syncfs(sdp); gfs2_log_shutdown(sdp); } } /** * trans_go_xmote_bh - After promoting/demoting the transaction glock * @gl: the glock * */ static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh) { struct gfs2_sbd *sdp = gl->gl_sbd; struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); struct gfs2_glock *j_gl = ip->i_gl; struct gfs2_log_header_host head; int error; if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { j_gl->gl_ops->go_inval(j_gl, DIO_METADATA); error = gfs2_find_jhead(sdp->sd_jdesc, &head); if (error) gfs2_consist(sdp); if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) gfs2_consist(sdp); /* Initialize some head of the log stuff */ if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) { sdp->sd_log_sequence = head.lh_sequence + 1; gfs2_log_pointers_init(sdp, head.lh_blkno); } } return 0; } /** * trans_go_demote_ok * @gl: the glock * * Always returns 0 */ static int trans_go_demote_ok(const struct gfs2_glock *gl) { return 0; } /** * iopen_go_callback - schedule the dcache entry for the inode to be deleted * @gl: the glock * * gl_spin lock is held while calling this */ static void iopen_go_callback(struct gfs2_glock *gl) { struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object; struct gfs2_sbd *sdp = gl->gl_sbd; if (sdp->sd_vfs->s_flags & MS_RDONLY) return; if (gl->gl_demote_state == LM_ST_UNLOCKED && gl->gl_state == LM_ST_SHARED && ip) { gfs2_glock_hold(gl); if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0) gfs2_glock_put_nolock(gl); } } const struct gfs2_glock_operations gfs2_meta_glops = { .go_type = 
LM_TYPE_META, }; const struct gfs2_glock_operations gfs2_inode_glops = { .go_xmote_th = inode_go_sync, .go_inval = inode_go_inval, .go_demote_ok = inode_go_demote_ok, .go_lock = inode_go_lock, .go_dump = inode_go_dump, .go_type = LM_TYPE_INODE, .go_flags = GLOF_ASPACE, }; const struct gfs2_glock_operations gfs2_rgrp_glops = { .go_xmote_th = rgrp_go_sync, .go_inval = rgrp_go_inval, .go_lock = gfs2_rgrp_go_lock, .go_unlock = gfs2_rgrp_go_unlock, .go_dump = gfs2_rgrp_dump, .go_type = LM_TYPE_RGRP, .go_flags = GLOF_ASPACE, }; const struct gfs2_glock_operations gfs2_trans_glops = { .go_xmote_th = trans_go_sync, .go_xmote_bh = trans_go_xmote_bh, .go_demote_ok = trans_go_demote_ok, .go_type = LM_TYPE_NONDISK, }; const struct gfs2_glock_operations gfs2_iopen_glops = { .go_type = LM_TYPE_IOPEN, .go_callback = iopen_go_callback, }; const struct gfs2_glock_operations gfs2_flock_glops = { .go_type = LM_TYPE_FLOCK, }; const struct gfs2_glock_operations gfs2_nondisk_glops = { .go_type = LM_TYPE_NONDISK, }; const struct gfs2_glock_operations gfs2_quota_glops = { .go_type = LM_TYPE_QUOTA, }; const struct gfs2_glock_operations gfs2_journal_glops = { .go_type = LM_TYPE_JOURNAL, }; const struct gfs2_glock_operations *gfs2_glops_list[] = { [LM_TYPE_META] = &gfs2_meta_glops, [LM_TYPE_INODE] = &gfs2_inode_glops, [LM_TYPE_RGRP] = &gfs2_rgrp_glops, [LM_TYPE_IOPEN] = &gfs2_iopen_glops, [LM_TYPE_FLOCK] = &gfs2_flock_glops, [LM_TYPE_NONDISK] = &gfs2_nondisk_glops, [LM_TYPE_QUOTA] = &gfs2_quota_glops, [LM_TYPE_JOURNAL] = &gfs2_journal_glops, };
gpl-2.0