repo_name: string
path: string
copies: string
size: string
content: string
license: string
HighwindONE/android_kernel_lge_msm8226
drivers/media/dvb/mantis/hopper_vp3028.c
11278
2297
/* Hopper VP-3028 driver Copyright (C) Manu Abraham (abraham.manu@gmail.com) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/signal.h> #include <linux/sched.h> #include <linux/interrupt.h> #include "dmxdev.h" #include "dvbdev.h" #include "dvb_demux.h" #include "dvb_frontend.h" #include "dvb_net.h" #include "zl10353.h" #include "mantis_common.h" #include "mantis_ioc.h" #include "mantis_dvb.h" #include "hopper_vp3028.h" struct zl10353_config hopper_vp3028_config = { .demod_address = 0x0f, }; #define MANTIS_MODEL_NAME "VP-3028" #define MANTIS_DEV_TYPE "DVB-T" static int vp3028_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe) { struct i2c_adapter *adapter = &mantis->adapter; struct mantis_hwconfig *config = mantis->hwconfig; int err = 0; mantis_gpio_set_bits(mantis, config->reset, 0); msleep(100); err = mantis_frontend_power(mantis, POWER_ON); msleep(100); mantis_gpio_set_bits(mantis, config->reset, 1); err = mantis_frontend_power(mantis, POWER_ON); if (err == 0) { msleep(250); dprintk(MANTIS_ERROR, 1, "Probing for 10353 (DVB-T)"); fe = dvb_attach(zl10353_attach, &hopper_vp3028_config, adapter); if (!fe) return -1; } else { dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>", adapter->name, err); return -EIO; } dprintk(MANTIS_ERROR, 1, "Done!"); return 0; } struct mantis_hwconfig vp3028_config = { .model_name = MANTIS_MODEL_NAME, .dev_type = MANTIS_DEV_TYPE, .ts_size = MANTIS_TS_188, .baud_rate = MANTIS_BAUD_9600, .parity = MANTIS_PARITY_NONE, .bytes = 0, .frontend_init = vp3028_frontend_init, .power = GPIF_A00, .reset = GPIF_A03, };
gpl-2.0
finnq/android_kernel_lge_g3
net/irda/irlan/irlan_filter.c
11278
6478
/********************************************************************* * * Filename: irlan_filter.c * Version: * Description: * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Fri Jan 29 11:16:38 1999 * Modified at: Sat Oct 30 12:58:45 1999 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ #include <linux/skbuff.h> #include <linux/random.h> #include <linux/seq_file.h> #include <net/irda/irlan_common.h> #include <net/irda/irlan_filter.h> /* * Function irlan_filter_request (self, skb) * * Handle filter request from client peer device * */ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) { IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); if ((self->provider.filter_type == IRLAN_DIRECTED) && (self->provider.filter_operation == DYNAMIC)) { IRDA_DEBUG(0, "Giving peer a dynamic Ethernet address\n"); self->provider.mac_address[0] = 0x40; self->provider.mac_address[1] = 0x00; self->provider.mac_address[2] = 0x00; self->provider.mac_address[3] = 0x00; /* Use arbitration value to generate MAC address */ if (self->provider.access_type == ACCESS_PEER) { self->provider.mac_address[4] = self->provider.send_arb_val & 0xff; self->provider.mac_address[5] = (self->provider.send_arb_val >> 8) & 0xff; } else { /* Just generate something for now */ get_random_bytes(self->provider.mac_address+4, 1); get_random_bytes(self->provider.mac_address+5, 1); } skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x03; irlan_insert_string_param(skb, "FILTER_MODE", "NONE"); irlan_insert_short_param(skb, "MAX_ENTRY", 0x0001); irlan_insert_array_param(skb, "FILTER_ENTRY", self->provider.mac_address, 6); return; } if ((self->provider.filter_type == IRLAN_DIRECTED) && (self->provider.filter_mode == FILTER)) { IRDA_DEBUG(0, "Directed filter on\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; } if ((self->provider.filter_type == IRLAN_DIRECTED) && (self->provider.filter_mode == NONE)) { IRDA_DEBUG(0, "Directed filter off\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; } if ((self->provider.filter_type == IRLAN_BROADCAST) && (self->provider.filter_mode == FILTER)) { IRDA_DEBUG(0, "Broadcast filter on\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; } if ((self->provider.filter_type == IRLAN_BROADCAST) && (self->provider.filter_mode == NONE)) { IRDA_DEBUG(0, "Broadcast filter off\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; } if ((self->provider.filter_type == IRLAN_MULTICAST) && (self->provider.filter_mode == FILTER)) { IRDA_DEBUG(0, "Multicast filter on\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; } if ((self->provider.filter_type == IRLAN_MULTICAST) && (self->provider.filter_mode == NONE)) { IRDA_DEBUG(0, "Multicast filter off\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; } if ((self->provider.filter_type == IRLAN_MULTICAST) && (self->provider.filter_operation == GET)) { 
IRDA_DEBUG(0, "Multicast filter get\n"); skb->data[0] = 0x00; /* Success? */ skb->data[1] = 0x02; irlan_insert_string_param(skb, "FILTER_MODE", "NONE"); irlan_insert_short_param(skb, "MAX_ENTRY", 16); return; } skb->data[0] = 0x00; /* Command not supported */ skb->data[1] = 0x00; IRDA_DEBUG(0, "Not implemented!\n"); } /* * Function check_request_param (self, param, value) * * Check parameters in request from peer device * */ void irlan_check_command_param(struct irlan_cb *self, char *param, char *value) { IRDA_DEBUG(4, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); IRDA_DEBUG(4, "%s, %s\n", param, value); /* * This is experimental!! DB. */ if (strcmp(param, "MODE") == 0) { IRDA_DEBUG(0, "%s()\n", __func__ ); self->use_udata = TRUE; return; } /* * FILTER_TYPE */ if (strcmp(param, "FILTER_TYPE") == 0) { if (strcmp(value, "DIRECTED") == 0) { self->provider.filter_type = IRLAN_DIRECTED; return; } if (strcmp(value, "MULTICAST") == 0) { self->provider.filter_type = IRLAN_MULTICAST; return; } if (strcmp(value, "BROADCAST") == 0) { self->provider.filter_type = IRLAN_BROADCAST; return; } } /* * FILTER_MODE */ if (strcmp(param, "FILTER_MODE") == 0) { if (strcmp(value, "ALL") == 0) { self->provider.filter_mode = ALL; return; } if (strcmp(value, "FILTER") == 0) { self->provider.filter_mode = FILTER; return; } if (strcmp(value, "NONE") == 0) { self->provider.filter_mode = FILTER; return; } } /* * FILTER_OPERATION */ if (strcmp(param, "FILTER_OPERATION") == 0) { if (strcmp(value, "DYNAMIC") == 0) { self->provider.filter_operation = DYNAMIC; return; } if (strcmp(value, "GET") == 0) { self->provider.filter_operation = GET; return; } } } /* * Function irlan_print_filter (filter_type, buf) * * Print status of filter. Used by /proc file system * */ #ifdef CONFIG_PROC_FS #define MASK2STR(m,s) { .mask = m, .str = s } void irlan_print_filter(struct seq_file *seq, int filter_type) { static struct { int mask; const char *str; } filter_mask2str[] = { MASK2STR(IRLAN_DIRECTED, "DIRECTED"), MASK2STR(IRLAN_FUNCTIONAL, "FUNCTIONAL"), MASK2STR(IRLAN_GROUP, "GROUP"), MASK2STR(IRLAN_MAC_FRAME, "MAC_FRAME"), MASK2STR(IRLAN_MULTICAST, "MULTICAST"), MASK2STR(IRLAN_BROADCAST, "BROADCAST"), MASK2STR(IRLAN_IPX_SOCKET, "IPX_SOCKET"), MASK2STR(0, NULL) }, *p; for (p = filter_mask2str; p->str; p++) { if (filter_type & p->mask) seq_printf(seq, "%s ", p->str); } seq_putc(seq, '\n'); } #undef MASK2STR #endif
gpl-2.0
nwhusted/audit
auparse/message.c
15
1745
/* message.c -- * Copyright 2004, 2005 Red Hat Inc., Durham, North Carolina. * All Rights Reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Authors: * Steve Grubb <sgrubb@redhat.com> */ #include "config.h" #include <stdio.h> #include <stdarg.h> #include "libaudit.h" #include "private.h" /* The message mode refers to where informational messages go 0 - stderr, 1 - syslog, 2 - quiet. The default is quiet. */ static message_t message_mode = MSG_QUIET; static debug_message_t debug_message = DBG_NO; void set_aumessage_mode(message_t mode, debug_message_t debug) { message_mode = mode; debug_message = debug; } void audit_msg(int priority, const char *fmt, ...) { va_list ap; if (message_mode == MSG_QUIET) return; if (priority == LOG_DEBUG && debug_message == DBG_NO) return; va_start(ap, fmt); if (message_mode == MSG_SYSLOG) vsyslog(priority, fmt, ap); else { vfprintf(stderr, fmt, ap); fputc('\n', stderr); } va_end( ap ); }
gpl-2.0
jluissandovalm/lammps_smd
src/USER-OMP/angle_cosine_omp.cpp
15
4511
/* ---------------------------------------------------------------------- LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator http://lammps.sandia.gov, Sandia National Laboratories Steve Plimpton, sjplimp@sandia.gov Copyright (2003) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain rights in this software. This software is distributed under the GNU General Public License. See the README file in the top-level LAMMPS directory. ------------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- Contributing author: Axel Kohlmeyer (Temple U) ------------------------------------------------------------------------- */ #include "angle_cosine_omp.h" #include "atom.h" #include "comm.h" #include "force.h" #include "neighbor.h" #include "domain.h" #include "math_const.h" #include <math.h> #include "suffix.h" using namespace LAMMPS_NS; using namespace MathConst; #define SMALL 0.001 /* ---------------------------------------------------------------------- */ AngleCosineOMP::AngleCosineOMP(class LAMMPS *lmp) : AngleCosine(lmp), ThrOMP(lmp,THR_ANGLE) { suffix_flag |= Suffix::OMP; } /* ---------------------------------------------------------------------- */ void AngleCosineOMP::compute(int eflag, int vflag) { if (eflag || vflag) { ev_setup(eflag,vflag); } else evflag = 0; const int nall = atom->nlocal + atom->nghost; const int nthreads = comm->nthreads; const int inum = neighbor->nanglelist; #if defined(_OPENMP) #pragma omp parallel default(none) shared(eflag,vflag) #endif { int ifrom, ito, tid; loop_setup_thr(ifrom, ito, tid, inum, nthreads); ThrData *thr = fix->get_thr(tid); ev_setup_thr(eflag, vflag, nall, eatom, vatom, thr); if (inum > 0) { if (evflag) { if (eflag) { if (force->newton_bond) eval<1,1,1>(ifrom, ito, thr); else eval<1,1,0>(ifrom, ito, thr); } else { if (force->newton_bond) eval<1,0,1>(ifrom, ito, thr); else eval<1,0,0>(ifrom, ito, thr); } } else { if (force->newton_bond) eval<0,0,1>(ifrom, ito, thr); else eval<0,0,0>(ifrom, ito, thr); } } reduce_thr(this, eflag, vflag, thr); } // end of omp parallel region } template <int EVFLAG, int EFLAG, int NEWTON_BOND> void AngleCosineOMP::eval(int nfrom, int nto, ThrData * const thr) { int i1,i2,i3,n,type; double delx1,dely1,delz1,delx2,dely2,delz2; double eangle,f1[3],f3[3]; double rsq1,rsq2,r1,r2,c,a,a11,a12,a22; const dbl3_t * _noalias const x = (dbl3_t *) atom->x[0]; dbl3_t * _noalias const f = (dbl3_t *) thr->get_f()[0]; const int4_t * _noalias const anglelist = (int4_t *) neighbor->anglelist[0]; const int nlocal = atom->nlocal; eangle = 0.0; for (n = nfrom; n < nto; n++) { i1 = anglelist[n].a; i2 = anglelist[n].b; i3 = anglelist[n].c; type = anglelist[n].t; // 1st bond delx1 = x[i1].x - x[i2].x; dely1 = x[i1].y - x[i2].y; delz1 = x[i1].z - x[i2].z; rsq1 = delx1*delx1 + dely1*dely1 + delz1*delz1; r1 = sqrt(rsq1); // 2nd bond delx2 = x[i3].x - x[i2].x; dely2 = x[i3].y - x[i2].y; delz2 = x[i3].z - x[i2].z; rsq2 = delx2*delx2 + dely2*dely2 + delz2*delz2; r2 = sqrt(rsq2); // c = cosine of angle c = delx1*delx2 + dely1*dely2 + delz1*delz2; c /= r1*r2; if (c > 1.0) c = 1.0; if (c < -1.0) c = -1.0; // force & energy if (EFLAG) eangle = k[type]*(1.0+c); a = k[type]; a11 = a*c / rsq1; a12 = -a / (r1*r2); a22 = a*c / rsq2; f1[0] = a11*delx1 + a12*delx2; f1[1] = a11*dely1 + a12*dely2; f1[2] = a11*delz1 + a12*delz2; f3[0] = a22*delx2 + a12*delx1; f3[1] = a22*dely2 + a12*dely1; 
f3[2] = a22*delz2 + a12*delz1; // apply force to each of 3 atoms if (NEWTON_BOND || i1 < nlocal) { f[i1].x += f1[0]; f[i1].y += f1[1]; f[i1].z += f1[2]; } if (NEWTON_BOND || i2 < nlocal) { f[i2].x -= f1[0] + f3[0]; f[i2].y -= f1[1] + f3[1]; f[i2].z -= f1[2] + f3[2]; } if (NEWTON_BOND || i3 < nlocal) { f[i3].x += f3[0]; f[i3].y += f3[1]; f[i3].z += f3[2]; } if (EVFLAG) ev_tally_thr(this,i1,i2,i3,nlocal,NEWTON_BOND,eangle,f1,f3, delx1,dely1,delz1,delx2,dely2,delz2,thr); } }
gpl-2.0
Lachann/gst-plugins-bad
gst-libs/gst/gl/win32/gstglcontext_wgl.c
15
8884
/* * GStreamer * Copyright (C) 2008 Julien Isorce <julien.isorce@gmail.com> * Copyright (C) 2012 Matthew Waters <ystreet00@gmail.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, * Boston, MA 02110-1301, USA. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <gst/gst.h> #include "../gstgl_fwd.h" #include <gst/gl/gstglcontext.h> #include "gstglcontext_wgl.h" #include <GL/wglext.h> #define gst_gl_context_wgl_parent_class parent_class G_DEFINE_TYPE (GstGLContextWGL, gst_gl_context_wgl, GST_GL_TYPE_CONTEXT); static guintptr gst_gl_context_wgl_get_gl_context (GstGLContext * context); static void gst_gl_context_wgl_swap_buffers (GstGLContext * context); static gboolean gst_gl_context_wgl_choose_format (GstGLContext * context, GError ** error); static gboolean gst_gl_context_wgl_activate (GstGLContext * context, gboolean activate); static gboolean gst_gl_context_wgl_create_context (GstGLContext * context, GstGLAPI gl_api, GstGLContext * other_context, GError ** error); static void gst_gl_context_wgl_destroy_context (GstGLContext * context); GstGLAPI gst_gl_context_wgl_get_gl_api (GstGLContext * context); static GstGLPlatform gst_gl_context_wgl_get_gl_platform (GstGLContext * context); static gpointer gst_gl_context_wgl_get_proc_address (GstGLContext * context, const gchar * name); static void gst_gl_context_wgl_class_init (GstGLContextWGLClass * klass) { GstGLContextClass *context_class = (GstGLContextClass *) klass; context_class->get_gl_context = GST_DEBUG_FUNCPTR (gst_gl_context_wgl_get_gl_context); context_class->choose_format = GST_DEBUG_FUNCPTR (gst_gl_context_wgl_choose_format); context_class->activate = GST_DEBUG_FUNCPTR (gst_gl_context_wgl_activate); context_class->create_context = GST_DEBUG_FUNCPTR (gst_gl_context_wgl_create_context); context_class->destroy_context = GST_DEBUG_FUNCPTR (gst_gl_context_wgl_destroy_context); context_class->swap_buffers = GST_DEBUG_FUNCPTR (gst_gl_context_wgl_swap_buffers); context_class->get_proc_address = GST_DEBUG_FUNCPTR (gst_gl_context_wgl_get_proc_address); context_class->get_gl_api = GST_DEBUG_FUNCPTR (gst_gl_context_wgl_get_gl_api); context_class->get_gl_platform = GST_DEBUG_FUNCPTR (gst_gl_context_wgl_get_gl_platform); } static void gst_gl_context_wgl_init (GstGLContextWGL * context_wgl) { } /* Must be called in the gl thread */ GstGLContextWGL * gst_gl_context_wgl_new (void) { GstGLContextWGL *context = g_object_new (GST_GL_TYPE_CONTEXT_WGL, NULL); return context; } static gboolean gst_gl_context_wgl_create_context (GstGLContext * context, GstGLAPI gl_api, GstGLContext * other_context, GError ** error) { GstGLWindow *window; GstGLContextWGL *context_wgl; HGLRC external_gl_context = NULL; PFNWGLCREATECONTEXTATTRIBSARBPROC wglCreateContextAttribsARB = NULL; HDC device; context_wgl = GST_GL_CONTEXT_WGL (context); window = gst_gl_context_get_window (context); device = (HDC) gst_gl_window_get_display 
(window); if (other_context) { if (gst_gl_context_get_gl_platform (other_context) != GST_GL_PLATFORM_WGL) { g_set_error (error, GST_GL_CONTEXT_ERROR, GST_GL_CONTEXT_ERROR_WRONG_CONFIG, "Cannot share context with a non-WGL context"); goto failure; } external_gl_context = (HGLRC) gst_gl_context_get_gl_context (other_context); } context_wgl->wgl_context = wglCreateContext (device); if (context_wgl->wgl_context) GST_DEBUG ("gl context created: %" G_GUINTPTR_FORMAT, (guintptr) context_wgl->wgl_context); else { g_set_error (error, GST_GL_CONTEXT_ERROR, GST_GL_CONTEXT_ERROR_CREATE_CONTEXT, "failed to create glcontext:0x%x", (unsigned int) GetLastError ()); goto failure; } g_assert (context_wgl->wgl_context); if (external_gl_context) { wglMakeCurrent (device, context_wgl->wgl_context); wglCreateContextAttribsARB = (PFNWGLCREATECONTEXTATTRIBSARBPROC) wglGetProcAddress ("wglCreateContextAttribsARB"); if (wglCreateContextAttribsARB != NULL) { wglMakeCurrent (device, 0); wglDeleteContext (context_wgl->wgl_context); context_wgl->wgl_context = wglCreateContextAttribsARB (device, external_gl_context, 0); if (context_wgl->wgl_context == NULL) { g_set_error (error, GST_GL_CONTEXT_ERROR, GST_GL_CONTEXT_ERROR_CREATE_CONTEXT, "failed to share context through wglCreateContextAttribsARB 0x%x", (unsigned int) GetLastError ()); goto failure; } } else if (!wglShareLists (external_gl_context, context_wgl->wgl_context)) { g_set_error (error, GST_GL_CONTEXT_ERROR, GST_GL_CONTEXT_ERROR_CREATE_CONTEXT, "failed to share contexts through wglShareLists 0x%x", (unsigned int) GetLastError ()); goto failure; } } GST_LOG ("gl context id: %" G_GUINTPTR_FORMAT, (guintptr) context_wgl->wgl_context); gst_object_unref (window); return TRUE; failure: gst_object_unref (window); return FALSE; } static void gst_gl_context_wgl_destroy_context (GstGLContext * context) { GstGLContextWGL *context_wgl; context_wgl = GST_GL_CONTEXT_WGL (context); if (context_wgl->wgl_context) wglDeleteContext (context_wgl->wgl_context); context_wgl->wgl_context = NULL; } static gboolean gst_gl_context_wgl_choose_format (GstGLContext * context, GError ** error) { GstGLWindow *window; PIXELFORMATDESCRIPTOR pfd; gint pixelformat = 0; gboolean res = FALSE; HDC device; window = gst_gl_context_get_window (context); gst_gl_window_win32_create_window (GST_GL_WINDOW_WIN32 (window), error); device = (HDC) gst_gl_window_get_display (window); gst_object_unref (window); pfd.nSize = sizeof (PIXELFORMATDESCRIPTOR); pfd.nVersion = 1; pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER; pfd.iPixelType = PFD_TYPE_RGBA; pfd.cColorBits = 24; pfd.cRedBits = 8; pfd.cRedShift = 0; pfd.cGreenBits = 8; pfd.cGreenShift = 0; pfd.cBlueBits = 8; pfd.cBlueShift = 0; pfd.cAlphaBits = 0; pfd.cAlphaShift = 0; pfd.cAccumBits = 0; pfd.cAccumRedBits = 0; pfd.cAccumGreenBits = 0; pfd.cAccumBlueBits = 0; pfd.cAccumAlphaBits = 0; pfd.cDepthBits = 24; pfd.cStencilBits = 8; pfd.cAuxBuffers = 0; pfd.iLayerType = PFD_MAIN_PLANE; pfd.bReserved = 0; pfd.dwLayerMask = 0; pfd.dwVisibleMask = 0; pfd.dwDamageMask = 0; pfd.cColorBits = (BYTE) GetDeviceCaps (device, BITSPIXEL); pixelformat = ChoosePixelFormat (device, &pfd); if (!pixelformat) { g_set_error (error, GST_GL_CONTEXT_ERROR, GST_GL_CONTEXT_ERROR_FAILED, "Failed to choose a pixel format"); return FALSE; } res = SetPixelFormat (device, pixelformat, &pfd); return res; } static void gst_gl_context_wgl_swap_buffers (GstGLContext * context) { GstGLWindow *window = gst_gl_context_get_window (context); HDC device = (HDC) 
gst_gl_window_get_display (window); SwapBuffers (device); gst_object_unref (window); } static guintptr gst_gl_context_wgl_get_gl_context (GstGLContext * context) { return (guintptr) GST_GL_CONTEXT_WGL (context)->wgl_context; } static gboolean gst_gl_context_wgl_activate (GstGLContext * context, gboolean activate) { GstGLWindow *window; GstGLContextWGL *context_wgl; HDC device; gboolean result; window = gst_gl_context_get_window (context); context_wgl = GST_GL_CONTEXT_WGL (context); device = (HDC) gst_gl_window_get_display (window); if (activate) { result = wglMakeCurrent (device, context_wgl->wgl_context); } else { result = wglMakeCurrent (NULL, NULL); } gst_object_unref (window); return result; } GstGLAPI gst_gl_context_wgl_get_gl_api (GstGLContext * context) { return GST_GL_API_OPENGL; } static GstGLPlatform gst_gl_context_wgl_get_gl_platform (GstGLContext * context) { return GST_GL_PLATFORM_WGL; } static gpointer gst_gl_context_wgl_get_proc_address (GstGLContext * context, const gchar * name) { gpointer result; if (!(result = gst_gl_context_default_get_proc_address (context, name))) { result = wglGetProcAddress ((LPCSTR) name); } return result; }
gpl-2.0
billy-wang/smdkc110-Gingerbread-u-boot
drivers/pcmcia/tqm8xx_pcmcia.c
15
7818
/* -------------------------------------------------------------------- */ /* TQM8xxL Boards by TQ Components */ /* SC8xx Boards by SinoVee Microsystems */ /* -------------------------------------------------------------------- */ #include <common.h> #ifdef CONFIG_8xx #include <mpc8xx.h> #endif #include <pcmcia.h> #undef CONFIG_PCMCIA #if defined(CONFIG_CMD_PCMCIA) #define CONFIG_PCMCIA #endif #if defined(CONFIG_CMD_IDE) && defined(CONFIG_IDE_8xx_PCCARD) #define CONFIG_PCMCIA #endif #if defined(CONFIG_PCMCIA) \ && (defined(CONFIG_TQM8xxL) || defined(CONFIG_SVM_SC8xx)) #if defined(CONFIG_VIRTLAB2) #define PCMCIA_BOARD_MSG "Virtlab2" #elif defined(CONFIG_TQM8xxL) #define PCMCIA_BOARD_MSG "TQM8xxL" #elif defined(CONFIG_SVM_SC8xx) #define PCMCIA_BOARD_MSG "SC8xx" #endif #if defined(CONFIG_NSCU) #define power_config(slot) do {} while (0) #define power_off(slot) do {} while (0) #define power_on_5_0(slot) do {} while (0) #define power_on_3_3(slot) do {} while (0) #elif defined(CONFIG_HMI10) static inline void power_config(int slot) { volatile immap_t *immap = (immap_t *)CFG_IMMR; /* * Configure Port B pins for * 5 Volts Enable and 3 Volts enable */ immap->im_cpm.cp_pbpar &= ~(0x00000300); } static inline void power_off(int slot) { volatile immap_t *immap = (immap_t *)CFG_IMMR; /* remove all power */ immap->im_cpm.cp_pbdat |= 0x00000300; } static inline void power_on_5_0(int slot) { volatile immap_t *immap = (immap_t *)CFG_IMMR; immap->im_cpm.cp_pbdat &= ~(0x0000100); immap->im_cpm.cp_pbdir |= 0x00000300; } static inline void power_on_3_3(int slot) { volatile immap_t *immap = (immap_t *)CFG_IMMR; immap->im_cpm.cp_pbdat &= ~(0x0000200); immap->im_cpm.cp_pbdir |= 0x00000300; } #elif defined(CONFIG_VIRTLAB2) #define power_config(slot) do {} while (0) static inline void power_off(int slot) { volatile unsigned char *powerctl = (volatile unsigned char *)PCMCIA_CTRL; *powerctl = 0; } static inline void power_on_5_0(int slot) { volatile unsigned char *powerctl = (volatile unsigned char *)PCMCIA_CTRL; *powerctl = 2; /* Enable 5V Vccout */ } static inline void power_on_3_3(int slot) { volatile unsigned char *powerctl = (volatile unsigned char *)PCMCIA_CTRL; *powerctl = 1; /* Enable 3.3V Vccout */ } #else static inline void power_config(int slot) { volatile immap_t *immap = (immap_t *)CFG_IMMR; /* * Configure Port C pins for * 5 Volts Enable and 3 Volts enable */ immap->im_ioport.iop_pcpar &= ~(0x0002 | 0x0004); immap->im_ioport.iop_pcso &= ~(0x0002 | 0x0004); } static inline void power_off(int slot) { volatile immap_t *immap = (immap_t *)CFG_IMMR; immap->im_ioport.iop_pcdat &= ~(0x0002 | 0x0004); } static inline void power_on_5_0(int slot) { volatile immap_t *immap = (immap_t *)CFG_IMMR; immap->im_ioport.iop_pcdat |= 0x0004; immap->im_ioport.iop_pcdir |= (0x0002 | 0x0004); } static inline void power_on_3_3(int slot) { volatile immap_t *immap = (immap_t *)CFG_IMMR; immap->im_ioport.iop_pcdat |= 0x0002; immap->im_ioport.iop_pcdir |= (0x0002 | 0x0004); } #endif #ifdef CONFIG_HMI10 static inline int check_card_is_absent(int slot) { volatile pcmconf8xx_t *pcmp = (pcmconf8xx_t *)(&(((immap_t *)CFG_IMMR)->im_pcmcia)); return pcmp->pcmc_pipr & (0x10000000 >> (slot << 4)); } #else static inline int check_card_is_absent(int slot) { volatile pcmconf8xx_t *pcmp = (pcmconf8xx_t *)(&(((immap_t *)CFG_IMMR)->im_pcmcia)); return pcmp->pcmc_pipr & (0x18000000 >> (slot << 4)); } #endif #ifdef NSCU_OE_INV #define NSCU_GCRX_CXOE 0 #else #define NSCU_GCRX_CXOE __MY_PCMCIA_GCRX_CXOE #endif int pcmcia_hardware_enable(int slot) { 
volatile pcmconf8xx_t *pcmp = (pcmconf8xx_t *)(&(((immap_t *)CFG_IMMR)->im_pcmcia)); volatile sysconf8xx_t *sysp = (sysconf8xx_t *)(&(((immap_t *)CFG_IMMR)->im_siu_conf)); uint reg, mask; debug ("hardware_enable: " PCMCIA_BOARD_MSG " Slot %c\n", 'A'+slot); udelay(10000); /* * Configure SIUMCR to enable PCMCIA port B * (VFLS[0:1] are not used for debugging, we connect FRZ# instead) */ sysp->sc_siumcr &= ~SIUMCR_DBGC11; /* set DBGC to 00 */ /* clear interrupt state, and disable interrupts */ pcmp->pcmc_pscr = PCMCIA_MASK(slot); pcmp->pcmc_per &= ~PCMCIA_MASK(slot); /* * Disable interrupts, DMA, and PCMCIA buffers * (isolate the interface) and assert RESET signal */ debug ("Disable PCMCIA buffers and assert RESET\n"); reg = 0; reg |= __MY_PCMCIA_GCRX_CXRESET; /* active high */ reg |= NSCU_GCRX_CXOE; PCMCIA_PGCRX(slot) = reg; udelay(500); power_config(slot); power_off(slot); /* * Make sure there is a card in the slot, then configure the interface. */ udelay(10000); debug ("[%d] %s: PIPR(%p)=0x%x\n", __LINE__,__FUNCTION__, &(pcmp->pcmc_pipr),pcmp->pcmc_pipr); if (check_card_is_absent(slot)) { printf (" No Card found\n"); return (1); } /* * Power On. */ mask = PCMCIA_VS1(slot) | PCMCIA_VS2(slot); reg = pcmp->pcmc_pipr; debug ("PIPR: 0x%x ==> VS1=o%s, VS2=o%s\n", reg, (reg&PCMCIA_VS1(slot))?"n":"ff", (reg&PCMCIA_VS2(slot))?"n":"ff"); if ((reg & mask) == mask) { power_on_5_0(slot); puts (" 5.0V card found: "); } else { power_on_3_3(slot); puts (" 3.3V card found: "); } #if 0 /* VCC switch error flag, PCMCIA slot INPACK_ pin */ cp->cp_pbdir &= ~(0x0020 | 0x0010); cp->cp_pbpar &= ~(0x0020 | 0x0010); udelay(500000); #endif udelay(1000); debug ("Enable PCMCIA buffers and stop RESET\n"); reg = PCMCIA_PGCRX(slot); reg &= ~__MY_PCMCIA_GCRX_CXRESET; /* active high */ reg |= __MY_PCMCIA_GCRX_CXOE; /* active low */ reg &= ~NSCU_GCRX_CXOE; PCMCIA_PGCRX(slot) = reg; udelay(250000); /* some cards need >150 ms to come up :-( */ debug ("# hardware_enable done\n"); return (0); } #if defined(CONFIG_CMD_PCMCIA) int pcmcia_hardware_disable(int slot) { u_long reg; debug ("hardware_disable: " PCMCIA_BOARD_MSG " Slot %c\n", 'A'+slot); /* remove all power */ power_off(slot); debug ("Disable PCMCIA buffers and assert RESET\n"); reg = 0; reg |= __MY_PCMCIA_GCRX_CXRESET; /* active high */ reg |= NSCU_GCRX_CXOE; /* active low */ PCMCIA_PGCRX(slot) = reg; udelay(10000); return (0); } #endif int pcmcia_voltage_set(int slot, int vcc, int vpp) { #ifndef CONFIG_NSCU u_long reg; # ifdef DEBUG volatile pcmconf8xx_t *pcmp = (pcmconf8xx_t *)(&(((immap_t *)CFG_IMMR)->im_pcmcia)); # endif debug ("voltage_set: " PCMCIA_BOARD_MSG " Slot %c, Vcc=%d.%d, Vpp=%d.%d\n", 'A'+slot, vcc/10, vcc%10, vpp/10, vcc%10); /* * Disable PCMCIA buffers (isolate the interface) * and assert RESET signal */ debug ("Disable PCMCIA buffers and assert RESET\n"); reg = PCMCIA_PGCRX(slot); reg |= __MY_PCMCIA_GCRX_CXRESET; /* active high */ reg &= ~__MY_PCMCIA_GCRX_CXOE; /* active low */ reg |= NSCU_GCRX_CXOE; /* active low */ PCMCIA_PGCRX(slot) = reg; udelay(500); debug ("PCMCIA power OFF\n"); power_config(slot); power_off(slot); switch(vcc) { case 0: break; case 33: power_on_3_3(slot); break; case 50: power_on_5_0(slot); break; default: goto done; } /* Checking supported voltages */ debug("PIPR: 0x%x --> %s\n", pcmp->pcmc_pipr, (pcmp->pcmc_pipr & 0x00008000) ? "only 5 V" : "can do 3.3V"); if (vcc) debug("PCMCIA powered at %sV\n", (vcc == 50) ? 
"5.0" : "3.3"); else debug("PCMCIA powered down\n"); done: debug("Enable PCMCIA buffers and stop RESET\n"); reg = PCMCIA_PGCRX(slot); reg &= ~__MY_PCMCIA_GCRX_CXRESET; /* active high */ reg |= __MY_PCMCIA_GCRX_CXOE; /* active low */ reg &= ~NSCU_GCRX_CXOE; /* active low */ PCMCIA_PGCRX(slot) = reg; udelay(500); debug("voltage_set: " PCMCIA_BOARD_MSG " Slot %c, DONE\n", slot+'A'); #endif /* CONFIG_NSCU */ return (0); } #endif /* CONFIG_PCMCIA && (CONFIG_TQM8xxL || CONFIG_SVM_SC8xx) */
gpl-2.0
DeqingSun/Glass_kernel
drivers/mfd/palmas-poweroff.c
15
4126
/* * Palmas s/w power OFF support * * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ * Nishanth Menon * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/bug.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/mfd/palmas.h> /* One instance of palmas supported */ static struct palmas *my_palmas; /* * Allow to differentiate between MFD removal as part of shutdown * and driver removal */ static bool palmas_driver_removal; /** * palmas_poweroff_write() - write to palmas register * @base: module base * @reg: register * @mask: mask to use * @val: value to write */ static int palmas_poweroff_write(unsigned int base, unsigned int reg, unsigned int mask, unsigned int val) { struct palmas *palmas = my_palmas; unsigned int addr; int slave; int r; slave = PALMAS_BASE_TO_SLAVE(base); addr = PALMAS_BASE_TO_REG(base, reg); r = regmap_update_bits(palmas->regmap[slave], addr, mask, val); return r; } /** * palmas_poweroff() - power off palmas * * Power OFF palmas, but allow VBUS to wakeup Palmas to allow * basic things like charging, flashing etc. */ static void palmas_poweroff(void) { int ret; /* At least 1 palmas device needed to shutdown */ if (!my_palmas) { pr_err("%s: NO PALMAS device registered!\n", __func__); goto out; } /* Unmask VBUSIRQ so that USB power can wakeup device after shutdown */ ret = palmas_poweroff_write(PALMAS_INTERRUPT_BASE, PALMAS_INT3_MASK, INT3_MASK_VBUS, 0x0); if (ret) pr_err("%s: Unable to clear VBUS in INT3_MASK: %d\n", __func__, ret); /* Now, shutdown palmas */ ret = palmas_poweroff_write(PALMAS_PMU_CONTROL_BASE, PALMAS_DEV_CTRL, DEV_CTRL_SW_RST | DEV_CTRL_DEV_ON, 0x0); if (ret) pr_err("%s: Unable to write to DEV_CTRL_SW_RST: %d\n", __func__, ret); /* arbitary 100ms delay to ensure shutdown */ mdelay(100); out: /* Hope the reset takes care of things */ pr_err("%s: Palmas did not shutdown!!!\n", __func__); BUG(); return; } static int __devinit palmas_poweroff_probe(struct platform_device *pdev) { struct palmas *palmas = dev_get_drvdata(pdev->dev.parent); int ret = -EINVAL; /* Only one palmas can be allowed by poweroff */ if (my_palmas) goto out; my_palmas = palmas; /* Keep device in active mode */ ret = palmas_poweroff_write(PALMAS_PMU_CONTROL_BASE, PALMAS_DEV_CTRL, DEV_CTRL_DEV_ON, DEV_CTRL_DEV_ON); if (ret) pr_err("%s: Unable to write to DEV_CTRL_DEV_ON: %d\n", __func__, ret); /* Fall through */ if (ret) my_palmas = NULL; else pm_power_off = palmas_poweroff; out: return ret; } static int __devexit palmas_poweroff_remove(struct platform_device *pdev) { /* * If my module is not really getting removed, dont reset ptrs yet. 
* we *do* want the handlers to exist to allow system to shutdown */ if (!palmas_driver_removal) return 0; pm_power_off = NULL; my_palmas = NULL; return 0; } static struct platform_driver palmas_poweroff_driver = { .probe = palmas_poweroff_probe, .remove = __devexit_p(palmas_poweroff_remove), .driver = { .name = "palmas-poweroff", .owner = THIS_MODULE, }, }; static int __init palmas_poweroff_init(void) { return platform_driver_register(&palmas_poweroff_driver); } module_init(palmas_poweroff_init); static void __exit palmas_poweroff_exit(void) { palmas_driver_removal = true; platform_driver_unregister(&palmas_poweroff_driver); } module_exit(palmas_poweroff_exit); MODULE_AUTHOR("Nishanth Menon <nm@ti.com>"); MODULE_DESCRIPTION("Palmas poweroff driver"); MODULE_ALIAS("platform:palmas-poweroff"); MODULE_LICENSE("GPL");
gpl-2.0
fredericgermain/linux-sunxi
sound/soc/soc-devres.c
271
3187
/* * soc-devres.c -- ALSA SoC Audio Layer devres functions * * Copyright (C) 2013 Linaro Ltd * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <sound/soc.h> #include <sound/dmaengine_pcm.h> static void devm_component_release(struct device *dev, void *res) { snd_soc_unregister_component(*(struct device **)res); } /** * devm_snd_soc_register_component - resource managed component registration * @dev: Device used to manage component * @cmpnt_drv: Component driver * @dai_drv: DAI driver * @num_dai: Number of DAIs to register * * Register a component with automatic unregistration when the device is * unregistered. */ int devm_snd_soc_register_component(struct device *dev, const struct snd_soc_component_driver *cmpnt_drv, struct snd_soc_dai_driver *dai_drv, int num_dai) { struct device **ptr; int ret; ptr = devres_alloc(devm_component_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return -ENOMEM; ret = snd_soc_register_component(dev, cmpnt_drv, dai_drv, num_dai); if (ret == 0) { *ptr = dev; devres_add(dev, ptr); } else { devres_free(ptr); } return ret; } EXPORT_SYMBOL_GPL(devm_snd_soc_register_component); static void devm_card_release(struct device *dev, void *res) { snd_soc_unregister_card(*(struct snd_soc_card **)res); } /** * devm_snd_soc_register_card - resource managed card registration * @dev: Device used to manage card * @card: Card to register * * Register a card with automatic unregistration when the device is * unregistered. */ int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card) { struct snd_soc_card **ptr; int ret; ptr = devres_alloc(devm_card_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return -ENOMEM; ret = snd_soc_register_card(card); if (ret == 0) { *ptr = card; devres_add(dev, ptr); } else { devres_free(ptr); } return ret; } EXPORT_SYMBOL_GPL(devm_snd_soc_register_card); #ifdef CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM static void devm_dmaengine_pcm_release(struct device *dev, void *res) { snd_dmaengine_pcm_unregister(*(struct device **)res); } /** * devm_snd_dmaengine_pcm_register - resource managed dmaengine PCM registration * @dev: The parent device for the PCM device * @config: Platform specific PCM configuration * @flags: Platform specific quirks * * Register a dmaengine based PCM device with automatic unregistration when the * device is unregistered. */ int devm_snd_dmaengine_pcm_register(struct device *dev, const struct snd_dmaengine_pcm_config *config, unsigned int flags) { struct device **ptr; int ret; ptr = devres_alloc(devm_dmaengine_pcm_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return -ENOMEM; ret = snd_dmaengine_pcm_register(dev, config, flags); if (ret == 0) { *ptr = dev; devres_add(dev, ptr); } else { devres_free(ptr); } return ret; } EXPORT_SYMBOL_GPL(devm_snd_dmaengine_pcm_register); #endif
gpl-2.0
zzyjsjcom/linux-3.18.11-zzy
drivers/mtd/maps/ixp4xx.c
527
6118
/* * drivers/mtd/maps/ixp4xx.c * * MTD Map file for IXP4XX based systems. Please do not make per-board * changes in here. If your board needs special setup, do it in your * platform level code in arch/arm/mach-ixp4xx/board-setup.c * * Original Author: Intel Corporation * Maintainer: Deepak Saxena <dsaxena@mvista.com> * * Copyright (C) 2002 Intel Corporation * Copyright (C) 2003-2004 MontaVista Software, Inc. * */ #include <linux/err.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <asm/io.h> #include <asm/mach/flash.h> #include <linux/reboot.h> /* * Read/write a 16 bit word from flash address 'addr'. * * When the cpu is in little-endian mode it swizzles the address lines * ('address coherency') so we need to undo the swizzling to ensure commands * and the like end up on the correct flash address. * * To further complicate matters, due to the way the expansion bus controller * handles 32 bit reads, the byte stream ABCD is stored on the flash as: * D15 D0 * +---+---+ * | A | B | 0 * +---+---+ * | C | D | 2 * +---+---+ * This means that on LE systems each 16 bit word must be swapped. Note that * this requires CONFIG_MTD_CFI_BE_BYTE_SWAP to be enabled to 'unswap' the CFI * data and other flash commands which are always in D7-D0. */ #ifndef __ARMEB__ #ifndef CONFIG_MTD_CFI_BE_BYTE_SWAP # error CONFIG_MTD_CFI_BE_BYTE_SWAP required #endif static inline u16 flash_read16(void __iomem *addr) { return be16_to_cpu(__raw_readw((void __iomem *)((unsigned long)addr ^ 0x2))); } static inline void flash_write16(u16 d, void __iomem *addr) { __raw_writew(cpu_to_be16(d), (void __iomem *)((unsigned long)addr ^ 0x2)); } #define BYTE0(h) ((h) & 0xFF) #define BYTE1(h) (((h) >> 8) & 0xFF) #else static inline u16 flash_read16(const void __iomem *addr) { return __raw_readw(addr); } static inline void flash_write16(u16 d, void __iomem *addr) { __raw_writew(d, addr); } #define BYTE0(h) (((h) >> 8) & 0xFF) #define BYTE1(h) ((h) & 0xFF) #endif static map_word ixp4xx_read16(struct map_info *map, unsigned long ofs) { map_word val; val.x[0] = flash_read16(map->virt + ofs); return val; } /* * The IXP4xx expansion bus only allows 16-bit wide acceses * when attached to a 16-bit wide device (such as the 28F128J3A), * so we can't just memcpy_fromio(). */ static void ixp4xx_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) { u8 *dest = (u8 *) to; void __iomem *src = map->virt + from; if (len <= 0) return; if (from & 1) { *dest++ = BYTE1(flash_read16(src-1)); src++; --len; } while (len >= 2) { u16 data = flash_read16(src); *dest++ = BYTE0(data); *dest++ = BYTE1(data); src += 2; len -= 2; } if (len > 0) *dest++ = BYTE0(flash_read16(src)); } /* * Unaligned writes are ignored, causing the 8-bit * probe to fail and proceed to the 16-bit probe (which succeeds). 
*/ static void ixp4xx_probe_write16(struct map_info *map, map_word d, unsigned long adr) { if (!(adr & 1)) flash_write16(d.x[0], map->virt + adr); } /* * Fast write16 function without the probing check above */ static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr) { flash_write16(d.x[0], map->virt + adr); } struct ixp4xx_flash_info { struct mtd_info *mtd; struct map_info map; struct resource *res; }; static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL }; static int ixp4xx_flash_remove(struct platform_device *dev) { struct flash_platform_data *plat = dev_get_platdata(&dev->dev); struct ixp4xx_flash_info *info = platform_get_drvdata(dev); if(!info) return 0; if (info->mtd) { mtd_device_unregister(info->mtd); map_destroy(info->mtd); } if (plat->exit) plat->exit(); return 0; } static int ixp4xx_flash_probe(struct platform_device *dev) { struct flash_platform_data *plat = dev_get_platdata(&dev->dev); struct ixp4xx_flash_info *info; struct mtd_part_parser_data ppdata = { .origin = dev->resource->start, }; int err = -1; if (!plat) return -ENODEV; if (plat->init) { err = plat->init(); if (err) return err; } info = devm_kzalloc(&dev->dev, sizeof(struct ixp4xx_flash_info), GFP_KERNEL); if(!info) { err = -ENOMEM; goto Error; } platform_set_drvdata(dev, info); /* * Tell the MTD layer we're not 1:1 mapped so that it does * not attempt to do a direct access on us. */ info->map.phys = NO_XIP; info->map.size = resource_size(dev->resource); /* * We only support 16-bit accesses for now. If and when * any board use 8-bit access, we'll fixup the driver to * handle that. */ info->map.bankwidth = 2; info->map.name = dev_name(&dev->dev); info->map.read = ixp4xx_read16; info->map.write = ixp4xx_probe_write16; info->map.copy_from = ixp4xx_copy_from; info->map.virt = devm_ioremap_resource(&dev->dev, dev->resource); if (IS_ERR(info->map.virt)) { err = PTR_ERR(info->map.virt); goto Error; } info->mtd = do_map_probe(plat->map_name, &info->map); if (!info->mtd) { printk(KERN_ERR "IXP4XXFlash: map_probe failed\n"); err = -ENXIO; goto Error; } info->mtd->owner = THIS_MODULE; /* Use the fast version */ info->map.write = ixp4xx_write16; err = mtd_device_parse_register(info->mtd, probes, &ppdata, plat->parts, plat->nr_parts); if (err) { printk(KERN_ERR "Could not parse partitions\n"); goto Error; } return 0; Error: ixp4xx_flash_remove(dev); return err; } static struct platform_driver ixp4xx_flash_driver = { .probe = ixp4xx_flash_probe, .remove = ixp4xx_flash_remove, .driver = { .name = "IXP4XX-Flash", .owner = THIS_MODULE, }, }; module_platform_driver(ixp4xx_flash_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems"); MODULE_AUTHOR("Deepak Saxena"); MODULE_ALIAS("platform:IXP4XX-Flash");
gpl-2.0
dan82840/Netgear-RBR40
git_home/linux.git/drivers/rtc/rtc-m41t80.c
527
18928
/* * I2C client/driver for the ST M41T80 family of i2c rtc chips. * * Author: Alexander Bigga <ab@mycable.de> * * Based on m41t00.c by Mark A. Greer <mgreer@mvista.com> * * 2006 (c) mycable GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/bcd.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/rtc.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/string.h> #ifdef CONFIG_RTC_DRV_M41T80_WDT #include <linux/fs.h> #include <linux/ioctl.h> #include <linux/miscdevice.h> #include <linux/reboot.h> #include <linux/watchdog.h> #endif #define M41T80_REG_SSEC 0 #define M41T80_REG_SEC 1 #define M41T80_REG_MIN 2 #define M41T80_REG_HOUR 3 #define M41T80_REG_WDAY 4 #define M41T80_REG_DAY 5 #define M41T80_REG_MON 6 #define M41T80_REG_YEAR 7 #define M41T80_REG_ALARM_MON 0xa #define M41T80_REG_ALARM_DAY 0xb #define M41T80_REG_ALARM_HOUR 0xc #define M41T80_REG_ALARM_MIN 0xd #define M41T80_REG_ALARM_SEC 0xe #define M41T80_REG_FLAGS 0xf #define M41T80_REG_SQW 0x13 #define M41T80_DATETIME_REG_SIZE (M41T80_REG_YEAR + 1) #define M41T80_ALARM_REG_SIZE \ (M41T80_REG_ALARM_SEC + 1 - M41T80_REG_ALARM_MON) #define M41T80_SEC_ST (1 << 7) /* ST: Stop Bit */ #define M41T80_ALMON_AFE (1 << 7) /* AFE: AF Enable Bit */ #define M41T80_ALMON_SQWE (1 << 6) /* SQWE: SQW Enable Bit */ #define M41T80_ALHOUR_HT (1 << 6) /* HT: Halt Update Bit */ #define M41T80_FLAGS_AF (1 << 6) /* AF: Alarm Flag Bit */ #define M41T80_FLAGS_BATT_LOW (1 << 4) /* BL: Battery Low Bit */ #define M41T80_WATCHDOG_RB2 (1 << 7) /* RB: Watchdog resolution */ #define M41T80_WATCHDOG_RB1 (1 << 1) /* RB: Watchdog resolution */ #define M41T80_WATCHDOG_RB0 (1 << 0) /* RB: Watchdog resolution */ #define M41T80_FEATURE_HT (1 << 0) /* Halt feature */ #define M41T80_FEATURE_BL (1 << 1) /* Battery low indicator */ #define M41T80_FEATURE_SQ (1 << 2) /* Squarewave feature */ #define M41T80_FEATURE_WD (1 << 3) /* Extra watchdog resolution */ #define M41T80_FEATURE_SQ_ALT (1 << 4) /* RSx bits are in reg 4 */ #define DRV_VERSION "0.05" static DEFINE_MUTEX(m41t80_rtc_mutex); static const struct i2c_device_id m41t80_id[] = { { "m41t62", M41T80_FEATURE_SQ | M41T80_FEATURE_SQ_ALT }, { "m41t65", M41T80_FEATURE_HT | M41T80_FEATURE_WD }, { "m41t80", M41T80_FEATURE_SQ }, { "m41t81", M41T80_FEATURE_HT | M41T80_FEATURE_SQ}, { "m41t81s", M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ }, { "m41t82", M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ }, { "m41t83", M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ }, { "m41st84", M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ }, { "m41st85", M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ }, { "m41st87", M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ }, { } }; MODULE_DEVICE_TABLE(i2c, m41t80_id); struct m41t80_data { u8 features; struct rtc_device *rtc; }; static int m41t80_get_datetime(struct i2c_client *client, struct rtc_time *tm) { u8 buf[M41T80_DATETIME_REG_SIZE], dt_addr[1] = { M41T80_REG_SEC }; struct i2c_msg msgs[] = { { .addr = client->addr, .flags = 0, .len = 1, .buf = dt_addr, }, { .addr = client->addr, .flags = I2C_M_RD, .len = M41T80_DATETIME_REG_SIZE - M41T80_REG_SEC, .buf = buf + M41T80_REG_SEC, }, }; if (i2c_transfer(client->adapter, msgs, 2) < 0) { dev_err(&client->dev, "read error\n"); return -EIO; } tm->tm_sec = 
bcd2bin(buf[M41T80_REG_SEC] & 0x7f); tm->tm_min = bcd2bin(buf[M41T80_REG_MIN] & 0x7f); tm->tm_hour = bcd2bin(buf[M41T80_REG_HOUR] & 0x3f); tm->tm_mday = bcd2bin(buf[M41T80_REG_DAY] & 0x3f); tm->tm_wday = buf[M41T80_REG_WDAY] & 0x07; tm->tm_mon = bcd2bin(buf[M41T80_REG_MON] & 0x1f) - 1; /* assume 20YY not 19YY, and ignore the Century Bit */ tm->tm_year = bcd2bin(buf[M41T80_REG_YEAR]) + 100; return rtc_valid_tm(tm); } /* Sets the given date and time to the real time clock. */ static int m41t80_set_datetime(struct i2c_client *client, struct rtc_time *tm) { u8 wbuf[1 + M41T80_DATETIME_REG_SIZE]; u8 *buf = &wbuf[1]; u8 dt_addr[1] = { M41T80_REG_SEC }; struct i2c_msg msgs_in[] = { { .addr = client->addr, .flags = 0, .len = 1, .buf = dt_addr, }, { .addr = client->addr, .flags = I2C_M_RD, .len = M41T80_DATETIME_REG_SIZE - M41T80_REG_SEC, .buf = buf + M41T80_REG_SEC, }, }; struct i2c_msg msgs[] = { { .addr = client->addr, .flags = 0, .len = 1 + M41T80_DATETIME_REG_SIZE, .buf = wbuf, }, }; /* Read current reg values into buf[1..7] */ if (i2c_transfer(client->adapter, msgs_in, 2) < 0) { dev_err(&client->dev, "read error\n"); return -EIO; } wbuf[0] = 0; /* offset into rtc's regs */ /* Merge time-data and register flags into buf[0..7] */ buf[M41T80_REG_SSEC] = 0; buf[M41T80_REG_SEC] = bin2bcd(tm->tm_sec) | (buf[M41T80_REG_SEC] & ~0x7f); buf[M41T80_REG_MIN] = bin2bcd(tm->tm_min) | (buf[M41T80_REG_MIN] & ~0x7f); buf[M41T80_REG_HOUR] = bin2bcd(tm->tm_hour) | (buf[M41T80_REG_HOUR] & ~0x3f); buf[M41T80_REG_WDAY] = (tm->tm_wday & 0x07) | (buf[M41T80_REG_WDAY] & ~0x07); buf[M41T80_REG_DAY] = bin2bcd(tm->tm_mday) | (buf[M41T80_REG_DAY] & ~0x3f); buf[M41T80_REG_MON] = bin2bcd(tm->tm_mon + 1) | (buf[M41T80_REG_MON] & ~0x1f); /* assume 20YY not 19YY */ buf[M41T80_REG_YEAR] = bin2bcd(tm->tm_year % 100); if (i2c_transfer(client->adapter, msgs, 1) != 1) { dev_err(&client->dev, "write error\n"); return -EIO; } return 0; } #if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE) static int m41t80_rtc_proc(struct device *dev, struct seq_file *seq) { struct i2c_client *client = to_i2c_client(dev); struct m41t80_data *clientdata = i2c_get_clientdata(client); u8 reg; if (clientdata->features & M41T80_FEATURE_BL) { reg = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS); seq_printf(seq, "battery\t\t: %s\n", (reg & M41T80_FLAGS_BATT_LOW) ? "exhausted" : "ok"); } return 0; } #else #define m41t80_rtc_proc NULL #endif static int m41t80_rtc_read_time(struct device *dev, struct rtc_time *tm) { return m41t80_get_datetime(to_i2c_client(dev), tm); } static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm) { return m41t80_set_datetime(to_i2c_client(dev), tm); } /* * XXX - m41t80 alarm functionality is reported broken. * until it is fixed, don't register alarm functions. 
*/ static struct rtc_class_ops m41t80_rtc_ops = { .read_time = m41t80_rtc_read_time, .set_time = m41t80_rtc_set_time, .proc = m41t80_rtc_proc, }; #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE) static ssize_t m41t80_sysfs_show_flags(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); int val; val = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS); if (val < 0) return -EIO; return sprintf(buf, "%#x\n", val); } static DEVICE_ATTR(flags, S_IRUGO, m41t80_sysfs_show_flags, NULL); static ssize_t m41t80_sysfs_show_sqwfreq(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct m41t80_data *clientdata = i2c_get_clientdata(client); int val, reg_sqw; if (!(clientdata->features & M41T80_FEATURE_SQ)) return -EINVAL; reg_sqw = M41T80_REG_SQW; if (clientdata->features & M41T80_FEATURE_SQ_ALT) reg_sqw = M41T80_REG_WDAY; val = i2c_smbus_read_byte_data(client, reg_sqw); if (val < 0) return -EIO; val = (val >> 4) & 0xf; switch (val) { case 0: break; case 1: val = 32768; break; default: val = 32768 >> val; } return sprintf(buf, "%d\n", val); } static ssize_t m41t80_sysfs_set_sqwfreq(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct m41t80_data *clientdata = i2c_get_clientdata(client); int almon, sqw, reg_sqw; int val = simple_strtoul(buf, NULL, 0); if (!(clientdata->features & M41T80_FEATURE_SQ)) return -EINVAL; if (val) { if (!is_power_of_2(val)) return -EINVAL; val = ilog2(val); if (val == 15) val = 1; else if (val < 14) val = 15 - val; else return -EINVAL; } /* disable SQW, set SQW frequency & re-enable */ almon = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON); if (almon < 0) return -EIO; reg_sqw = M41T80_REG_SQW; if (clientdata->features & M41T80_FEATURE_SQ_ALT) reg_sqw = M41T80_REG_WDAY; sqw = i2c_smbus_read_byte_data(client, reg_sqw); if (sqw < 0) return -EIO; sqw = (sqw & 0x0f) | (val << 4); if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, almon & ~M41T80_ALMON_SQWE) < 0 || i2c_smbus_write_byte_data(client, reg_sqw, sqw) < 0) return -EIO; if (val && i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, almon | M41T80_ALMON_SQWE) < 0) return -EIO; return count; } static DEVICE_ATTR(sqwfreq, S_IRUGO | S_IWUSR, m41t80_sysfs_show_sqwfreq, m41t80_sysfs_set_sqwfreq); static struct attribute *attrs[] = { &dev_attr_flags.attr, &dev_attr_sqwfreq.attr, NULL, }; static struct attribute_group attr_group = { .attrs = attrs, }; static int m41t80_sysfs_register(struct device *dev) { return sysfs_create_group(&dev->kobj, &attr_group); } #else static int m41t80_sysfs_register(struct device *dev) { return 0; } #endif #ifdef CONFIG_RTC_DRV_M41T80_WDT /* ***************************************************************************** * * Watchdog Driver * ***************************************************************************** */ static struct i2c_client *save_client; /* Default margin */ #define WD_TIMO 60 /* 1..31 seconds */ static int wdt_margin = WD_TIMO; module_param(wdt_margin, int, 0); MODULE_PARM_DESC(wdt_margin, "Watchdog timeout in seconds (default 60s)"); static unsigned long wdt_is_open; static int boot_flag; /** * wdt_ping: * * Reload counter one with the watchdog timeout. We don't bother reloading * the cascade counter. 
*/ static void wdt_ping(void) { unsigned char i2c_data[2]; struct i2c_msg msgs1[1] = { { .addr = save_client->addr, .flags = 0, .len = 2, .buf = i2c_data, }, }; struct m41t80_data *clientdata = i2c_get_clientdata(save_client); i2c_data[0] = 0x09; /* watchdog register */ if (wdt_margin > 31) i2c_data[1] = (wdt_margin & 0xFC) | 0x83; /* resolution = 4s */ else /* * WDS = 1 (0x80), mulitplier = WD_TIMO, resolution = 1s (0x02) */ i2c_data[1] = wdt_margin<<2 | 0x82; /* * M41T65 has three bits for watchdog resolution. Don't set bit 7, as * that would be an invalid resolution. */ if (clientdata->features & M41T80_FEATURE_WD) i2c_data[1] &= ~M41T80_WATCHDOG_RB2; i2c_transfer(save_client->adapter, msgs1, 1); } /** * wdt_disable: * * disables watchdog. */ static void wdt_disable(void) { unsigned char i2c_data[2], i2c_buf[0x10]; struct i2c_msg msgs0[2] = { { .addr = save_client->addr, .flags = 0, .len = 1, .buf = i2c_data, }, { .addr = save_client->addr, .flags = I2C_M_RD, .len = 1, .buf = i2c_buf, }, }; struct i2c_msg msgs1[1] = { { .addr = save_client->addr, .flags = 0, .len = 2, .buf = i2c_data, }, }; i2c_data[0] = 0x09; i2c_transfer(save_client->adapter, msgs0, 2); i2c_data[0] = 0x09; i2c_data[1] = 0x00; i2c_transfer(save_client->adapter, msgs1, 1); } /** * wdt_write: * @file: file handle to the watchdog * @buf: buffer to write (unused as data does not matter here * @count: count of bytes * @ppos: pointer to the position to write. No seeks allowed * * A write to a watchdog device is defined as a keepalive signal. Any * write of data will do, as we we don't define content meaning. */ static ssize_t wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { if (count) { wdt_ping(); return 1; } return 0; } static ssize_t wdt_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return 0; } /** * wdt_ioctl: * @inode: inode of the device * @file: file handle to the device * @cmd: watchdog command * @arg: argument pointer * * The watchdog API defines a common set of functions for all watchdogs * according to their available features. We only actually usefully support * querying capabilities and current status. */ static int wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int new_margin, rv; static struct watchdog_info ident = { .options = WDIOF_POWERUNDER | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT, .firmware_version = 1, .identity = "M41T80 WTD" }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user((struct watchdog_info __user *)arg, &ident, sizeof(ident)) ? 
-EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(boot_flag, (int __user *)arg); case WDIOC_KEEPALIVE: wdt_ping(); return 0; case WDIOC_SETTIMEOUT: if (get_user(new_margin, (int __user *)arg)) return -EFAULT; /* Arbitrary, can't find the card's limits */ if (new_margin < 1 || new_margin > 124) return -EINVAL; wdt_margin = new_margin; wdt_ping(); /* Fall */ case WDIOC_GETTIMEOUT: return put_user(wdt_margin, (int __user *)arg); case WDIOC_SETOPTIONS: if (copy_from_user(&rv, (int __user *)arg, sizeof(int))) return -EFAULT; if (rv & WDIOS_DISABLECARD) { pr_info("rtc-m41t80: disable watchdog\n"); wdt_disable(); } if (rv & WDIOS_ENABLECARD) { pr_info("rtc-m41t80: enable watchdog\n"); wdt_ping(); } return -EINVAL; } return -ENOTTY; } static long wdt_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; mutex_lock(&m41t80_rtc_mutex); ret = wdt_ioctl(file, cmd, arg); mutex_unlock(&m41t80_rtc_mutex); return ret; } /** * wdt_open: * @inode: inode of device * @file: file handle to device * */ static int wdt_open(struct inode *inode, struct file *file) { if (MINOR(inode->i_rdev) == WATCHDOG_MINOR) { mutex_lock(&m41t80_rtc_mutex); if (test_and_set_bit(0, &wdt_is_open)) { mutex_unlock(&m41t80_rtc_mutex); return -EBUSY; } /* * Activate */ wdt_is_open = 1; mutex_unlock(&m41t80_rtc_mutex); return nonseekable_open(inode, file); } return -ENODEV; } /** * wdt_close: * @inode: inode to board * @file: file handle to board * */ static int wdt_release(struct inode *inode, struct file *file) { if (MINOR(inode->i_rdev) == WATCHDOG_MINOR) clear_bit(0, &wdt_is_open); return 0; } /** * notify_sys: * @this: our notifier block * @code: the event being reported * @unused: unused * * Our notifier is called on system shutdowns. We want to turn the card * off at reboot otherwise the machine will reboot again during memory * test or worse yet during the following fsck. This would suck, in fact * trust me - if it happens it does suck. */ static int wdt_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) /* Disable Watchdog */ wdt_disable(); return NOTIFY_DONE; } static const struct file_operations wdt_fops = { .owner = THIS_MODULE, .read = wdt_read, .unlocked_ioctl = wdt_unlocked_ioctl, .write = wdt_write, .open = wdt_open, .release = wdt_release, .llseek = no_llseek, }; static struct miscdevice wdt_dev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &wdt_fops, }; /* * The WDT card needs to learn about soft shutdowns in order to * turn the timebomb registers off. 
*/ static struct notifier_block wdt_notifier = { .notifier_call = wdt_notify_sys, }; #endif /* CONFIG_RTC_DRV_M41T80_WDT */ /* ***************************************************************************** * * Driver Interface * ***************************************************************************** */ static int m41t80_probe(struct i2c_client *client, const struct i2c_device_id *id) { int rc = 0; struct rtc_device *rtc = NULL; struct rtc_time tm; struct m41t80_data *clientdata = NULL; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE_DATA)) { rc = -ENODEV; goto exit; } dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n"); clientdata = devm_kzalloc(&client->dev, sizeof(*clientdata), GFP_KERNEL); if (!clientdata) { rc = -ENOMEM; goto exit; } clientdata->features = id->driver_data; i2c_set_clientdata(client, clientdata); rtc = devm_rtc_device_register(&client->dev, client->name, &m41t80_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { rc = PTR_ERR(rtc); rtc = NULL; goto exit; } clientdata->rtc = rtc; /* Make sure HT (Halt Update) bit is cleared */ rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR); if (rc < 0) goto ht_err; if (rc & M41T80_ALHOUR_HT) { if (clientdata->features & M41T80_FEATURE_HT) { m41t80_get_datetime(client, &tm); dev_info(&client->dev, "HT bit was set!\n"); dev_info(&client->dev, "Power Down at " "%04i-%02i-%02i %02i:%02i:%02i\n", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); } if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_HOUR, rc & ~M41T80_ALHOUR_HT) < 0) goto ht_err; } /* Make sure ST (stop) bit is cleared */ rc = i2c_smbus_read_byte_data(client, M41T80_REG_SEC); if (rc < 0) goto st_err; if (rc & M41T80_SEC_ST) { if (i2c_smbus_write_byte_data(client, M41T80_REG_SEC, rc & ~M41T80_SEC_ST) < 0) goto st_err; } rc = m41t80_sysfs_register(&client->dev); if (rc) goto exit; #ifdef CONFIG_RTC_DRV_M41T80_WDT if (clientdata->features & M41T80_FEATURE_HT) { save_client = client; rc = misc_register(&wdt_dev); if (rc) goto exit; rc = register_reboot_notifier(&wdt_notifier); if (rc) { misc_deregister(&wdt_dev); goto exit; } } #endif return 0; st_err: rc = -EIO; dev_err(&client->dev, "Can't clear ST bit\n"); goto exit; ht_err: rc = -EIO; dev_err(&client->dev, "Can't clear HT bit\n"); goto exit; exit: return rc; } static int m41t80_remove(struct i2c_client *client) { #ifdef CONFIG_RTC_DRV_M41T80_WDT struct m41t80_data *clientdata = i2c_get_clientdata(client); if (clientdata->features & M41T80_FEATURE_HT) { misc_deregister(&wdt_dev); unregister_reboot_notifier(&wdt_notifier); } #endif return 0; } static struct i2c_driver m41t80_driver = { .driver = { .name = "rtc-m41t80", }, .probe = m41t80_probe, .remove = m41t80_remove, .id_table = m41t80_id, }; module_i2c_driver(m41t80_driver); MODULE_AUTHOR("Alexander Bigga <ab@mycable.de>"); MODULE_DESCRIPTION("ST Microelectronics M41T80 series RTC I2C Client Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
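The watchdog half of the rtc-m41t80 driver above registers the standard /dev/watchdog misc device, so it can be exercised from userspace with the generic watchdog ioctls it implements (WDIOC_SETTIMEOUT, WDIOC_KEEPALIVE, plus plain writes as keepalives). The sketch below is illustrative only and is not part of the driver; the device path and the 30-second margin are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd, margin = 30;		/* wdt_ioctl() above accepts 1..124 seconds */

	fd = open("/dev/watchdog", O_WRONLY);
	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}
	if (ioctl(fd, WDIOC_SETTIMEOUT, &margin) == 0)
		printf("watchdog margin set to %d s\n", margin);
	ioctl(fd, WDIOC_KEEPALIVE, 0);	/* explicit ping */
	write(fd, "k", 1);		/* any write is also treated as a ping */
	close(fd);			/* wdt_release() only clears the open bit; the timer keeps running */
	return 0;
}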
TakisBeskos/u8160-2.6.32.x-kernel
arch/sh/drivers/dma/dma-api.c
783
9315
/* * arch/sh/drivers/dma/dma-api.c * * SuperH-specific DMA management API * * Copyright (C) 2003, 2004, 2005 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/proc_fs.h> #include <linux/list.h> #include <linux/platform_device.h> #include <linux/mm.h> #include <linux/sched.h> #include <asm/dma.h> DEFINE_SPINLOCK(dma_spin_lock); static LIST_HEAD(registered_dmac_list); struct dma_info *get_dma_info(unsigned int chan) { struct dma_info *info; /* * Look for each DMAC's range to determine who the owner of * the channel is. */ list_for_each_entry(info, &registered_dmac_list, list) { if ((chan < info->first_vchannel_nr) || (chan >= info->first_vchannel_nr + info->nr_channels)) continue; return info; } return NULL; } EXPORT_SYMBOL(get_dma_info); struct dma_info *get_dma_info_by_name(const char *dmac_name) { struct dma_info *info; list_for_each_entry(info, &registered_dmac_list, list) { if (dmac_name && (strcmp(dmac_name, info->name) != 0)) continue; else return info; } return NULL; } EXPORT_SYMBOL(get_dma_info_by_name); static unsigned int get_nr_channels(void) { struct dma_info *info; unsigned int nr = 0; if (unlikely(list_empty(&registered_dmac_list))) return nr; list_for_each_entry(info, &registered_dmac_list, list) nr += info->nr_channels; return nr; } struct dma_channel *get_dma_channel(unsigned int chan) { struct dma_info *info = get_dma_info(chan); struct dma_channel *channel; int i; if (unlikely(!info)) return ERR_PTR(-EINVAL); for (i = 0; i < info->nr_channels; i++) { channel = &info->channels[i]; if (channel->vchan == chan) return channel; } return NULL; } EXPORT_SYMBOL(get_dma_channel); int get_dma_residue(unsigned int chan) { struct dma_info *info = get_dma_info(chan); struct dma_channel *channel = get_dma_channel(chan); if (info->ops->get_residue) return info->ops->get_residue(channel); return 0; } EXPORT_SYMBOL(get_dma_residue); static int search_cap(const char **haystack, const char *needle) { const char **p; for (p = haystack; *p; p++) if (strcmp(*p, needle) == 0) return 1; return 0; } /** * request_dma_bycap - Allocate a DMA channel based on its capabilities * @dmac: List of DMA controllers to search * @caps: List of capabilities * * Search all channels of all DMA controllers to find a channel which * matches the requested capabilities. The result is the channel * number if a match is found, or %-ENODEV if no match is found. * * Note that not all DMA controllers export capabilities, in which * case they can never be allocated using this API, and so * request_dma() must be used specifying the channel number. 
*/ int request_dma_bycap(const char **dmac, const char **caps, const char *dev_id) { unsigned int found = 0; struct dma_info *info; const char **p; int i; BUG_ON(!dmac || !caps); list_for_each_entry(info, &registered_dmac_list, list) if (strcmp(*dmac, info->name) == 0) { found = 1; break; } if (!found) return -ENODEV; for (i = 0; i < info->nr_channels; i++) { struct dma_channel *channel = &info->channels[i]; if (unlikely(!channel->caps)) continue; for (p = caps; *p; p++) { if (!search_cap(channel->caps, *p)) break; if (request_dma(channel->chan, dev_id) == 0) return channel->chan; } } return -EINVAL; } EXPORT_SYMBOL(request_dma_bycap); int dmac_search_free_channel(const char *dev_id) { struct dma_channel *channel = { 0 }; struct dma_info *info = get_dma_info(0); int i; for (i = 0; i < info->nr_channels; i++) { channel = &info->channels[i]; if (unlikely(!channel)) return -ENODEV; if (atomic_read(&channel->busy) == 0) break; } if (info->ops->request) { int result = info->ops->request(channel); if (result) return result; atomic_set(&channel->busy, 1); return channel->chan; } return -ENOSYS; } int request_dma(unsigned int chan, const char *dev_id) { struct dma_channel *channel = { 0 }; struct dma_info *info = get_dma_info(chan); int result; channel = get_dma_channel(chan); if (atomic_xchg(&channel->busy, 1)) return -EBUSY; strlcpy(channel->dev_id, dev_id, sizeof(channel->dev_id)); if (info->ops->request) { result = info->ops->request(channel); if (result) atomic_set(&channel->busy, 0); return result; } return 0; } EXPORT_SYMBOL(request_dma); void free_dma(unsigned int chan) { struct dma_info *info = get_dma_info(chan); struct dma_channel *channel = get_dma_channel(chan); if (info->ops->free) info->ops->free(channel); atomic_set(&channel->busy, 0); } EXPORT_SYMBOL(free_dma); void dma_wait_for_completion(unsigned int chan) { struct dma_info *info = get_dma_info(chan); struct dma_channel *channel = get_dma_channel(chan); if (channel->flags & DMA_TEI_CAPABLE) { wait_event(channel->wait_queue, (info->ops->get_residue(channel) == 0)); return; } while (info->ops->get_residue(channel)) cpu_relax(); } EXPORT_SYMBOL(dma_wait_for_completion); int register_chan_caps(const char *dmac, struct dma_chan_caps *caps) { struct dma_info *info; unsigned int found = 0; int i; list_for_each_entry(info, &registered_dmac_list, list) if (strcmp(dmac, info->name) == 0) { found = 1; break; } if (unlikely(!found)) return -ENODEV; for (i = 0; i < info->nr_channels; i++, caps++) { struct dma_channel *channel; if ((info->first_channel_nr + i) != caps->ch_num) return -EINVAL; channel = &info->channels[i]; channel->caps = caps->caplist; } return 0; } EXPORT_SYMBOL(register_chan_caps); void dma_configure_channel(unsigned int chan, unsigned long flags) { struct dma_info *info = get_dma_info(chan); struct dma_channel *channel = get_dma_channel(chan); if (info->ops->configure) info->ops->configure(channel, flags); } EXPORT_SYMBOL(dma_configure_channel); int dma_xfer(unsigned int chan, unsigned long from, unsigned long to, size_t size, unsigned int mode) { struct dma_info *info = get_dma_info(chan); struct dma_channel *channel = get_dma_channel(chan); channel->sar = from; channel->dar = to; channel->count = size; channel->mode = mode; return info->ops->xfer(channel); } EXPORT_SYMBOL(dma_xfer); int dma_extend(unsigned int chan, unsigned long op, void *param) { struct dma_info *info = get_dma_info(chan); struct dma_channel *channel = get_dma_channel(chan); if (info->ops->extend) return info->ops->extend(channel, op, param); return 
-ENOSYS; } EXPORT_SYMBOL(dma_extend); static int dma_read_proc(char *buf, char **start, off_t off, int len, int *eof, void *data) { struct dma_info *info; char *p = buf; if (list_empty(&registered_dmac_list)) return 0; /* * Iterate over each registered DMAC */ list_for_each_entry(info, &registered_dmac_list, list) { int i; /* * Iterate over each channel */ for (i = 0; i < info->nr_channels; i++) { struct dma_channel *channel = info->channels + i; if (!(channel->flags & DMA_CONFIGURED)) continue; p += sprintf(p, "%2d: %14s %s\n", i, info->name, channel->dev_id); } } return p - buf; } int register_dmac(struct dma_info *info) { unsigned int total_channels, i; INIT_LIST_HEAD(&info->list); printk(KERN_INFO "DMA: Registering %s handler (%d channel%s).\n", info->name, info->nr_channels, info->nr_channels > 1 ? "s" : ""); BUG_ON((info->flags & DMAC_CHANNELS_CONFIGURED) && !info->channels); info->pdev = platform_device_register_simple(info->name, -1, NULL, 0); if (IS_ERR(info->pdev)) return PTR_ERR(info->pdev); /* * Don't touch pre-configured channels */ if (!(info->flags & DMAC_CHANNELS_CONFIGURED)) { unsigned int size; size = sizeof(struct dma_channel) * info->nr_channels; info->channels = kzalloc(size, GFP_KERNEL); if (!info->channels) return -ENOMEM; } total_channels = get_nr_channels(); info->first_vchannel_nr = total_channels; for (i = 0; i < info->nr_channels; i++) { struct dma_channel *chan = &info->channels[i]; atomic_set(&chan->busy, 0); chan->chan = info->first_channel_nr + i; chan->vchan = info->first_channel_nr + i + total_channels; memcpy(chan->dev_id, "Unused", 7); if (info->flags & DMAC_CHANNELS_TEI_CAPABLE) chan->flags |= DMA_TEI_CAPABLE; init_waitqueue_head(&chan->wait_queue); dma_create_sysfs_files(chan, info); } list_add(&info->list, &registered_dmac_list); return 0; } EXPORT_SYMBOL(register_dmac); void unregister_dmac(struct dma_info *info) { unsigned int i; for (i = 0; i < info->nr_channels; i++) dma_remove_sysfs_files(info->channels + i, info); if (!(info->flags & DMAC_CHANNELS_CONFIGURED)) kfree(info->channels); list_del(&info->list); platform_device_unregister(info->pdev); } EXPORT_SYMBOL(unregister_dmac); static int __init dma_api_init(void) { printk(KERN_NOTICE "DMA: Registering DMA API.\n"); create_proc_read_entry("dma", 0, 0, dma_read_proc, 0); return 0; } subsys_initcall(dma_api_init); MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>"); MODULE_DESCRIPTION("DMA API for SuperH"); MODULE_LICENSE("GPL");
gpl-2.0
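For context, a consumer of the SuperH DMA API defined in dma-api.c above would typically go through request_dma(), dma_configure_channel(), dma_xfer() and free_dma(), all of which are exported by that file. The module below is a hypothetical sketch rather than an in-tree user: the channel number, transfer addresses, size and mode value are placeholders, and the real flags/mode encodings are DMAC specific.

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/dma.h>

static int __init dma_api_demo_init(void)
{
	unsigned int chan = 0;			/* assumed to be a free channel */
	int ret;

	ret = request_dma(chan, "dma-api-demo");
	if (ret)
		return ret;

	dma_configure_channel(chan, 0);		/* flags are controller specific */
	ret = dma_xfer(chan, 0x0c000000UL, 0x0c100000UL, 4096, 0);
	if (ret == 0)
		dma_wait_for_completion(chan);	/* relies on the DMAC providing get_residue */

	pr_info("dma-api-demo: residue after xfer: %d\n", get_dma_residue(chan));
	free_dma(chan);
	return 0;
}

static void __exit dma_api_demo_exit(void)
{
}

module_init(dma_api_demo_init);
module_exit(dma_api_demo_exit);
MODULE_LICENSE("GPL");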
KylinUI/android_kernel_oppo_n1
arch/arm/mach-msm/rpm_master_stat.c
1295
6193
/* Copyright (c) 2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/mm.h> #include <asm/uaccess.h> #include <mach/msm_iomap.h> #include "rpm_stats.h" #define MSG_RAM_SIZE_PER_MASTER 32 enum { NUMSHUTDOWNS, ACTIVECORES, MASTER_ID_MAX, }; static char *msm_rpm_master_stats_id_labels[MASTER_ID_MAX] = { [NUMSHUTDOWNS] = "num_shutdowns", [ACTIVECORES] = "active_cores", }; struct msm_rpm_master_stats { unsigned long numshutdowns; unsigned long active_cores; }; struct msm_rpm_master_stats_private_data { void __iomem *reg_base; u32 len; char **master_names; u32 nomasters; char buf[256]; struct msm_rpm_master_stats_platform_data *platform_data; }; static int msm_rpm_master_stats_file_close(struct inode *inode, struct file *file) { struct msm_rpm_master_stats_private_data *private = file->private_data; if (private->reg_base) iounmap(private->reg_base); kfree(file->private_data); return 0; } static int msm_rpm_master_copy_stats( struct msm_rpm_master_stats_private_data *pdata) { struct msm_rpm_master_stats record; static int nomasters; int count; static DEFINE_MUTEX(msm_rpm_master_stats_mutex); int j = 0; mutex_lock(&msm_rpm_master_stats_mutex); /* * iterrate possible nomasters times. * 8960, 8064 have 5 masters. * 8930 has 4 masters. * 9x15 has 3 masters. 
*/ if (nomasters > pdata->nomasters - 1) { nomasters = 0; mutex_unlock(&msm_rpm_master_stats_mutex); return 0; } record.numshutdowns = readl_relaxed(pdata->reg_base + (nomasters * MSG_RAM_SIZE_PER_MASTER)); record.active_cores = readl_relaxed(pdata->reg_base + (nomasters * MSG_RAM_SIZE_PER_MASTER + 4)); count = snprintf(pdata->buf, sizeof(pdata->buf), "%s\n\t%s:%lu\n\t%s:%lu\n", pdata->master_names[nomasters], msm_rpm_master_stats_id_labels[0], record.numshutdowns, msm_rpm_master_stats_id_labels[1], record.active_cores); j = find_first_bit(&record.active_cores, BITS_PER_LONG); while (j < BITS_PER_LONG) { count += snprintf(pdata->buf + count, sizeof(pdata->buf) - count, "\t\tcore%d\n", j); j = find_next_bit(&record.active_cores, BITS_PER_LONG, j + 1); } nomasters++; mutex_unlock(&msm_rpm_master_stats_mutex); return count; } static int msm_rpm_master_stats_file_read(struct file *file, char __user *bufu, size_t count, loff_t *ppos) { struct msm_rpm_master_stats_private_data *prvdata; struct msm_rpm_master_stats_platform_data *pdata; prvdata = file->private_data; if (!prvdata) return -EINVAL; pdata = prvdata->platform_data; if (!pdata) return -EINVAL; if (!bufu || count < 0) return -EINVAL; if ((*ppos <= pdata->phys_size)) { prvdata->len = msm_rpm_master_copy_stats(prvdata); *ppos = 0; } return simple_read_from_buffer(bufu, count, ppos, prvdata->buf, prvdata->len); } static int msm_rpm_master_stats_file_open(struct inode *inode, struct file *file) { struct msm_rpm_master_stats_private_data *prvdata; struct msm_rpm_master_stats_platform_data *pdata; pdata = inode->i_private; file->private_data = kmalloc(sizeof(struct msm_rpm_master_stats_private_data), GFP_KERNEL); if (!file->private_data) return -ENOMEM; prvdata = file->private_data; prvdata->reg_base = ioremap(pdata->phys_addr_base, pdata->phys_size); if (!prvdata->reg_base) { kfree(file->private_data); prvdata = NULL; pr_err("%s: ERROR could not ioremap start=%p, len=%u\n", __func__, (void *)pdata->phys_addr_base, pdata->phys_size); return -EBUSY; } prvdata->len = 0; prvdata->nomasters = pdata->nomasters; prvdata->master_names = pdata->masters; prvdata->platform_data = pdata; return 0; } static const struct file_operations msm_rpm_master_stats_fops = { .owner = THIS_MODULE, .open = msm_rpm_master_stats_file_open, .read = msm_rpm_master_stats_file_read, .release = msm_rpm_master_stats_file_close, .llseek = no_llseek, }; static int __devinit msm_rpm_master_stats_probe(struct platform_device *pdev) { struct dentry *dent; struct msm_rpm_master_stats_platform_data *pdata; struct resource *res; pdata = pdev->dev.platform_data; if (!pdata) return -EINVAL; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pdata->phys_addr_base = res->start; pdata->phys_size = resource_size(res); dent = debugfs_create_file("rpm_master_stats", S_IRUGO, NULL, pdev->dev.platform_data, &msm_rpm_master_stats_fops); if (!dent) { pr_err("%s: ERROR debugfs_create_file failed\n", __func__); return -ENOMEM; } platform_set_drvdata(pdev, dent); return 0; } static int __devexit msm_rpm_master_stats_remove(struct platform_device *pdev) { struct dentry *dent; dent = platform_get_drvdata(pdev); debugfs_remove(dent); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver msm_rpm_master_stats_driver = { .probe = msm_rpm_master_stats_probe, .remove = __devexit_p(msm_rpm_master_stats_remove), .driver = { .name = "msm_rpm_master_stat", .owner = THIS_MODULE, }, }; static int __init msm_rpm_master_stats_init(void) { return 
platform_driver_register(&msm_rpm_master_stats_driver); } static void __exit msm_rpm_master_stats_exit(void) { platform_driver_unregister(&msm_rpm_master_stats_driver); } module_init(msm_rpm_master_stats_init); module_exit(msm_rpm_master_stats_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("MSM RPM Master Statistics driver"); MODULE_VERSION("1.0"); MODULE_ALIAS("platform:msm_master_stat_log");
gpl-2.0
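The msm_rpm_master_stat driver above only exposes its data through the debugfs file it creates, so reading the statistics back is just a matter of opening that node. A minimal userspace reader is sketched below; the /sys/kernel/debug mount point is the usual default and is an assumption here.

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/rpm_master_stats", "r");

	if (!f) {
		perror("rpm_master_stats");
		return 1;
	}
	/* each record looks like: "<master>\n\tnum_shutdowns:<n>\n\tactive_cores:<n>\n" */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}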
abhishekr700/Nemesis_Kernel
drivers/gpu/drm/exynos/exynos_drm_fimc.c
2063
49186
/* * Copyright (C) 2012 Samsung Electronics Co.Ltd * Authors: * Eunchul Kim <chulspro.kim@samsung.com> * Jinyoung Jeon <jy0.jeon@samsung.com> * Sangmin Lee <lsmin.lee@samsung.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> #include <linux/clk.h> #include <linux/pm_runtime.h> #include <drm/drmP.h> #include <drm/exynos_drm.h> #include "regs-fimc.h" #include "exynos_drm_ipp.h" #include "exynos_drm_fimc.h" /* * FIMC stands for Fully Interactive Mobile Camera and * supports image scaler/rotator and input/output DMA operations. * input DMA reads image data from the memory. * output DMA writes image data to memory. * FIMC supports image rotation and image effect functions. * * M2M operation : supports crop/scale/rotation/csc so on. * Memory ----> FIMC H/W ----> Memory. * Writeback operation : supports cloned screen with FIMD. * FIMD ----> FIMC H/W ----> Memory. * Output operation : supports direct display using local path. * Memory ----> FIMC H/W ----> FIMD. */ /* * TODO * 1. check suspend/resume api if needed. * 2. need to check use case platform_device_id. * 3. check src/dst size with, height. * 4. added check_prepare api for right register. * 5. need to add supported list in prop_list. * 6. check prescaler/scaler optimization. */ #define FIMC_MAX_DEVS 4 #define FIMC_MAX_SRC 2 #define FIMC_MAX_DST 32 #define FIMC_SHFACTOR 10 #define FIMC_BUF_STOP 1 #define FIMC_BUF_START 2 #define FIMC_REG_SZ 32 #define FIMC_WIDTH_ITU_709 1280 #define FIMC_REFRESH_MAX 60 #define FIMC_REFRESH_MIN 12 #define FIMC_CROP_MAX 8192 #define FIMC_CROP_MIN 32 #define FIMC_SCALE_MAX 4224 #define FIMC_SCALE_MIN 32 #define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev)) #define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\ struct fimc_context, ippdrv); #define fimc_read(offset) readl(ctx->regs + (offset)) #define fimc_write(cfg, offset) writel(cfg, ctx->regs + (offset)) enum fimc_wb { FIMC_WB_NONE, FIMC_WB_A, FIMC_WB_B, }; enum { FIMC_CLK_LCLK, FIMC_CLK_GATE, FIMC_CLK_WB_A, FIMC_CLK_WB_B, FIMC_CLK_MUX, FIMC_CLK_PARENT, FIMC_CLKS_MAX }; static const char * const fimc_clock_names[] = { [FIMC_CLK_LCLK] = "sclk_fimc", [FIMC_CLK_GATE] = "fimc", [FIMC_CLK_WB_A] = "pxl_async0", [FIMC_CLK_WB_B] = "pxl_async1", [FIMC_CLK_MUX] = "mux", [FIMC_CLK_PARENT] = "parent", }; #define FIMC_DEFAULT_LCLK_FREQUENCY 133000000UL /* * A structure of scaler. * * @range: narrow, wide. * @bypass: unused scaler path. * @up_h: horizontal scale up. * @up_v: vertical scale up. * @hratio: horizontal ratio. * @vratio: vertical ratio. */ struct fimc_scaler { bool range; bool bypass; bool up_h; bool up_v; u32 hratio; u32 vratio; }; /* * A structure of scaler capability. * * find user manual table 43-1. * @in_hori: scaler input horizontal size. * @bypass: scaler bypass mode. * @dst_h_wo_rot: target horizontal size without output rotation. * @dst_h_rot: target horizontal size with output rotation. * @rl_w_wo_rot: real width without input rotation. * @rl_h_rot: real height without output rotation. 
*/ struct fimc_capability { /* scaler */ u32 in_hori; u32 bypass; /* output rotator */ u32 dst_h_wo_rot; u32 dst_h_rot; /* input rotator */ u32 rl_w_wo_rot; u32 rl_h_rot; }; /* * A structure of fimc context. * * @ippdrv: prepare initialization using ippdrv. * @regs_res: register resources. * @regs: memory mapped io registers. * @lock: locking of operations. * @clocks: fimc clocks. * @clk_frequency: LCLK clock frequency. * @sysreg: handle to SYSREG block regmap. * @sc: scaler infomations. * @pol: porarity of writeback. * @id: fimc id. * @irq: irq number. * @suspended: qos operations. */ struct fimc_context { struct exynos_drm_ippdrv ippdrv; struct resource *regs_res; void __iomem *regs; struct mutex lock; struct clk *clocks[FIMC_CLKS_MAX]; u32 clk_frequency; struct regmap *sysreg; struct fimc_scaler sc; struct exynos_drm_ipp_pol pol; int id; int irq; bool suspended; }; static void fimc_sw_reset(struct fimc_context *ctx) { u32 cfg; DRM_DEBUG_KMS("%s\n", __func__); /* stop dma operation */ cfg = fimc_read(EXYNOS_CISTATUS); if (EXYNOS_CISTATUS_GET_ENVID_STATUS(cfg)) { cfg = fimc_read(EXYNOS_MSCTRL); cfg &= ~EXYNOS_MSCTRL_ENVID; fimc_write(cfg, EXYNOS_MSCTRL); } cfg = fimc_read(EXYNOS_CISRCFMT); cfg |= EXYNOS_CISRCFMT_ITU601_8BIT; fimc_write(cfg, EXYNOS_CISRCFMT); /* disable image capture */ cfg = fimc_read(EXYNOS_CIIMGCPT); cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN); fimc_write(cfg, EXYNOS_CIIMGCPT); /* s/w reset */ cfg = fimc_read(EXYNOS_CIGCTRL); cfg |= (EXYNOS_CIGCTRL_SWRST); fimc_write(cfg, EXYNOS_CIGCTRL); /* s/w reset complete */ cfg = fimc_read(EXYNOS_CIGCTRL); cfg &= ~EXYNOS_CIGCTRL_SWRST; fimc_write(cfg, EXYNOS_CIGCTRL); /* reset sequence */ fimc_write(0x0, EXYNOS_CIFCNTSEQ); } static int fimc_set_camblk_fimd0_wb(struct fimc_context *ctx) { DRM_DEBUG_KMS("%s\n", __func__); return regmap_update_bits(ctx->sysreg, SYSREG_CAMERA_BLK, SYSREG_FIMD0WB_DEST_MASK, ctx->id << SYSREG_FIMD0WB_DEST_SHIFT); } static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb) { u32 cfg; DRM_DEBUG_KMS("%s:wb[%d]\n", __func__, wb); cfg = fimc_read(EXYNOS_CIGCTRL); cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK | EXYNOS_CIGCTRL_SELCAM_ITU_MASK | EXYNOS_CIGCTRL_SELCAM_MIPI_MASK | EXYNOS_CIGCTRL_SELCAM_FIMC_MASK | EXYNOS_CIGCTRL_SELWB_CAMIF_MASK | EXYNOS_CIGCTRL_SELWRITEBACK_MASK); switch (wb) { case FIMC_WB_A: cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A | EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK); break; case FIMC_WB_B: cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B | EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK); break; case FIMC_WB_NONE: default: cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A | EXYNOS_CIGCTRL_SELWRITEBACK_A | EXYNOS_CIGCTRL_SELCAM_MIPI_A | EXYNOS_CIGCTRL_SELCAM_FIMC_ITU); break; } fimc_write(cfg, EXYNOS_CIGCTRL); } static void fimc_set_polarity(struct fimc_context *ctx, struct exynos_drm_ipp_pol *pol) { u32 cfg; DRM_DEBUG_KMS("%s:inv_pclk[%d]inv_vsync[%d]\n", __func__, pol->inv_pclk, pol->inv_vsync); DRM_DEBUG_KMS("%s:inv_href[%d]inv_hsync[%d]\n", __func__, pol->inv_href, pol->inv_hsync); cfg = fimc_read(EXYNOS_CIGCTRL); cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC | EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC); if (pol->inv_pclk) cfg |= EXYNOS_CIGCTRL_INVPOLPCLK; if (pol->inv_vsync) cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC; if (pol->inv_href) cfg |= EXYNOS_CIGCTRL_INVPOLHREF; if (pol->inv_hsync) cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC; fimc_write(cfg, EXYNOS_CIGCTRL); } static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable) { u32 cfg; DRM_DEBUG_KMS("%s:enable[%d]\n", 
__func__, enable); cfg = fimc_read(EXYNOS_CIGCTRL); if (enable) cfg |= EXYNOS_CIGCTRL_CAM_JPEG; else cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG; fimc_write(cfg, EXYNOS_CIGCTRL); } static void fimc_handle_irq(struct fimc_context *ctx, bool enable, bool overflow, bool level) { u32 cfg; DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__, enable, overflow, level); cfg = fimc_read(EXYNOS_CIGCTRL); if (enable) { cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL); cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE; if (overflow) cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN; if (level) cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL; } else cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE); fimc_write(cfg, EXYNOS_CIGCTRL); } static void fimc_clear_irq(struct fimc_context *ctx) { u32 cfg; DRM_DEBUG_KMS("%s\n", __func__); cfg = fimc_read(EXYNOS_CIGCTRL); cfg |= EXYNOS_CIGCTRL_IRQ_CLR; fimc_write(cfg, EXYNOS_CIGCTRL); } static bool fimc_check_ovf(struct fimc_context *ctx) { struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; u32 cfg, status, flag; status = fimc_read(EXYNOS_CISTATUS); flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB | EXYNOS_CISTATUS_OVFICR; DRM_DEBUG_KMS("%s:flag[0x%x]\n", __func__, flag); if (status & flag) { cfg = fimc_read(EXYNOS_CIWDOFST); cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB | EXYNOS_CIWDOFST_CLROVFICR); fimc_write(cfg, EXYNOS_CIWDOFST); cfg = fimc_read(EXYNOS_CIWDOFST); cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB | EXYNOS_CIWDOFST_CLROVFICR); fimc_write(cfg, EXYNOS_CIWDOFST); dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n", ctx->id, status); return true; } return false; } static bool fimc_check_frame_end(struct fimc_context *ctx) { u32 cfg; cfg = fimc_read(EXYNOS_CISTATUS); DRM_DEBUG_KMS("%s:cfg[0x%x]\n", __func__, cfg); if (!(cfg & EXYNOS_CISTATUS_FRAMEEND)) return false; cfg &= ~(EXYNOS_CISTATUS_FRAMEEND); fimc_write(cfg, EXYNOS_CISTATUS); return true; } static int fimc_get_buf_id(struct fimc_context *ctx) { u32 cfg; int frame_cnt, buf_id; DRM_DEBUG_KMS("%s\n", __func__); cfg = fimc_read(EXYNOS_CISTATUS2); frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg); if (frame_cnt == 0) frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg); DRM_DEBUG_KMS("%s:present[%d]before[%d]\n", __func__, EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg), EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg)); if (frame_cnt == 0) { DRM_ERROR("failed to get frame count.\n"); return -EIO; } buf_id = frame_cnt - 1; DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id); return buf_id; } static void fimc_handle_lastend(struct fimc_context *ctx, bool enable) { u32 cfg; DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable); cfg = fimc_read(EXYNOS_CIOCTRL); if (enable) cfg |= EXYNOS_CIOCTRL_LASTENDEN; else cfg &= ~EXYNOS_CIOCTRL_LASTENDEN; fimc_write(cfg, EXYNOS_CIOCTRL); } static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt) { struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; u32 cfg; DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt); /* RGB */ cfg = fimc_read(EXYNOS_CISCCTRL); cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK; switch (fmt) { case DRM_FORMAT_RGB565: cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565; fimc_write(cfg, EXYNOS_CISCCTRL); return 0; case DRM_FORMAT_RGB888: case DRM_FORMAT_XRGB8888: cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888; fimc_write(cfg, EXYNOS_CISCCTRL); return 0; default: /* bypass */ break; } /* YUV */ cfg = fimc_read(EXYNOS_MSCTRL); cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK | EXYNOS_MSCTRL_C_INT_IN_2PLANE | EXYNOS_MSCTRL_ORDER422_YCBYCR); switch (fmt) { 
case DRM_FORMAT_YUYV: cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR; break; case DRM_FORMAT_YVYU: cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB; break; case DRM_FORMAT_UYVY: cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY; break; case DRM_FORMAT_VYUY: case DRM_FORMAT_YUV444: cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY; break; case DRM_FORMAT_NV21: case DRM_FORMAT_NV61: cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB | EXYNOS_MSCTRL_C_INT_IN_2PLANE); break; case DRM_FORMAT_YUV422: case DRM_FORMAT_YUV420: case DRM_FORMAT_YVU420: cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE; break; case DRM_FORMAT_NV12: case DRM_FORMAT_NV12MT: case DRM_FORMAT_NV16: cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR | EXYNOS_MSCTRL_C_INT_IN_2PLANE); break; default: dev_err(ippdrv->dev, "inavlid source yuv order 0x%x.\n", fmt); return -EINVAL; } fimc_write(cfg, EXYNOS_MSCTRL); return 0; } static int fimc_src_set_fmt(struct device *dev, u32 fmt) { struct fimc_context *ctx = get_fimc_context(dev); struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; u32 cfg; DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt); cfg = fimc_read(EXYNOS_MSCTRL); cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB; switch (fmt) { case DRM_FORMAT_RGB565: case DRM_FORMAT_RGB888: case DRM_FORMAT_XRGB8888: cfg |= EXYNOS_MSCTRL_INFORMAT_RGB; break; case DRM_FORMAT_YUV444: cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420; break; case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE; break; case DRM_FORMAT_NV16: case DRM_FORMAT_NV61: case DRM_FORMAT_YUV422: cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422; break; case DRM_FORMAT_YUV420: case DRM_FORMAT_YVU420: case DRM_FORMAT_NV12: case DRM_FORMAT_NV21: case DRM_FORMAT_NV12MT: cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420; break; default: dev_err(ippdrv->dev, "inavlid source format 0x%x.\n", fmt); return -EINVAL; } fimc_write(cfg, EXYNOS_MSCTRL); cfg = fimc_read(EXYNOS_CIDMAPARAM); cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK; if (fmt == DRM_FORMAT_NV12MT) cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32; else cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR; fimc_write(cfg, EXYNOS_CIDMAPARAM); return fimc_src_set_fmt_order(ctx, fmt); } static int fimc_src_set_transf(struct device *dev, enum drm_exynos_degree degree, enum drm_exynos_flip flip, bool *swap) { struct fimc_context *ctx = get_fimc_context(dev); struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; u32 cfg1, cfg2; DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__, degree, flip); cfg1 = fimc_read(EXYNOS_MSCTRL); cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR | EXYNOS_MSCTRL_FLIP_Y_MIRROR); cfg2 = fimc_read(EXYNOS_CITRGFMT); cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE; switch (degree) { case EXYNOS_DRM_DEGREE_0: if (flip & EXYNOS_DRM_FLIP_VERTICAL) cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR; if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR; break; case EXYNOS_DRM_DEGREE_90: cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE; if (flip & EXYNOS_DRM_FLIP_VERTICAL) cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR; if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR; break; case EXYNOS_DRM_DEGREE_180: cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR | EXYNOS_MSCTRL_FLIP_Y_MIRROR); if (flip & EXYNOS_DRM_FLIP_VERTICAL) cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR; if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR; break; case EXYNOS_DRM_DEGREE_270: cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR | EXYNOS_MSCTRL_FLIP_Y_MIRROR); cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE; if (flip & EXYNOS_DRM_FLIP_VERTICAL) cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR; if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) cfg1 &= 
~EXYNOS_MSCTRL_FLIP_Y_MIRROR; break; default: dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree); return -EINVAL; } fimc_write(cfg1, EXYNOS_MSCTRL); fimc_write(cfg2, EXYNOS_CITRGFMT); *swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0; return 0; } static int fimc_set_window(struct fimc_context *ctx, struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) { u32 cfg, h1, h2, v1, v2; /* cropped image */ h1 = pos->x; h2 = sz->hsize - pos->w - pos->x; v1 = pos->y; v2 = sz->vsize - pos->h - pos->y; DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n", __func__, pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize); DRM_DEBUG_KMS("%s:h1[%d]h2[%d]v1[%d]v2[%d]\n", __func__, h1, h2, v1, v2); /* * set window offset 1, 2 size * check figure 43-21 in user manual */ cfg = fimc_read(EXYNOS_CIWDOFST); cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK | EXYNOS_CIWDOFST_WINVEROFST_MASK); cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) | EXYNOS_CIWDOFST_WINVEROFST(v1)); cfg |= EXYNOS_CIWDOFST_WINOFSEN; fimc_write(cfg, EXYNOS_CIWDOFST); cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) | EXYNOS_CIWDOFST2_WINVEROFST2(v2)); fimc_write(cfg, EXYNOS_CIWDOFST2); return 0; } static int fimc_src_set_size(struct device *dev, int swap, struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) { struct fimc_context *ctx = get_fimc_context(dev); struct drm_exynos_pos img_pos = *pos; struct drm_exynos_sz img_sz = *sz; u32 cfg; DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n", __func__, swap, sz->hsize, sz->vsize); /* original size */ cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) | EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize)); fimc_write(cfg, EXYNOS_ORGISIZE); DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__, pos->x, pos->y, pos->w, pos->h); if (swap) { img_pos.w = pos->h; img_pos.h = pos->w; img_sz.hsize = sz->vsize; img_sz.vsize = sz->hsize; } /* set input DMA image size */ cfg = fimc_read(EXYNOS_CIREAL_ISIZE); cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK | EXYNOS_CIREAL_ISIZE_WIDTH_MASK); cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) | EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h)); fimc_write(cfg, EXYNOS_CIREAL_ISIZE); /* * set input FIFO image size * for now, we support only ITU601 8 bit mode */ cfg = (EXYNOS_CISRCFMT_ITU601_8BIT | EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) | EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize)); fimc_write(cfg, EXYNOS_CISRCFMT); /* offset Y(RGB), Cb, Cr */ cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) | EXYNOS_CIIYOFF_VERTICAL(img_pos.y)); fimc_write(cfg, EXYNOS_CIIYOFF); cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) | EXYNOS_CIICBOFF_VERTICAL(img_pos.y)); fimc_write(cfg, EXYNOS_CIICBOFF); cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) | EXYNOS_CIICROFF_VERTICAL(img_pos.y)); fimc_write(cfg, EXYNOS_CIICROFF); return fimc_set_window(ctx, &img_pos, &img_sz); } static int fimc_src_set_addr(struct device *dev, struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id, enum drm_exynos_ipp_buf_type buf_type) { struct fimc_context *ctx = get_fimc_context(dev); struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; struct drm_exynos_ipp_property *property; struct drm_exynos_ipp_config *config; if (!c_node) { DRM_ERROR("failed to get c_node.\n"); return -EINVAL; } property = &c_node->property; DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__, property->prop_id, buf_id, buf_type); if (buf_id > FIMC_MAX_SRC) { dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id); return -ENOMEM; } /* address register set */ switch (buf_type) { case IPP_BUF_ENQUEUE: config = 
&property->config[EXYNOS_DRM_OPS_SRC]; fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y], EXYNOS_CIIYSA(buf_id)); if (config->fmt == DRM_FORMAT_YVU420) { fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR], EXYNOS_CIICBSA(buf_id)); fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB], EXYNOS_CIICRSA(buf_id)); } else { fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB], EXYNOS_CIICBSA(buf_id)); fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR], EXYNOS_CIICRSA(buf_id)); } break; case IPP_BUF_DEQUEUE: fimc_write(0x0, EXYNOS_CIIYSA(buf_id)); fimc_write(0x0, EXYNOS_CIICBSA(buf_id)); fimc_write(0x0, EXYNOS_CIICRSA(buf_id)); break; default: /* bypass */ break; } return 0; } static struct exynos_drm_ipp_ops fimc_src_ops = { .set_fmt = fimc_src_set_fmt, .set_transf = fimc_src_set_transf, .set_size = fimc_src_set_size, .set_addr = fimc_src_set_addr, }; static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt) { struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; u32 cfg; DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt); /* RGB */ cfg = fimc_read(EXYNOS_CISCCTRL); cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK; switch (fmt) { case DRM_FORMAT_RGB565: cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565; fimc_write(cfg, EXYNOS_CISCCTRL); return 0; case DRM_FORMAT_RGB888: cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888; fimc_write(cfg, EXYNOS_CISCCTRL); return 0; case DRM_FORMAT_XRGB8888: cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 | EXYNOS_CISCCTRL_EXTRGB_EXTENSION); fimc_write(cfg, EXYNOS_CISCCTRL); break; default: /* bypass */ break; } /* YUV */ cfg = fimc_read(EXYNOS_CIOCTRL); cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK | EXYNOS_CIOCTRL_ORDER422_MASK | EXYNOS_CIOCTRL_YCBCR_PLANE_MASK); switch (fmt) { case DRM_FORMAT_XRGB8888: cfg |= EXYNOS_CIOCTRL_ALPHA_OUT; break; case DRM_FORMAT_YUYV: cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR; break; case DRM_FORMAT_YVYU: cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB; break; case DRM_FORMAT_UYVY: cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY; break; case DRM_FORMAT_VYUY: cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY; break; case DRM_FORMAT_NV21: case DRM_FORMAT_NV61: cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB; cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE; break; case DRM_FORMAT_YUV422: case DRM_FORMAT_YUV420: case DRM_FORMAT_YVU420: cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE; break; case DRM_FORMAT_NV12: case DRM_FORMAT_NV12MT: case DRM_FORMAT_NV16: cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR; cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE; break; default: dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt); return -EINVAL; } fimc_write(cfg, EXYNOS_CIOCTRL); return 0; } static int fimc_dst_set_fmt(struct device *dev, u32 fmt) { struct fimc_context *ctx = get_fimc_context(dev); struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; u32 cfg; DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt); cfg = fimc_read(EXYNOS_CIEXTEN); if (fmt == DRM_FORMAT_AYUV) { cfg |= EXYNOS_CIEXTEN_YUV444_OUT; fimc_write(cfg, EXYNOS_CIEXTEN); } else { cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT; fimc_write(cfg, EXYNOS_CIEXTEN); cfg = fimc_read(EXYNOS_CITRGFMT); cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK; switch (fmt) { case DRM_FORMAT_RGB565: case DRM_FORMAT_RGB888: case DRM_FORMAT_XRGB8888: cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB; break; case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE; break; case DRM_FORMAT_NV16: case DRM_FORMAT_NV61: case DRM_FORMAT_YUV422: cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422; break; case DRM_FORMAT_YUV420: case DRM_FORMAT_YVU420: case DRM_FORMAT_NV12: case DRM_FORMAT_NV12MT: case DRM_FORMAT_NV21: 
cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420; break; default: dev_err(ippdrv->dev, "inavlid target format 0x%x.\n", fmt); return -EINVAL; } fimc_write(cfg, EXYNOS_CITRGFMT); } cfg = fimc_read(EXYNOS_CIDMAPARAM); cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK; if (fmt == DRM_FORMAT_NV12MT) cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32; else cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR; fimc_write(cfg, EXYNOS_CIDMAPARAM); return fimc_dst_set_fmt_order(ctx, fmt); } static int fimc_dst_set_transf(struct device *dev, enum drm_exynos_degree degree, enum drm_exynos_flip flip, bool *swap) { struct fimc_context *ctx = get_fimc_context(dev); struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; u32 cfg; DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__, degree, flip); cfg = fimc_read(EXYNOS_CITRGFMT); cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK; cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE; switch (degree) { case EXYNOS_DRM_DEGREE_0: if (flip & EXYNOS_DRM_FLIP_VERTICAL) cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR; if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR; break; case EXYNOS_DRM_DEGREE_90: cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE; if (flip & EXYNOS_DRM_FLIP_VERTICAL) cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR; if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR; break; case EXYNOS_DRM_DEGREE_180: cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR | EXYNOS_CITRGFMT_FLIP_Y_MIRROR); if (flip & EXYNOS_DRM_FLIP_VERTICAL) cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR; if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR; break; case EXYNOS_DRM_DEGREE_270: cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE | EXYNOS_CITRGFMT_FLIP_X_MIRROR | EXYNOS_CITRGFMT_FLIP_Y_MIRROR); if (flip & EXYNOS_DRM_FLIP_VERTICAL) cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR; if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR; break; default: dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree); return -EINVAL; } fimc_write(cfg, EXYNOS_CITRGFMT); *swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 
1 : 0; return 0; } static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift) { DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst); if (src >= dst * 64) { DRM_ERROR("failed to make ratio and shift.\n"); return -EINVAL; } else if (src >= dst * 32) { *ratio = 32; *shift = 5; } else if (src >= dst * 16) { *ratio = 16; *shift = 4; } else if (src >= dst * 8) { *ratio = 8; *shift = 3; } else if (src >= dst * 4) { *ratio = 4; *shift = 2; } else if (src >= dst * 2) { *ratio = 2; *shift = 1; } else { *ratio = 1; *shift = 0; } return 0; } static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc, struct drm_exynos_pos *src, struct drm_exynos_pos *dst) { struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; u32 cfg, cfg_ext, shfactor; u32 pre_dst_width, pre_dst_height; u32 pre_hratio, hfactor, pre_vratio, vfactor; int ret = 0; u32 src_w, src_h, dst_w, dst_h; cfg_ext = fimc_read(EXYNOS_CITRGFMT); if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) { src_w = src->h; src_h = src->w; } else { src_w = src->w; src_h = src->h; } if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) { dst_w = dst->h; dst_h = dst->w; } else { dst_w = dst->w; dst_h = dst->h; } ret = fimc_get_ratio_shift(src_w, dst_w, &pre_hratio, &hfactor); if (ret) { dev_err(ippdrv->dev, "failed to get ratio horizontal.\n"); return ret; } ret = fimc_get_ratio_shift(src_h, dst_h, &pre_vratio, &vfactor); if (ret) { dev_err(ippdrv->dev, "failed to get ratio vertical.\n"); return ret; } pre_dst_width = src_w / pre_hratio; pre_dst_height = src_h / pre_vratio; DRM_DEBUG_KMS("%s:pre_dst_width[%d]pre_dst_height[%d]\n", __func__, pre_dst_width, pre_dst_height); DRM_DEBUG_KMS("%s:pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n", __func__, pre_hratio, hfactor, pre_vratio, vfactor); sc->hratio = (src_w << 14) / (dst_w << hfactor); sc->vratio = (src_h << 14) / (dst_h << vfactor); sc->up_h = (dst_w >= src_w) ? true : false; sc->up_v = (dst_h >= src_h) ? 
true : false; DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n", __func__, sc->hratio, sc->vratio, sc->up_h, sc->up_v); shfactor = FIMC_SHFACTOR - (hfactor + vfactor); DRM_DEBUG_KMS("%s:shfactor[%d]\n", __func__, shfactor); cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) | EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) | EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio)); fimc_write(cfg, EXYNOS_CISCPRERATIO); cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) | EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height)); fimc_write(cfg, EXYNOS_CISCPREDST); return ret; } static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc) { u32 cfg, cfg_ext; DRM_DEBUG_KMS("%s:range[%d]bypass[%d]up_h[%d]up_v[%d]\n", __func__, sc->range, sc->bypass, sc->up_h, sc->up_v); DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]\n", __func__, sc->hratio, sc->vratio); cfg = fimc_read(EXYNOS_CISCCTRL); cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS | EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V | EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK | EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK | EXYNOS_CISCCTRL_CSCR2Y_WIDE | EXYNOS_CISCCTRL_CSCY2R_WIDE); if (sc->range) cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE | EXYNOS_CISCCTRL_CSCY2R_WIDE); if (sc->bypass) cfg |= EXYNOS_CISCCTRL_SCALERBYPASS; if (sc->up_h) cfg |= EXYNOS_CISCCTRL_SCALEUP_H; if (sc->up_v) cfg |= EXYNOS_CISCCTRL_SCALEUP_V; cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) | EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6))); fimc_write(cfg, EXYNOS_CISCCTRL); cfg_ext = fimc_read(EXYNOS_CIEXTEN); cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK; cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK; cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) | EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio)); fimc_write(cfg_ext, EXYNOS_CIEXTEN); } static int fimc_dst_set_size(struct device *dev, int swap, struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) { struct fimc_context *ctx = get_fimc_context(dev); struct drm_exynos_pos img_pos = *pos; struct drm_exynos_sz img_sz = *sz; u32 cfg; DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n", __func__, swap, sz->hsize, sz->vsize); /* original size */ cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) | EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize)); fimc_write(cfg, EXYNOS_ORGOSIZE); DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__, pos->x, pos->y, pos->w, pos->h); /* CSC ITU */ cfg = fimc_read(EXYNOS_CIGCTRL); cfg &= ~EXYNOS_CIGCTRL_CSC_MASK; if (sz->hsize >= FIMC_WIDTH_ITU_709) cfg |= EXYNOS_CIGCTRL_CSC_ITU709; else cfg |= EXYNOS_CIGCTRL_CSC_ITU601; fimc_write(cfg, EXYNOS_CIGCTRL); if (swap) { img_pos.w = pos->h; img_pos.h = pos->w; img_sz.hsize = sz->vsize; img_sz.vsize = sz->hsize; } /* target image size */ cfg = fimc_read(EXYNOS_CITRGFMT); cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK | EXYNOS_CITRGFMT_TARGETV_MASK); cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) | EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h)); fimc_write(cfg, EXYNOS_CITRGFMT); /* target area */ cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h); fimc_write(cfg, EXYNOS_CITAREA); /* offset Y(RGB), Cb, Cr */ cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) | EXYNOS_CIOYOFF_VERTICAL(img_pos.y)); fimc_write(cfg, EXYNOS_CIOYOFF); cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) | EXYNOS_CIOCBOFF_VERTICAL(img_pos.y)); fimc_write(cfg, EXYNOS_CIOCBOFF); cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) | EXYNOS_CIOCROFF_VERTICAL(img_pos.y)); fimc_write(cfg, EXYNOS_CIOCROFF); return 0; } static int fimc_dst_get_buf_seq(struct fimc_context *ctx) { u32 cfg, i, buf_num = 0; u32 mask = 0x00000001; cfg = 
fimc_read(EXYNOS_CIFCNTSEQ); for (i = 0; i < FIMC_REG_SZ; i++) if (cfg & (mask << i)) buf_num++; DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num); return buf_num; } static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id, enum drm_exynos_ipp_buf_type buf_type) { struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; bool enable; u32 cfg; u32 mask = 0x00000001 << buf_id; int ret = 0; DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__, buf_id, buf_type); mutex_lock(&ctx->lock); /* mask register set */ cfg = fimc_read(EXYNOS_CIFCNTSEQ); switch (buf_type) { case IPP_BUF_ENQUEUE: enable = true; break; case IPP_BUF_DEQUEUE: enable = false; break; default: dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n"); ret = -EINVAL; goto err_unlock; } /* sequence id */ cfg &= ~mask; cfg |= (enable << buf_id); fimc_write(cfg, EXYNOS_CIFCNTSEQ); /* interrupt enable */ if (buf_type == IPP_BUF_ENQUEUE && fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START) fimc_handle_irq(ctx, true, false, true); /* interrupt disable */ if (buf_type == IPP_BUF_DEQUEUE && fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP) fimc_handle_irq(ctx, false, false, true); err_unlock: mutex_unlock(&ctx->lock); return ret; } static int fimc_dst_set_addr(struct device *dev, struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id, enum drm_exynos_ipp_buf_type buf_type) { struct fimc_context *ctx = get_fimc_context(dev); struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; struct drm_exynos_ipp_property *property; struct drm_exynos_ipp_config *config; if (!c_node) { DRM_ERROR("failed to get c_node.\n"); return -EINVAL; } property = &c_node->property; DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__, property->prop_id, buf_id, buf_type); if (buf_id > FIMC_MAX_DST) { dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id); return -ENOMEM; } /* address register set */ switch (buf_type) { case IPP_BUF_ENQUEUE: config = &property->config[EXYNOS_DRM_OPS_DST]; fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y], EXYNOS_CIOYSA(buf_id)); if (config->fmt == DRM_FORMAT_YVU420) { fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR], EXYNOS_CIOCBSA(buf_id)); fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB], EXYNOS_CIOCRSA(buf_id)); } else { fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB], EXYNOS_CIOCBSA(buf_id)); fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR], EXYNOS_CIOCRSA(buf_id)); } break; case IPP_BUF_DEQUEUE: fimc_write(0x0, EXYNOS_CIOYSA(buf_id)); fimc_write(0x0, EXYNOS_CIOCBSA(buf_id)); fimc_write(0x0, EXYNOS_CIOCRSA(buf_id)); break; default: /* bypass */ break; } return fimc_dst_set_buf_seq(ctx, buf_id, buf_type); } static struct exynos_drm_ipp_ops fimc_dst_ops = { .set_fmt = fimc_dst_set_fmt, .set_transf = fimc_dst_set_transf, .set_size = fimc_dst_set_size, .set_addr = fimc_dst_set_addr, }; static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) { DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable); if (enable) { clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); ctx->suspended = false; } else { clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); ctx->suspended = true; } return 0; } static irqreturn_t fimc_irq_handler(int irq, void *dev_id) { struct fimc_context *ctx = dev_id; struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; struct drm_exynos_ipp_event_work *event_work = c_node->event_work; int buf_id; DRM_DEBUG_KMS("%s:fimc 
id[%d]\n", __func__, ctx->id); fimc_clear_irq(ctx); if (fimc_check_ovf(ctx)) return IRQ_NONE; if (!fimc_check_frame_end(ctx)) return IRQ_NONE; buf_id = fimc_get_buf_id(ctx); if (buf_id < 0) return IRQ_HANDLED; DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id); if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) { DRM_ERROR("failed to dequeue.\n"); return IRQ_HANDLED; } event_work->ippdrv = ippdrv; event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id; queue_work(ippdrv->event_workq, (struct work_struct *)event_work); return IRQ_HANDLED; } static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv) { struct drm_exynos_ipp_prop_list *prop_list; DRM_DEBUG_KMS("%s\n", __func__); prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL); if (!prop_list) { DRM_ERROR("failed to alloc property list.\n"); return -ENOMEM; } prop_list->version = 1; prop_list->writeback = 1; prop_list->refresh_min = FIMC_REFRESH_MIN; prop_list->refresh_max = FIMC_REFRESH_MAX; prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) | (1 << EXYNOS_DRM_FLIP_VERTICAL) | (1 << EXYNOS_DRM_FLIP_HORIZONTAL); prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) | (1 << EXYNOS_DRM_DEGREE_90) | (1 << EXYNOS_DRM_DEGREE_180) | (1 << EXYNOS_DRM_DEGREE_270); prop_list->csc = 1; prop_list->crop = 1; prop_list->crop_max.hsize = FIMC_CROP_MAX; prop_list->crop_max.vsize = FIMC_CROP_MAX; prop_list->crop_min.hsize = FIMC_CROP_MIN; prop_list->crop_min.vsize = FIMC_CROP_MIN; prop_list->scale = 1; prop_list->scale_max.hsize = FIMC_SCALE_MAX; prop_list->scale_max.vsize = FIMC_SCALE_MAX; prop_list->scale_min.hsize = FIMC_SCALE_MIN; prop_list->scale_min.vsize = FIMC_SCALE_MIN; ippdrv->prop_list = prop_list; return 0; } static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip) { switch (flip) { case EXYNOS_DRM_FLIP_NONE: case EXYNOS_DRM_FLIP_VERTICAL: case EXYNOS_DRM_FLIP_HORIZONTAL: case EXYNOS_DRM_FLIP_BOTH: return true; default: DRM_DEBUG_KMS("%s:invalid flip\n", __func__); return false; } } static int fimc_ippdrv_check_property(struct device *dev, struct drm_exynos_ipp_property *property) { struct fimc_context *ctx = get_fimc_context(dev); struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list; struct drm_exynos_ipp_config *config; struct drm_exynos_pos *pos; struct drm_exynos_sz *sz; bool swap; int i; DRM_DEBUG_KMS("%s\n", __func__); for_each_ipp_ops(i) { if ((i == EXYNOS_DRM_OPS_SRC) && (property->cmd == IPP_CMD_WB)) continue; config = &property->config[i]; pos = &config->pos; sz = &config->sz; /* check for flip */ if (!fimc_check_drm_flip(config->flip)) { DRM_ERROR("invalid flip.\n"); goto err_property; } /* check for degree */ switch (config->degree) { case EXYNOS_DRM_DEGREE_90: case EXYNOS_DRM_DEGREE_270: swap = true; break; case EXYNOS_DRM_DEGREE_0: case EXYNOS_DRM_DEGREE_180: swap = false; break; default: DRM_ERROR("invalid degree.\n"); goto err_property; } /* check for buffer bound */ if ((pos->x + pos->w > sz->hsize) || (pos->y + pos->h > sz->vsize)) { DRM_ERROR("out of buf bound.\n"); goto err_property; } /* check for crop */ if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) { if (swap) { if ((pos->h < pp->crop_min.hsize) || (sz->vsize > pp->crop_max.hsize) || (pos->w < pp->crop_min.vsize) || (sz->hsize > pp->crop_max.vsize)) { DRM_ERROR("out of crop size.\n"); goto err_property; } } else { if ((pos->w < pp->crop_min.hsize) || (sz->hsize > pp->crop_max.hsize) || (pos->h < pp->crop_min.vsize) || (sz->vsize > pp->crop_max.vsize)) { DRM_ERROR("out of crop size.\n"); goto 
err_property; } } } /* check for scale */ if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) { if (swap) { if ((pos->h < pp->scale_min.hsize) || (sz->vsize > pp->scale_max.hsize) || (pos->w < pp->scale_min.vsize) || (sz->hsize > pp->scale_max.vsize)) { DRM_ERROR("out of scale size.\n"); goto err_property; } } else { if ((pos->w < pp->scale_min.hsize) || (sz->hsize > pp->scale_max.hsize) || (pos->h < pp->scale_min.vsize) || (sz->vsize > pp->scale_max.vsize)) { DRM_ERROR("out of scale size.\n"); goto err_property; } } } } return 0; err_property: for_each_ipp_ops(i) { if ((i == EXYNOS_DRM_OPS_SRC) && (property->cmd == IPP_CMD_WB)) continue; config = &property->config[i]; pos = &config->pos; sz = &config->sz; DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n", i ? "dst" : "src", config->flip, config->degree, pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize); } return -EINVAL; } static void fimc_clear_addr(struct fimc_context *ctx) { int i; DRM_DEBUG_KMS("%s:\n", __func__); for (i = 0; i < FIMC_MAX_SRC; i++) { fimc_write(0, EXYNOS_CIIYSA(i)); fimc_write(0, EXYNOS_CIICBSA(i)); fimc_write(0, EXYNOS_CIICRSA(i)); } for (i = 0; i < FIMC_MAX_DST; i++) { fimc_write(0, EXYNOS_CIOYSA(i)); fimc_write(0, EXYNOS_CIOCBSA(i)); fimc_write(0, EXYNOS_CIOCRSA(i)); } } static int fimc_ippdrv_reset(struct device *dev) { struct fimc_context *ctx = get_fimc_context(dev); DRM_DEBUG_KMS("%s\n", __func__); /* reset h/w block */ fimc_sw_reset(ctx); /* reset scaler capability */ memset(&ctx->sc, 0x0, sizeof(ctx->sc)); fimc_clear_addr(ctx); return 0; } static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd) { struct fimc_context *ctx = get_fimc_context(dev); struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; struct drm_exynos_ipp_property *property; struct drm_exynos_ipp_config *config; struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX]; struct drm_exynos_ipp_set_wb set_wb; int ret, i; u32 cfg0, cfg1; DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd); if (!c_node) { DRM_ERROR("failed to get c_node.\n"); return -EINVAL; } property = &c_node->property; fimc_handle_irq(ctx, true, false, true); for_each_ipp_ops(i) { config = &property->config[i]; img_pos[i] = config->pos; } ret = fimc_set_prescaler(ctx, &ctx->sc, &img_pos[EXYNOS_DRM_OPS_SRC], &img_pos[EXYNOS_DRM_OPS_DST]); if (ret) { dev_err(dev, "failed to set precalser.\n"); return ret; } /* If set ture, we can save jpeg about screen */ fimc_handle_jpeg(ctx, false); fimc_set_scaler(ctx, &ctx->sc); fimc_set_polarity(ctx, &ctx->pol); switch (cmd) { case IPP_CMD_M2M: fimc_set_type_ctrl(ctx, FIMC_WB_NONE); fimc_handle_lastend(ctx, false); /* setup dma */ cfg0 = fimc_read(EXYNOS_MSCTRL); cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK; cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY; fimc_write(cfg0, EXYNOS_MSCTRL); break; case IPP_CMD_WB: fimc_set_type_ctrl(ctx, FIMC_WB_A); fimc_handle_lastend(ctx, true); /* setup FIMD */ ret = fimc_set_camblk_fimd0_wb(ctx); if (ret < 0) { dev_err(dev, "camblk setup failed.\n"); return ret; } set_wb.enable = 1; set_wb.refresh = property->refresh_rate; exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb); break; case IPP_CMD_OUTPUT: default: ret = -EINVAL; dev_err(dev, "invalid operations.\n"); return ret; } /* Reset status */ fimc_write(0x0, EXYNOS_CISTATUS); cfg0 = fimc_read(EXYNOS_CIIMGCPT); cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC; cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC; /* Scaler */ cfg1 = fimc_read(EXYNOS_CISCCTRL); cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK; cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE | 
EXYNOS_CISCCTRL_SCALERSTART); fimc_write(cfg1, EXYNOS_CISCCTRL); /* Enable image capture*/ cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN; fimc_write(cfg0, EXYNOS_CIIMGCPT); /* Disable frame end irq */ cfg0 = fimc_read(EXYNOS_CIGCTRL); cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE; fimc_write(cfg0, EXYNOS_CIGCTRL); cfg0 = fimc_read(EXYNOS_CIOCTRL); cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK; fimc_write(cfg0, EXYNOS_CIOCTRL); if (cmd == IPP_CMD_M2M) { cfg0 = fimc_read(EXYNOS_MSCTRL); cfg0 |= EXYNOS_MSCTRL_ENVID; fimc_write(cfg0, EXYNOS_MSCTRL); cfg0 = fimc_read(EXYNOS_MSCTRL); cfg0 |= EXYNOS_MSCTRL_ENVID; fimc_write(cfg0, EXYNOS_MSCTRL); } return 0; } static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd) { struct fimc_context *ctx = get_fimc_context(dev); struct drm_exynos_ipp_set_wb set_wb = {0, 0}; u32 cfg; DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd); switch (cmd) { case IPP_CMD_M2M: /* Source clear */ cfg = fimc_read(EXYNOS_MSCTRL); cfg &= ~EXYNOS_MSCTRL_INPUT_MASK; cfg &= ~EXYNOS_MSCTRL_ENVID; fimc_write(cfg, EXYNOS_MSCTRL); break; case IPP_CMD_WB: exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb); break; case IPP_CMD_OUTPUT: default: dev_err(dev, "invalid operations.\n"); break; } fimc_handle_irq(ctx, false, false, true); /* reset sequence */ fimc_write(0x0, EXYNOS_CIFCNTSEQ); /* Scaler disable */ cfg = fimc_read(EXYNOS_CISCCTRL); cfg &= ~EXYNOS_CISCCTRL_SCALERSTART; fimc_write(cfg, EXYNOS_CISCCTRL); /* Disable image capture */ cfg = fimc_read(EXYNOS_CIIMGCPT); cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN); fimc_write(cfg, EXYNOS_CIIMGCPT); /* Enable frame end irq */ cfg = fimc_read(EXYNOS_CIGCTRL); cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE; fimc_write(cfg, EXYNOS_CIGCTRL); } static void fimc_put_clocks(struct fimc_context *ctx) { int i; for (i = 0; i < FIMC_CLKS_MAX; i++) { if (IS_ERR(ctx->clocks[i])) continue; clk_put(ctx->clocks[i]); ctx->clocks[i] = ERR_PTR(-EINVAL); } } static int fimc_setup_clocks(struct fimc_context *ctx) { struct device *fimc_dev = ctx->ippdrv.dev; struct device *dev; int ret, i; for (i = 0; i < FIMC_CLKS_MAX; i++) ctx->clocks[i] = ERR_PTR(-EINVAL); for (i = 0; i < FIMC_CLKS_MAX; i++) { if (i == FIMC_CLK_WB_A || i == FIMC_CLK_WB_B) dev = fimc_dev->parent; else dev = fimc_dev; ctx->clocks[i] = clk_get(dev, fimc_clock_names[i]); if (IS_ERR(ctx->clocks[i])) { if (i >= FIMC_CLK_MUX) break; ret = PTR_ERR(ctx->clocks[i]); dev_err(fimc_dev, "failed to get clock: %s\n", fimc_clock_names[i]); goto e_clk_free; } } /* Optional FIMC LCLK parent clock setting */ if (!IS_ERR(ctx->clocks[FIMC_CLK_PARENT])) { ret = clk_set_parent(ctx->clocks[FIMC_CLK_MUX], ctx->clocks[FIMC_CLK_PARENT]); if (ret < 0) { dev_err(fimc_dev, "failed to set parent.\n"); goto e_clk_free; } } ret = clk_set_rate(ctx->clocks[FIMC_CLK_LCLK], ctx->clk_frequency); if (ret < 0) goto e_clk_free; ret = clk_prepare_enable(ctx->clocks[FIMC_CLK_LCLK]); if (!ret) return ret; e_clk_free: fimc_put_clocks(ctx); return ret; } static int fimc_parse_dt(struct fimc_context *ctx) { struct device_node *node = ctx->ippdrv.dev->of_node; /* Handle only devices that support the LCD Writeback data path */ if (!of_property_read_bool(node, "samsung,lcd-wb")) return -ENODEV; if (of_property_read_u32(node, "clock-frequency", &ctx->clk_frequency)) ctx->clk_frequency = FIMC_DEFAULT_LCLK_FREQUENCY; ctx->id = of_alias_get_id(node, "fimc"); if (ctx->id < 0) { dev_err(ctx->ippdrv.dev, "failed to get node alias id.\n"); return -EINVAL; } return 0; } static int fimc_probe(struct platform_device *pdev) { struct 
device *dev = &pdev->dev; struct fimc_context *ctx; struct resource *res; struct exynos_drm_ippdrv *ippdrv; int ret; if (!dev->of_node) { dev_err(dev, "device tree node not found.\n"); return -ENODEV; } ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->ippdrv.dev = dev; ret = fimc_parse_dt(ctx); if (ret < 0) return ret; ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, "samsung,sysreg"); if (IS_ERR(ctx->sysreg)) { dev_err(dev, "syscon regmap lookup failed.\n"); return PTR_ERR(ctx->sysreg); } /* resource memory */ ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ctx->regs = devm_ioremap_resource(dev, ctx->regs_res); if (IS_ERR(ctx->regs)) return PTR_ERR(ctx->regs); /* resource irq */ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(dev, "failed to request irq resource.\n"); return -ENOENT; } ctx->irq = res->start; ret = devm_request_threaded_irq(dev, ctx->irq, NULL, fimc_irq_handler, IRQF_ONESHOT, "drm_fimc", ctx); if (ret < 0) { dev_err(dev, "failed to request irq.\n"); return ret; } ret = fimc_setup_clocks(ctx); if (ret < 0) return ret; ippdrv = &ctx->ippdrv; ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops; ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops; ippdrv->check_property = fimc_ippdrv_check_property; ippdrv->reset = fimc_ippdrv_reset; ippdrv->start = fimc_ippdrv_start; ippdrv->stop = fimc_ippdrv_stop; ret = fimc_init_prop_list(ippdrv); if (ret < 0) { dev_err(dev, "failed to init property list.\n"); goto err_put_clk; } DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id, (int)ippdrv); mutex_init(&ctx->lock); platform_set_drvdata(pdev, ctx); pm_runtime_set_active(dev); pm_runtime_enable(dev); ret = exynos_drm_ippdrv_register(ippdrv); if (ret < 0) { dev_err(dev, "failed to register drm fimc device.\n"); goto err_pm_dis; } dev_info(dev, "drm fimc registered successfully.\n"); return 0; err_pm_dis: pm_runtime_disable(dev); err_put_clk: fimc_put_clocks(ctx); return ret; } static int fimc_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct fimc_context *ctx = get_fimc_context(dev); struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; exynos_drm_ippdrv_unregister(ippdrv); mutex_destroy(&ctx->lock); fimc_put_clocks(ctx); pm_runtime_set_suspended(dev); pm_runtime_disable(dev); return 0; } #ifdef CONFIG_PM_SLEEP static int fimc_suspend(struct device *dev) { struct fimc_context *ctx = get_fimc_context(dev); DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id); if (pm_runtime_suspended(dev)) return 0; return fimc_clk_ctrl(ctx, false); } static int fimc_resume(struct device *dev) { struct fimc_context *ctx = get_fimc_context(dev); DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id); if (!pm_runtime_suspended(dev)) return fimc_clk_ctrl(ctx, true); return 0; } #endif #ifdef CONFIG_PM_RUNTIME static int fimc_runtime_suspend(struct device *dev) { struct fimc_context *ctx = get_fimc_context(dev); DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id); return fimc_clk_ctrl(ctx, false); } static int fimc_runtime_resume(struct device *dev) { struct fimc_context *ctx = get_fimc_context(dev); DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id); return fimc_clk_ctrl(ctx, true); } #endif static const struct dev_pm_ops fimc_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume) SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL) }; static const struct of_device_id fimc_of_match[] = { { .compatible = "samsung,exynos4210-fimc" }, { .compatible = "samsung,exynos4212-fimc" }, { }, }; struct 
platform_driver fimc_driver = { .probe = fimc_probe, .remove = fimc_remove, .driver = { .of_match_table = fimc_of_match, .name = "exynos-drm-fimc", .owner = THIS_MODULE, .pm = &fimc_pm_ops, }, };
gpl-2.0
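The FIMC start/stop paths above change hardware state almost entirely through read-modify-write cycles on memory-mapped registers: read a config word, clear a field with its mask, OR in the new bits, write it back. The sketch below shows that pattern in isolation; the register offset, field macros and the flat regs pointer are illustrative assumptions, not symbols from the driver.

/* Minimal read-modify-write sketch; REG_CTRL and the field macros are
 * hypothetical stand-ins, not the FIMC register map. */
#include <stdint.h>

#define REG_CTRL	0x10u			/* hypothetical register offset */
#define CTRL_MODE_MASK	(0x3u << 4)		/* hypothetical 2-bit field */
#define CTRL_MODE_M2M	(0x1u << 4)

static inline uint32_t reg_read(volatile uint32_t *regs, uint32_t off)
{
	return regs[off / 4];
}

static inline void reg_write(volatile uint32_t *regs, uint32_t off, uint32_t val)
{
	regs[off / 4] = val;
}

/* Change only the MODE field, leaving every other bit of the register intact. */
static void set_mode_m2m(volatile uint32_t *regs)
{
	uint32_t cfg = reg_read(regs, REG_CTRL);

	cfg &= ~CTRL_MODE_MASK;
	cfg |= CTRL_MODE_M2M;
	reg_write(regs, REG_CTRL, cfg);
}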
pastcompute/openwrt-cc-linux-3.14.x-grsecurity
drivers/media/usb/dvb-usb-v2/anysee.c
2063
32741
/* * DVB USB Linux driver for Anysee E30 DVB-C & DVB-T USB2.0 receiver * * Copyright (C) 2007 Antti Palosaari <crope@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * TODO: * - add smart card reader support for Conditional Access (CA) * * Card reader in Anysee is nothing more than ISO 7816 card reader. * There is no hardware CAM in any Anysee device sold. * In my understanding it should be implemented by making own module * for ISO 7816 card reader, like dvb_ca_en50221 is implemented. This * module registers serial interface that can be used to communicate * with any ISO 7816 smart card. * * Any help according to implement serial smart card reader support * is highly welcome! */ #include "anysee.h" #include "dvb-pll.h" #include "tda1002x.h" #include "mt352.h" #include "mt352_priv.h" #include "zl10353.h" #include "tda18212.h" #include "cx24116.h" #include "stv0900.h" #include "stv6110.h" #include "isl6423.h" #include "cxd2820r.h" DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int anysee_ctrl_msg(struct dvb_usb_device *d, u8 *sbuf, u8 slen, u8 *rbuf, u8 rlen) { struct anysee_state *state = d_to_priv(d); int act_len, ret, i; mutex_lock(&d->usb_mutex); memcpy(&state->buf[0], sbuf, slen); state->buf[60] = state->seq++; dev_dbg(&d->udev->dev, "%s: >>> %*ph\n", __func__, slen, state->buf); /* We need receive one message more after dvb_usb_generic_rw due to weird transaction flow, which is 1 x send + 2 x receive. */ ret = dvb_usbv2_generic_rw_locked(d, state->buf, sizeof(state->buf), state->buf, sizeof(state->buf)); if (ret) goto error_unlock; /* TODO FIXME: dvb_usb_generic_rw() fails rarely with error code -32 * (EPIPE, Broken pipe). Function supports currently msleep() as a * parameter but I would not like to use it, since according to * Documentation/timers/timers-howto.txt it should not be used such * short, under < 20ms, sleeps. Repeating failed message would be * better choice as not to add unwanted delays... 
* Fixing that correctly is one of those or both; * 1) use repeat if possible * 2) add suitable delay */ /* get answer, retry few times if error returned */ for (i = 0; i < 3; i++) { /* receive 2nd answer */ ret = usb_bulk_msg(d->udev, usb_rcvbulkpipe(d->udev, d->props->generic_bulk_ctrl_endpoint), state->buf, sizeof(state->buf), &act_len, 2000); if (ret) { dev_dbg(&d->udev->dev, "%s: recv bulk message failed=%d\n", __func__, ret); } else { dev_dbg(&d->udev->dev, "%s: <<< %*ph\n", __func__, rlen, state->buf); if (state->buf[63] != 0x4f) dev_dbg(&d->udev->dev, "%s: cmd failed\n", __func__); break; } } if (ret) { /* all retries failed, it is fatal */ dev_err(&d->udev->dev, "%s: recv bulk message failed=%d\n", KBUILD_MODNAME, ret); goto error_unlock; } /* read request, copy returned data to return buf */ if (rbuf && rlen) memcpy(rbuf, state->buf, rlen); error_unlock: mutex_unlock(&d->usb_mutex); return ret; } static int anysee_read_reg(struct dvb_usb_device *d, u16 reg, u8 *val) { u8 buf[] = {CMD_REG_READ, reg >> 8, reg & 0xff, 0x01}; int ret; ret = anysee_ctrl_msg(d, buf, sizeof(buf), val, 1); dev_dbg(&d->udev->dev, "%s: reg=%04x val=%02x\n", __func__, reg, *val); return ret; } static int anysee_write_reg(struct dvb_usb_device *d, u16 reg, u8 val) { u8 buf[] = {CMD_REG_WRITE, reg >> 8, reg & 0xff, 0x01, val}; dev_dbg(&d->udev->dev, "%s: reg=%04x val=%02x\n", __func__, reg, val); return anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0); } /* write single register with mask */ static int anysee_wr_reg_mask(struct dvb_usb_device *d, u16 reg, u8 val, u8 mask) { int ret; u8 tmp; /* no need for read if whole reg is written */ if (mask != 0xff) { ret = anysee_read_reg(d, reg, &tmp); if (ret) return ret; val &= mask; tmp &= ~mask; val |= tmp; } return anysee_write_reg(d, reg, val); } /* read single register with mask */ static int anysee_rd_reg_mask(struct dvb_usb_device *d, u16 reg, u8 *val, u8 mask) { int ret, i; u8 tmp; ret = anysee_read_reg(d, reg, &tmp); if (ret) return ret; tmp &= mask; /* find position of the first bit */ for (i = 0; i < 8; i++) { if ((mask >> i) & 0x01) break; } *val = tmp >> i; return 0; } static int anysee_get_hw_info(struct dvb_usb_device *d, u8 *id) { u8 buf[] = {CMD_GET_HW_INFO}; return anysee_ctrl_msg(d, buf, sizeof(buf), id, 3); } static int anysee_streaming_ctrl(struct dvb_frontend *fe, int onoff) { u8 buf[] = {CMD_STREAMING_CTRL, (u8)onoff, 0x00}; dev_dbg(&fe_to_d(fe)->udev->dev, "%s: onoff=%d\n", __func__, onoff); return anysee_ctrl_msg(fe_to_d(fe), buf, sizeof(buf), NULL, 0); } static int anysee_led_ctrl(struct dvb_usb_device *d, u8 mode, u8 interval) { u8 buf[] = {CMD_LED_AND_IR_CTRL, 0x01, mode, interval}; dev_dbg(&d->udev->dev, "%s: state=%d interval=%d\n", __func__, mode, interval); return anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0); } static int anysee_ir_ctrl(struct dvb_usb_device *d, u8 onoff) { u8 buf[] = {CMD_LED_AND_IR_CTRL, 0x02, onoff}; dev_dbg(&d->udev->dev, "%s: onoff=%d\n", __func__, onoff); return anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0); } /* I2C */ static int anysee_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret = 0, inc, i = 0; u8 buf[52]; /* 4 + 48 (I2C WR USB command header + I2C WR max) */ if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; while (i < num) { if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) { if (msg[i].len > 2 || msg[i+1].len > 60) { ret = -EOPNOTSUPP; break; } buf[0] = CMD_I2C_READ; buf[1] = (msg[i].addr << 1) | 0x01; buf[2] = 
msg[i].buf[0]; buf[3] = msg[i].buf[1]; buf[4] = msg[i].len-1; buf[5] = msg[i+1].len; ret = anysee_ctrl_msg(d, buf, 6, msg[i+1].buf, msg[i+1].len); inc = 2; } else { if (msg[i].len > 48) { ret = -EOPNOTSUPP; break; } buf[0] = CMD_I2C_WRITE; buf[1] = (msg[i].addr << 1); buf[2] = msg[i].len; buf[3] = 0x01; memcpy(&buf[4], msg[i].buf, msg[i].len); ret = anysee_ctrl_msg(d, buf, 4 + msg[i].len, NULL, 0); inc = 1; } if (ret) break; i += inc; } mutex_unlock(&d->i2c_mutex); return ret ? ret : i; } static u32 anysee_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm anysee_i2c_algo = { .master_xfer = anysee_master_xfer, .functionality = anysee_i2c_func, }; static int anysee_mt352_demod_init(struct dvb_frontend *fe) { static u8 clock_config[] = { CLOCK_CTL, 0x38, 0x28 }; static u8 reset[] = { RESET, 0x80 }; static u8 adc_ctl_1_cfg[] = { ADC_CTL_1, 0x40 }; static u8 agc_cfg[] = { AGC_TARGET, 0x28, 0x20 }; static u8 gpp_ctl_cfg[] = { GPP_CTL, 0x33 }; static u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 }; mt352_write(fe, clock_config, sizeof(clock_config)); udelay(200); mt352_write(fe, reset, sizeof(reset)); mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg)); mt352_write(fe, agc_cfg, sizeof(agc_cfg)); mt352_write(fe, gpp_ctl_cfg, sizeof(gpp_ctl_cfg)); mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg)); return 0; } /* Callbacks for DVB USB */ static struct tda10023_config anysee_tda10023_config = { .demod_address = (0x1a >> 1), .invert = 0, .xtal = 16000000, .pll_m = 11, .pll_p = 3, .pll_n = 1, .output_mode = TDA10023_OUTPUT_MODE_PARALLEL_C, .deltaf = 0xfeeb, }; static struct mt352_config anysee_mt352_config = { .demod_address = (0x1e >> 1), .demod_init = anysee_mt352_demod_init, }; static struct zl10353_config anysee_zl10353_config = { .demod_address = (0x1e >> 1), .parallel_ts = 1, }; static struct zl10353_config anysee_zl10353_tda18212_config2 = { .demod_address = (0x1e >> 1), .parallel_ts = 1, .disable_i2c_gate_ctrl = 1, .no_tuner = 1, .if2 = 41500, }; static struct zl10353_config anysee_zl10353_tda18212_config = { .demod_address = (0x18 >> 1), .parallel_ts = 1, .disable_i2c_gate_ctrl = 1, .no_tuner = 1, .if2 = 41500, }; static struct tda10023_config anysee_tda10023_tda18212_config = { .demod_address = (0x1a >> 1), .xtal = 16000000, .pll_m = 12, .pll_p = 3, .pll_n = 1, .output_mode = TDA10023_OUTPUT_MODE_PARALLEL_B, .deltaf = 0xba02, }; static struct tda18212_config anysee_tda18212_config = { .i2c_address = (0xc0 >> 1), .if_dvbt_6 = 4150, .if_dvbt_7 = 4150, .if_dvbt_8 = 4150, .if_dvbc = 5000, }; static struct tda18212_config anysee_tda18212_config2 = { .i2c_address = 0x60 /* (0xc0 >> 1) */, .if_dvbt_6 = 3550, .if_dvbt_7 = 3700, .if_dvbt_8 = 4150, .if_dvbt2_6 = 3250, .if_dvbt2_7 = 4000, .if_dvbt2_8 = 4000, .if_dvbc = 5000, }; static struct cx24116_config anysee_cx24116_config = { .demod_address = (0xaa >> 1), .mpg_clk_pos_pol = 0x00, .i2c_wr_max = 48, }; static struct stv0900_config anysee_stv0900_config = { .demod_address = (0xd0 >> 1), .demod_mode = 0, .xtal = 8000000, .clkmode = 3, .diseqc_mode = 2, .tun1_maddress = 0, .tun1_adc = 1, /* 1 Vpp */ .path1_mode = 3, }; static struct stv6110_config anysee_stv6110_config = { .i2c_address = (0xc0 >> 1), .mclk = 16000000, .clk_div = 1, }; static struct isl6423_config anysee_isl6423_config = { .current_max = SEC_CURRENT_800m, .curlim = SEC_CURRENT_LIM_OFF, .mod_extern = 1, .addr = (0x10 >> 1), }; static struct cxd2820r_config anysee_cxd2820r_config = { .i2c_address = 0x6d, /* (0xda >> 1) */ .ts_mode = 0x38, }; /* 
* New USB device strings: Mfr=1, Product=2, SerialNumber=0 * Manufacturer: AMT.CO.KR * * E30 VID=04b4 PID=861f HW=2 FW=2.1 Product=???????? * PCB: ? * parts: DNOS404ZH102A(MT352, DTT7579(?)) * * E30 VID=04b4 PID=861f HW=2 FW=2.1 "anysee-T(LP)" * PCB: PCB 507T (rev1.61) * parts: DNOS404ZH103A(ZL10353, DTT7579(?)) * OEA=0a OEB=00 OEC=00 OED=ff OEE=00 * IOA=45 IOB=ff IOC=00 IOD=ff IOE=00 * * E30 Plus VID=04b4 PID=861f HW=6 FW=1.0 "anysee" * PCB: 507CD (rev1.1) * parts: DNOS404ZH103A(ZL10353, DTT7579(?)), CST56I01 * OEA=80 OEB=00 OEC=00 OED=ff OEE=fe * IOA=4f IOB=ff IOC=00 IOD=06 IOE=01 * IOD[0] ZL10353 1=enabled * IOA[7] TS 0=enabled * tuner is not behind ZL10353 I2C-gate (no care if gate disabled or not) * * E30 C Plus VID=04b4 PID=861f HW=10 FW=1.0 "anysee-DC(LP)" * PCB: 507DC (rev0.2) * parts: TDA10023, DTOS403IH102B TM, CST56I01 * OEA=80 OEB=00 OEC=00 OED=ff OEE=fe * IOA=4f IOB=ff IOC=00 IOD=26 IOE=01 * IOD[0] TDA10023 1=enabled * * E30 S2 Plus VID=04b4 PID=861f HW=11 FW=0.1 "anysee-S2(LP)" * PCB: 507SI (rev2.1) * parts: BS2N10WCC01(CX24116, CX24118), ISL6423, TDA8024 * OEA=80 OEB=00 OEC=ff OED=ff OEE=fe * IOA=4d IOB=ff IOC=00 IOD=26 IOE=01 * IOD[0] CX24116 1=enabled * * E30 C Plus VID=1c73 PID=861f HW=15 FW=1.2 "anysee-FA(LP)" * PCB: 507FA (rev0.4) * parts: TDA10023, DTOS403IH102B TM, TDA8024 * OEA=80 OEB=00 OEC=ff OED=ff OEE=ff * IOA=4d IOB=ff IOC=00 IOD=00 IOE=c0 * IOD[5] TDA10023 1=enabled * IOE[0] tuner 1=enabled * * E30 Combo Plus VID=1c73 PID=861f HW=15 FW=1.2 "anysee-FA(LP)" * PCB: 507FA (rev1.1) * parts: ZL10353, TDA10023, DTOS403IH102B TM, TDA8024 * OEA=80 OEB=00 OEC=ff OED=ff OEE=ff * IOA=4d IOB=ff IOC=00 IOD=00 IOE=c0 * DVB-C: * IOD[5] TDA10023 1=enabled * IOE[0] tuner 1=enabled * DVB-T: * IOD[0] ZL10353 1=enabled * IOE[0] tuner 0=enabled * tuner is behind ZL10353 I2C-gate * tuner is behind TDA10023 I2C-gate * * E7 TC VID=1c73 PID=861f HW=18 FW=0.7 AMTCI=0.5 "anysee-E7TC(LP)" * PCB: 508TC (rev0.6) * parts: ZL10353, TDA10023, DNOD44CDH086A(TDA18212) * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff * IOA=4d IOB=00 IOC=cc IOD=48 IOE=e4 * IOA[7] TS 1=enabled * IOE[4] TDA18212 1=enabled * DVB-C: * IOD[6] ZL10353 0=disabled * IOD[5] TDA10023 1=enabled * IOE[0] IF 1=enabled * DVB-T: * IOD[5] TDA10023 0=disabled * IOD[6] ZL10353 1=enabled * IOE[0] IF 0=enabled * * E7 S2 VID=1c73 PID=861f HW=19 FW=0.4 AMTCI=0.5 "anysee-E7S2(LP)" * PCB: 508S2 (rev0.7) * parts: DNBU10512IST(STV0903, STV6110), ISL6423 * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff * IOA=4d IOB=00 IOC=c4 IOD=08 IOE=e4 * IOA[7] TS 1=enabled * IOE[5] STV0903 1=enabled * * E7 T2C VID=1c73 PID=861f HW=20 FW=0.1 AMTCI=0.5 "anysee-E7T2C(LP)" * PCB: 508T2C (rev0.3) * parts: DNOQ44QCH106A(CXD2820R, TDA18212), TDA8024 * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff * IOA=4d IOB=00 IOC=cc IOD=48 IOE=e4 * IOA[7] TS 1=enabled * IOE[5] CXD2820R 1=enabled * * E7 PTC VID=1c73 PID=861f HW=21 FW=0.1 AMTCI=?? "anysee-E7PTC(LP)" * PCB: 508PTC (rev0.5) * parts: ZL10353, TDA10023, DNOD44CDH086A(TDA18212) * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff * IOA=4d IOB=00 IOC=cc IOD=48 IOE=e4 * IOA[7] TS 1=enabled * IOE[4] TDA18212 1=enabled * DVB-C: * IOD[6] ZL10353 0=disabled * IOD[5] TDA10023 1=enabled * IOE[0] IF 1=enabled * DVB-T: * IOD[5] TDA10023 0=disabled * IOD[6] ZL10353 1=enabled * IOE[0] IF 0=enabled * * E7 PS2 VID=1c73 PID=861f HW=22 FW=0.1 AMTCI=?? 
"anysee-E7PS2(LP)" * PCB: 508PS2 (rev0.4) * parts: DNBU10512IST(STV0903, STV6110), ISL6423 * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff * IOA=4d IOB=00 IOC=c4 IOD=08 IOE=e4 * IOA[7] TS 1=enabled * IOE[5] STV0903 1=enabled */ static int anysee_read_config(struct dvb_usb_device *d) { struct anysee_state *state = d_to_priv(d); int ret; u8 hw_info[3]; /* * Check which hardware we have. * We must do this call two times to get reliable values (hw/fw bug). */ ret = anysee_get_hw_info(d, hw_info); if (ret) goto error; ret = anysee_get_hw_info(d, hw_info); if (ret) goto error; /* * Meaning of these info bytes are guessed. */ dev_info(&d->udev->dev, "%s: firmware version %d.%d hardware id %d\n", KBUILD_MODNAME, hw_info[1], hw_info[2], hw_info[0]); state->hw = hw_info[0]; error: return ret; } /* external I2C gate used for DNOD44CDH086A(TDA18212) tuner module */ static int anysee_i2c_gate_ctrl(struct dvb_frontend *fe, int enable) { /* enable / disable tuner access on IOE[4] */ return anysee_wr_reg_mask(fe_to_d(fe), REG_IOE, (enable << 4), 0x10); } static int anysee_frontend_ctrl(struct dvb_frontend *fe, int onoff) { struct anysee_state *state = fe_to_priv(fe); struct dvb_usb_device *d = fe_to_d(fe); int ret; dev_dbg(&d->udev->dev, "%s: fe=%d onoff=%d\n", __func__, fe->id, onoff); /* no frontend sleep control */ if (onoff == 0) return 0; switch (state->hw) { case ANYSEE_HW_507FA: /* 15 */ /* E30 Combo Plus */ /* E30 C Plus */ if (fe->id == 0) { /* disable DVB-T demod on IOD[0] */ ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 0), 0x01); if (ret) goto error; /* enable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 5), 0x20); if (ret) goto error; /* enable DVB-C tuner on IOE[0] */ ret = anysee_wr_reg_mask(d, REG_IOE, (1 << 0), 0x01); if (ret) goto error; } else { /* disable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 5), 0x20); if (ret) goto error; /* enable DVB-T demod on IOD[0] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 0), 0x01); if (ret) goto error; /* enable DVB-T tuner on IOE[0] */ ret = anysee_wr_reg_mask(d, REG_IOE, (0 << 0), 0x01); if (ret) goto error; } break; case ANYSEE_HW_508TC: /* 18 */ case ANYSEE_HW_508PTC: /* 21 */ /* E7 TC */ /* E7 PTC */ if (fe->id == 0) { /* disable DVB-T demod on IOD[6] */ ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 6), 0x40); if (ret) goto error; /* enable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 5), 0x20); if (ret) goto error; /* enable IF route on IOE[0] */ ret = anysee_wr_reg_mask(d, REG_IOE, (1 << 0), 0x01); if (ret) goto error; } else { /* disable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 5), 0x20); if (ret) goto error; /* enable DVB-T demod on IOD[6] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 6), 0x40); if (ret) goto error; /* enable IF route on IOE[0] */ ret = anysee_wr_reg_mask(d, REG_IOE, (0 << 0), 0x01); if (ret) goto error; } break; default: ret = 0; } error: return ret; } static int anysee_frontend_attach(struct dvb_usb_adapter *adap) { struct anysee_state *state = adap_to_priv(adap); struct dvb_usb_device *d = adap_to_d(adap); int ret = 0; u8 tmp; struct i2c_msg msg[2] = { { .addr = anysee_tda18212_config.i2c_address, .flags = 0, .len = 1, .buf = "\x00", }, { .addr = anysee_tda18212_config.i2c_address, .flags = I2C_M_RD, .len = 1, .buf = &tmp, } }; switch (state->hw) { case ANYSEE_HW_507T: /* 2 */ /* E30 */ /* attach demod */ adap->fe[0] = dvb_attach(mt352_attach, &anysee_mt352_config, &d->i2c_adap); if (adap->fe[0]) break; /* attach demod */ adap->fe[0] = 
dvb_attach(zl10353_attach, &anysee_zl10353_config, &d->i2c_adap); break; case ANYSEE_HW_507CD: /* 6 */ /* E30 Plus */ /* enable DVB-T demod on IOD[0] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 0), 0x01); if (ret) goto error; /* enable transport stream on IOA[7] */ ret = anysee_wr_reg_mask(d, REG_IOA, (0 << 7), 0x80); if (ret) goto error; /* attach demod */ adap->fe[0] = dvb_attach(zl10353_attach, &anysee_zl10353_config, &d->i2c_adap); break; case ANYSEE_HW_507DC: /* 10 */ /* E30 C Plus */ /* enable DVB-C demod on IOD[0] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 0), 0x01); if (ret) goto error; /* attach demod */ adap->fe[0] = dvb_attach(tda10023_attach, &anysee_tda10023_config, &d->i2c_adap, 0x48); break; case ANYSEE_HW_507SI: /* 11 */ /* E30 S2 Plus */ /* enable DVB-S/S2 demod on IOD[0] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 0), 0x01); if (ret) goto error; /* attach demod */ adap->fe[0] = dvb_attach(cx24116_attach, &anysee_cx24116_config, &d->i2c_adap); break; case ANYSEE_HW_507FA: /* 15 */ /* E30 Combo Plus */ /* E30 C Plus */ /* enable tuner on IOE[4] */ ret = anysee_wr_reg_mask(d, REG_IOE, (1 << 4), 0x10); if (ret) goto error; /* probe TDA18212 */ tmp = 0; ret = i2c_transfer(&d->i2c_adap, msg, 2); if (ret == 2 && tmp == 0xc7) dev_dbg(&d->udev->dev, "%s: TDA18212 found\n", __func__); else tmp = 0; /* disable tuner on IOE[4] */ ret = anysee_wr_reg_mask(d, REG_IOE, (0 << 4), 0x10); if (ret) goto error; /* disable DVB-T demod on IOD[0] */ ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 0), 0x01); if (ret) goto error; /* enable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 5), 0x20); if (ret) goto error; /* attach demod */ if (tmp == 0xc7) { /* TDA18212 config */ adap->fe[0] = dvb_attach(tda10023_attach, &anysee_tda10023_tda18212_config, &d->i2c_adap, 0x48); /* I2C gate for DNOD44CDH086A(TDA18212) tuner module */ if (adap->fe[0]) adap->fe[0]->ops.i2c_gate_ctrl = anysee_i2c_gate_ctrl; } else { /* PLL config */ adap->fe[0] = dvb_attach(tda10023_attach, &anysee_tda10023_config, &d->i2c_adap, 0x48); } /* break out if first frontend attaching fails */ if (!adap->fe[0]) break; /* disable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 5), 0x20); if (ret) goto error; /* enable DVB-T demod on IOD[0] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 0), 0x01); if (ret) goto error; /* attach demod */ if (tmp == 0xc7) { /* TDA18212 config */ adap->fe[1] = dvb_attach(zl10353_attach, &anysee_zl10353_tda18212_config2, &d->i2c_adap); /* I2C gate for DNOD44CDH086A(TDA18212) tuner module */ if (adap->fe[1]) adap->fe[1]->ops.i2c_gate_ctrl = anysee_i2c_gate_ctrl; } else { /* PLL config */ adap->fe[1] = dvb_attach(zl10353_attach, &anysee_zl10353_config, &d->i2c_adap); } break; case ANYSEE_HW_508TC: /* 18 */ case ANYSEE_HW_508PTC: /* 21 */ /* E7 TC */ /* E7 PTC */ /* disable DVB-T demod on IOD[6] */ ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 6), 0x40); if (ret) goto error; /* enable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 5), 0x20); if (ret) goto error; /* attach demod */ adap->fe[0] = dvb_attach(tda10023_attach, &anysee_tda10023_tda18212_config, &d->i2c_adap, 0x48); /* I2C gate for DNOD44CDH086A(TDA18212) tuner module */ if (adap->fe[0]) adap->fe[0]->ops.i2c_gate_ctrl = anysee_i2c_gate_ctrl; /* break out if first frontend attaching fails */ if (!adap->fe[0]) break; /* disable DVB-C demod on IOD[5] */ ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 5), 0x20); if (ret) goto error; /* enable DVB-T demod on IOD[6] */ ret = anysee_wr_reg_mask(d, 
REG_IOD, (1 << 6), 0x40); if (ret) goto error; /* attach demod */ adap->fe[1] = dvb_attach(zl10353_attach, &anysee_zl10353_tda18212_config, &d->i2c_adap); /* I2C gate for DNOD44CDH086A(TDA18212) tuner module */ if (adap->fe[1]) adap->fe[1]->ops.i2c_gate_ctrl = anysee_i2c_gate_ctrl; state->has_ci = true; break; case ANYSEE_HW_508S2: /* 19 */ case ANYSEE_HW_508PS2: /* 22 */ /* E7 S2 */ /* E7 PS2 */ /* enable DVB-S/S2 demod on IOE[5] */ ret = anysee_wr_reg_mask(d, REG_IOE, (1 << 5), 0x20); if (ret) goto error; /* attach demod */ adap->fe[0] = dvb_attach(stv0900_attach, &anysee_stv0900_config, &d->i2c_adap, 0); state->has_ci = true; break; case ANYSEE_HW_508T2C: /* 20 */ /* E7 T2C */ /* enable DVB-T/T2/C demod on IOE[5] */ ret = anysee_wr_reg_mask(d, REG_IOE, (1 << 5), 0x20); if (ret) goto error; /* attach demod */ adap->fe[0] = dvb_attach(cxd2820r_attach, &anysee_cxd2820r_config, &d->i2c_adap, NULL); state->has_ci = true; break; } if (!adap->fe[0]) { /* we have no frontend :-( */ ret = -ENODEV; dev_err(&d->udev->dev, "%s: Unsupported Anysee version. Please report to <linux-media@vger.kernel.org>.\n", KBUILD_MODNAME); } error: return ret; } static int anysee_tuner_attach(struct dvb_usb_adapter *adap) { struct anysee_state *state = adap_to_priv(adap); struct dvb_usb_device *d = adap_to_d(adap); struct dvb_frontend *fe; int ret; dev_dbg(&d->udev->dev, "%s:\n", __func__); switch (state->hw) { case ANYSEE_HW_507T: /* 2 */ /* E30 */ /* attach tuner */ fe = dvb_attach(dvb_pll_attach, adap->fe[0], (0xc2 >> 1), NULL, DVB_PLL_THOMSON_DTT7579); break; case ANYSEE_HW_507CD: /* 6 */ /* E30 Plus */ /* attach tuner */ fe = dvb_attach(dvb_pll_attach, adap->fe[0], (0xc2 >> 1), &d->i2c_adap, DVB_PLL_THOMSON_DTT7579); break; case ANYSEE_HW_507DC: /* 10 */ /* E30 C Plus */ /* attach tuner */ fe = dvb_attach(dvb_pll_attach, adap->fe[0], (0xc0 >> 1), &d->i2c_adap, DVB_PLL_SAMSUNG_DTOS403IH102A); break; case ANYSEE_HW_507SI: /* 11 */ /* E30 S2 Plus */ /* attach LNB controller */ fe = dvb_attach(isl6423_attach, adap->fe[0], &d->i2c_adap, &anysee_isl6423_config); break; case ANYSEE_HW_507FA: /* 15 */ /* E30 Combo Plus */ /* E30 C Plus */ /* Try first attach TDA18212 silicon tuner on IOE[4], if that * fails attach old simple PLL. 
*/ /* attach tuner */ fe = dvb_attach(tda18212_attach, adap->fe[0], &d->i2c_adap, &anysee_tda18212_config); if (fe && adap->fe[1]) { /* attach tuner for 2nd FE */ fe = dvb_attach(tda18212_attach, adap->fe[1], &d->i2c_adap, &anysee_tda18212_config); break; } else if (fe) { break; } /* attach tuner */ fe = dvb_attach(dvb_pll_attach, adap->fe[0], (0xc0 >> 1), &d->i2c_adap, DVB_PLL_SAMSUNG_DTOS403IH102A); if (fe && adap->fe[1]) { /* attach tuner for 2nd FE */ fe = dvb_attach(dvb_pll_attach, adap->fe[1], (0xc0 >> 1), &d->i2c_adap, DVB_PLL_SAMSUNG_DTOS403IH102A); } break; case ANYSEE_HW_508TC: /* 18 */ case ANYSEE_HW_508PTC: /* 21 */ /* E7 TC */ /* E7 PTC */ /* attach tuner */ fe = dvb_attach(tda18212_attach, adap->fe[0], &d->i2c_adap, &anysee_tda18212_config); if (fe) { /* attach tuner for 2nd FE */ fe = dvb_attach(tda18212_attach, adap->fe[1], &d->i2c_adap, &anysee_tda18212_config); } break; case ANYSEE_HW_508S2: /* 19 */ case ANYSEE_HW_508PS2: /* 22 */ /* E7 S2 */ /* E7 PS2 */ /* attach tuner */ fe = dvb_attach(stv6110_attach, adap->fe[0], &anysee_stv6110_config, &d->i2c_adap); if (fe) { /* attach LNB controller */ fe = dvb_attach(isl6423_attach, adap->fe[0], &d->i2c_adap, &anysee_isl6423_config); } break; case ANYSEE_HW_508T2C: /* 20 */ /* E7 T2C */ /* attach tuner */ fe = dvb_attach(tda18212_attach, adap->fe[0], &d->i2c_adap, &anysee_tda18212_config2); break; default: fe = NULL; } if (fe) ret = 0; else ret = -ENODEV; return ret; } #if IS_ENABLED(CONFIG_RC_CORE) static int anysee_rc_query(struct dvb_usb_device *d) { u8 buf[] = {CMD_GET_IR_CODE}; u8 ircode[2]; int ret; /* Remote controller is basic NEC using address byte 0x08. Anysee device RC query returns only two bytes, status and code, address byte is dropped. Also it does not return any value for NEC RCs having address byte other than 0x08. Due to that, we cannot use that device as standard NEC receiver. It could be possible make hack which reads whole code directly from device memory... 
*/ ret = anysee_ctrl_msg(d, buf, sizeof(buf), ircode, sizeof(ircode)); if (ret) return ret; if (ircode[0]) { dev_dbg(&d->udev->dev, "%s: key pressed %02x\n", __func__, ircode[1]); rc_keydown(d->rc_dev, 0x08 << 8 | ircode[1], 0); } return 0; } static int anysee_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc) { rc->allowed_protos = RC_BIT_NEC; rc->query = anysee_rc_query; rc->interval = 250; /* windows driver uses 500ms */ return 0; } #else #define anysee_get_rc_config NULL #endif static int anysee_ci_read_attribute_mem(struct dvb_ca_en50221 *ci, int slot, int addr) { struct dvb_usb_device *d = ci->data; int ret; u8 buf[] = {CMD_CI, 0x02, 0x40 | addr >> 8, addr & 0xff, 0x00, 1}; u8 val; ret = anysee_ctrl_msg(d, buf, sizeof(buf), &val, 1); if (ret) return ret; return val; } static int anysee_ci_write_attribute_mem(struct dvb_ca_en50221 *ci, int slot, int addr, u8 val) { struct dvb_usb_device *d = ci->data; int ret; u8 buf[] = {CMD_CI, 0x03, 0x40 | addr >> 8, addr & 0xff, 0x00, 1, val}; ret = anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0); if (ret) return ret; return 0; } static int anysee_ci_read_cam_control(struct dvb_ca_en50221 *ci, int slot, u8 addr) { struct dvb_usb_device *d = ci->data; int ret; u8 buf[] = {CMD_CI, 0x04, 0x40, addr, 0x00, 1}; u8 val; ret = anysee_ctrl_msg(d, buf, sizeof(buf), &val, 1); if (ret) return ret; return val; } static int anysee_ci_write_cam_control(struct dvb_ca_en50221 *ci, int slot, u8 addr, u8 val) { struct dvb_usb_device *d = ci->data; int ret; u8 buf[] = {CMD_CI, 0x05, 0x40, addr, 0x00, 1, val}; ret = anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0); if (ret) return ret; return 0; } static int anysee_ci_slot_reset(struct dvb_ca_en50221 *ci, int slot) { struct dvb_usb_device *d = ci->data; int ret; struct anysee_state *state = d_to_priv(d); state->ci_cam_ready = jiffies + msecs_to_jiffies(1000); ret = anysee_wr_reg_mask(d, REG_IOA, (0 << 7), 0x80); if (ret) return ret; msleep(300); ret = anysee_wr_reg_mask(d, REG_IOA, (1 << 7), 0x80); if (ret) return ret; return 0; } static int anysee_ci_slot_shutdown(struct dvb_ca_en50221 *ci, int slot) { struct dvb_usb_device *d = ci->data; int ret; ret = anysee_wr_reg_mask(d, REG_IOA, (0 << 7), 0x80); if (ret) return ret; msleep(30); ret = anysee_wr_reg_mask(d, REG_IOA, (1 << 7), 0x80); if (ret) return ret; return 0; } static int anysee_ci_slot_ts_enable(struct dvb_ca_en50221 *ci, int slot) { struct dvb_usb_device *d = ci->data; int ret; ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 1), 0x02); if (ret) return ret; return 0; } static int anysee_ci_poll_slot_status(struct dvb_ca_en50221 *ci, int slot, int open) { struct dvb_usb_device *d = ci->data; struct anysee_state *state = d_to_priv(d); int ret; u8 tmp = 0; ret = anysee_rd_reg_mask(d, REG_IOC, &tmp, 0x40); if (ret) return ret; if (tmp == 0) { ret = DVB_CA_EN50221_POLL_CAM_PRESENT; if (time_after(jiffies, state->ci_cam_ready)) ret |= DVB_CA_EN50221_POLL_CAM_READY; } return ret; } static int anysee_ci_init(struct dvb_usb_device *d) { struct anysee_state *state = d_to_priv(d); int ret; state->ci.owner = THIS_MODULE; state->ci.read_attribute_mem = anysee_ci_read_attribute_mem; state->ci.write_attribute_mem = anysee_ci_write_attribute_mem; state->ci.read_cam_control = anysee_ci_read_cam_control; state->ci.write_cam_control = anysee_ci_write_cam_control; state->ci.slot_reset = anysee_ci_slot_reset; state->ci.slot_shutdown = anysee_ci_slot_shutdown; state->ci.slot_ts_enable = anysee_ci_slot_ts_enable; state->ci.poll_slot_status = anysee_ci_poll_slot_status; 
state->ci.data = d; ret = anysee_wr_reg_mask(d, REG_IOA, (1 << 7), 0x80); if (ret) return ret; ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 2)|(0 << 1)|(0 << 0), 0x07); if (ret) return ret; ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 2)|(1 << 1)|(1 << 0), 0x07); if (ret) return ret; ret = dvb_ca_en50221_init(&d->adapter[0].dvb_adap, &state->ci, 0, 1); if (ret) return ret; state->ci_attached = true; return 0; } static void anysee_ci_release(struct dvb_usb_device *d) { struct anysee_state *state = d_to_priv(d); /* detach CI */ if (state->ci_attached) dvb_ca_en50221_release(&state->ci); return; } static int anysee_init(struct dvb_usb_device *d) { struct anysee_state *state = d_to_priv(d); int ret; /* There is one interface with two alternate settings. Alternate setting 0 is for bulk transfer. Alternate setting 1 is for isochronous transfer. We use bulk transfer (alternate setting 0). */ ret = usb_set_interface(d->udev, 0, 0); if (ret) return ret; /* LED light */ ret = anysee_led_ctrl(d, 0x01, 0x03); if (ret) return ret; /* enable IR */ ret = anysee_ir_ctrl(d, 1); if (ret) return ret; /* attach CI */ if (state->has_ci) { ret = anysee_ci_init(d); if (ret) return ret; } return 0; } static void anysee_exit(struct dvb_usb_device *d) { return anysee_ci_release(d); } /* DVB USB Driver stuff */ static struct dvb_usb_device_properties anysee_props = { .driver_name = KBUILD_MODNAME, .owner = THIS_MODULE, .adapter_nr = adapter_nr, .size_of_priv = sizeof(struct anysee_state), .generic_bulk_ctrl_endpoint = 0x01, .generic_bulk_ctrl_endpoint_response = 0x81, .i2c_algo = &anysee_i2c_algo, .read_config = anysee_read_config, .frontend_attach = anysee_frontend_attach, .tuner_attach = anysee_tuner_attach, .init = anysee_init, .get_rc_config = anysee_get_rc_config, .frontend_ctrl = anysee_frontend_ctrl, .streaming_ctrl = anysee_streaming_ctrl, .exit = anysee_exit, .num_adapters = 1, .adapter = { { .stream = DVB_USB_STREAM_BULK(0x82, 8, 16 * 512), } } }; static const struct usb_device_id anysee_id_table[] = { { DVB_USB_DEVICE(USB_VID_CYPRESS, USB_PID_ANYSEE, &anysee_props, "Anysee", RC_MAP_ANYSEE) }, { DVB_USB_DEVICE(USB_VID_AMT, USB_PID_ANYSEE, &anysee_props, "Anysee", RC_MAP_ANYSEE) }, { } }; MODULE_DEVICE_TABLE(usb, anysee_id_table); static struct usb_driver anysee_usb_driver = { .name = KBUILD_MODNAME, .id_table = anysee_id_table, .probe = dvb_usbv2_probe, .disconnect = dvb_usbv2_disconnect, .suspend = dvb_usbv2_suspend, .resume = dvb_usbv2_resume, .reset_resume = dvb_usbv2_reset_resume, .no_dynamic_id = 1, .soft_unbind = 1, }; module_usb_driver(anysee_usb_driver); MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>"); MODULE_DESCRIPTION("Driver Anysee E30 DVB-C & DVB-T USB2.0"); MODULE_LICENSE("GPL");
gpl-2.0
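anysee_wr_reg_mask() and anysee_rd_reg_mask() above wrap the raw register commands so that a write disturbs only the bits covered by the mask (skipping the read-back when the whole byte is written) and a read returns the masked field shifted down to bit 0. Below is a self-contained sketch of that logic, with a plain array standing in for the USB register transactions; the array and function names are assumptions of the sketch.

/* Sketch only: a local array stands in for the device's register file. */
#include <stdint.h>

static uint8_t regs[0x100];

static void wr_reg_mask(uint16_t reg, uint8_t val, uint8_t mask)
{
	if (mask != 0xff) {		/* partial write: merge with the old bits */
		uint8_t tmp = regs[reg] & ~mask;

		val = (uint8_t)((val & mask) | tmp);
	}
	regs[reg] = val;
}

static uint8_t rd_reg_mask(uint16_t reg, uint8_t mask)
{
	uint8_t tmp = regs[reg] & mask;
	int i;

	/* shift the field down so bit 0 of the result is the lowest mask bit */
	for (i = 0; i < 8; i++)
		if ((mask >> i) & 0x01)
			break;

	return (uint8_t)(tmp >> i);
}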
ststeiger/pmfs
arch/sh/boards/mach-se/770x/setup.c
2319
4314
/* * linux/arch/sh/boards/se/770x/setup.c * * Copyright (C) 2000 Kazumoto Kojima * * Hitachi SolutionEngine Support. * */ #include <linux/init.h> #include <linux/platform_device.h> #include <mach-se/mach/se.h> #include <mach-se/mach/mrshpc.h> #include <asm/machvec.h> #include <asm/io.h> #include <asm/smc37c93x.h> #include <asm/heartbeat.h> /* * Configure the Super I/O chip */ static void __init smsc_config(int index, int data) { outb_p(index, INDEX_PORT); outb_p(data, DATA_PORT); } /* XXX: Another candidate for a more generic cchip machine vector */ static void __init smsc_setup(char **cmdline_p) { outb_p(CONFIG_ENTER, CONFIG_PORT); outb_p(CONFIG_ENTER, CONFIG_PORT); /* FDC */ smsc_config(CURRENT_LDN_INDEX, LDN_FDC); smsc_config(ACTIVATE_INDEX, 0x01); smsc_config(IRQ_SELECT_INDEX, 6); /* IRQ6 */ /* AUXIO (GPIO): to use IDE1 */ smsc_config(CURRENT_LDN_INDEX, LDN_AUXIO); smsc_config(GPIO46_INDEX, 0x00); /* nIOROP */ smsc_config(GPIO47_INDEX, 0x00); /* nIOWOP */ /* COM1 */ smsc_config(CURRENT_LDN_INDEX, LDN_COM1); smsc_config(ACTIVATE_INDEX, 0x01); smsc_config(IO_BASE_HI_INDEX, 0x03); smsc_config(IO_BASE_LO_INDEX, 0xf8); smsc_config(IRQ_SELECT_INDEX, 4); /* IRQ4 */ /* COM2 */ smsc_config(CURRENT_LDN_INDEX, LDN_COM2); smsc_config(ACTIVATE_INDEX, 0x01); smsc_config(IO_BASE_HI_INDEX, 0x02); smsc_config(IO_BASE_LO_INDEX, 0xf8); smsc_config(IRQ_SELECT_INDEX, 3); /* IRQ3 */ /* RTC */ smsc_config(CURRENT_LDN_INDEX, LDN_RTC); smsc_config(ACTIVATE_INDEX, 0x01); smsc_config(IRQ_SELECT_INDEX, 8); /* IRQ8 */ /* XXX: PARPORT, KBD, and MOUSE will come here... */ outb_p(CONFIG_EXIT, CONFIG_PORT); } static struct resource cf_ide_resources[] = { [0] = { .start = PA_MRSHPC_IO + 0x1f0, .end = PA_MRSHPC_IO + 0x1f0 + 8, .flags = IORESOURCE_MEM, }, [1] = { .start = PA_MRSHPC_IO + 0x1f0 + 0x206, .end = PA_MRSHPC_IO + 0x1f0 + 8 + 0x206 + 8, .flags = IORESOURCE_MEM, }, [2] = { .start = IRQ_CFCARD, .flags = IORESOURCE_IRQ, }, }; static struct platform_device cf_ide_device = { .name = "pata_platform", .id = -1, .num_resources = ARRAY_SIZE(cf_ide_resources), .resource = cf_ide_resources, }; static unsigned char heartbeat_bit_pos[] = { 8, 9, 10, 11, 12, 13, 14, 15 }; static struct heartbeat_data heartbeat_data = { .bit_pos = heartbeat_bit_pos, .nr_bits = ARRAY_SIZE(heartbeat_bit_pos), }; static struct resource heartbeat_resource = { .start = PA_LED, .end = PA_LED, .flags = IORESOURCE_MEM | IORESOURCE_MEM_16BIT, }; static struct platform_device heartbeat_device = { .name = "heartbeat", .id = -1, .dev = { .platform_data = &heartbeat_data, }, .num_resources = 1, .resource = &heartbeat_resource, }; #if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\ defined(CONFIG_CPU_SUBTYPE_SH7712) /* SH771X Ethernet driver */ static struct resource sh_eth0_resources[] = { [0] = { .start = SH_ETH0_BASE, .end = SH_ETH0_BASE + 0x1B8, .flags = IORESOURCE_MEM, }, [1] = { .start = SH_ETH0_IRQ, .end = SH_ETH0_IRQ, .flags = IORESOURCE_IRQ, }, }; static struct platform_device sh_eth0_device = { .name = "sh771x-ether", .id = 0, .dev = { .platform_data = PHY_ID, }, .num_resources = ARRAY_SIZE(sh_eth0_resources), .resource = sh_eth0_resources, }; static struct resource sh_eth1_resources[] = { [0] = { .start = SH_ETH1_BASE, .end = SH_ETH1_BASE + 0x1B8, .flags = IORESOURCE_MEM, }, [1] = { .start = SH_ETH1_IRQ, .end = SH_ETH1_IRQ, .flags = IORESOURCE_IRQ, }, }; static struct platform_device sh_eth1_device = { .name = "sh771x-ether", .id = 1, .dev = { .platform_data = PHY_ID, }, .num_resources = ARRAY_SIZE(sh_eth1_resources), .resource = sh_eth1_resources, }; 
#endif

static struct platform_device *se_devices[] __initdata = {
	&heartbeat_device,
	&cf_ide_device,
#if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\
    defined(CONFIG_CPU_SUBTYPE_SH7712)
	&sh_eth0_device,
	&sh_eth1_device,
#endif
};

static int __init se_devices_setup(void)
{
	mrshpc_setup_windows();

	return platform_add_devices(se_devices, ARRAY_SIZE(se_devices));
}
device_initcall(se_devices_setup);

/*
 * The Machine Vector
 */
static struct sh_machine_vector mv_se __initmv = {
	.mv_name	= "SolutionEngine",
	.mv_setup	= smsc_setup,
	.mv_init_irq	= init_se_IRQ,
};
gpl-2.0
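smsc_setup() above programs the SMSC Super I/O chip through the classic index/data protocol: write a key to enter configuration mode, then for each setting write the register index to the index port and its value to the data port, and finally write the exit key. The stand-alone sketch below mirrors that sequence; the port numbers, key values and the printf-backed outb() are placeholders, not the board's real values.

/* Sketch only: outb() just prints, and the ports/keys are invented values. */
#include <stdint.h>
#include <stdio.h>

#define INDEX_PORT	0x3f0
#define DATA_PORT	0x3f1
#define CONFIG_ENTER	0x55
#define CONFIG_EXIT	0xaa

static void outb(uint8_t val, uint16_t port)
{
	printf("outb(0x%02x, 0x%03x)\n", val, port);	/* stand-in for a port write */
}

static void sio_config(uint8_t index, uint8_t data)
{
	outb(index, INDEX_PORT);
	outb(data, DATA_PORT);
}

int main(void)
{
	outb(CONFIG_ENTER, INDEX_PORT);		/* the chip above takes the key twice */
	outb(CONFIG_ENTER, INDEX_PORT);

	sio_config(0x07, 0x04);			/* select a logical device number */
	sio_config(0x30, 0x01);			/* activate it */

	outb(CONFIG_EXIT, INDEX_PORT);
	return 0;
}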
scue/LenovoK860i_4.2_opensource_kernel
arch/powerpc/platforms/cell/beat_interrupt.c
2831
6882
/* * Celleb/Beat Interrupt controller * * (C) Copyright 2006-2007 TOSHIBA CORPORATION * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/percpu.h> #include <linux/types.h> #include <asm/machdep.h> #include "beat_interrupt.h" #include "beat_wrapper.h" #define MAX_IRQS NR_IRQS static DEFINE_RAW_SPINLOCK(beatic_irq_mask_lock); static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64]; static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64]; static struct irq_host *beatic_host; /* * In this implementation, "virq" == "IRQ plug number", * "(irq_hw_number_t)hwirq" == "IRQ outlet number". */ /* assumption: locked */ static inline void beatic_update_irq_mask(unsigned int irq_plug) { int off; unsigned long masks[4]; off = (irq_plug / 256) * 4; masks[0] = beatic_irq_mask_enable[off + 0] & beatic_irq_mask_ack[off + 0]; masks[1] = beatic_irq_mask_enable[off + 1] & beatic_irq_mask_ack[off + 1]; masks[2] = beatic_irq_mask_enable[off + 2] & beatic_irq_mask_ack[off + 2]; masks[3] = beatic_irq_mask_enable[off + 3] & beatic_irq_mask_ack[off + 3]; if (beat_set_interrupt_mask(irq_plug&~255UL, masks[0], masks[1], masks[2], masks[3]) != 0) panic("Failed to set mask IRQ!"); } static void beatic_mask_irq(struct irq_data *d) { unsigned long flags; raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_enable[d->irq/64] &= ~(1UL << (63 - (d->irq%64))); beatic_update_irq_mask(d->irq); raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static void beatic_unmask_irq(struct irq_data *d) { unsigned long flags; raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_enable[d->irq/64] |= 1UL << (63 - (d->irq%64)); beatic_update_irq_mask(d->irq); raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static void beatic_ack_irq(struct irq_data *d) { unsigned long flags; raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_ack[d->irq/64] &= ~(1UL << (63 - (d->irq%64))); beatic_update_irq_mask(d->irq); raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static void beatic_end_irq(struct irq_data *d) { s64 err; unsigned long flags; err = beat_downcount_of_interrupt(d->irq); if (err != 0) { if ((err & 0xFFFFFFFF) != 0xFFFFFFF5) /* -11: wrong state */ panic("Failed to downcount IRQ! Error = %16llx", err); printk(KERN_ERR "IRQ over-downcounted, plug %d\n", d->irq); } raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_ack[d->irq/64] |= 1UL << (63 - (d->irq%64)); beatic_update_irq_mask(d->irq); raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static struct irq_chip beatic_pic = { .name = "CELL-BEAT", .irq_unmask = beatic_unmask_irq, .irq_mask = beatic_mask_irq, .irq_eoi = beatic_end_irq, }; /* * Dispose binding hardware IRQ number (hw) and Virtuql IRQ number (virq), * update flags. 
* * Note that the number (virq) is already assigned at upper layer. */ static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq) { beat_destruct_irq_plug(virq); } /* * Create or update binding hardware IRQ number (hw) and Virtuql * IRQ number (virq). This is called only once for a given mapping. * * Note that the number (virq) is already assigned at upper layer. */ static int beatic_pic_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { int64_t err; err = beat_construct_and_connect_irq_plug(virq, hw); if (err < 0) return -EIO; irq_set_status_flags(virq, IRQ_LEVEL); irq_set_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq); return 0; } /* * Translate device-tree interrupt spec to irq_hw_number_t style (ulong), * to pass away to irq_create_mapping(). * * Called from irq_create_of_mapping() only. * Note: We have only 1 entry to translate. */ static int beatic_pic_host_xlate(struct irq_host *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) { const u64 *intspec2 = (const u64 *)intspec; *out_hwirq = *intspec2; *out_flags |= IRQ_TYPE_LEVEL_LOW; return 0; } static int beatic_pic_host_match(struct irq_host *h, struct device_node *np) { /* Match all */ return 1; } static struct irq_host_ops beatic_pic_host_ops = { .map = beatic_pic_host_map, .unmap = beatic_pic_host_unmap, .xlate = beatic_pic_host_xlate, .match = beatic_pic_host_match, }; /* * Get an IRQ number * Note: returns VIRQ */ static inline unsigned int beatic_get_irq_plug(void) { int i; uint64_t pending[4], ub; for (i = 0; i < MAX_IRQS; i += 256) { beat_detect_pending_interrupts(i, pending); __asm__ ("cntlzd %0,%1":"=r"(ub): "r"(pending[0] & beatic_irq_mask_enable[i/64+0] & beatic_irq_mask_ack[i/64+0])); if (ub != 64) return i + ub + 0; __asm__ ("cntlzd %0,%1":"=r"(ub): "r"(pending[1] & beatic_irq_mask_enable[i/64+1] & beatic_irq_mask_ack[i/64+1])); if (ub != 64) return i + ub + 64; __asm__ ("cntlzd %0,%1":"=r"(ub): "r"(pending[2] & beatic_irq_mask_enable[i/64+2] & beatic_irq_mask_ack[i/64+2])); if (ub != 64) return i + ub + 128; __asm__ ("cntlzd %0,%1":"=r"(ub): "r"(pending[3] & beatic_irq_mask_enable[i/64+3] & beatic_irq_mask_ack[i/64+3])); if (ub != 64) return i + ub + 192; } return NO_IRQ; } unsigned int beatic_get_irq(void) { unsigned int ret; ret = beatic_get_irq_plug(); if (ret != NO_IRQ) beatic_ack_irq(irq_get_irq_data(ret)); return ret; } /* */ void __init beatic_init_IRQ(void) { int i; memset(beatic_irq_mask_enable, 0, sizeof(beatic_irq_mask_enable)); memset(beatic_irq_mask_ack, 255, sizeof(beatic_irq_mask_ack)); for (i = 0; i < MAX_IRQS; i += 256) beat_set_interrupt_mask(i, 0L, 0L, 0L, 0L); /* Set out get_irq function */ ppc_md.get_irq = beatic_get_irq; /* Allocate an irq host */ beatic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, &beatic_pic_host_ops, 0); BUG_ON(beatic_host == NULL); irq_set_default_host(beatic_host); } void beatic_deinit_IRQ(void) { int i; for (i = 1; i < NR_IRQS; i++) beat_destruct_irq_plug(i); }
gpl-2.0
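beatic_get_irq_plug() above finds the first pending, unmasked interrupt by AND-ing each pending word with the enable and ack masks and counting leading zeros with cntlzd; because plug N lives in bit (63 - N % 64) of its word, the leading-zero count is exactly the lowest pending plug number within that word. A portable sketch of that search follows; the word count and the use of __builtin_clzll are assumptions of the sketch, not the Beat firmware interface.

/* Sketch only: __builtin_clzll replaces the PowerPC cntlzd instruction. */
#include <stdint.h>

#define NR_WORDS	4		/* 4 x 64 = 256 plugs per group, as above */
#define NO_IRQ		(-1)

static int find_pending_plug(const uint64_t pending[NR_WORDS],
			     const uint64_t enable[NR_WORDS],
			     const uint64_t ack[NR_WORDS])
{
	int w;

	for (w = 0; w < NR_WORDS; w++) {
		uint64_t live = pending[w] & enable[w] & ack[w];

		if (live)	/* highest set bit == lowest-numbered live plug */
			return w * 64 + __builtin_clzll(live);
	}

	return NO_IRQ;
}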
AOSParadox/kernel_msm
arch/arm/mach-exynos/clock-exynos4212.c
4623
2888
/*
 * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * EXYNOS4212 - Clock support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>

#include <plat/cpu-freq.h>
#include <plat/clock.h>
#include <plat/cpu.h>
#include <plat/pll.h>
#include <plat/s5p-clock.h>
#include <plat/clock-clksrc.h>
#include <plat/pm.h>

#include <mach/hardware.h>
#include <mach/map.h>
#include <mach/regs-clock.h>

#include "common.h"
#include "clock-exynos4.h"

#ifdef CONFIG_PM_SLEEP
static struct sleep_save exynos4212_clock_save[] = {
	SAVE_ITEM(EXYNOS4_CLKSRC_IMAGE),
	SAVE_ITEM(EXYNOS4_CLKDIV_IMAGE),
	SAVE_ITEM(EXYNOS4212_CLKGATE_IP_IMAGE),
	SAVE_ITEM(EXYNOS4212_CLKGATE_IP_PERIR),
};
#endif

static struct clk *clk_src_mpll_user_list[] = {
	[0] = &clk_fin_mpll,
	[1] = &exynos4_clk_mout_mpll.clk,
};

static struct clksrc_sources clk_src_mpll_user = {
	.sources	= clk_src_mpll_user_list,
	.nr_sources	= ARRAY_SIZE(clk_src_mpll_user_list),
};

static struct clksrc_clk clk_mout_mpll_user = {
	.clk = {
		.name		= "mout_mpll_user",
	},
	.sources	= &clk_src_mpll_user,
	.reg_src	= { .reg = EXYNOS4_CLKSRC_CPU, .shift = 24, .size = 1 },
};

static struct clksrc_clk *sysclks[] = {
	&clk_mout_mpll_user,
};

static struct clksrc_clk clksrcs[] = {
	/* nothing here yet */
};

static struct clk init_clocks_off[] = {
	/* nothing here yet */
};

#ifdef CONFIG_PM_SLEEP
static int exynos4212_clock_suspend(void)
{
	s3c_pm_do_save(exynos4212_clock_save,
			ARRAY_SIZE(exynos4212_clock_save));

	return 0;
}

static void exynos4212_clock_resume(void)
{
	s3c_pm_do_restore_core(exynos4212_clock_save,
			ARRAY_SIZE(exynos4212_clock_save));
}

#else
#define exynos4212_clock_suspend NULL
#define exynos4212_clock_resume NULL
#endif

static struct syscore_ops exynos4212_clock_syscore_ops = {
	.suspend	= exynos4212_clock_suspend,
	.resume		= exynos4212_clock_resume,
};

void __init exynos4212_register_clocks(void)
{
	int ptr;

	/* usbphy1 is removed */
	exynos4_clkset_group_list[4] = NULL;

	/* mout_mpll_user is used */
	exynos4_clkset_group_list[6] = &clk_mout_mpll_user.clk;
	exynos4_clkset_aclk_top_list[0] = &clk_mout_mpll_user.clk;

	exynos4_clk_mout_mpll.reg_src.reg = EXYNOS4_CLKSRC_DMC;
	exynos4_clk_mout_mpll.reg_src.shift = 12;
	exynos4_clk_mout_mpll.reg_src.size = 1;

	for (ptr = 0; ptr < ARRAY_SIZE(sysclks); ptr++)
		s3c_register_clksrc(sysclks[ptr], 1);

	s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));

	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));

	register_syscore_ops(&exynos4212_clock_syscore_ops);
}
gpl-2.0
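Each clksrc_clk above selects its parent through a small bitfield described by a register address, shift and size (for example, clk_mout_mpll_user uses bit 24 of EXYNOS4_CLKSRC_CPU). The sketch below shows how such a field is decoded and re-encoded from a register value; the struct and helper names are illustrative, not the plat/clock API.

/* Sketch only: the register value is passed in directly instead of ioremap'd I/O. */
#include <stdint.h>

struct clk_src_field {
	uint32_t shift;
	uint32_t size;		/* field width in bits */
};

static uint32_t src_get(uint32_t regval, const struct clk_src_field *f)
{
	uint32_t mask = (1u << f->size) - 1u;

	return (regval >> f->shift) & mask;
}

static uint32_t src_set(uint32_t regval, const struct clk_src_field *f, uint32_t sel)
{
	uint32_t mask = (1u << f->size) - 1u;

	regval &= ~(mask << f->shift);
	regval |= (sel & mask) << f->shift;

	return regval;
}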
VegaDevTeam/android_kernel_pantech_msm8974
fs/reiserfs/bitmap.c
4879
40167
/* * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README */ /* Reiserfs block (de)allocator, bitmap-based. */ #include <linux/time.h> #include "reiserfs.h" #include <linux/errno.h> #include <linux/buffer_head.h> #include <linux/kernel.h> #include <linux/pagemap.h> #include <linux/vmalloc.h> #include <linux/quotaops.h> #include <linux/seq_file.h> #define PREALLOCATION_SIZE 9 /* different reiserfs block allocator options */ #define SB_ALLOC_OPTS(s) (REISERFS_SB(s)->s_alloc_options.bits) #define _ALLOC_concentrating_formatted_nodes 0 #define _ALLOC_displacing_large_files 1 #define _ALLOC_displacing_new_packing_localities 2 #define _ALLOC_old_hashed_relocation 3 #define _ALLOC_new_hashed_relocation 4 #define _ALLOC_skip_busy 5 #define _ALLOC_displace_based_on_dirid 6 #define _ALLOC_hashed_formatted_nodes 7 #define _ALLOC_old_way 8 #define _ALLOC_hundredth_slices 9 #define _ALLOC_dirid_groups 10 #define _ALLOC_oid_groups 11 #define _ALLOC_packing_groups 12 #define concentrating_formatted_nodes(s) test_bit(_ALLOC_concentrating_formatted_nodes, &SB_ALLOC_OPTS(s)) #define displacing_large_files(s) test_bit(_ALLOC_displacing_large_files, &SB_ALLOC_OPTS(s)) #define displacing_new_packing_localities(s) test_bit(_ALLOC_displacing_new_packing_localities, &SB_ALLOC_OPTS(s)) #define SET_OPTION(optname) \ do { \ reiserfs_info(s, "block allocator option \"%s\" is set", #optname); \ set_bit(_ALLOC_ ## optname , &SB_ALLOC_OPTS(s)); \ } while(0) #define TEST_OPTION(optname, s) \ test_bit(_ALLOC_ ## optname , &SB_ALLOC_OPTS(s)) static inline void get_bit_address(struct super_block *s, b_blocknr_t block, unsigned int *bmap_nr, unsigned int *offset) { /* It is in the bitmap block number equal to the block * number divided by the number of bits in a block. */ *bmap_nr = block >> (s->s_blocksize_bits + 3); /* Within that bitmap block it is located at bit offset *offset. */ *offset = block & ((s->s_blocksize << 3) - 1); } int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value) { unsigned int bmap, offset; unsigned int bmap_count = reiserfs_bmap_count(s); if (block == 0 || block >= SB_BLOCK_COUNT(s)) { reiserfs_error(s, "vs-4010", "block number is out of range %lu (%u)", block, SB_BLOCK_COUNT(s)); return 0; } get_bit_address(s, block, &bmap, &offset); /* Old format filesystem? Unlikely, but the bitmaps are all up front so * we need to account for it. */ if (unlikely(test_bit(REISERFS_OLD_FORMAT, &(REISERFS_SB(s)->s_properties)))) { b_blocknr_t bmap1 = REISERFS_SB(s)->s_sbh->b_blocknr + 1; if (block >= bmap1 && block <= bmap1 + bmap_count) { reiserfs_error(s, "vs-4019", "bitmap block %lu(%u) " "can't be freed or reused", block, bmap_count); return 0; } } else { if (offset == 0) { reiserfs_error(s, "vs-4020", "bitmap block %lu(%u) " "can't be freed or reused", block, bmap_count); return 0; } } if (bmap >= bmap_count) { reiserfs_error(s, "vs-4030", "bitmap for requested block " "is out of range: block=%lu, bitmap_nr=%u", block, bmap); return 0; } if (bit_value == 0 && block == SB_ROOT_BLOCK(s)) { reiserfs_error(s, "vs-4050", "this is root block (%u), " "it must be busy", SB_ROOT_BLOCK(s)); return 0; } return 1; } /* searches in journal structures for a given block number (bmap, off). If block is found in reiserfs journal it suggests next free block candidate to test. 
*/ static inline int is_block_in_journal(struct super_block *s, unsigned int bmap, int off, int *next) { b_blocknr_t tmp; if (reiserfs_in_journal(s, bmap, off, 1, &tmp)) { if (tmp) { /* hint supplied */ *next = tmp; PROC_INFO_INC(s, scan_bitmap.in_journal_hint); } else { (*next) = off + 1; /* inc offset to avoid looping. */ PROC_INFO_INC(s, scan_bitmap.in_journal_nohint); } PROC_INFO_INC(s, scan_bitmap.retry); return 1; } return 0; } /* it searches for a window of zero bits with given minimum and maximum lengths in one bitmap * block; */ static int scan_bitmap_block(struct reiserfs_transaction_handle *th, unsigned int bmap_n, int *beg, int boundary, int min, int max, int unfm) { struct super_block *s = th->t_super; struct reiserfs_bitmap_info *bi = &SB_AP_BITMAP(s)[bmap_n]; struct buffer_head *bh; int end, next; int org = *beg; BUG_ON(!th->t_trans_id); RFALSE(bmap_n >= reiserfs_bmap_count(s), "Bitmap %u is out of " "range (0..%u)", bmap_n, reiserfs_bmap_count(s) - 1); PROC_INFO_INC(s, scan_bitmap.bmap); /* this is unclear and lacks comments, explain how journal bitmaps work here for the reader. Convey a sense of the design here. What is a window? */ /* - I mean `a window of zero bits' as in description of this function - Zam. */ if (!bi) { reiserfs_error(s, "jdm-4055", "NULL bitmap info pointer " "for bitmap %d", bmap_n); return 0; } bh = reiserfs_read_bitmap_block(s, bmap_n); if (bh == NULL) return 0; while (1) { cont: if (bi->free_count < min) { brelse(bh); return 0; // No free blocks in this bitmap } /* search for a first zero bit -- beginning of a window */ *beg = reiserfs_find_next_zero_le_bit ((unsigned long *)(bh->b_data), boundary, *beg); if (*beg + min > boundary) { /* search for a zero bit fails or the rest of bitmap block * cannot contain a zero window of minimum size */ brelse(bh); return 0; } if (unfm && is_block_in_journal(s, bmap_n, *beg, beg)) continue; /* first zero bit found; we check next bits */ for (end = *beg + 1;; end++) { if (end >= *beg + max || end >= boundary || reiserfs_test_le_bit(end, bh->b_data)) { next = end; break; } /* finding the other end of zero bit window requires looking into journal structures (in * case of searching for free blocks for unformatted nodes) */ if (unfm && is_block_in_journal(s, bmap_n, end, &next)) break; } /* now (*beg) points to beginning of zero bits window, * (end) points to one bit after the window end */ if (end - *beg >= min) { /* it seems we have found window of proper size */ int i; reiserfs_prepare_for_journal(s, bh, 1); /* try to set all blocks used checking are they still free */ for (i = *beg; i < end; i++) { /* It seems that we should not check in journal again. */ if (reiserfs_test_and_set_le_bit (i, bh->b_data)) { /* bit was set by another process * while we slept in prepare_for_journal() */ PROC_INFO_INC(s, scan_bitmap.stolen); if (i >= *beg + min) { /* we can continue with smaller set of allocated blocks, * if length of this set is more or equal to `min' */ end = i; break; } /* otherwise we clear all bit were set ... */ while (--i >= *beg) reiserfs_clear_le_bit (i, bh->b_data); reiserfs_restore_prepared_buffer(s, bh); *beg = org; /* ... 
and search again in current block from beginning */ goto cont; } } bi->free_count -= (end - *beg); journal_mark_dirty(th, s, bh); brelse(bh); /* free block count calculation */ reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); PUT_SB_FREE_BLOCKS(s, SB_FREE_BLOCKS(s) - (end - *beg)); journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s)); return end - (*beg); } else { *beg = next; } } } static int bmap_hash_id(struct super_block *s, u32 id) { char *hash_in = NULL; unsigned long hash; unsigned bm; if (id <= 2) { bm = 1; } else { hash_in = (char *)(&id); hash = keyed_hash(hash_in, 4); bm = hash % reiserfs_bmap_count(s); if (!bm) bm = 1; } /* this can only be true when SB_BMAP_NR = 1 */ if (bm >= reiserfs_bmap_count(s)) bm = 0; return bm; } /* * hashes the id and then returns > 0 if the block group for the * corresponding hash is full */ static inline int block_group_used(struct super_block *s, u32 id) { int bm = bmap_hash_id(s, id); struct reiserfs_bitmap_info *info = &SB_AP_BITMAP(s)[bm]; /* If we don't have cached information on this bitmap block, we're * going to have to load it later anyway. Loading it here allows us * to make a better decision. This favors long-term performance gain * with a better on-disk layout vs. a short term gain of skipping the * read and potentially having a bad placement. */ if (info->free_count == UINT_MAX) { struct buffer_head *bh = reiserfs_read_bitmap_block(s, bm); brelse(bh); } if (info->free_count > ((s->s_blocksize << 3) * 60 / 100)) { return 0; } return 1; } /* * the packing is returned in disk byte order */ __le32 reiserfs_choose_packing(struct inode * dir) { __le32 packing; if (TEST_OPTION(packing_groups, dir->i_sb)) { u32 parent_dir = le32_to_cpu(INODE_PKEY(dir)->k_dir_id); /* * some versions of reiserfsck expect packing locality 1 to be * special */ if (parent_dir == 1 || block_group_used(dir->i_sb, parent_dir)) packing = INODE_PKEY(dir)->k_objectid; else packing = INODE_PKEY(dir)->k_dir_id; } else packing = INODE_PKEY(dir)->k_objectid; return packing; } /* Tries to find contiguous zero bit window (given size) in given region of * bitmap and place new blocks there. Returns number of allocated blocks. */ static int scan_bitmap(struct reiserfs_transaction_handle *th, b_blocknr_t * start, b_blocknr_t finish, int min, int max, int unfm, sector_t file_block) { int nr_allocated = 0; struct super_block *s = th->t_super; /* find every bm and bmap and bmap_nr in this file, and change them all to bitmap_blocknr * - Hans, it is not a block number - Zam. */ unsigned int bm, off; unsigned int end_bm, end_off; unsigned int off_max = s->s_blocksize << 3; BUG_ON(!th->t_trans_id); PROC_INFO_INC(s, scan_bitmap.call); if (SB_FREE_BLOCKS(s) <= 0) return 0; // No point in looking for more free blocks get_bit_address(s, *start, &bm, &off); get_bit_address(s, finish, &end_bm, &end_off); if (bm > reiserfs_bmap_count(s)) return 0; if (end_bm > reiserfs_bmap_count(s)) end_bm = reiserfs_bmap_count(s); /* When the bitmap is more than 10% free, anyone can allocate. * When it's less than 10% free, only files that already use the * bitmap are allowed. Once we pass 80% full, this restriction * is lifted. * * We do this so that files that grow later still have space close to * their original allocation. This improves locality, and presumably * performance as a result. * * This is only an allocation policy and does not make up for getting a * bad hint. Decent hinting must be implemented for this to work well. 
*/ if (TEST_OPTION(skip_busy, s) && SB_FREE_BLOCKS(s) > SB_BLOCK_COUNT(s) / 20) { for (; bm < end_bm; bm++, off = 0) { if ((off && (!unfm || (file_block != 0))) || SB_AP_BITMAP(s)[bm].free_count > (s->s_blocksize << 3) / 10) nr_allocated = scan_bitmap_block(th, bm, &off, off_max, min, max, unfm); if (nr_allocated) goto ret; } /* we know from above that start is a reasonable number */ get_bit_address(s, *start, &bm, &off); } for (; bm < end_bm; bm++, off = 0) { nr_allocated = scan_bitmap_block(th, bm, &off, off_max, min, max, unfm); if (nr_allocated) goto ret; } nr_allocated = scan_bitmap_block(th, bm, &off, end_off + 1, min, max, unfm); ret: *start = bm * off_max + off; return nr_allocated; } static void _reiserfs_free_block(struct reiserfs_transaction_handle *th, struct inode *inode, b_blocknr_t block, int for_unformatted) { struct super_block *s = th->t_super; struct reiserfs_super_block *rs; struct buffer_head *sbh, *bmbh; struct reiserfs_bitmap_info *apbi; unsigned int nr, offset; BUG_ON(!th->t_trans_id); PROC_INFO_INC(s, free_block); rs = SB_DISK_SUPER_BLOCK(s); sbh = SB_BUFFER_WITH_SB(s); apbi = SB_AP_BITMAP(s); get_bit_address(s, block, &nr, &offset); if (nr >= reiserfs_bmap_count(s)) { reiserfs_error(s, "vs-4075", "block %lu is out of range", block); return; } bmbh = reiserfs_read_bitmap_block(s, nr); if (!bmbh) return; reiserfs_prepare_for_journal(s, bmbh, 1); /* clear bit for the given block in bit map */ if (!reiserfs_test_and_clear_le_bit(offset, bmbh->b_data)) { reiserfs_error(s, "vs-4080", "block %lu: bit already cleared", block); } apbi[nr].free_count++; journal_mark_dirty(th, s, bmbh); brelse(bmbh); reiserfs_prepare_for_journal(s, sbh, 1); /* update super block */ set_sb_free_blocks(rs, sb_free_blocks(rs) + 1); journal_mark_dirty(th, s, sbh); if (for_unformatted) dquot_free_block_nodirty(inode, 1); } void reiserfs_free_block(struct reiserfs_transaction_handle *th, struct inode *inode, b_blocknr_t block, int for_unformatted) { struct super_block *s = th->t_super; BUG_ON(!th->t_trans_id); RFALSE(!s, "vs-4061: trying to free block on nonexistent device"); if (!is_reusable(s, block, 1)) return; if (block > sb_block_count(REISERFS_SB(s)->s_rs)) { reiserfs_error(th->t_super, "bitmap-4072", "Trying to free block outside file system " "boundaries (%lu > %lu)", block, sb_block_count(REISERFS_SB(s)->s_rs)); return; } /* mark it before we clear it, just in case */ journal_mark_freed(th, s, block); _reiserfs_free_block(th, inode, block, for_unformatted); } /* preallocated blocks don't need to be run through journal_mark_freed */ static void reiserfs_free_prealloc_block(struct reiserfs_transaction_handle *th, struct inode *inode, b_blocknr_t block) { BUG_ON(!th->t_trans_id); RFALSE(!th->t_super, "vs-4060: trying to free block on nonexistent device"); if (!is_reusable(th->t_super, block, 1)) return; _reiserfs_free_block(th, inode, block, 1); } static void __discard_prealloc(struct reiserfs_transaction_handle *th, struct reiserfs_inode_info *ei) { unsigned long save = ei->i_prealloc_block; int dirty = 0; struct inode *inode = &ei->vfs_inode; BUG_ON(!th->t_trans_id); #ifdef CONFIG_REISERFS_CHECK if (ei->i_prealloc_count < 0) reiserfs_error(th->t_super, "zam-4001", "inode has negative prealloc blocks count."); #endif while (ei->i_prealloc_count > 0) { reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block); ei->i_prealloc_block++; ei->i_prealloc_count--; dirty = 1; } if (dirty) reiserfs_update_sd(th, inode); ei->i_prealloc_block = save; list_del_init(&(ei->i_prealloc_list)); } /* 
FIXME: It should be inline function */ void reiserfs_discard_prealloc(struct reiserfs_transaction_handle *th, struct inode *inode) { struct reiserfs_inode_info *ei = REISERFS_I(inode); BUG_ON(!th->t_trans_id); if (ei->i_prealloc_count) __discard_prealloc(th, ei); } void reiserfs_discard_all_prealloc(struct reiserfs_transaction_handle *th) { struct list_head *plist = &SB_JOURNAL(th->t_super)->j_prealloc_list; BUG_ON(!th->t_trans_id); while (!list_empty(plist)) { struct reiserfs_inode_info *ei; ei = list_entry(plist->next, struct reiserfs_inode_info, i_prealloc_list); #ifdef CONFIG_REISERFS_CHECK if (!ei->i_prealloc_count) { reiserfs_error(th->t_super, "zam-4001", "inode is in prealloc list but has " "no preallocated blocks."); } #endif __discard_prealloc(th, ei); } } void reiserfs_init_alloc_options(struct super_block *s) { set_bit(_ALLOC_skip_busy, &SB_ALLOC_OPTS(s)); set_bit(_ALLOC_dirid_groups, &SB_ALLOC_OPTS(s)); set_bit(_ALLOC_packing_groups, &SB_ALLOC_OPTS(s)); } /* block allocator related options are parsed here */ int reiserfs_parse_alloc_options(struct super_block *s, char *options) { char *this_char, *value; REISERFS_SB(s)->s_alloc_options.bits = 0; /* clear default settings */ while ((this_char = strsep(&options, ":")) != NULL) { if ((value = strchr(this_char, '=')) != NULL) *value++ = 0; if (!strcmp(this_char, "concentrating_formatted_nodes")) { int temp; SET_OPTION(concentrating_formatted_nodes); temp = (value && *value) ? simple_strtoul(value, &value, 0) : 10; if (temp <= 0 || temp > 100) { REISERFS_SB(s)->s_alloc_options.border = 10; } else { REISERFS_SB(s)->s_alloc_options.border = 100 / temp; } continue; } if (!strcmp(this_char, "displacing_large_files")) { SET_OPTION(displacing_large_files); REISERFS_SB(s)->s_alloc_options.large_file_size = (value && *value) ? simple_strtoul(value, &value, 0) : 16; continue; } if (!strcmp(this_char, "displacing_new_packing_localities")) { SET_OPTION(displacing_new_packing_localities); continue; }; if (!strcmp(this_char, "old_hashed_relocation")) { SET_OPTION(old_hashed_relocation); continue; } if (!strcmp(this_char, "new_hashed_relocation")) { SET_OPTION(new_hashed_relocation); continue; } if (!strcmp(this_char, "dirid_groups")) { SET_OPTION(dirid_groups); continue; } if (!strcmp(this_char, "oid_groups")) { SET_OPTION(oid_groups); continue; } if (!strcmp(this_char, "packing_groups")) { SET_OPTION(packing_groups); continue; } if (!strcmp(this_char, "hashed_formatted_nodes")) { SET_OPTION(hashed_formatted_nodes); continue; } if (!strcmp(this_char, "skip_busy")) { SET_OPTION(skip_busy); continue; } if (!strcmp(this_char, "hundredth_slices")) { SET_OPTION(hundredth_slices); continue; } if (!strcmp(this_char, "old_way")) { SET_OPTION(old_way); continue; } if (!strcmp(this_char, "displace_based_on_dirid")) { SET_OPTION(displace_based_on_dirid); continue; } if (!strcmp(this_char, "preallocmin")) { REISERFS_SB(s)->s_alloc_options.preallocmin = (value && *value) ? simple_strtoul(value, &value, 0) : 4; continue; } if (!strcmp(this_char, "preallocsize")) { REISERFS_SB(s)->s_alloc_options.preallocsize = (value && *value) ? 
simple_strtoul(value, &value, 0) : PREALLOCATION_SIZE; continue; } reiserfs_warning(s, "zam-4001", "unknown option - %s", this_char); return 1; } reiserfs_info(s, "allocator options = [%08x]\n", SB_ALLOC_OPTS(s)); return 0; } static void print_sep(struct seq_file *seq, int *first) { if (!*first) seq_puts(seq, ":"); else *first = 0; } void show_alloc_options(struct seq_file *seq, struct super_block *s) { int first = 1; if (SB_ALLOC_OPTS(s) == ((1 << _ALLOC_skip_busy) | (1 << _ALLOC_dirid_groups) | (1 << _ALLOC_packing_groups))) return; seq_puts(seq, ",alloc="); if (TEST_OPTION(concentrating_formatted_nodes, s)) { print_sep(seq, &first); if (REISERFS_SB(s)->s_alloc_options.border != 10) { seq_printf(seq, "concentrating_formatted_nodes=%d", 100 / REISERFS_SB(s)->s_alloc_options.border); } else seq_puts(seq, "concentrating_formatted_nodes"); } if (TEST_OPTION(displacing_large_files, s)) { print_sep(seq, &first); if (REISERFS_SB(s)->s_alloc_options.large_file_size != 16) { seq_printf(seq, "displacing_large_files=%lu", REISERFS_SB(s)->s_alloc_options.large_file_size); } else seq_puts(seq, "displacing_large_files"); } if (TEST_OPTION(displacing_new_packing_localities, s)) { print_sep(seq, &first); seq_puts(seq, "displacing_new_packing_localities"); } if (TEST_OPTION(old_hashed_relocation, s)) { print_sep(seq, &first); seq_puts(seq, "old_hashed_relocation"); } if (TEST_OPTION(new_hashed_relocation, s)) { print_sep(seq, &first); seq_puts(seq, "new_hashed_relocation"); } if (TEST_OPTION(dirid_groups, s)) { print_sep(seq, &first); seq_puts(seq, "dirid_groups"); } if (TEST_OPTION(oid_groups, s)) { print_sep(seq, &first); seq_puts(seq, "oid_groups"); } if (TEST_OPTION(packing_groups, s)) { print_sep(seq, &first); seq_puts(seq, "packing_groups"); } if (TEST_OPTION(hashed_formatted_nodes, s)) { print_sep(seq, &first); seq_puts(seq, "hashed_formatted_nodes"); } if (TEST_OPTION(skip_busy, s)) { print_sep(seq, &first); seq_puts(seq, "skip_busy"); } if (TEST_OPTION(hundredth_slices, s)) { print_sep(seq, &first); seq_puts(seq, "hundredth_slices"); } if (TEST_OPTION(old_way, s)) { print_sep(seq, &first); seq_puts(seq, "old_way"); } if (TEST_OPTION(displace_based_on_dirid, s)) { print_sep(seq, &first); seq_puts(seq, "displace_based_on_dirid"); } if (REISERFS_SB(s)->s_alloc_options.preallocmin != 0) { print_sep(seq, &first); seq_printf(seq, "preallocmin=%d", REISERFS_SB(s)->s_alloc_options.preallocmin); } if (REISERFS_SB(s)->s_alloc_options.preallocsize != 17) { print_sep(seq, &first); seq_printf(seq, "preallocsize=%d", REISERFS_SB(s)->s_alloc_options.preallocsize); } } static inline void new_hashed_relocation(reiserfs_blocknr_hint_t * hint) { char *hash_in; if (hint->formatted_node) { hash_in = (char *)&hint->key.k_dir_id; } else { if (!hint->inode) { //hint->search_start = hint->beg; hash_in = (char *)&hint->key.k_dir_id; } else if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super)) hash_in = (char *)(&INODE_PKEY(hint->inode)->k_dir_id); else hash_in = (char *)(&INODE_PKEY(hint->inode)->k_objectid); } hint->search_start = hint->beg + keyed_hash(hash_in, 4) % (hint->end - hint->beg); } /* * Relocation based on dirid, hashing them into a given bitmap block * files. 
Formatted nodes are unaffected, a separate policy covers them */ static void dirid_groups(reiserfs_blocknr_hint_t * hint) { unsigned long hash; __u32 dirid = 0; int bm = 0; struct super_block *sb = hint->th->t_super; if (hint->inode) dirid = le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id); else if (hint->formatted_node) dirid = hint->key.k_dir_id; if (dirid) { bm = bmap_hash_id(sb, dirid); hash = bm * (sb->s_blocksize << 3); /* give a portion of the block group to metadata */ if (hint->inode) hash += sb->s_blocksize / 2; hint->search_start = hash; } } /* * Relocation based on oid, hashing them into a given bitmap block * files. Formatted nodes are unaffected, a separate policy covers them */ static void oid_groups(reiserfs_blocknr_hint_t * hint) { if (hint->inode) { unsigned long hash; __u32 oid; __u32 dirid; int bm; dirid = le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id); /* keep the root dir and it's first set of subdirs close to * the start of the disk */ if (dirid <= 2) hash = (hint->inode->i_sb->s_blocksize << 3); else { oid = le32_to_cpu(INODE_PKEY(hint->inode)->k_objectid); bm = bmap_hash_id(hint->inode->i_sb, oid); hash = bm * (hint->inode->i_sb->s_blocksize << 3); } hint->search_start = hash; } } /* returns 1 if it finds an indirect item and gets valid hint info * from it, otherwise 0 */ static int get_left_neighbor(reiserfs_blocknr_hint_t * hint) { struct treepath *path; struct buffer_head *bh; struct item_head *ih; int pos_in_item; __le32 *item; int ret = 0; if (!hint->path) /* reiserfs code can call this function w/o pointer to path * structure supplied; then we rely on supplied search_start */ return 0; path = hint->path; bh = get_last_bh(path); RFALSE(!bh, "green-4002: Illegal path specified to get_left_neighbor"); ih = get_ih(path); pos_in_item = path->pos_in_item; item = get_item(path); hint->search_start = bh->b_blocknr; if (!hint->formatted_node && is_indirect_le_ih(ih)) { /* for indirect item: go to left and look for the first non-hole entry in the indirect item */ if (pos_in_item == I_UNFM_NUM(ih)) pos_in_item--; // pos_in_item = I_UNFM_NUM (ih) - 1; while (pos_in_item >= 0) { int t = get_block_num(item, pos_in_item); if (t) { hint->search_start = t; ret = 1; break; } pos_in_item--; } } /* does result value fit into specified region? */ return ret; } /* should be, if formatted node, then try to put on first part of the device specified as number of percent with mount option device, else try to put on last of device. This is not to say it is good code to do so, but the effect should be measured. 
*/ static inline void set_border_in_hint(struct super_block *s, reiserfs_blocknr_hint_t * hint) { b_blocknr_t border = SB_BLOCK_COUNT(s) / REISERFS_SB(s)->s_alloc_options.border; if (hint->formatted_node) hint->end = border - 1; else hint->beg = border; } static inline void displace_large_file(reiserfs_blocknr_hint_t * hint) { if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super)) hint->search_start = hint->beg + keyed_hash((char *)(&INODE_PKEY(hint->inode)->k_dir_id), 4) % (hint->end - hint->beg); else hint->search_start = hint->beg + keyed_hash((char *)(&INODE_PKEY(hint->inode)->k_objectid), 4) % (hint->end - hint->beg); } static inline void hash_formatted_node(reiserfs_blocknr_hint_t * hint) { char *hash_in; if (!hint->inode) hash_in = (char *)&hint->key.k_dir_id; else if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super)) hash_in = (char *)(&INODE_PKEY(hint->inode)->k_dir_id); else hash_in = (char *)(&INODE_PKEY(hint->inode)->k_objectid); hint->search_start = hint->beg + keyed_hash(hash_in, 4) % (hint->end - hint->beg); } static inline int this_blocknr_allocation_would_make_it_a_large_file(reiserfs_blocknr_hint_t * hint) { return hint->block == REISERFS_SB(hint->th->t_super)->s_alloc_options.large_file_size; } #ifdef DISPLACE_NEW_PACKING_LOCALITIES static inline void displace_new_packing_locality(reiserfs_blocknr_hint_t * hint) { struct in_core_key *key = &hint->key; hint->th->displace_new_blocks = 0; hint->search_start = hint->beg + keyed_hash((char *)(&key->k_objectid), 4) % (hint->end - hint->beg); } #endif static inline int old_hashed_relocation(reiserfs_blocknr_hint_t * hint) { b_blocknr_t border; u32 hash_in; if (hint->formatted_node || hint->inode == NULL) { return 0; } hash_in = le32_to_cpu((INODE_PKEY(hint->inode))->k_dir_id); border = hint->beg + (u32) keyed_hash(((char *)(&hash_in)), 4) % (hint->end - hint->beg - 1); if (border > hint->search_start) hint->search_start = border; return 1; } static inline int old_way(reiserfs_blocknr_hint_t * hint) { b_blocknr_t border; if (hint->formatted_node || hint->inode == NULL) { return 0; } border = hint->beg + le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id) % (hint->end - hint->beg); if (border > hint->search_start) hint->search_start = border; return 1; } static inline void hundredth_slices(reiserfs_blocknr_hint_t * hint) { struct in_core_key *key = &hint->key; b_blocknr_t slice_start; slice_start = (keyed_hash((char *)(&key->k_dir_id), 4) % 100) * (hint->end / 100); if (slice_start > hint->search_start || slice_start + (hint->end / 100) <= hint->search_start) { hint->search_start = slice_start; } } static void determine_search_start(reiserfs_blocknr_hint_t * hint, int amount_needed) { struct super_block *s = hint->th->t_super; int unfm_hint; hint->beg = 0; hint->end = SB_BLOCK_COUNT(s) - 1; /* This is former border algorithm. Now with tunable border offset */ if (concentrating_formatted_nodes(s)) set_border_in_hint(s, hint); #ifdef DISPLACE_NEW_PACKING_LOCALITIES /* whenever we create a new directory, we displace it. 
At first we will hash for location, later we might look for a moderately empty place for it */ if (displacing_new_packing_localities(s) && hint->th->displace_new_blocks) { displace_new_packing_locality(hint); /* we do not continue determine_search_start, * if new packing locality is being displaced */ return; } #endif /* all persons should feel encouraged to add more special cases here and * test them */ if (displacing_large_files(s) && !hint->formatted_node && this_blocknr_allocation_would_make_it_a_large_file(hint)) { displace_large_file(hint); return; } /* if none of our special cases is relevant, use the left neighbor in the tree order of the new node we are allocating for */ if (hint->formatted_node && TEST_OPTION(hashed_formatted_nodes, s)) { hash_formatted_node(hint); return; } unfm_hint = get_left_neighbor(hint); /* Mimic old block allocator behaviour, that is if VFS allowed for preallocation, new blocks are displaced based on directory ID. Also, if suggested search_start is less than last preallocated block, we start searching from it, assuming that HDD dataflow is faster in forward direction */ if (TEST_OPTION(old_way, s)) { if (!hint->formatted_node) { if (!reiserfs_hashed_relocation(s)) old_way(hint); else if (!reiserfs_no_unhashed_relocation(s)) old_hashed_relocation(hint); if (hint->inode && hint->search_start < REISERFS_I(hint->inode)->i_prealloc_block) hint->search_start = REISERFS_I(hint->inode)->i_prealloc_block; } return; } /* This is an approach proposed by Hans */ if (TEST_OPTION(hundredth_slices, s) && !(displacing_large_files(s) && !hint->formatted_node)) { hundredth_slices(hint); return; } /* old_hashed_relocation only works on unformatted */ if (!unfm_hint && !hint->formatted_node && TEST_OPTION(old_hashed_relocation, s)) { old_hashed_relocation(hint); } /* new_hashed_relocation works with both formatted/unformatted nodes */ if ((!unfm_hint || hint->formatted_node) && TEST_OPTION(new_hashed_relocation, s)) { new_hashed_relocation(hint); } /* dirid grouping works only on unformatted nodes */ if (!unfm_hint && !hint->formatted_node && TEST_OPTION(dirid_groups, s)) { dirid_groups(hint); } #ifdef DISPLACE_NEW_PACKING_LOCALITIES if (hint->formatted_node && TEST_OPTION(dirid_groups, s)) { dirid_groups(hint); } #endif /* oid grouping works only on unformatted nodes */ if (!unfm_hint && !hint->formatted_node && TEST_OPTION(oid_groups, s)) { oid_groups(hint); } return; } static int determine_prealloc_size(reiserfs_blocknr_hint_t * hint) { /* make minimum size a mount option and benchmark both ways */ /* we preallocate blocks only for regular files, specific size */ /* benchmark preallocating always and see what happens */ hint->prealloc_size = 0; if (!hint->formatted_node && hint->preallocate) { if (S_ISREG(hint->inode->i_mode) && hint->inode->i_size >= REISERFS_SB(hint->th->t_super)->s_alloc_options. preallocmin * hint->inode->i_sb->s_blocksize) hint->prealloc_size = REISERFS_SB(hint->th->t_super)->s_alloc_options. preallocsize - 1; } return CARRY_ON; } /* XXX I know it could be merged with upper-level function; but may be result function would be too complex. 
*/ static inline int allocate_without_wrapping_disk(reiserfs_blocknr_hint_t * hint, b_blocknr_t * new_blocknrs, b_blocknr_t start, b_blocknr_t finish, int min, int amount_needed, int prealloc_size) { int rest = amount_needed; int nr_allocated; while (rest > 0 && start <= finish) { nr_allocated = scan_bitmap(hint->th, &start, finish, min, rest + prealloc_size, !hint->formatted_node, hint->block); if (nr_allocated == 0) /* no new blocks allocated, return */ break; /* fill free_blocknrs array first */ while (rest > 0 && nr_allocated > 0) { *new_blocknrs++ = start++; rest--; nr_allocated--; } /* do we have something to fill prealloc. array also ? */ if (nr_allocated > 0) { /* it means prealloc_size was greater that 0 and we do preallocation */ list_add(&REISERFS_I(hint->inode)->i_prealloc_list, &SB_JOURNAL(hint->th->t_super)-> j_prealloc_list); REISERFS_I(hint->inode)->i_prealloc_block = start; REISERFS_I(hint->inode)->i_prealloc_count = nr_allocated; break; } } return (amount_needed - rest); } static inline int blocknrs_and_prealloc_arrays_from_search_start (reiserfs_blocknr_hint_t * hint, b_blocknr_t * new_blocknrs, int amount_needed) { struct super_block *s = hint->th->t_super; b_blocknr_t start = hint->search_start; b_blocknr_t finish = SB_BLOCK_COUNT(s) - 1; int passno = 0; int nr_allocated = 0; determine_prealloc_size(hint); if (!hint->formatted_node) { int quota_ret; #ifdef REISERQUOTA_DEBUG reiserfs_debug(s, REISERFS_DEBUG_CODE, "reiserquota: allocating %d blocks id=%u", amount_needed, hint->inode->i_uid); #endif quota_ret = dquot_alloc_block_nodirty(hint->inode, amount_needed); if (quota_ret) /* Quota exceeded? */ return QUOTA_EXCEEDED; if (hint->preallocate && hint->prealloc_size) { #ifdef REISERQUOTA_DEBUG reiserfs_debug(s, REISERFS_DEBUG_CODE, "reiserquota: allocating (prealloc) %d blocks id=%u", hint->prealloc_size, hint->inode->i_uid); #endif quota_ret = dquot_prealloc_block_nodirty(hint->inode, hint->prealloc_size); if (quota_ret) hint->preallocate = hint->prealloc_size = 0; } /* for unformatted nodes, force large allocations */ } do { switch (passno++) { case 0: /* Search from hint->search_start to end of disk */ start = hint->search_start; finish = SB_BLOCK_COUNT(s) - 1; break; case 1: /* Search from hint->beg to hint->search_start */ start = hint->beg; finish = hint->search_start; break; case 2: /* Last chance: Search from 0 to hint->beg */ start = 0; finish = hint->beg; break; default: /* We've tried searching everywhere, not enough space */ /* Free the blocks */ if (!hint->formatted_node) { #ifdef REISERQUOTA_DEBUG reiserfs_debug(s, REISERFS_DEBUG_CODE, "reiserquota: freeing (nospace) %d blocks id=%u", amount_needed + hint->prealloc_size - nr_allocated, hint->inode->i_uid); #endif /* Free not allocated blocks */ dquot_free_block_nodirty(hint->inode, amount_needed + hint->prealloc_size - nr_allocated); } while (nr_allocated--) reiserfs_free_block(hint->th, hint->inode, new_blocknrs[nr_allocated], !hint->formatted_node); return NO_DISK_SPACE; } } while ((nr_allocated += allocate_without_wrapping_disk(hint, new_blocknrs + nr_allocated, start, finish, 1, amount_needed - nr_allocated, hint-> prealloc_size)) < amount_needed); if (!hint->formatted_node && amount_needed + hint->prealloc_size > nr_allocated + REISERFS_I(hint->inode)->i_prealloc_count) { /* Some of preallocation blocks were not allocated */ #ifdef REISERQUOTA_DEBUG reiserfs_debug(s, REISERFS_DEBUG_CODE, "reiserquota: freeing (failed prealloc) %d blocks id=%u", amount_needed + hint->prealloc_size - nr_allocated - 
REISERFS_I(hint->inode)->i_prealloc_count, hint->inode->i_uid); #endif dquot_free_block_nodirty(hint->inode, amount_needed + hint->prealloc_size - nr_allocated - REISERFS_I(hint->inode)-> i_prealloc_count); } return CARRY_ON; } /* grab new blocknrs from preallocated list */ /* return amount still needed after using them */ static int use_preallocated_list_if_available(reiserfs_blocknr_hint_t * hint, b_blocknr_t * new_blocknrs, int amount_needed) { struct inode *inode = hint->inode; if (REISERFS_I(inode)->i_prealloc_count > 0) { while (amount_needed) { *new_blocknrs++ = REISERFS_I(inode)->i_prealloc_block++; REISERFS_I(inode)->i_prealloc_count--; amount_needed--; if (REISERFS_I(inode)->i_prealloc_count <= 0) { list_del(&REISERFS_I(inode)->i_prealloc_list); break; } } } /* return amount still needed after using preallocated blocks */ return amount_needed; } int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t * hint, b_blocknr_t * new_blocknrs, int amount_needed, int reserved_by_us /* Amount of blocks we have already reserved */ ) { int initial_amount_needed = amount_needed; int ret; struct super_block *s = hint->th->t_super; /* Check if there is enough space, taking into account reserved space */ if (SB_FREE_BLOCKS(s) - REISERFS_SB(s)->reserved_blocks < amount_needed - reserved_by_us) return NO_DISK_SPACE; /* should this be if !hint->inode && hint->preallocate? */ /* do you mean hint->formatted_node can be removed ? - Zam */ /* hint->formatted_node cannot be removed because we try to access inode information here, and there is often no inode assotiated with metadata allocations - green */ if (!hint->formatted_node && hint->preallocate) { amount_needed = use_preallocated_list_if_available (hint, new_blocknrs, amount_needed); if (amount_needed == 0) /* all blocknrs we need we got from prealloc. list */ return CARRY_ON; new_blocknrs += (initial_amount_needed - amount_needed); } /* find search start and save it in hint structure */ determine_search_start(hint, amount_needed); if (hint->search_start >= SB_BLOCK_COUNT(s)) hint->search_start = SB_BLOCK_COUNT(s) - 1; /* allocation itself; fill new_blocknrs and preallocation arrays */ ret = blocknrs_and_prealloc_arrays_from_search_start (hint, new_blocknrs, amount_needed); /* we used prealloc. list to fill (partially) new_blocknrs array. If final allocation fails we * need to return blocks back to prealloc. list or just free them. 
-- Zam (I chose second * variant) */ if (ret != CARRY_ON) { while (amount_needed++ < initial_amount_needed) { reiserfs_free_block(hint->th, hint->inode, *(--new_blocknrs), 1); } } return ret; } void reiserfs_cache_bitmap_metadata(struct super_block *sb, struct buffer_head *bh, struct reiserfs_bitmap_info *info) { unsigned long *cur = (unsigned long *)(bh->b_data + bh->b_size); /* The first bit must ALWAYS be 1 */ if (!reiserfs_test_le_bit(0, (unsigned long *)bh->b_data)) reiserfs_error(sb, "reiserfs-2025", "bitmap block %lu is " "corrupted: first bit must be 1", bh->b_blocknr); info->free_count = 0; while (--cur >= (unsigned long *)bh->b_data) { /* 0 and ~0 are special, we can optimize for them */ if (*cur == 0) info->free_count += BITS_PER_LONG; else if (*cur != ~0L) /* A mix, investigate */ info->free_count += BITS_PER_LONG - hweight_long(*cur); } } struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb, unsigned int bitmap) { b_blocknr_t block = (sb->s_blocksize << 3) * bitmap; struct reiserfs_bitmap_info *info = SB_AP_BITMAP(sb) + bitmap; struct buffer_head *bh; /* Way old format filesystems had the bitmaps packed up front. * I doubt there are any of these left, but just in case... */ if (unlikely(test_bit(REISERFS_OLD_FORMAT, &(REISERFS_SB(sb)->s_properties)))) block = REISERFS_SB(sb)->s_sbh->b_blocknr + 1 + bitmap; else if (bitmap == 0) block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1; reiserfs_write_unlock(sb); bh = sb_bread(sb, block); reiserfs_write_lock(sb); if (bh == NULL) reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) " "reading failed", __func__, block); else { if (buffer_locked(bh)) { PROC_INFO_INC(sb, scan_bitmap.wait); reiserfs_write_unlock(sb); __wait_on_buffer(bh); reiserfs_write_lock(sb); } BUG_ON(!buffer_uptodate(bh)); BUG_ON(atomic_read(&bh->b_count) == 0); if (info->free_count == UINT_MAX) reiserfs_cache_bitmap_metadata(sb, bh, info); } return bh; } int reiserfs_init_bitmap_cache(struct super_block *sb) { struct reiserfs_bitmap_info *bitmap; unsigned int bmap_nr = reiserfs_bmap_count(sb); bitmap = vmalloc(sizeof(*bitmap) * bmap_nr); if (bitmap == NULL) return -ENOMEM; memset(bitmap, 0xff, sizeof(*bitmap) * bmap_nr); SB_AP_BITMAP(sb) = bitmap; return 0; } void reiserfs_free_bitmap_cache(struct super_block *sb) { if (SB_AP_BITMAP(sb)) { vfree(SB_AP_BITMAP(sb)); SB_AP_BITMAP(sb) = NULL; } }
gpl-2.0
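The bitmap allocator dumped above keys everything off a simple block-to-bit mapping: a block number splits into a bitmap-block index and a bit offset, with one bitmap block covering blocksize * 8 data blocks. The standalone sketch below is not part of bitmap.c; it reproduces the same shift-and-mask arithmetic as get_bit_address(), using a hypothetical 4 KiB block size and a hypothetical block number.

/* Illustrative sketch (not from the file above): map an absolute block
 * number to a (bitmap block, bit offset) pair.  blocksize_bits is log2
 * of the block size, so one bitmap block covers 1 << (blocksize_bits + 3)
 * data blocks.  All values here are examples. */
#include <stdio.h>

static void get_bit_address(unsigned int blocksize_bits,
                            unsigned long block,
                            unsigned int *bmap_nr,
                            unsigned int *offset)
{
        unsigned long bits_per_bitmap = 1UL << (blocksize_bits + 3);

        *bmap_nr = block >> (blocksize_bits + 3);  /* which bitmap block */
        *offset  = block & (bits_per_bitmap - 1);  /* bit within it      */
}

int main(void)
{
        unsigned int bmap, off;

        /* 4 KiB blocks: each bitmap block tracks 32768 data blocks. */
        get_bit_address(12, 100000UL, &bmap, &off);
        printf("block 100000 -> bitmap %u, bit %u\n", bmap, off);
        return 0;
}

Because the divisor is a power of two, the mapping costs one shift and one mask instead of a division, which is why the driver never stores a "blocks per bitmap" count separately.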
meefik/tinykernel-flo
drivers/staging/iio/accel/adis16209_ring.c
4879
3569
#include <linux/export.h> #include <linux/interrupt.h> #include <linux/mutex.h> #include <linux/kernel.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include "../iio.h" #include "../ring_sw.h" #include "../trigger_consumer.h" #include "adis16209.h" /** * adis16209_read_ring_data() read data registers which will be placed into ring * @dev: device associated with child of actual device (iio_dev or iio_trig) * @rx: somewhere to pass back the value read **/ static int adis16209_read_ring_data(struct device *dev, u8 *rx) { struct spi_message msg; struct iio_dev *indio_dev = dev_get_drvdata(dev); struct adis16209_state *st = iio_priv(indio_dev); struct spi_transfer xfers[ADIS16209_OUTPUTS + 1]; int ret; int i; mutex_lock(&st->buf_lock); spi_message_init(&msg); memset(xfers, 0, sizeof(xfers)); for (i = 0; i <= ADIS16209_OUTPUTS; i++) { xfers[i].bits_per_word = 8; xfers[i].cs_change = 1; xfers[i].len = 2; xfers[i].delay_usecs = 30; xfers[i].tx_buf = st->tx + 2 * i; st->tx[2 * i] = ADIS16209_READ_REG(ADIS16209_SUPPLY_OUT + 2 * i); st->tx[2 * i + 1] = 0; if (i >= 1) xfers[i].rx_buf = rx + 2 * (i - 1); spi_message_add_tail(&xfers[i], &msg); } ret = spi_sync(st->us, &msg); if (ret) dev_err(&st->us->dev, "problem when burst reading"); mutex_unlock(&st->buf_lock); return ret; } /* Whilst this makes a lot of calls to iio_sw_ring functions - it is to device * specific to be rolled into the core. */ static irqreturn_t adis16209_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct adis16209_state *st = iio_priv(indio_dev); struct iio_buffer *ring = indio_dev->buffer; int i = 0; s16 *data; size_t datasize = ring->access->get_bytes_per_datum(ring); data = kmalloc(datasize , GFP_KERNEL); if (data == NULL) { dev_err(&st->us->dev, "memory alloc failed in ring bh"); return -ENOMEM; } if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) && adis16209_read_ring_data(&indio_dev->dev, st->rx) >= 0) for (; i < bitmap_weight(indio_dev->active_scan_mask, indio_dev->masklength); i++) data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2])); /* Guaranteed to be aligned with 8 byte boundary */ if (ring->scan_timestamp) *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp; ring->access->store_to(ring, (u8 *)data, pf->timestamp); iio_trigger_notify_done(indio_dev->trig); kfree(data); return IRQ_HANDLED; } void adis16209_unconfigure_ring(struct iio_dev *indio_dev) { iio_dealloc_pollfunc(indio_dev->pollfunc); iio_sw_rb_free(indio_dev->buffer); } static const struct iio_buffer_setup_ops adis16209_ring_setup_ops = { .preenable = &iio_sw_buffer_preenable, .postenable = &iio_triggered_buffer_postenable, .predisable = &iio_triggered_buffer_predisable, }; int adis16209_configure_ring(struct iio_dev *indio_dev) { int ret = 0; struct iio_buffer *ring; ring = iio_sw_rb_allocate(indio_dev); if (!ring) { ret = -ENOMEM; return ret; } indio_dev->buffer = ring; ring->scan_timestamp = true; indio_dev->setup_ops = &adis16209_ring_setup_ops; indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time, &adis16209_trigger_handler, IRQF_ONESHOT, indio_dev, "%s_consumer%d", indio_dev->name, indio_dev->id); if (indio_dev->pollfunc == NULL) { ret = -ENOMEM; goto error_iio_sw_rb_free; } indio_dev->modes |= INDIO_BUFFER_TRIGGERED; return 0; error_iio_sw_rb_free: iio_sw_rb_free(indio_dev->buffer); return ret; }
gpl-2.0
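adis16209_read_ring_data() above chains ADIS16209_OUTPUTS + 1 SPI transfers because the device answers a register-read command during the following transfer: the reply to the command clocked out in transfer i arrives while transfer i + 1 is on the wire, so each transfer's rx_buf points one result slot behind its tx_buf. The sketch below is a simplified stand-in, not driver code; OUTPUTS and the slot numbering are hypothetical, and it only prints that one-transfer lag.

/* Illustrative sketch (not from the driver above): the burst-read
 * pattern where the result for command i is captured by transfer i+1,
 * hence one extra transfer and rx slots lagging tx slots by one. */
#include <stdio.h>

#define OUTPUTS 8               /* hypothetical number of output registers */

struct xfer {
        int tx_slot;            /* which read command goes out here        */
        int rx_slot;            /* which result slot is filled, -1 if none */
};

int main(void)
{
        struct xfer xfers[OUTPUTS + 1];
        int i;

        for (i = 0; i <= OUTPUTS; i++) {
                xfers[i].tx_slot = i;                       /* command i   */
                xfers[i].rx_slot = (i >= 1) ? i - 1 : -1;   /* lags by one */
        }

        for (i = 0; i <= OUTPUTS; i++)
                printf("xfer %d: tx cmd %d, rx result %d\n",
                       i, xfers[i].tx_slot, xfers[i].rx_slot);
        return 0;
}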
Octo-Kat/platform_kernel_samsung_d2-old
drivers/staging/iio/dds/ad9834.c
4879
12011
/* * AD9833/AD9834/AD9837/AD9838 SPI DDS driver * * Copyright 2010-2011 Analog Devices Inc. * * Licensed under the GPL-2. */ #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/list.h> #include <linux/spi/spi.h> #include <linux/regulator/consumer.h> #include <linux/err.h> #include <linux/module.h> #include <asm/div64.h> #include "../iio.h" #include "../sysfs.h" #include "dds.h" #include "ad9834.h" static unsigned int ad9834_calc_freqreg(unsigned long mclk, unsigned long fout) { unsigned long long freqreg = (u64) fout * (u64) (1 << AD9834_FREQ_BITS); do_div(freqreg, mclk); return freqreg; } static int ad9834_write_frequency(struct ad9834_state *st, unsigned long addr, unsigned long fout) { unsigned long regval; if (fout > (st->mclk / 2)) return -EINVAL; regval = ad9834_calc_freqreg(st->mclk, fout); st->freq_data[0] = cpu_to_be16(addr | (regval & RES_MASK(AD9834_FREQ_BITS / 2))); st->freq_data[1] = cpu_to_be16(addr | ((regval >> (AD9834_FREQ_BITS / 2)) & RES_MASK(AD9834_FREQ_BITS / 2))); return spi_sync(st->spi, &st->freq_msg); } static int ad9834_write_phase(struct ad9834_state *st, unsigned long addr, unsigned long phase) { if (phase > (1 << AD9834_PHASE_BITS)) return -EINVAL; st->data = cpu_to_be16(addr | phase); return spi_sync(st->spi, &st->msg); } static ssize_t ad9834_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad9834_state *st = iio_priv(indio_dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int ret; long val; ret = strict_strtoul(buf, 10, &val); if (ret) goto error_ret; mutex_lock(&indio_dev->mlock); switch ((u32) this_attr->address) { case AD9834_REG_FREQ0: case AD9834_REG_FREQ1: ret = ad9834_write_frequency(st, this_attr->address, val); break; case AD9834_REG_PHASE0: case AD9834_REG_PHASE1: ret = ad9834_write_phase(st, this_attr->address, val); break; case AD9834_OPBITEN: if (st->control & AD9834_MODE) { ret = -EINVAL; /* AD9843 reserved mode */ break; } if (val) st->control |= AD9834_OPBITEN; else st->control &= ~AD9834_OPBITEN; st->data = cpu_to_be16(AD9834_REG_CMD | st->control); ret = spi_sync(st->spi, &st->msg); break; case AD9834_PIN_SW: if (val) st->control |= AD9834_PIN_SW; else st->control &= ~AD9834_PIN_SW; st->data = cpu_to_be16(AD9834_REG_CMD | st->control); ret = spi_sync(st->spi, &st->msg); break; case AD9834_FSEL: case AD9834_PSEL: if (val == 0) st->control &= ~(this_attr->address | AD9834_PIN_SW); else if (val == 1) { st->control |= this_attr->address; st->control &= ~AD9834_PIN_SW; } else { ret = -EINVAL; break; } st->data = cpu_to_be16(AD9834_REG_CMD | st->control); ret = spi_sync(st->spi, &st->msg); break; case AD9834_RESET: if (val) st->control &= ~AD9834_RESET; else st->control |= AD9834_RESET; st->data = cpu_to_be16(AD9834_REG_CMD | st->control); ret = spi_sync(st->spi, &st->msg); break; default: ret = -ENODEV; } mutex_unlock(&indio_dev->mlock); error_ret: return ret ? 
ret : len; } static ssize_t ad9834_store_wavetype(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad9834_state *st = iio_priv(indio_dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int ret = 0; bool is_ad9833_7 = (st->devid == ID_AD9833) || (st->devid == ID_AD9837); mutex_lock(&indio_dev->mlock); switch ((u32) this_attr->address) { case 0: if (sysfs_streq(buf, "sine")) { st->control &= ~AD9834_MODE; if (is_ad9833_7) st->control &= ~AD9834_OPBITEN; } else if (sysfs_streq(buf, "triangle")) { if (is_ad9833_7) { st->control &= ~AD9834_OPBITEN; st->control |= AD9834_MODE; } else if (st->control & AD9834_OPBITEN) { ret = -EINVAL; /* AD9843 reserved mode */ } else { st->control |= AD9834_MODE; } } else if (is_ad9833_7 && sysfs_streq(buf, "square")) { st->control &= ~AD9834_MODE; st->control |= AD9834_OPBITEN; } else { ret = -EINVAL; } break; case 1: if (sysfs_streq(buf, "square") && !(st->control & AD9834_MODE)) { st->control &= ~AD9834_MODE; st->control |= AD9834_OPBITEN; } else { ret = -EINVAL; } break; default: ret = -EINVAL; break; } if (!ret) { st->data = cpu_to_be16(AD9834_REG_CMD | st->control); ret = spi_sync(st->spi, &st->msg); } mutex_unlock(&indio_dev->mlock); return ret ? ret : len; } static ssize_t ad9834_show_out0_wavetype_available(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad9834_state *st = iio_priv(indio_dev); char *str; if ((st->devid == ID_AD9833) || (st->devid == ID_AD9837)) str = "sine triangle square"; else if (st->control & AD9834_OPBITEN) str = "sine"; else str = "sine triangle"; return sprintf(buf, "%s\n", str); } static IIO_DEVICE_ATTR(dds0_out0_wavetype_available, S_IRUGO, ad9834_show_out0_wavetype_available, NULL, 0); static ssize_t ad9834_show_out1_wavetype_available(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad9834_state *st = iio_priv(indio_dev); char *str; if (st->control & AD9834_MODE) str = ""; else str = "square"; return sprintf(buf, "%s\n", str); } static IIO_DEVICE_ATTR(dds0_out1_wavetype_available, S_IRUGO, ad9834_show_out1_wavetype_available, NULL, 0); /** * see dds.h for further information */ static IIO_DEV_ATTR_FREQ(0, 0, S_IWUSR, NULL, ad9834_write, AD9834_REG_FREQ0); static IIO_DEV_ATTR_FREQ(0, 1, S_IWUSR, NULL, ad9834_write, AD9834_REG_FREQ1); static IIO_DEV_ATTR_FREQSYMBOL(0, S_IWUSR, NULL, ad9834_write, AD9834_FSEL); static IIO_CONST_ATTR_FREQ_SCALE(0, "1"); /* 1Hz */ static IIO_DEV_ATTR_PHASE(0, 0, S_IWUSR, NULL, ad9834_write, AD9834_REG_PHASE0); static IIO_DEV_ATTR_PHASE(0, 1, S_IWUSR, NULL, ad9834_write, AD9834_REG_PHASE1); static IIO_DEV_ATTR_PHASESYMBOL(0, S_IWUSR, NULL, ad9834_write, AD9834_PSEL); static IIO_CONST_ATTR_PHASE_SCALE(0, "0.0015339808"); /* 2PI/2^12 rad*/ static IIO_DEV_ATTR_PINCONTROL_EN(0, S_IWUSR, NULL, ad9834_write, AD9834_PIN_SW); static IIO_DEV_ATTR_OUT_ENABLE(0, S_IWUSR, NULL, ad9834_write, AD9834_RESET); static IIO_DEV_ATTR_OUTY_ENABLE(0, 1, S_IWUSR, NULL, ad9834_write, AD9834_OPBITEN); static IIO_DEV_ATTR_OUT_WAVETYPE(0, 0, ad9834_store_wavetype, 0); static IIO_DEV_ATTR_OUT_WAVETYPE(0, 1, ad9834_store_wavetype, 1); static struct attribute *ad9834_attributes[] = { &iio_dev_attr_dds0_freq0.dev_attr.attr, &iio_dev_attr_dds0_freq1.dev_attr.attr, &iio_const_attr_dds0_freq_scale.dev_attr.attr, &iio_dev_attr_dds0_phase0.dev_attr.attr, 
&iio_dev_attr_dds0_phase1.dev_attr.attr, &iio_const_attr_dds0_phase_scale.dev_attr.attr, &iio_dev_attr_dds0_pincontrol_en.dev_attr.attr, &iio_dev_attr_dds0_freqsymbol.dev_attr.attr, &iio_dev_attr_dds0_phasesymbol.dev_attr.attr, &iio_dev_attr_dds0_out_enable.dev_attr.attr, &iio_dev_attr_dds0_out1_enable.dev_attr.attr, &iio_dev_attr_dds0_out0_wavetype.dev_attr.attr, &iio_dev_attr_dds0_out1_wavetype.dev_attr.attr, &iio_dev_attr_dds0_out0_wavetype_available.dev_attr.attr, &iio_dev_attr_dds0_out1_wavetype_available.dev_attr.attr, NULL, }; static struct attribute *ad9833_attributes[] = { &iio_dev_attr_dds0_freq0.dev_attr.attr, &iio_dev_attr_dds0_freq1.dev_attr.attr, &iio_const_attr_dds0_freq_scale.dev_attr.attr, &iio_dev_attr_dds0_phase0.dev_attr.attr, &iio_dev_attr_dds0_phase1.dev_attr.attr, &iio_const_attr_dds0_phase_scale.dev_attr.attr, &iio_dev_attr_dds0_freqsymbol.dev_attr.attr, &iio_dev_attr_dds0_phasesymbol.dev_attr.attr, &iio_dev_attr_dds0_out_enable.dev_attr.attr, &iio_dev_attr_dds0_out0_wavetype.dev_attr.attr, &iio_dev_attr_dds0_out0_wavetype_available.dev_attr.attr, NULL, }; static const struct attribute_group ad9834_attribute_group = { .attrs = ad9834_attributes, }; static const struct attribute_group ad9833_attribute_group = { .attrs = ad9833_attributes, }; static const struct iio_info ad9834_info = { .attrs = &ad9834_attribute_group, .driver_module = THIS_MODULE, }; static const struct iio_info ad9833_info = { .attrs = &ad9833_attribute_group, .driver_module = THIS_MODULE, }; static int __devinit ad9834_probe(struct spi_device *spi) { struct ad9834_platform_data *pdata = spi->dev.platform_data; struct ad9834_state *st; struct iio_dev *indio_dev; struct regulator *reg; int ret; if (!pdata) { dev_dbg(&spi->dev, "no platform data?\n"); return -ENODEV; } reg = regulator_get(&spi->dev, "vcc"); if (!IS_ERR(reg)) { ret = regulator_enable(reg); if (ret) goto error_put_reg; } indio_dev = iio_allocate_device(sizeof(*st)); if (indio_dev == NULL) { ret = -ENOMEM; goto error_disable_reg; } spi_set_drvdata(spi, indio_dev); st = iio_priv(indio_dev); st->mclk = pdata->mclk; st->spi = spi; st->devid = spi_get_device_id(spi)->driver_data; st->reg = reg; indio_dev->dev.parent = &spi->dev; indio_dev->name = spi_get_device_id(spi)->name; switch (st->devid) { case ID_AD9833: case ID_AD9837: indio_dev->info = &ad9833_info; break; default: indio_dev->info = &ad9834_info; break; } indio_dev->modes = INDIO_DIRECT_MODE; /* Setup default messages */ st->xfer.tx_buf = &st->data; st->xfer.len = 2; spi_message_init(&st->msg); spi_message_add_tail(&st->xfer, &st->msg); st->freq_xfer[0].tx_buf = &st->freq_data[0]; st->freq_xfer[0].len = 2; st->freq_xfer[0].cs_change = 1; st->freq_xfer[1].tx_buf = &st->freq_data[1]; st->freq_xfer[1].len = 2; spi_message_init(&st->freq_msg); spi_message_add_tail(&st->freq_xfer[0], &st->freq_msg); spi_message_add_tail(&st->freq_xfer[1], &st->freq_msg); st->control = AD9834_B28 | AD9834_RESET; if (!pdata->en_div2) st->control |= AD9834_DIV2; if (!pdata->en_signbit_msb_out && (st->devid == ID_AD9834)) st->control |= AD9834_SIGN_PIB; st->data = cpu_to_be16(AD9834_REG_CMD | st->control); ret = spi_sync(st->spi, &st->msg); if (ret) { dev_err(&spi->dev, "device init failed\n"); goto error_free_device; } ret = ad9834_write_frequency(st, AD9834_REG_FREQ0, pdata->freq0); if (ret) goto error_free_device; ret = ad9834_write_frequency(st, AD9834_REG_FREQ1, pdata->freq1); if (ret) goto error_free_device; ret = ad9834_write_phase(st, AD9834_REG_PHASE0, pdata->phase0); if (ret) goto 
error_free_device; ret = ad9834_write_phase(st, AD9834_REG_PHASE1, pdata->phase1); if (ret) goto error_free_device; ret = iio_device_register(indio_dev); if (ret) goto error_free_device; return 0; error_free_device: iio_free_device(indio_dev); error_disable_reg: if (!IS_ERR(reg)) regulator_disable(reg); error_put_reg: if (!IS_ERR(reg)) regulator_put(reg); return ret; } static int __devexit ad9834_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad9834_state *st = iio_priv(indio_dev); iio_device_unregister(indio_dev); if (!IS_ERR(st->reg)) { regulator_disable(st->reg); regulator_put(st->reg); } iio_free_device(indio_dev); return 0; } static const struct spi_device_id ad9834_id[] = { {"ad9833", ID_AD9833}, {"ad9834", ID_AD9834}, {"ad9837", ID_AD9837}, {"ad9838", ID_AD9838}, {} }; MODULE_DEVICE_TABLE(spi, ad9834_id); static struct spi_driver ad9834_driver = { .driver = { .name = "ad9834", .owner = THIS_MODULE, }, .probe = ad9834_probe, .remove = __devexit_p(ad9834_remove), .id_table = ad9834_id, }; module_spi_driver(ad9834_driver); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("Analog Devices AD9833/AD9834/AD9837/AD9838 DDS"); MODULE_LICENSE("GPL v2");
gpl-2.0
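ad9834_calc_freqreg() above converts a requested output frequency into the DDS tuning word fout * 2^AD9834_FREQ_BITS / mclk, and ad9834_write_frequency() then splits it into two half-width register writes, LSBs first. The sketch below is not driver code: it redoes that arithmetic with FREQ_BITS = 28, matching this part family's 28-bit frequency register, and the 75 MHz / 1 kHz figures are example values only.

/* Illustrative sketch (not from the driver above): compute an AD9834-style
 * frequency tuning word and split it into two 14-bit halves. */
#include <stdio.h>
#include <stdint.h>

#define FREQ_BITS 28
#define HALF_MASK ((1u << (FREQ_BITS / 2)) - 1)

int main(void)
{
        uint64_t mclk = 75000000;   /* 75 MHz master clock (example) */
        uint64_t fout = 1000;       /* 1 kHz requested output        */
        uint64_t regval = (fout << FREQ_BITS) / mclk;

        printf("tuning word 0x%07llx: lsb half 0x%04llx, msb half 0x%04llx\n",
               (unsigned long long)regval,
               (unsigned long long)(regval & HALF_MASK),
               (unsigned long long)((regval >> (FREQ_BITS / 2)) & HALF_MASK));
        return 0;
}

The driver also rejects fout > mclk / 2, since a DDS cannot synthesize above the Nyquist rate of its master clock.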
boa19861105/android_442_KitKat_kernel_htc_B2_UHL
drivers/staging/iio/adc/max1363_ring.c
4879
3495
/* * Copyright (C) 2008 Jonathan Cameron * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * max1363_ring.c */ #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/i2c.h> #include <linux/bitops.h> #include "../iio.h" #include "../buffer.h" #include "../ring_sw.h" #include "../trigger_consumer.h" #include "max1363.h" int max1363_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *scan_mask) { struct max1363_state *st = iio_priv(indio_dev); /* * Need to figure out the current mode based upon the requested * scan mask in iio_dev */ st->current_mode = max1363_match_mode(scan_mask, st->chip_info); if (!st->current_mode) return -EINVAL; max1363_set_scan_mode(st); return 0; } static irqreturn_t max1363_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct max1363_state *st = iio_priv(indio_dev); s64 time_ns; __u8 *rxbuf; int b_sent; size_t d_size; unsigned long numvals = bitmap_weight(st->current_mode->modemask, MAX1363_MAX_CHANNELS); /* Ensure the timestamp is 8 byte aligned */ if (st->chip_info->bits != 8) d_size = numvals*2; else d_size = numvals; if (indio_dev->buffer->scan_timestamp) { d_size += sizeof(s64); if (d_size % sizeof(s64)) d_size += sizeof(s64) - (d_size % sizeof(s64)); } /* Monitor mode prevents reading. Whilst not currently implemented * might as well have this test in here in the meantime as it does * no harm. */ if (numvals == 0) return IRQ_HANDLED; rxbuf = kmalloc(d_size, GFP_KERNEL); if (rxbuf == NULL) return -ENOMEM; if (st->chip_info->bits != 8) b_sent = i2c_master_recv(st->client, rxbuf, numvals*2); else b_sent = i2c_master_recv(st->client, rxbuf, numvals); if (b_sent < 0) goto done; time_ns = iio_get_time_ns(); if (indio_dev->buffer->scan_timestamp) memcpy(rxbuf + d_size - sizeof(s64), &time_ns, sizeof(time_ns)); iio_push_to_buffer(indio_dev->buffer, rxbuf, time_ns); done: iio_trigger_notify_done(indio_dev->trig); kfree(rxbuf); return IRQ_HANDLED; } static const struct iio_buffer_setup_ops max1363_ring_setup_ops = { .postenable = &iio_triggered_buffer_postenable, .preenable = &iio_sw_buffer_preenable, .predisable = &iio_triggered_buffer_predisable, }; int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev) { struct max1363_state *st = iio_priv(indio_dev); int ret = 0; indio_dev->buffer = iio_sw_rb_allocate(indio_dev); if (!indio_dev->buffer) { ret = -ENOMEM; goto error_ret; } indio_dev->pollfunc = iio_alloc_pollfunc(NULL, &max1363_trigger_handler, IRQF_ONESHOT, indio_dev, "%s_consumer%d", st->client->name, indio_dev->id); if (indio_dev->pollfunc == NULL) { ret = -ENOMEM; goto error_deallocate_sw_rb; } /* Ring buffer functions - here trigger setup related */ indio_dev->setup_ops = &max1363_ring_setup_ops; /* Flag that polled ring buffering is possible */ indio_dev->modes |= INDIO_BUFFER_TRIGGERED; return 0; error_deallocate_sw_rb: iio_sw_rb_free(indio_dev->buffer); error_ret: return ret; } void max1363_ring_cleanup(struct iio_dev *indio_dev) { /* ensure that the trigger has been detached */ iio_dealloc_pollfunc(indio_dev->pollfunc); iio_sw_rb_free(indio_dev->buffer); }
gpl-2.0
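max1363_trigger_handler() above sizes its scan buffer so the s64 timestamp appended after the samples lands on an 8-byte boundary: it adds sizeof(s64) and then pads the total up to the next multiple of sizeof(s64). A minimal sketch of that padding rule follows; the sample count and 2-bytes-per-sample figure are example inputs only, not taken from the driver.

/* Illustrative sketch (not from the driver above): buffer sizing that
 * keeps a trailing 64-bit timestamp naturally aligned. */
#include <stdio.h>
#include <stdint.h>

static size_t scan_size(size_t numvals, size_t bytes_per_sample, int timestamp)
{
        size_t d_size = numvals * bytes_per_sample;

        if (timestamp) {
                d_size += sizeof(int64_t);
                if (d_size % sizeof(int64_t))   /* round up to 8 bytes */
                        d_size += sizeof(int64_t) - (d_size % sizeof(int64_t));
        }
        return d_size;
}

int main(void)
{
        /* three 2-byte samples (6) + timestamp (8) = 14, padded to 16 */
        printf("%zu\n", scan_size(3, 2, 1));
        return 0;
}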
gromikakao/lge-kernel-gproj
drivers/net/ethernet/fujitsu/fmvj18x_cs.c
5135
34182
/*====================================================================== fmvj18x_cs.c 2.8 2002/03/23 A fmvj18x (and its compatibles) PCMCIA client driver Contributed by Shingo Fujimoto, shingo@flab.fujitsu.co.jp TDK LAK-CD021 and CONTEC C-NET(PC)C support added by Nobuhiro Katayama, kata-n@po.iijnet.or.jp The PCMCIA client code is based on code written by David Hinds. Network code is based on the "FMV-18x driver" by Yutaka TAMIYA but is actually largely Donald Becker's AT1700 driver, which carries the following attribution: Written 1993-94 by Donald Becker. Copyright 1993 United States Government as represented by the Director, National Security Agency. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. The author may be reached as becker@scyld.com, or C/O Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 ======================================================================*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DRV_NAME "fmvj18x_cs" #define DRV_VERSION "2.9" #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/ioport.h> #include <linux/crc32.h> #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> #include <asm/uaccess.h> #include <asm/io.h> /*====================================================================*/ /* Module parameters */ MODULE_DESCRIPTION("fmvj18x and compatible PCMCIA ethernet driver"); MODULE_LICENSE("GPL"); #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) /* SRAM configuration */ /* 0:4KB*2 TX buffer else:8KB*2 TX buffer */ INT_MODULE_PARM(sram_config, 0); /*====================================================================*/ /* PCMCIA event handlers */ static int fmvj18x_config(struct pcmcia_device *link); static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id); static int fmvj18x_setup_mfc(struct pcmcia_device *link); static void fmvj18x_release(struct pcmcia_device *link); static void fmvj18x_detach(struct pcmcia_device *p_dev); /* LAN controller(MBH86960A) specific routines */ static int fjn_config(struct net_device *dev, struct ifmap *map); static int fjn_open(struct net_device *dev); static int fjn_close(struct net_device *dev); static netdev_tx_t fjn_start_xmit(struct sk_buff *skb, struct net_device *dev); static irqreturn_t fjn_interrupt(int irq, void *dev_id); static void fjn_rx(struct net_device *dev); static void fjn_reset(struct net_device *dev); static void set_rx_mode(struct net_device *dev); static void fjn_tx_timeout(struct net_device *dev); static const struct ethtool_ops netdev_ethtool_ops; /* card type */ typedef enum { MBH10302, MBH10304, TDK, CONTEC, LA501, UNGERMANN, XXX10304, NEC, KME } cardtype_t; /* driver specific data structure */ typedef struct local_info_t { struct pcmcia_device *p_dev; long open_time; uint tx_started:1; uint tx_queue; u_short tx_queue_len; cardtype_t cardtype; u_short sent; u_char __iomem *base; } local_info_t; #define MC_FILTERBREAK 64 /*====================================================================*/ /* ioport offset from the base address */ #define TX_STATUS 0 /* transmit status 
register */ #define RX_STATUS 1 /* receive status register */ #define TX_INTR 2 /* transmit interrupt mask register */ #define RX_INTR 3 /* receive interrupt mask register */ #define TX_MODE 4 /* transmit mode register */ #define RX_MODE 5 /* receive mode register */ #define CONFIG_0 6 /* configuration register 0 */ #define CONFIG_1 7 /* configuration register 1 */ #define NODE_ID 8 /* node ID register (bank 0) */ #define MAR_ADR 8 /* multicast address registers (bank 1) */ #define DATAPORT 8 /* buffer mem port registers (bank 2) */ #define TX_START 10 /* transmit start register */ #define COL_CTRL 11 /* 16 collision control register */ #define BMPR12 12 /* reserved */ #define BMPR13 13 /* reserved */ #define RX_SKIP 14 /* skip received packet register */ #define LAN_CTRL 16 /* LAN card control register */ #define MAC_ID 0x1a /* hardware address */ #define UNGERMANN_MAC_ID 0x18 /* UNGERMANN-BASS hardware address */ /* control bits */ #define ENA_TMT_OK 0x80 #define ENA_TMT_REC 0x20 #define ENA_COL 0x04 #define ENA_16_COL 0x02 #define ENA_TBUS_ERR 0x01 #define ENA_PKT_RDY 0x80 #define ENA_BUS_ERR 0x40 #define ENA_LEN_ERR 0x08 #define ENA_ALG_ERR 0x04 #define ENA_CRC_ERR 0x02 #define ENA_OVR_FLO 0x01 /* flags */ #define F_TMT_RDY 0x80 /* can accept new packet */ #define F_NET_BSY 0x40 /* carrier is detected */ #define F_TMT_OK 0x20 /* send packet successfully */ #define F_SRT_PKT 0x10 /* short packet error */ #define F_COL_ERR 0x04 /* collision error */ #define F_16_COL 0x02 /* 16 collision error */ #define F_TBUS_ERR 0x01 /* bus read error */ #define F_PKT_RDY 0x80 /* packet(s) in buffer */ #define F_BUS_ERR 0x40 /* bus read error */ #define F_LEN_ERR 0x08 /* short packet */ #define F_ALG_ERR 0x04 /* frame error */ #define F_CRC_ERR 0x02 /* CRC error */ #define F_OVR_FLO 0x01 /* overflow error */ #define F_BUF_EMP 0x40 /* receive buffer is empty */ #define F_SKP_PKT 0x05 /* drop packet in buffer */ /* default bitmaps */ #define D_TX_INTR ( ENA_TMT_OK ) #define D_RX_INTR ( ENA_PKT_RDY | ENA_LEN_ERR \ | ENA_ALG_ERR | ENA_CRC_ERR | ENA_OVR_FLO ) #define TX_STAT_M ( F_TMT_RDY ) #define RX_STAT_M ( F_PKT_RDY | F_LEN_ERR \ | F_ALG_ERR | F_CRC_ERR | F_OVR_FLO ) /* commands */ #define D_TX_MODE 0x06 /* no tests, detect carrier */ #define ID_MATCHED 0x02 /* (RX_MODE) */ #define RECV_ALL 0x03 /* (RX_MODE) */ #define CONFIG0_DFL 0x5a /* 16bit bus, 4K x 2 Tx queues */ #define CONFIG0_DFL_1 0x5e /* 16bit bus, 8K x 2 Tx queues */ #define CONFIG0_RST 0xda /* Data Link Controller off (CONFIG_0) */ #define CONFIG0_RST_1 0xde /* Data Link Controller off (CONFIG_0) */ #define BANK_0 0xa0 /* bank 0 (CONFIG_1) */ #define BANK_1 0xa4 /* bank 1 (CONFIG_1) */ #define BANK_2 0xa8 /* bank 2 (CONFIG_1) */ #define CHIP_OFF 0x80 /* contrl chip power off (CONFIG_1) */ #define DO_TX 0x80 /* do transmit packet */ #define SEND_PKT 0x81 /* send a packet */ #define AUTO_MODE 0x07 /* Auto skip packet on 16 col detected */ #define MANU_MODE 0x03 /* Stop and skip packet on 16 col */ #define TDK_AUTO_MODE 0x47 /* Auto skip packet on 16 col detected */ #define TDK_MANU_MODE 0x43 /* Stop and skip packet on 16 col */ #define INTR_OFF 0x0d /* LAN controller ignores interrupts */ #define INTR_ON 0x1d /* LAN controller will catch interrupts */ #define TX_TIMEOUT ((400*HZ)/1000) #define BANK_0U 0x20 /* bank 0 (CONFIG_1) */ #define BANK_1U 0x24 /* bank 1 (CONFIG_1) */ #define BANK_2U 0x28 /* bank 2 (CONFIG_1) */ static const struct net_device_ops fjn_netdev_ops = { .ndo_open = fjn_open, .ndo_stop = fjn_close, .ndo_start_xmit = 
fjn_start_xmit, .ndo_tx_timeout = fjn_tx_timeout, .ndo_set_config = fjn_config, .ndo_set_rx_mode = set_rx_mode, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int fmvj18x_probe(struct pcmcia_device *link) { local_info_t *lp; struct net_device *dev; dev_dbg(&link->dev, "fmvj18x_attach()\n"); /* Make up a FMVJ18x specific data structure */ dev = alloc_etherdev(sizeof(local_info_t)); if (!dev) return -ENOMEM; lp = netdev_priv(dev); link->priv = dev; lp->p_dev = link; lp->base = NULL; /* The io structure describes IO port mapping */ link->resource[0]->end = 32; link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; /* General socket configuration */ link->config_flags |= CONF_ENABLE_IRQ; dev->netdev_ops = &fjn_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); return fmvj18x_config(link); } /* fmvj18x_attach */ /*====================================================================*/ static void fmvj18x_detach(struct pcmcia_device *link) { struct net_device *dev = link->priv; dev_dbg(&link->dev, "fmvj18x_detach\n"); unregister_netdev(dev); fmvj18x_release(link); free_netdev(dev); } /* fmvj18x_detach */ /*====================================================================*/ static int mfc_try_io_port(struct pcmcia_device *link) { int i, ret; static const unsigned int serial_base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; for (i = 0; i < 5; i++) { link->resource[1]->start = serial_base[i]; link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; if (link->resource[1]->start == 0) { link->resource[1]->end = 0; pr_notice("out of resource for serial\n"); } ret = pcmcia_request_io(link); if (ret == 0) return ret; } return ret; } static int ungermann_try_io_port(struct pcmcia_device *link) { int ret; unsigned int ioaddr; /* Ungermann-Bass Access/CARD accepts 0x300,0x320,0x340,0x360 0x380,0x3c0 only for ioport. */ for (ioaddr = 0x300; ioaddr < 0x3e0; ioaddr += 0x20) { link->resource[0]->start = ioaddr; ret = pcmcia_request_io(link); if (ret == 0) { /* calculate ConfigIndex value */ link->config_index = ((link->resource[0]->start & 0x0f0) >> 3) | 0x22; return ret; } } return ret; /* RequestIO failed */ } static int fmvj18x_ioprobe(struct pcmcia_device *p_dev, void *priv_data) { return 0; /* strange, but that's what the code did already before... */ } static int fmvj18x_config(struct pcmcia_device *link) { struct net_device *dev = link->priv; local_info_t *lp = netdev_priv(dev); int i, ret; unsigned int ioaddr; cardtype_t cardtype; char *card_name = "unknown"; u8 *buf; size_t len; u_char buggybuf[32]; dev_dbg(&link->dev, "fmvj18x_config\n"); link->io_lines = 5; len = pcmcia_get_tuple(link, CISTPL_FUNCE, &buf); kfree(buf); if (len) { /* Yes, I have CISTPL_FUNCE. 
Let's check CISTPL_MANFID */ ret = pcmcia_loop_config(link, fmvj18x_ioprobe, NULL); if (ret != 0) goto failed; switch (link->manf_id) { case MANFID_TDK: cardtype = TDK; if (link->card_id == PRODID_TDK_GN3410 || link->card_id == PRODID_TDK_NP9610 || link->card_id == PRODID_TDK_MN3200) { /* MultiFunction Card */ link->config_base = 0x800; link->config_index = 0x47; link->resource[1]->end = 8; } break; case MANFID_NEC: cardtype = NEC; /* MultiFunction Card */ link->config_base = 0x800; link->config_index = 0x47; link->resource[1]->end = 8; break; case MANFID_KME: cardtype = KME; /* MultiFunction Card */ link->config_base = 0x800; link->config_index = 0x47; link->resource[1]->end = 8; break; case MANFID_CONTEC: cardtype = CONTEC; break; case MANFID_FUJITSU: if (link->config_base == 0x0fe0) cardtype = MBH10302; else if (link->card_id == PRODID_FUJITSU_MBH10302) /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302), but these are MBH10304 based cards. */ cardtype = MBH10304; else if (link->card_id == PRODID_FUJITSU_MBH10304) cardtype = MBH10304; else cardtype = LA501; break; default: cardtype = MBH10304; } } else { /* old type card */ switch (link->manf_id) { case MANFID_FUJITSU: if (link->card_id == PRODID_FUJITSU_MBH10304) { cardtype = XXX10304; /* MBH10304 with buggy CIS */ link->config_index = 0x20; } else { cardtype = MBH10302; /* NextCom NC5310, etc. */ link->config_index = 1; } break; case MANFID_UNGERMANN: cardtype = UNGERMANN; break; default: cardtype = MBH10302; link->config_index = 1; } } if (link->resource[1]->end != 0) { ret = mfc_try_io_port(link); if (ret != 0) goto failed; } else if (cardtype == UNGERMANN) { ret = ungermann_try_io_port(link); if (ret != 0) goto failed; } else { ret = pcmcia_request_io(link); if (ret) goto failed; } ret = pcmcia_request_irq(link, fjn_interrupt); if (ret) goto failed; ret = pcmcia_enable_device(link); if (ret) goto failed; dev->irq = link->irq; dev->base_addr = link->resource[0]->start; if (resource_size(link->resource[1]) != 0) { ret = fmvj18x_setup_mfc(link); if (ret != 0) goto failed; } ioaddr = dev->base_addr; /* Reset controller */ if (sram_config == 0) outb(CONFIG0_RST, ioaddr + CONFIG_0); else outb(CONFIG0_RST_1, ioaddr + CONFIG_0); /* Power On chip and select bank 0 */ if (cardtype == MBH10302) outb(BANK_0, ioaddr + CONFIG_1); else outb(BANK_0U, ioaddr + CONFIG_1); /* Set hardware address */ switch (cardtype) { case MBH10304: case TDK: case LA501: case CONTEC: case NEC: case KME: if (cardtype == MBH10304) { card_name = "FMV-J182"; len = pcmcia_get_tuple(link, CISTPL_FUNCE, &buf); if (len < 11) { kfree(buf); goto failed; } /* Read MACID from CIS; the MAC starts at byte 5 of the tuple */ for (i = 5; i < 11; i++) dev->dev_addr[i-5] = buf[i]; kfree(buf); } else { if (pcmcia_get_mac_from_cis(link, dev)) goto failed; if( cardtype == TDK ) { card_name = "TDK LAK-CD021"; } else if( cardtype == LA501 ) { card_name = "LA501"; } else if( cardtype == NEC ) { card_name = "PK-UG-J001"; } else if( cardtype == KME ) { card_name = "Panasonic"; } else { card_name = "C-NET(PC)C"; } } break; case UNGERMANN: /* Read MACID from register */ for (i = 0; i < 6; i++) dev->dev_addr[i] = inb(ioaddr + UNGERMANN_MAC_ID + i); card_name = "Access/CARD"; break; case XXX10304: /* Read MACID from Buggy CIS */ if (fmvj18x_get_hwinfo(link, buggybuf) == -1) { pr_notice("unable to read hardware net address\n"); goto failed; } for (i = 0 ; i < 6; i++) { dev->dev_addr[i] = buggybuf[i]; } card_name = "FMV-J182"; break; case MBH10302: default: /* Read MACID from register */ for (i = 0; i < 6; i++) dev->dev_addr[i] = 
inb(ioaddr + MAC_ID + i); card_name = "FMV-J181"; break; } lp->cardtype = cardtype; SET_NETDEV_DEV(dev, &link->dev); if (register_netdev(dev) != 0) { pr_notice("register_netdev() failed\n"); goto failed; } /* print current configuration */ netdev_info(dev, "%s, sram %s, port %#3lx, irq %d, hw_addr %pM\n", card_name, sram_config == 0 ? "4K TX*2" : "8K TX*2", dev->base_addr, dev->irq, dev->dev_addr); return 0; failed: fmvj18x_release(link); return -ENODEV; } /* fmvj18x_config */ /*====================================================================*/ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id) { u_char __iomem *base; int i, j; /* Allocate a small memory window */ link->resource[2]->flags |= WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; link->resource[2]->start = 0; link->resource[2]->end = 0; i = pcmcia_request_window(link, link->resource[2], 0); if (i != 0) return -1; base = ioremap(link->resource[2]->start, resource_size(link->resource[2])); pcmcia_map_mem_page(link, link->resource[2], 0); /* * MBH10304 CISTPL_FUNCE_LAN_NODE_ID format * 22 0d xx xx xx 04 06 yy yy yy yy yy yy ff * 'xx' is garbage. * 'yy' is MAC address. */ for (i = 0; i < 0x200; i++) { if (readb(base+i*2) == 0x22) { if (readb(base+(i-1)*2) == 0xff && readb(base+(i+5)*2) == 0x04 && readb(base+(i+6)*2) == 0x06 && readb(base+(i+13)*2) == 0xff) break; } } if (i != 0x200) { for (j = 0 ; j < 6; j++,i++) { node_id[j] = readb(base+(i+7)*2); } } iounmap(base); j = pcmcia_release_window(link, link->resource[2]); return (i != 0x200) ? 0 : -1; } /* fmvj18x_get_hwinfo */ /*====================================================================*/ static int fmvj18x_setup_mfc(struct pcmcia_device *link) { int i; struct net_device *dev = link->priv; unsigned int ioaddr; local_info_t *lp = netdev_priv(dev); /* Allocate a small memory window */ link->resource[3]->flags = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; link->resource[3]->start = link->resource[3]->end = 0; i = pcmcia_request_window(link, link->resource[3], 0); if (i != 0) return -1; lp->base = ioremap(link->resource[3]->start, resource_size(link->resource[3])); if (lp->base == NULL) { netdev_notice(dev, "ioremap failed\n"); return -1; } i = pcmcia_map_mem_page(link, link->resource[3], 0); if (i != 0) { iounmap(lp->base); lp->base = NULL; return -1; } ioaddr = dev->base_addr; writeb(0x47, lp->base+0x800); /* Config Option Register of LAN */ writeb(0x0, lp->base+0x802); /* Config and Status Register */ writeb(ioaddr & 0xff, lp->base+0x80a); /* I/O Base(Low) of LAN */ writeb((ioaddr >> 8) & 0xff, lp->base+0x80c); /* I/O Base(High) of LAN */ writeb(0x45, lp->base+0x820); /* Config Option Register of Modem */ writeb(0x8, lp->base+0x822); /* Config and Status Register */ return 0; } /*====================================================================*/ static void fmvj18x_release(struct pcmcia_device *link) { struct net_device *dev = link->priv; local_info_t *lp = netdev_priv(dev); u_char __iomem *tmp; dev_dbg(&link->dev, "fmvj18x_release\n"); if (lp->base != NULL) { tmp = lp->base; lp->base = NULL; /* set NULL before iounmap */ iounmap(tmp); } pcmcia_disable_device(link); } static int fmvj18x_suspend(struct pcmcia_device *link) { struct net_device *dev = link->priv; if (link->open) netif_device_detach(dev); return 0; } static int fmvj18x_resume(struct pcmcia_device *link) { struct net_device *dev = link->priv; if (link->open) { fjn_reset(dev); netif_device_attach(dev); } return 0; } 
/*====================================================================*/ static const struct pcmcia_device_id fmvj18x_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x0004, 0x0004), PCMCIA_DEVICE_PROD_ID12("EAGLE Technology", "NE200 ETHERNET LAN MBH10302 04", 0x528c88c4, 0x74f91e59), PCMCIA_DEVICE_PROD_ID12("Eiger Labs,Inc", "EPX-10BT PC Card Ethernet 10BT", 0x53af556e, 0x877f9922), PCMCIA_DEVICE_PROD_ID12("Eiger labs,Inc.", "EPX-10BT PC Card Ethernet 10BT", 0xf47e6c66, 0x877f9922), PCMCIA_DEVICE_PROD_ID12("FUJITSU", "LAN Card(FMV-J182)", 0x6ee5a3d8, 0x5baf31db), PCMCIA_DEVICE_PROD_ID12("FUJITSU", "MBH10308", 0x6ee5a3d8, 0x3f04875e), PCMCIA_DEVICE_PROD_ID12("FUJITSU TOWA", "LA501", 0xb8451188, 0x12939ba2), PCMCIA_DEVICE_PROD_ID12("HITACHI", "HT-4840-11", 0xf4f43949, 0x773910f4), PCMCIA_DEVICE_PROD_ID12("NextComK.K.", "NC5310B Ver1.0 ", 0x8cef4d3a, 0x075fc7b6), PCMCIA_DEVICE_PROD_ID12("NextComK.K.", "NC5310 Ver1.0 ", 0x8cef4d3a, 0xbccf43e6), PCMCIA_DEVICE_PROD_ID12("RATOC System Inc.", "10BASE_T CARD R280", 0x85c10e17, 0xd9413666), PCMCIA_DEVICE_PROD_ID12("TDK", "LAC-CD02x", 0x1eae9475, 0x8fa0ee70), PCMCIA_DEVICE_PROD_ID12("TDK", "LAC-CF010", 0x1eae9475, 0x7683bc9a), PCMCIA_DEVICE_PROD_ID1("CONTEC Co.,Ltd.", 0x58d8fee2), PCMCIA_DEVICE_PROD_ID1("PCMCIA LAN MBH10304 ES", 0x2599f454), PCMCIA_DEVICE_PROD_ID1("PCMCIA MBH10302", 0x8f4005da), PCMCIA_DEVICE_PROD_ID1("UBKK,V2.0", 0x90888080), PCMCIA_PFC_DEVICE_PROD_ID12(0, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed), PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, fmvj18x_ids); static struct pcmcia_driver fmvj18x_cs_driver = { .owner = THIS_MODULE, .name = "fmvj18x_cs", .probe = fmvj18x_probe, .remove = fmvj18x_detach, .id_table = fmvj18x_ids, .suspend = fmvj18x_suspend, .resume = fmvj18x_resume, }; static int __init init_fmvj18x_cs(void) { return pcmcia_register_driver(&fmvj18x_cs_driver); } static void __exit exit_fmvj18x_cs(void) { pcmcia_unregister_driver(&fmvj18x_cs_driver); } module_init(init_fmvj18x_cs); module_exit(exit_fmvj18x_cs); /*====================================================================*/ static irqreturn_t fjn_interrupt(int dummy, void *dev_id) { struct net_device *dev = dev_id; local_info_t *lp = netdev_priv(dev); unsigned int ioaddr; unsigned short tx_stat, rx_stat; ioaddr = dev->base_addr; /* avoid multiple interrupts */ outw(0x0000, ioaddr + TX_INTR); /* wait for a while */ udelay(1); /* get status */ tx_stat = inb(ioaddr + TX_STATUS); rx_stat = inb(ioaddr + RX_STATUS); /* clear status */ outb(tx_stat, ioaddr + TX_STATUS); outb(rx_stat, ioaddr + RX_STATUS); pr_debug("%s: interrupt, rx_status %02x.\n", dev->name, rx_stat); pr_debug(" tx_status %02x.\n", tx_stat); if (rx_stat || (inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) { /* there is packet(s) in rx buffer */ fjn_rx(dev); } if (tx_stat & F_TMT_RDY) { dev->stats.tx_packets += lp->sent ; lp->sent = 0 ; if (lp->tx_queue) { outb(DO_TX | lp->tx_queue, ioaddr + TX_START); lp->sent = lp->tx_queue ; lp->tx_queue = 0; lp->tx_queue_len = 0; dev->trans_start = jiffies; } else { lp->tx_started = 0; } netif_wake_queue(dev); } pr_debug("%s: exiting interrupt,\n", dev->name); pr_debug(" tx_status %02x, rx_status %02x.\n", 
tx_stat, rx_stat); outb(D_TX_INTR, ioaddr + TX_INTR); outb(D_RX_INTR, ioaddr + RX_INTR); if (lp->base != NULL) { /* Ack interrupt for multifunction card */ writeb(0x01, lp->base+0x802); writeb(0x09, lp->base+0x822); } return IRQ_HANDLED; } /* fjn_interrupt */ /*====================================================================*/ static void fjn_tx_timeout(struct net_device *dev) { struct local_info_t *lp = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; netdev_notice(dev, "transmit timed out with status %04x, %s?\n", htons(inw(ioaddr + TX_STATUS)), inb(ioaddr + TX_STATUS) & F_TMT_RDY ? "IRQ conflict" : "network cable problem"); netdev_notice(dev, "timeout registers: %04x %04x %04x " "%04x %04x %04x %04x %04x.\n", htons(inw(ioaddr + 0)), htons(inw(ioaddr + 2)), htons(inw(ioaddr + 4)), htons(inw(ioaddr + 6)), htons(inw(ioaddr + 8)), htons(inw(ioaddr + 10)), htons(inw(ioaddr + 12)), htons(inw(ioaddr + 14))); dev->stats.tx_errors++; /* ToDo: We should try to restart the adaptor... */ local_irq_disable(); fjn_reset(dev); lp->tx_started = 0; lp->tx_queue = 0; lp->tx_queue_len = 0; lp->sent = 0; lp->open_time = jiffies; local_irq_enable(); netif_wake_queue(dev); } static netdev_tx_t fjn_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct local_info_t *lp = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; short length = skb->len; if (length < ETH_ZLEN) { if (skb_padto(skb, ETH_ZLEN)) return NETDEV_TX_OK; length = ETH_ZLEN; } netif_stop_queue(dev); { unsigned char *buf = skb->data; if (length > ETH_FRAME_LEN) { netdev_notice(dev, "Attempting to send a large packet (%d bytes)\n", length); return NETDEV_TX_BUSY; } netdev_dbg(dev, "Transmitting a packet of length %lu\n", (unsigned long)skb->len); dev->stats.tx_bytes += skb->len; /* Disable both interrupts. */ outw(0x0000, ioaddr + TX_INTR); /* wait for a while */ udelay(1); outw(length, ioaddr + DATAPORT); outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1); lp->tx_queue++; lp->tx_queue_len += ((length+3) & ~1); if (lp->tx_started == 0) { /* If the Tx is idle, always trigger a transmit. */ outb(DO_TX | lp->tx_queue, ioaddr + TX_START); lp->sent = lp->tx_queue ; lp->tx_queue = 0; lp->tx_queue_len = 0; lp->tx_started = 1; netif_start_queue(dev); } else { if( sram_config == 0 ) { if (lp->tx_queue_len < (4096 - (ETH_FRAME_LEN +2)) ) /* Yes, there is room for one more packet. */ netif_start_queue(dev); } else { if (lp->tx_queue_len < (8192 - (ETH_FRAME_LEN +2)) && lp->tx_queue < 127 ) /* Yes, there is room for one more packet. 
*/ netif_start_queue(dev); } } /* Re-enable interrupts */ outb(D_TX_INTR, ioaddr + TX_INTR); outb(D_RX_INTR, ioaddr + RX_INTR); } dev_kfree_skb (skb); return NETDEV_TX_OK; } /* fjn_start_xmit */ /*====================================================================*/ static void fjn_reset(struct net_device *dev) { struct local_info_t *lp = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; int i; netdev_dbg(dev, "fjn_reset() called\n"); /* Reset controller */ if( sram_config == 0 ) outb(CONFIG0_RST, ioaddr + CONFIG_0); else outb(CONFIG0_RST_1, ioaddr + CONFIG_0); /* Power On chip and select bank 0 */ if (lp->cardtype == MBH10302) outb(BANK_0, ioaddr + CONFIG_1); else outb(BANK_0U, ioaddr + CONFIG_1); /* Set Tx modes */ outb(D_TX_MODE, ioaddr + TX_MODE); /* set Rx modes */ outb(ID_MATCHED, ioaddr + RX_MODE); /* Set hardware address */ for (i = 0; i < 6; i++) outb(dev->dev_addr[i], ioaddr + NODE_ID + i); /* (re)initialize the multicast table */ set_rx_mode(dev); /* Switch to bank 2 (runtime mode) */ if (lp->cardtype == MBH10302) outb(BANK_2, ioaddr + CONFIG_1); else outb(BANK_2U, ioaddr + CONFIG_1); /* set 16col ctrl bits */ if( lp->cardtype == TDK || lp->cardtype == CONTEC) outb(TDK_AUTO_MODE, ioaddr + COL_CTRL); else outb(AUTO_MODE, ioaddr + COL_CTRL); /* clear Reserved Regs */ outb(0x00, ioaddr + BMPR12); outb(0x00, ioaddr + BMPR13); /* reset Skip packet reg. */ outb(0x01, ioaddr + RX_SKIP); /* Enable Tx and Rx */ if( sram_config == 0 ) outb(CONFIG0_DFL, ioaddr + CONFIG_0); else outb(CONFIG0_DFL_1, ioaddr + CONFIG_0); /* Init receive pointer ? */ inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT); /* Clear all status */ outb(0xff, ioaddr + TX_STATUS); outb(0xff, ioaddr + RX_STATUS); if (lp->cardtype == MBH10302) outb(INTR_OFF, ioaddr + LAN_CTRL); /* Turn on Rx interrupts */ outb(D_TX_INTR, ioaddr + TX_INTR); outb(D_RX_INTR, ioaddr + RX_INTR); /* Turn on interrupts from LAN card controller */ if (lp->cardtype == MBH10302) outb(INTR_ON, ioaddr + LAN_CTRL); } /* fjn_reset */ /*====================================================================*/ static void fjn_rx(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; int boguscount = 10; /* 5 -> 10: by agy 19940922 */ pr_debug("%s: in rx_packet(), rx_status %02x.\n", dev->name, inb(ioaddr + RX_STATUS)); while ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) { u_short status = inw(ioaddr + DATAPORT); netdev_dbg(dev, "Rxing packet mode %02x status %04x.\n", inb(ioaddr + RX_MODE), status); #ifndef final_version if (status == 0) { outb(F_SKP_PKT, ioaddr + RX_SKIP); break; } #endif if ((status & 0xF0) != 0x20) { /* There was an error. */ dev->stats.rx_errors++; if (status & F_LEN_ERR) dev->stats.rx_length_errors++; if (status & F_ALG_ERR) dev->stats.rx_frame_errors++; if (status & F_CRC_ERR) dev->stats.rx_crc_errors++; if (status & F_OVR_FLO) dev->stats.rx_over_errors++; } else { u_short pkt_len = inw(ioaddr + DATAPORT); /* Malloc up new buffer. 
*/ struct sk_buff *skb; if (pkt_len > 1550) { netdev_notice(dev, "The FMV-18x claimed a very large packet, size %d\n", pkt_len); outb(F_SKP_PKT, ioaddr + RX_SKIP); dev->stats.rx_errors++; break; } skb = netdev_alloc_skb(dev, pkt_len + 2); if (skb == NULL) { netdev_notice(dev, "Memory squeeze, dropping packet (len %d)\n", pkt_len); outb(F_SKP_PKT, ioaddr + RX_SKIP); dev->stats.rx_dropped++; break; } skb_reserve(skb, 2); insw(ioaddr + DATAPORT, skb_put(skb, pkt_len), (pkt_len + 1) >> 1); skb->protocol = eth_type_trans(skb, dev); { int i; pr_debug("%s: Rxed packet of length %d: ", dev->name, pkt_len); for (i = 0; i < 14; i++) pr_debug(" %02x", skb->data[i]); pr_debug(".\n"); } netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; } if (--boguscount <= 0) break; } /* If any worth-while packets have been received, dev_rint() has done a netif_wake_queue() for us and will work on them when we get to the bottom-half routine. */ /* if (lp->cardtype != TDK) { int i; for (i = 0; i < 20; i++) { if ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == F_BUF_EMP) break; (void)inw(ioaddr + DATAPORT); /+ dummy status read +/ outb(F_SKP_PKT, ioaddr + RX_SKIP); } if (i > 0) pr_debug("%s: Exint Rx packet with mode %02x after " "%d ticks.\n", dev->name, inb(ioaddr + RX_MODE), i); } */ } /* fjn_rx */ /*====================================================================*/ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); snprintf(info->bus_info, sizeof(info->bus_info), "PCMCIA 0x%lx", dev->base_addr); } static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, }; static int fjn_config(struct net_device *dev, struct ifmap *map){ return 0; } static int fjn_open(struct net_device *dev) { struct local_info_t *lp = netdev_priv(dev); struct pcmcia_device *link = lp->p_dev; pr_debug("fjn_open('%s').\n", dev->name); if (!pcmcia_dev_present(link)) return -ENODEV; link->open++; fjn_reset(dev); lp->tx_started = 0; lp->tx_queue = 0; lp->tx_queue_len = 0; lp->open_time = jiffies; netif_start_queue(dev); return 0; } /* fjn_open */ /*====================================================================*/ static int fjn_close(struct net_device *dev) { struct local_info_t *lp = netdev_priv(dev); struct pcmcia_device *link = lp->p_dev; unsigned int ioaddr = dev->base_addr; pr_debug("fjn_close('%s').\n", dev->name); lp->open_time = 0; netif_stop_queue(dev); /* Set configuration register 0 to disable Tx and Rx. */ if( sram_config == 0 ) outb(CONFIG0_RST ,ioaddr + CONFIG_0); else outb(CONFIG0_RST_1 ,ioaddr + CONFIG_0); /* Update the statistics -- ToDo. */ /* Power-down the chip. Green, green, green! */ outb(CHIP_OFF ,ioaddr + CONFIG_1); /* Set the ethernet adaptor disable IRQ */ if (lp->cardtype == MBH10302) outb(INTR_OFF, ioaddr + LAN_CTRL); link->open--; return 0; } /* fjn_close */ /*====================================================================*/ /* Set the multicast/promiscuous mode for this adaptor. 
*/ static void set_rx_mode(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; u_char mc_filter[8]; /* Multicast hash filter */ u_long flags; int i; int saved_bank; int saved_config_0 = inb(ioaddr + CONFIG_0); local_irq_save(flags); /* Disable Tx and Rx */ if (sram_config == 0) outb(CONFIG0_RST, ioaddr + CONFIG_0); else outb(CONFIG0_RST_1, ioaddr + CONFIG_0); if (dev->flags & IFF_PROMISC) { memset(mc_filter, 0xff, sizeof(mc_filter)); outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */ } else if (netdev_mc_count(dev) > MC_FILTERBREAK || (dev->flags & IFF_ALLMULTI)) { /* Too many to filter perfectly -- accept all multicasts. */ memset(mc_filter, 0xff, sizeof(mc_filter)); outb(2, ioaddr + RX_MODE); /* Use normal mode. */ } else if (netdev_mc_empty(dev)) { memset(mc_filter, 0x00, sizeof(mc_filter)); outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */ } else { struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); netdev_for_each_mc_addr(ha, dev) { unsigned int bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26; mc_filter[bit >> 3] |= (1 << (bit & 7)); } outb(2, ioaddr + RX_MODE); /* Use normal mode. */ } /* Switch to bank 1 and set the multicast table. */ saved_bank = inb(ioaddr + CONFIG_1); outb(0xe4, ioaddr + CONFIG_1); for (i = 0; i < 8; i++) outb(mc_filter[i], ioaddr + MAR_ADR + i); outb(saved_bank, ioaddr + CONFIG_1); outb(saved_config_0, ioaddr + CONFIG_0); local_irq_restore(flags); }
gpl-2.0
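The set_rx_mode() routine above hashes each multicast address into a 64-bit filter: the top six bits of a little-endian CRC-32 of the MAC address pick one of 64 table bits. Below is a minimal standalone sketch of that arithmetic (not driver code); ether_crc_le_sw() is a plain software stand-in assumed to behave like the kernel's ether_crc_le() (reflected polynomial 0xedb88320, ~0 seed, no final inversion).

#include <stdint.h>
#include <stdio.h>

/* Software stand-in for the kernel's ether_crc_le(). */
static uint32_t ether_crc_le_sw(int len, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;
	int i, j;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (j = 0; j < 8; j++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	uint8_t mc_filter[8] = { 0 };	/* 64-bit hash table, as in set_rx_mode() */
	const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned int bit = ether_crc_le_sw(6, addr) >> 26;	/* top 6 bits */

	mc_filter[bit >> 3] |= 1 << (bit & 7);	/* same bit math as the driver */
	printf("bit %u -> mc_filter[%u] = 0x%02x\n",
	       bit, bit >> 3, mc_filter[bit >> 3]);
	return 0;
}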
elektroschmock/android_kernel_google_msm
arch/mips/powertv/asic/asic-cronus.c
7695
4328
/* * Locations of devices in the Cronus ASIC * * Copyright (C) 2005-2009 Scientific-Atlanta, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Author: Ken Eppinett * David Schleef <ds@schleef.org> * * Description: Defines the platform resources for the SA settop. */ #include <linux/init.h> #include <asm/mach-powertv/asic.h> #define CRONUS_ADDR(x) (CRONUS_IO_BASE + (x)) const struct register_map cronus_register_map __initdata = { .eic_slow0_strt_add = {.phys = CRONUS_ADDR(0x000000)}, .eic_cfg_bits = {.phys = CRONUS_ADDR(0x000038)}, .eic_ready_status = {.phys = CRONUS_ADDR(0x00004C)}, .chipver3 = {.phys = CRONUS_ADDR(0x2A0800)}, .chipver2 = {.phys = CRONUS_ADDR(0x2A0804)}, .chipver1 = {.phys = CRONUS_ADDR(0x2A0808)}, .chipver0 = {.phys = CRONUS_ADDR(0x2A080C)}, /* The registers of IRBlaster */ .uart1_intstat = {.phys = CRONUS_ADDR(0x2A1800)}, .uart1_inten = {.phys = CRONUS_ADDR(0x2A1804)}, .uart1_config1 = {.phys = CRONUS_ADDR(0x2A1808)}, .uart1_config2 = {.phys = CRONUS_ADDR(0x2A180C)}, .uart1_divisorhi = {.phys = CRONUS_ADDR(0x2A1810)}, .uart1_divisorlo = {.phys = CRONUS_ADDR(0x2A1814)}, .uart1_data = {.phys = CRONUS_ADDR(0x2A1818)}, .uart1_status = {.phys = CRONUS_ADDR(0x2A181C)}, .int_stat_3 = {.phys = CRONUS_ADDR(0x2A2800)}, .int_stat_2 = {.phys = CRONUS_ADDR(0x2A2804)}, .int_stat_1 = {.phys = CRONUS_ADDR(0x2A2808)}, .int_stat_0 = {.phys = CRONUS_ADDR(0x2A280C)}, .int_config = {.phys = CRONUS_ADDR(0x2A2810)}, .int_int_scan = {.phys = CRONUS_ADDR(0x2A2818)}, .ien_int_3 = {.phys = CRONUS_ADDR(0x2A2830)}, .ien_int_2 = {.phys = CRONUS_ADDR(0x2A2834)}, .ien_int_1 = {.phys = CRONUS_ADDR(0x2A2838)}, .ien_int_0 = {.phys = CRONUS_ADDR(0x2A283C)}, .int_level_3_3 = {.phys = CRONUS_ADDR(0x2A2880)}, .int_level_3_2 = {.phys = CRONUS_ADDR(0x2A2884)}, .int_level_3_1 = {.phys = CRONUS_ADDR(0x2A2888)}, .int_level_3_0 = {.phys = CRONUS_ADDR(0x2A288C)}, .int_level_2_3 = {.phys = CRONUS_ADDR(0x2A2890)}, .int_level_2_2 = {.phys = CRONUS_ADDR(0x2A2894)}, .int_level_2_1 = {.phys = CRONUS_ADDR(0x2A2898)}, .int_level_2_0 = {.phys = CRONUS_ADDR(0x2A289C)}, .int_level_1_3 = {.phys = CRONUS_ADDR(0x2A28A0)}, .int_level_1_2 = {.phys = CRONUS_ADDR(0x2A28A4)}, .int_level_1_1 = {.phys = CRONUS_ADDR(0x2A28A8)}, .int_level_1_0 = {.phys = CRONUS_ADDR(0x2A28AC)}, .int_level_0_3 = {.phys = CRONUS_ADDR(0x2A28B0)}, .int_level_0_2 = {.phys = CRONUS_ADDR(0x2A28B4)}, .int_level_0_1 = {.phys = CRONUS_ADDR(0x2A28B8)}, .int_level_0_0 = {.phys = CRONUS_ADDR(0x2A28BC)}, .int_docsis_en = {.phys = CRONUS_ADDR(0x2A28F4)}, .mips_pll_setup = {.phys = CRONUS_ADDR(0x1C0000)}, .fs432x4b4_usb_ctl = {.phys = CRONUS_ADDR(0x1C0028)}, .test_bus = {.phys = CRONUS_ADDR(0x1C00CC)}, .crt_spare = {.phys = CRONUS_ADDR(0x1c00d4)}, .usb2_ohci_int_mask = {.phys = CRONUS_ADDR(0x20000C)}, .usb2_strap = {.phys = CRONUS_ADDR(0x200014)}, .ehci_hcapbase = {.phys = CRONUS_ADDR(0x21FE00)}, .ohci_hc_revision = {.phys = 
CRONUS_ADDR(0x21fc00)}, .bcm1_bs_lmi_steer = {.phys = CRONUS_ADDR(0x2E0008)}, .usb2_control = {.phys = CRONUS_ADDR(0x2E004C)}, .usb2_stbus_obc = {.phys = CRONUS_ADDR(0x21FF00)}, .usb2_stbus_mess_size = {.phys = CRONUS_ADDR(0x21FF04)}, .usb2_stbus_chunk_size = {.phys = CRONUS_ADDR(0x21FF08)}, .pcie_regs = {.phys = CRONUS_ADDR(0x220000)}, .tim_ch = {.phys = CRONUS_ADDR(0x2A2C10)}, .tim_cl = {.phys = CRONUS_ADDR(0x2A2C14)}, .gpio_dout = {.phys = CRONUS_ADDR(0x2A2C20)}, .gpio_din = {.phys = CRONUS_ADDR(0x2A2C24)}, .gpio_dir = {.phys = CRONUS_ADDR(0x2A2C2C)}, .watchdog = {.phys = CRONUS_ADDR(0x2A2C30)}, .front_panel = {.phys = CRONUS_ADDR(0x2A3800)}, };
gpl-2.0
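Every entry in the Cronus map above is just CRONUS_IO_BASE plus a fixed offset, folded in at compile time by CRONUS_ADDR(). A tiny illustrative sketch of that addressing scheme follows; the base value here is a made-up placeholder, not the real CRONUS_IO_BASE from the PowerTV headers.

#include <stdio.h>

#define CRONUS_IO_BASE 0x09000000UL		/* placeholder, not the real base */
#define CRONUS_ADDR(x) (CRONUS_IO_BASE + (x))

int main(void)
{
	/* Same offsets as the uart1_data and watchdog entries above. */
	printf("uart1_data at 0x%08lx\n", CRONUS_ADDR(0x2A1818));
	printf("watchdog   at 0x%08lx\n", CRONUS_ADDR(0x2A2C30));
	return 0;
}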
elektroschmock/android_kernel_google_msm
arch/mips/powertv/asic/asic-calliope.c
7695
4482
/* * Locations of devices in the Calliope ASIC. * * Copyright (C) 2005-2009 Scientific-Atlanta, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Author: Ken Eppinett * David Schleef <ds@schleef.org> * * Description: Defines the platform resources for the SA settop. */ #include <linux/init.h> #include <asm/mach-powertv/asic.h> #define CALLIOPE_ADDR(x) (CALLIOPE_IO_BASE + (x)) const struct register_map calliope_register_map __initdata = { .eic_slow0_strt_add = {.phys = CALLIOPE_ADDR(0x800000)}, .eic_cfg_bits = {.phys = CALLIOPE_ADDR(0x800038)}, .eic_ready_status = {.phys = CALLIOPE_ADDR(0x80004c)}, .chipver3 = {.phys = CALLIOPE_ADDR(0xA00800)}, .chipver2 = {.phys = CALLIOPE_ADDR(0xA00804)}, .chipver1 = {.phys = CALLIOPE_ADDR(0xA00808)}, .chipver0 = {.phys = CALLIOPE_ADDR(0xA0080c)}, /* The registers of IRBlaster */ .uart1_intstat = {.phys = CALLIOPE_ADDR(0xA01800)}, .uart1_inten = {.phys = CALLIOPE_ADDR(0xA01804)}, .uart1_config1 = {.phys = CALLIOPE_ADDR(0xA01808)}, .uart1_config2 = {.phys = CALLIOPE_ADDR(0xA0180C)}, .uart1_divisorhi = {.phys = CALLIOPE_ADDR(0xA01810)}, .uart1_divisorlo = {.phys = CALLIOPE_ADDR(0xA01814)}, .uart1_data = {.phys = CALLIOPE_ADDR(0xA01818)}, .uart1_status = {.phys = CALLIOPE_ADDR(0xA0181C)}, .int_stat_3 = {.phys = CALLIOPE_ADDR(0xA02800)}, .int_stat_2 = {.phys = CALLIOPE_ADDR(0xA02804)}, .int_stat_1 = {.phys = CALLIOPE_ADDR(0xA02808)}, .int_stat_0 = {.phys = CALLIOPE_ADDR(0xA0280c)}, .int_config = {.phys = CALLIOPE_ADDR(0xA02810)}, .int_int_scan = {.phys = CALLIOPE_ADDR(0xA02818)}, .ien_int_3 = {.phys = CALLIOPE_ADDR(0xA02830)}, .ien_int_2 = {.phys = CALLIOPE_ADDR(0xA02834)}, .ien_int_1 = {.phys = CALLIOPE_ADDR(0xA02838)}, .ien_int_0 = {.phys = CALLIOPE_ADDR(0xA0283c)}, .int_level_3_3 = {.phys = CALLIOPE_ADDR(0xA02880)}, .int_level_3_2 = {.phys = CALLIOPE_ADDR(0xA02884)}, .int_level_3_1 = {.phys = CALLIOPE_ADDR(0xA02888)}, .int_level_3_0 = {.phys = CALLIOPE_ADDR(0xA0288c)}, .int_level_2_3 = {.phys = CALLIOPE_ADDR(0xA02890)}, .int_level_2_2 = {.phys = CALLIOPE_ADDR(0xA02894)}, .int_level_2_1 = {.phys = CALLIOPE_ADDR(0xA02898)}, .int_level_2_0 = {.phys = CALLIOPE_ADDR(0xA0289c)}, .int_level_1_3 = {.phys = CALLIOPE_ADDR(0xA028a0)}, .int_level_1_2 = {.phys = CALLIOPE_ADDR(0xA028a4)}, .int_level_1_1 = {.phys = CALLIOPE_ADDR(0xA028a8)}, .int_level_1_0 = {.phys = CALLIOPE_ADDR(0xA028ac)}, .int_level_0_3 = {.phys = CALLIOPE_ADDR(0xA028b0)}, .int_level_0_2 = {.phys = CALLIOPE_ADDR(0xA028b4)}, .int_level_0_1 = {.phys = CALLIOPE_ADDR(0xA028b8)}, .int_level_0_0 = {.phys = CALLIOPE_ADDR(0xA028bc)}, .int_docsis_en = {.phys = CALLIOPE_ADDR(0xA028F4)}, .mips_pll_setup = {.phys = CALLIOPE_ADDR(0x980000)}, .fs432x4b4_usb_ctl = {.phys = CALLIOPE_ADDR(0x980030)}, .test_bus = {.phys = CALLIOPE_ADDR(0x9800CC)}, .crt_spare = {.phys = CALLIOPE_ADDR(0x9800d4)}, .usb2_ohci_int_mask = {.phys = CALLIOPE_ADDR(0x9A000c)}, .usb2_strap = {.phys = 
CALLIOPE_ADDR(0x9A0014)}, .ehci_hcapbase = {.phys = CALLIOPE_ADDR(0x9BFE00)}, .ohci_hc_revision = {.phys = CALLIOPE_ADDR(0x9BFC00)}, .bcm1_bs_lmi_steer = {.phys = CALLIOPE_ADDR(0x9E0004)}, .usb2_control = {.phys = CALLIOPE_ADDR(0x9E0054)}, .usb2_stbus_obc = {.phys = CALLIOPE_ADDR(0x9BFF00)}, .usb2_stbus_mess_size = {.phys = CALLIOPE_ADDR(0x9BFF04)}, .usb2_stbus_chunk_size = {.phys = CALLIOPE_ADDR(0x9BFF08)}, .pcie_regs = {.phys = 0x000000}, /* -doesn't exist- */ .tim_ch = {.phys = CALLIOPE_ADDR(0xA02C10)}, .tim_cl = {.phys = CALLIOPE_ADDR(0xA02C14)}, .gpio_dout = {.phys = CALLIOPE_ADDR(0xA02c20)}, .gpio_din = {.phys = CALLIOPE_ADDR(0xA02c24)}, .gpio_dir = {.phys = CALLIOPE_ADDR(0xA02c2C)}, .watchdog = {.phys = CALLIOPE_ADDR(0xA02c30)}, .front_panel = {.phys = 0x000000}, /* -not used- */ };
gpl-2.0
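Calliope fills in the same struct register_map as Cronus, just with different offsets (and zeroes for registers the ASIC lacks, such as pcie_regs), so shared PowerTV code can index registers without knowing which chip it runs on. A hedged sketch of that dispatch pattern; the two-field struct, the enum, and pick_map() are illustrative only, not the real platform API.

#include <stdio.h>

struct register_map_sketch {
	unsigned long gpio_dout;
	unsigned long gpio_din;
};

/* Offsets lifted from the two tables above (I/O base omitted). */
static const struct register_map_sketch cronus_sketch   = { 0x2A2C20, 0x2A2C24 };
static const struct register_map_sketch calliope_sketch = { 0xA02C20, 0xA02C24 };

enum asic_type { ASIC_CRONUS, ASIC_CALLIOPE };

static const struct register_map_sketch *pick_map(enum asic_type t)
{
	return t == ASIC_CRONUS ? &cronus_sketch : &calliope_sketch;
}

int main(void)
{
	const struct register_map_sketch *m = pick_map(ASIC_CALLIOPE);

	printf("gpio_dout offset: 0x%lx\n", m->gpio_dout);
	return 0;
}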
DennisBold/android_kernel_htc_pyramid
arch/ia64/sn/pci/pcibr/pcibr_dma.c
9487
11726
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved. */ #include <linux/types.h> #include <linux/pci.h> #include <linux/export.h> #include <asm/sn/addrs.h> #include <asm/sn/geo.h> #include <asm/sn/pcibr_provider.h> #include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/pcidev.h> #include <asm/sn/pic.h> #include <asm/sn/sn_sal.h> #include <asm/sn/tiocp.h> #include "tio.h" #include "xtalk/xwidgetdev.h" #include "xtalk/hubdev.h" extern int sn_ioif_inited; /* ===================================================================== * DMA MANAGEMENT * * The Bridge ASIC provides three methods of doing DMA: via a "direct map" * register available in 32-bit PCI space (which selects a contiguous 2G * address space on some other widget), via "direct" addressing via 64-bit * PCI space (all destination information comes from the PCI address, * including transfer attributes), and via a "mapped" region that allows * a bunch of different small mappings to be established with the PMU. * * For efficiency, we most prefer to use the 32bit direct mapping facility, * since it requires no resource allocations. The advantage of using the * PMU over the 64-bit direct is that single-cycle PCI addressing can be * used; the advantage of using 64-bit direct over PMU addressing is that * we do not have to allocate entries in the PMU. */ static dma_addr_t pcibr_dmamap_ate32(struct pcidev_info *info, u64 paddr, size_t req_size, u64 flags, int dma_flags) { struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info; struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> pdi_pcibus_info; u8 internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info-> pdi_linux_pcidev->devfn)) - 1; int ate_count; int ate_index; u64 ate_flags = flags | PCI32_ATE_V; u64 ate; u64 pci_addr; u64 xio_addr; u64 offset; /* PIC in PCI-X mode does not support 32bit PageMap mode */ if (IS_PIC_SOFT(pcibus_info) && IS_PCIX(pcibus_info)) { return 0; } /* Calculate the number of ATEs needed. */ if (!(MINIMAL_ATE_FLAG(paddr, req_size))) { ate_count = IOPG((IOPGSIZE - 1) /* worst case start offset */ +req_size /* max mapping bytes */ - 1) + 1; /* round UP */ } else { /* assume requested target is page aligned */ ate_count = IOPG(req_size /* max mapping bytes */ - 1) + 1; /* round UP */ } /* Allocate the required ATEs. */ ate_index = pcibr_ate_alloc(pcibus_info, ate_count); if (ate_index < 0) return 0; /* In PCI-X mode, Prefetch not supported */ if (IS_PCIX(pcibus_info)) ate_flags &= ~(PCI32_ATE_PREF); if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS) xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) : PHYS_TO_TIODMA(paddr); else xio_addr = paddr; offset = IOPGOFF(xio_addr); ate = ate_flags | (xio_addr - offset); /* If PIC, put the targetid in the ATE */ if (IS_PIC_SOFT(pcibus_info)) { ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT); } /* * If we're mapping for MSI, set the MSI bit in the ATE. If it's a * TIOCP based pci bus, we also need to set the PIO bit in the ATE. */ if (dma_flags & SN_DMA_MSI) { ate |= PCI32_ATE_MSI; if (IS_TIOCP_SOFT(pcibus_info)) ate |= PCI32_ATE_PIO; } ate_write(pcibus_info, ate_index, ate_count, ate); /* * Set up the DMA mapped Address. */ pci_addr = PCI32_MAPPED_BASE + offset + IOPGSIZE * ate_index; /* * If swap was set in device in pcibr_endian_set() * we need to turn swapping on. 
*/ if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR) ATE_SWAP_ON(pci_addr); return pci_addr; } static dma_addr_t pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr, u64 dma_attributes, int dma_flags) { struct pcibus_info *pcibus_info = (struct pcibus_info *) ((info->pdi_host_pcidev_info)->pdi_pcibus_info); u64 pci_addr; /* Translate to Crosstalk View of Physical Address */ if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS) pci_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) : PHYS_TO_TIODMA(paddr); else pci_addr = paddr; pci_addr |= dma_attributes; /* Handle Bus mode */ if (IS_PCIX(pcibus_info)) pci_addr &= ~PCI64_ATTR_PREF; /* Handle Bridge Chipset differences */ if (IS_PIC_SOFT(pcibus_info)) { pci_addr |= ((u64) pcibus_info-> pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT); } else pci_addr |= (dma_flags & SN_DMA_MSI) ? TIOCP_PCI64_CMDTYPE_MSI : TIOCP_PCI64_CMDTYPE_MEM; /* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */ if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn)) pci_addr |= PCI64_ATTR_VIRTUAL; return pci_addr; } static dma_addr_t pcibr_dmatrans_direct32(struct pcidev_info * info, u64 paddr, size_t req_size, u64 flags, int dma_flags) { struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info; struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> pdi_pcibus_info; u64 xio_addr; u64 xio_base; u64 offset; u64 endoff; if (IS_PCIX(pcibus_info)) { return 0; } if (dma_flags & SN_DMA_MSI) return 0; if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS) xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) : PHYS_TO_TIODMA(paddr); else xio_addr = paddr; xio_base = pcibus_info->pbi_dir_xbase; offset = xio_addr - xio_base; endoff = req_size + offset; if ((req_size > (1ULL << 31)) || /* Too Big */ (xio_addr < xio_base) || /* Out of range for mappings */ (endoff > (1ULL << 31))) { /* Too Big */ return 0; } return PCI32_DIRECT_BASE | offset; } /* * Wrapper routine for freeing DMA maps * DMA mappings for Direct 64 and 32 do not have any DMA maps. */ void pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction) { struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev); struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->pdi_pcibus_info; if (IS_PCI32_MAPPED(dma_handle)) { int ate_index; ate_index = IOPG((ATE_SWAP_OFF(dma_handle) - PCI32_MAPPED_BASE)); pcibr_ate_free(pcibus_info, ate_index); } } /* * On SN systems there is a race condition between a PIO read response and * DMA's. In rare cases, the read response may beat the DMA, causing the * driver to think that data in memory is complete and meaningful. This code * eliminates that race. This routine is called by the PIO read routines * after doing the read. For PIC this routine then forces a fake interrupt * on another line, which is logically associated with the slot that the PIO * is addressed to. It then spins while watching the memory location that * the interrupt is targeted to. When the interrupt response arrives, we * are sure that the DMA has landed in memory and it is safe for the driver * to proceed. For TIOCP use the Device(x) Write Request Buffer Flush * Bridge register since it ensures the data has entered the coherence domain, * unlike the PIC Device(x) Write Request Buffer Flush register. 
*/ void sn_dma_flush(u64 addr) { nasid_t nasid; int is_tio; int wid_num; int i, j; unsigned long flags; u64 itte; struct hubdev_info *hubinfo; struct sn_flush_device_kernel *p; struct sn_flush_device_common *common; struct sn_flush_nasid_entry *flush_nasid_list; if (!sn_ioif_inited) return; nasid = NASID_GET(addr); if (-1 == nasid_to_cnodeid(nasid)) return; hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo; BUG_ON(!hubinfo); flush_nasid_list = &hubinfo->hdi_flush_nasid_list; if (flush_nasid_list->widget_p == NULL) return; is_tio = (nasid & 1); if (is_tio) { int itte_index; if (TIO_HWIN(addr)) itte_index = 0; else if (TIO_BWIN_WINDOWNUM(addr)) itte_index = TIO_BWIN_WINDOWNUM(addr); else itte_index = -1; if (itte_index >= 0) { itte = flush_nasid_list->iio_itte[itte_index]; if (! TIO_ITTE_VALID(itte)) return; wid_num = TIO_ITTE_WIDGET(itte); } else wid_num = TIO_SWIN_WIDGETNUM(addr); } else { if (BWIN_WINDOWNUM(addr)) { itte = flush_nasid_list->iio_itte[BWIN_WINDOWNUM(addr)]; wid_num = IIO_ITTE_WIDGET(itte); } else wid_num = SWIN_WIDGETNUM(addr); } if (flush_nasid_list->widget_p[wid_num] == NULL) return; p = &flush_nasid_list->widget_p[wid_num][0]; /* find a matching BAR */ for (i = 0; i < DEV_PER_WIDGET; i++,p++) { common = p->common; for (j = 0; j < PCI_ROM_RESOURCE; j++) { if (common->sfdl_bar_list[j].start == 0) break; if (addr >= common->sfdl_bar_list[j].start && addr <= common->sfdl_bar_list[j].end) break; } if (j < PCI_ROM_RESOURCE && common->sfdl_bar_list[j].start != 0) break; } /* if no matching BAR, return without doing anything. */ if (i == DEV_PER_WIDGET) return; /* * For TIOCP use the Device(x) Write Request Buffer Flush Bridge * register since it ensures the data has entered the coherence * domain, unlike PIC. */ if (is_tio) { /* * Note: devices behind TIOCE should never be matched in the * above code, and so the following code is PIC/CP centric. * If CE ever needs the sn_dma_flush mechanism, we will have * to account for that here and in tioce_bus_fixup(). */ u32 tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID)); u32 revnum = XWIDGET_PART_REV_NUM(tio_id); /* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */ if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) { return; } else { pcireg_wrb_flush_get(common->sfdl_pcibus_info, (common->sfdl_slot - 1)); } } else { spin_lock_irqsave(&p->sfdl_flush_lock, flags); *common->sfdl_flush_addr = 0; /* force an interrupt. */ *(volatile u32 *)(common->sfdl_force_int_addr) = 1; /* wait for the interrupt to come back. */ while (*(common->sfdl_flush_addr) != 0x10f) cpu_relax(); /* okay, everything is synched up. */ spin_unlock_irqrestore(&p->sfdl_flush_lock, flags); } return; } /* * DMA interfaces. Called from pci_dma.c routines. */ dma_addr_t pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size, int dma_flags) { dma_addr_t dma_handle; struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev); /* SN cannot support DMA addresses smaller than 32 bits. */ if (hwdev->dma_mask < 0x7fffffff) { return 0; } if (hwdev->dma_mask == ~0UL) { /* * Handle the most common case: 64 bit cards. This * call should always succeed. */ dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr, PCI64_ATTR_PREF, dma_flags); } else { /* Handle 32-63 bit cards via direct mapping */ dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr, size, 0, dma_flags); if (!dma_handle) { /* * It is a 32 bit card and we cannot do direct mapping, * so we use an ATE. 
*/ dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr, size, PCI32_ATE_PREF, dma_flags); } } return dma_handle; } dma_addr_t pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr, size_t size, int dma_flags) { dma_addr_t dma_handle; struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev); if (hwdev->dev.coherent_dma_mask == ~0UL) { dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr, PCI64_ATTR_BAR, dma_flags); } else { dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info, phys_addr, size, PCI32_ATE_BAR, dma_flags); } return dma_handle; } EXPORT_SYMBOL(sn_dma_flush);
gpl-2.0
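pcibr_dmamap_ate32() above sizes the ATE allocation by rounding the request up to whole I/O pages, padding by IOPGSIZE-1 when the mapping may start mid-page. A standalone sketch of that rounding; the 16 KB IOPGSIZE is an assumption for illustration (the real value comes from the SN2 headers).

#include <stdio.h>

#define IOPGSIZE (1UL << 14)		/* assumed I/O page size */
#define IOPG(x)  ((x) / IOPGSIZE)

static unsigned long ate_count(unsigned long req_size, int page_aligned)
{
	if (!page_aligned)
		/* worst case: mapping starts IOPGSIZE-1 bytes into a page */
		return IOPG((IOPGSIZE - 1) + req_size - 1) + 1;
	return IOPG(req_size - 1) + 1;	/* round up to whole pages */
}

int main(void)
{
	/* one full page needs 2 ATEs if possibly misaligned, 1 if aligned */
	printf("%lu %lu\n", ate_count(IOPGSIZE, 0), ate_count(IOPGSIZE, 1));
	return 0;
}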
scotthartbti/cm_samsung_kernel_dempsey
arch/powerpc/boot/cuboot-rainier.c
14095
1453
/* * Old U-boot compatibility for Rainier * * Valentine Barshak <vbarshak@ru.mvista.com> * Copyright 2007 MontaVista Software, Inc * * Based on Ebony code by David Gibson <david@gibson.dropbear.id.au> * Copyright IBM Corporation, 2007 * * Based on Bamboo code by Josh Boyer <jwboyer@linux.vnet.ibm.com> * Copyright IBM Corporation, 2007 * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 of the License */ #include <stdarg.h> #include <stddef.h> #include "types.h" #include "elf.h" #include "string.h" #include "stdio.h" #include "page.h" #include "ops.h" #include "dcr.h" #include "4xx.h" #include "44x.h" #include "cuboot.h" #define TARGET_4xx #define TARGET_44x #include "ppcboot.h" static bd_t bd; static void rainier_fixups(void) { unsigned long sysclk = 33333333; ibm440ep_fixup_clocks(sysclk, 11059200, 50000000); ibm4xx_fixup_ebc_ranges("/plb/opb/ebc"); ibm4xx_denali_fixup_memsize(); dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr); dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr); } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { CUBOOT_INIT(); platform_ops.fixups = rainier_fixups; platform_ops.exit = ibm44x_dbcr_reset; fdt_init(_dtb_start); serial_console_init(); }
gpl-2.0
ParanoidAndroid/android_external_busybox
findutils/grep.c
16
24129
/* vi: set sw=4 ts=4: */ /* * Mini grep implementation for busybox using libc regex. * * Copyright (C) 1999,2000,2001 by Lineo, inc. and Mark Whitley * Copyright (C) 1999,2000,2001 by Mark Whitley <markw@codepoet.org> * * Licensed under GPLv2 or later, see file LICENSE in this source tree. */ /* BB_AUDIT SUSv3 defects - unsupported option -x "match whole line only". */ /* BB_AUDIT GNU defects - always acts as -a. */ /* http://www.opengroup.org/onlinepubs/007904975/utilities/grep.html */ /* * 2004,2006 (C) Vladimir Oleynik <dzo@simtreas.ru> - * correction "-e pattern1 -e pattern2" logic and more optimizations. * precompiled regex * * (C) 2006 Jac Goudsmit added -o option */ //applet:IF_GREP(APPLET(grep, BB_DIR_BIN, BB_SUID_DROP)) //applet:IF_FEATURE_GREP_EGREP_ALIAS(APPLET_ODDNAME(egrep, grep, BB_DIR_BIN, BB_SUID_DROP, egrep)) //applet:IF_FEATURE_GREP_FGREP_ALIAS(APPLET_ODDNAME(fgrep, grep, BB_DIR_BIN, BB_SUID_DROP, fgrep)) //kbuild:lib-$(CONFIG_GREP) += grep.o //config:config GREP //config: bool "grep" //config: default y //config: help //config: grep is used to search files for a specified pattern. //config: //config:config FEATURE_GREP_EGREP_ALIAS //config: bool "Enable extended regular expressions (egrep & grep -E)" //config: default y //config: depends on GREP //config: help //config: Enable support for extended regular expressions. Extended //config: regular expressions allow for alternation (foo|bar), grouping, //config: and various repetition operators. //config: //config:config FEATURE_GREP_FGREP_ALIAS //config: bool "Alias fgrep to grep -F" //config: default y //config: depends on GREP //config: help //config: fgrep sees the search pattern as a normal string rather than //config: regular expressions. //config: grep -F always works, this just creates the fgrep alias. //config: //config:config FEATURE_GREP_CONTEXT //config: bool "Enable before and after context flags (-A, -B and -C)" //config: default y //config: depends on GREP //config: help //config: Print the specified number of leading (-B) and/or trailing (-A) //config: context surrounding our matching lines. //config: Print the specified number of context lines (-C). #include "libbb.h" #include "xregex.h" /* options */ //usage:#define grep_trivial_usage //usage: "[-HhnlLoqvsriw" //usage: "F" //usage: IF_FEATURE_GREP_EGREP_ALIAS("E") //usage: IF_EXTRA_COMPAT("z") //usage: "] [-m N] " //usage: IF_FEATURE_GREP_CONTEXT("[-A/B/C N] ") //usage: "PATTERN/-e PATTERN.../-f FILE [FILE]..." //usage:#define grep_full_usage "\n\n" //usage: "Search for PATTERN in FILEs (or stdin)\n" //usage: "\n -H Add 'filename:' prefix" //usage: "\n -h Do not add 'filename:' prefix" //usage: "\n -n Add 'line_no:' prefix" //usage: "\n -l Show only names of files that match" //usage: "\n -L Show only names of files that don't match" //usage: "\n -c Show only count of matching lines" //usage: "\n -o Show only the matching part of line" //usage: "\n -q Quiet. 
Return 0 if PATTERN is found, 1 otherwise" //usage: "\n -v Select non-matching lines" //usage: "\n -s Suppress open and read errors" //usage: "\n -r Recurse" //usage: "\n -i Ignore case" //usage: "\n -w Match whole words only" //usage: "\n -x Match whole lines only" //usage: "\n -F PATTERN is a literal (not regexp)" //usage: IF_FEATURE_GREP_EGREP_ALIAS( //usage: "\n -E PATTERN is an extended regexp" //usage: ) //usage: IF_EXTRA_COMPAT( //usage: "\n -z Input is NUL terminated" //usage: ) //usage: "\n -m N Match up to N times per file" //usage: IF_FEATURE_GREP_CONTEXT( //usage: "\n -A N Print N lines of trailing context" //usage: "\n -B N Print N lines of leading context" //usage: "\n -C N Same as '-A N -B N'" //usage: ) //usage: "\n -e PTRN Pattern to match" //usage: "\n -f FILE Read pattern from file" //usage: //usage:#define grep_example_usage //usage: "$ grep root /etc/passwd\n" //usage: "root:x:0:0:root:/root:/bin/bash\n" //usage: "$ grep ^[rR]oo. /etc/passwd\n" //usage: "root:x:0:0:root:/root:/bin/bash\n" //usage: //usage:#define egrep_trivial_usage NOUSAGE_STR //usage:#define egrep_full_usage "" //usage:#define fgrep_trivial_usage NOUSAGE_STR //usage:#define fgrep_full_usage "" #define OPTSTR_GREP \ "lnqvscFiHhe:f:Lorm:wx" \ IF_FEATURE_GREP_CONTEXT("A:B:C:") \ IF_FEATURE_GREP_EGREP_ALIAS("E") \ IF_EXTRA_COMPAT("z") \ "aI" /* ignored: -a "assume all files to be text" */ /* ignored: -I "assume binary files have no matches" */ enum { OPTBIT_l, /* list matched file names only */ OPTBIT_n, /* print line# */ OPTBIT_q, /* quiet - exit(EXIT_SUCCESS) of first match */ OPTBIT_v, /* invert the match, to select non-matching lines */ OPTBIT_s, /* suppress errors about file open errors */ OPTBIT_c, /* count matches per file (suppresses normal output) */ OPTBIT_F, /* literal match */ OPTBIT_i, /* case-insensitive */ OPTBIT_H, /* force filename display */ OPTBIT_h, /* inhibit filename display */ OPTBIT_e, /* -e PATTERN */ OPTBIT_f, /* -f FILE_WITH_PATTERNS */ OPTBIT_L, /* list unmatched file names only */ OPTBIT_o, /* show only matching parts of lines */ OPTBIT_r, /* recurse dirs */ OPTBIT_m, /* -m MAX_MATCHES */ OPTBIT_w, /* -w whole word match */ OPTBIT_x, /* -x whole line match */ IF_FEATURE_GREP_CONTEXT( OPTBIT_A ,) /* -A NUM: after-match context */ IF_FEATURE_GREP_CONTEXT( OPTBIT_B ,) /* -B NUM: before-match context */ IF_FEATURE_GREP_CONTEXT( OPTBIT_C ,) /* -C NUM: -A and -B combined */ IF_FEATURE_GREP_EGREP_ALIAS(OPTBIT_E ,) /* extended regexp */ IF_EXTRA_COMPAT( OPTBIT_z ,) /* input is NUL terminated */ OPT_l = 1 << OPTBIT_l, OPT_n = 1 << OPTBIT_n, OPT_q = 1 << OPTBIT_q, OPT_v = 1 << OPTBIT_v, OPT_s = 1 << OPTBIT_s, OPT_c = 1 << OPTBIT_c, OPT_F = 1 << OPTBIT_F, OPT_i = 1 << OPTBIT_i, OPT_H = 1 << OPTBIT_H, OPT_h = 1 << OPTBIT_h, OPT_e = 1 << OPTBIT_e, OPT_f = 1 << OPTBIT_f, OPT_L = 1 << OPTBIT_L, OPT_o = 1 << OPTBIT_o, OPT_r = 1 << OPTBIT_r, OPT_m = 1 << OPTBIT_m, OPT_w = 1 << OPTBIT_w, OPT_x = 1 << OPTBIT_x, OPT_A = IF_FEATURE_GREP_CONTEXT( (1 << OPTBIT_A)) + 0, OPT_B = IF_FEATURE_GREP_CONTEXT( (1 << OPTBIT_B)) + 0, OPT_C = IF_FEATURE_GREP_CONTEXT( (1 << OPTBIT_C)) + 0, OPT_E = IF_FEATURE_GREP_EGREP_ALIAS((1 << OPTBIT_E)) + 0, OPT_z = IF_EXTRA_COMPAT( (1 << OPTBIT_z)) + 0, }; #define PRINT_FILES_WITH_MATCHES (option_mask32 & OPT_l) #define PRINT_LINE_NUM (option_mask32 & OPT_n) #define BE_QUIET (option_mask32 & OPT_q) #define SUPPRESS_ERR_MSGS (option_mask32 & OPT_s) #define PRINT_MATCH_COUNTS (option_mask32 & OPT_c) #define FGREP_FLAG (option_mask32 & OPT_F) #define 
PRINT_FILES_WITHOUT_MATCHES (option_mask32 & OPT_L) #define NUL_DELIMITED (option_mask32 & OPT_z) struct globals { int max_matches; #if !ENABLE_EXTRA_COMPAT int reflags; #else RE_TRANSLATE_TYPE case_fold; /* RE_TRANSLATE_TYPE is [[un]signed] char* */ #endif smalluint invert_search; smalluint print_filename; smalluint open_errors; #if ENABLE_FEATURE_GREP_CONTEXT smalluint did_print_line; int lines_before; int lines_after; char **before_buf; IF_EXTRA_COMPAT(size_t *before_buf_size;) int last_line_printed; #endif /* globals used internally */ llist_t *pattern_head; /* growable list of patterns to match */ const char *cur_file; /* the current file we are reading */ } FIX_ALIASING; #define G (*(struct globals*)&bb_common_bufsiz1) #define INIT_G() do { \ struct G_sizecheck { \ char G_sizecheck[sizeof(G) > COMMON_BUFSIZE ? -1 : 1]; \ }; \ } while (0) #define max_matches (G.max_matches ) #if !ENABLE_EXTRA_COMPAT # define reflags (G.reflags ) #else # define case_fold (G.case_fold ) /* http://www.delorie.com/gnu/docs/regex/regex_46.html */ # define reflags re_syntax_options # undef REG_NOSUB # undef REG_EXTENDED # undef REG_ICASE # define REG_NOSUB bug:is:here /* should not be used */ /* Just RE_SYNTAX_EGREP is not enough, need to enable {n[,[m]]} too */ # define REG_EXTENDED (RE_SYNTAX_EGREP | RE_INTERVALS | RE_NO_BK_BRACES) # define REG_ICASE bug:is:here /* should not be used */ #endif #define invert_search (G.invert_search ) #define print_filename (G.print_filename ) #define open_errors (G.open_errors ) #define did_print_line (G.did_print_line ) #define lines_before (G.lines_before ) #define lines_after (G.lines_after ) #define before_buf (G.before_buf ) #define before_buf_size (G.before_buf_size ) #define last_line_printed (G.last_line_printed ) #define pattern_head (G.pattern_head ) #define cur_file (G.cur_file ) typedef struct grep_list_data_t { char *pattern; /* for GNU regex, matched_range must be persistent across grep_file() calls */ #if !ENABLE_EXTRA_COMPAT regex_t compiled_regex; regmatch_t matched_range; #else struct re_pattern_buffer compiled_regex; struct re_registers matched_range; #endif #define ALLOCATED 1 #define COMPILED 2 int flg_mem_alocated_compiled; } grep_list_data_t; #if !ENABLE_EXTRA_COMPAT #define print_line(line, line_len, linenum, decoration) \ print_line(line, linenum, decoration) #endif static void print_line(const char *line, size_t line_len, int linenum, char decoration) { #if ENABLE_FEATURE_GREP_CONTEXT /* Happens when we go to next file, immediately hit match * and try to print prev context... from prev file! Don't do it */ if (linenum < 1) return; /* possibly print the little '--' separator */ if ((lines_before || lines_after) && did_print_line && last_line_printed != linenum - 1 ) { puts("--"); } /* guard against printing "--" before first line of first file */ did_print_line = 1; last_line_printed = linenum; #endif if (print_filename) printf("%s%c", cur_file, decoration); if (PRINT_LINE_NUM) printf("%i%c", linenum, decoration); /* Emulate weird GNU grep behavior with -ov */ if ((option_mask32 & (OPT_v|OPT_o)) != (OPT_v|OPT_o)) { #if !ENABLE_EXTRA_COMPAT puts(line); #else fwrite(line, 1, line_len, stdout); putchar(NUL_DELIMITED ? '\0' : '\n'); #endif } } #if ENABLE_EXTRA_COMPAT /* Unlike getline, this one removes trailing '\n' */ static ssize_t FAST_FUNC bb_getline(char **line_ptr, size_t *line_alloc_len, FILE *file) { ssize_t res_sz; char *line; int delim = (NUL_DELIMITED ? 
'\0' : '\n'); res_sz = getdelim(line_ptr, line_alloc_len, delim, file); line = *line_ptr; if (res_sz > 0) { if (line[res_sz - 1] == delim) line[--res_sz] = '\0'; } else { free(line); /* uclibc allocates a buffer even on EOF. WTF? */ } return res_sz; } #endif static int grep_file(FILE *file) { smalluint found; int linenum = 0; int nmatches = 0; #if !ENABLE_EXTRA_COMPAT char *line; #else char *line = NULL; ssize_t line_len; size_t line_alloc_len; # define rm_so start[0] # define rm_eo end[0] #endif #if ENABLE_FEATURE_GREP_CONTEXT int print_n_lines_after = 0; int curpos = 0; /* track where we are in the circular 'before' buffer */ int idx = 0; /* used for iteration through the circular buffer */ #else enum { print_n_lines_after = 0 }; #endif while ( #if !ENABLE_EXTRA_COMPAT (line = xmalloc_fgetline(file)) != NULL #else (line_len = bb_getline(&line, &line_alloc_len, file)) >= 0 #endif ) { llist_t *pattern_ptr = pattern_head; static grep_list_data_t *gl; linenum++; found = 0; while (pattern_ptr) { gl = (grep_list_data_t *)pattern_ptr->data; if (FGREP_FLAG) { found |= (((option_mask32 & OPT_i) ? strcasestr(line, gl->pattern) : strstr(line, gl->pattern) ) != NULL); } else { if (!(gl->flg_mem_alocated_compiled & COMPILED)) { gl->flg_mem_alocated_compiled |= COMPILED; #if !ENABLE_EXTRA_COMPAT xregcomp(&gl->compiled_regex, gl->pattern, reflags); #else memset(&gl->compiled_regex, 0, sizeof(gl->compiled_regex)); gl->compiled_regex.translate = case_fold; /* for -i */ if (re_compile_pattern(gl->pattern, strlen(gl->pattern), &gl->compiled_regex)) bb_error_msg_and_die("bad regex '%s'", gl->pattern); #endif } #if !ENABLE_EXTRA_COMPAT gl->matched_range.rm_so = 0; gl->matched_range.rm_eo = 0; #endif if ( #if !ENABLE_EXTRA_COMPAT regexec(&gl->compiled_regex, line, 1, &gl->matched_range, 0) == 0 #else re_search(&gl->compiled_regex, line, line_len, /*start:*/ 0, /*range:*/ line_len, &gl->matched_range) >= 0 #endif ) { if (option_mask32 & OPT_x) { found = (gl->matched_range.rm_so == 0 && line[gl->matched_range.rm_eo] == '\0'); } else if (!(option_mask32 & OPT_w)) { found = 1; } else { char c = ' '; if (gl->matched_range.rm_so) c = line[gl->matched_range.rm_so - 1]; if (!isalnum(c) && c != '_') { c = line[gl->matched_range.rm_eo]; if (!c || (!isalnum(c) && c != '_')) found = 1; } } } } /* If it's non-inverted search, we can stop * at first match */ if (found && !invert_search) goto do_found; pattern_ptr = pattern_ptr->link; } /* while (pattern_ptr) */ if (found ^ invert_search) { do_found: /* keep track of matches */ nmatches++; /* quiet/print (non)matching file names only? */ if (option_mask32 & (OPT_q|OPT_l|OPT_L)) { free(line); /* we don't need line anymore */ if (BE_QUIET) { /* manpage says about -q: * "exit immediately with zero status * if any match is found, * even if errors were detected" */ exit(EXIT_SUCCESS); } /* if we're just printing filenames, we stop after the first match */ if (PRINT_FILES_WITH_MATCHES) { puts(cur_file); /* fall through to "return 1" */ } /* OPT_L aka PRINT_FILES_WITHOUT_MATCHES: return early */ return 1; /* one match */ } #if ENABLE_FEATURE_GREP_CONTEXT /* Were we printing context and saw next (unwanted) match? */ if ((option_mask32 & OPT_m) && nmatches > max_matches) break; #endif /* print the matched line */ if (PRINT_MATCH_COUNTS == 0) { #if ENABLE_FEATURE_GREP_CONTEXT int prevpos = (curpos == 0) ? 
lines_before - 1 : curpos - 1; /* if we were told to print 'before' lines and there is at least * one line in the circular buffer, print them */ if (lines_before && before_buf[prevpos] != NULL) { int first_buf_entry_line_num = linenum - lines_before; /* advance to the first entry in the circular buffer, and * figure out what the line number of the first line in the * buffer is */ idx = curpos; while (before_buf[idx] == NULL) { idx = (idx + 1) % lines_before; first_buf_entry_line_num++; } /* now print each line in the buffer, clearing them as we go */ while (before_buf[idx] != NULL) { print_line(before_buf[idx], before_buf_size[idx], first_buf_entry_line_num, '-'); free(before_buf[idx]); before_buf[idx] = NULL; idx = (idx + 1) % lines_before; first_buf_entry_line_num++; } } /* make a note that we need to print 'after' lines */ print_n_lines_after = lines_after; #endif if (option_mask32 & OPT_o) { if (FGREP_FLAG) { /* -Fo just prints the pattern * (unless -v: -Fov doesn't print anything at all) */ if (found) print_line(gl->pattern, strlen(gl->pattern), linenum, ':'); } else while (1) { unsigned start = gl->matched_range.rm_so; unsigned end = gl->matched_range.rm_eo; unsigned len = end - start; char old = line[end]; line[end] = '\0'; /* Empty match is not printed: try "echo test | grep -o ''" */ if (len != 0) print_line(line + start, len, linenum, ':'); if (old == '\0') break; line[end] = old; if (len == 0) end++; #if !ENABLE_EXTRA_COMPAT if (regexec(&gl->compiled_regex, line + end, 1, &gl->matched_range, REG_NOTBOL) != 0) break; gl->matched_range.rm_so += end; gl->matched_range.rm_eo += end; #else if (re_search(&gl->compiled_regex, line, line_len, end, line_len - end, &gl->matched_range) < 0) break; #endif } } else { print_line(line, line_len, linenum, ':'); } } } #if ENABLE_FEATURE_GREP_CONTEXT else { /* no match */ /* if we need to print some context lines after the last match, do so */ if (print_n_lines_after) { print_line(line, strlen(line), linenum, '-'); print_n_lines_after--; } else if (lines_before) { /* Add the line to the circular 'before' buffer */ free(before_buf[curpos]); before_buf[curpos] = line; IF_EXTRA_COMPAT(before_buf_size[curpos] = line_len;) curpos = (curpos + 1) % lines_before; /* avoid free(line) - we took the line */ line = NULL; } } #endif /* ENABLE_FEATURE_GREP_CONTEXT */ #if !ENABLE_EXTRA_COMPAT free(line); #endif /* Did we print all context after last requested match? 
*/ if ((option_mask32 & OPT_m) && !print_n_lines_after && nmatches == max_matches ) { break; } } /* while (read line) */ /* special-case file post-processing for options where we don't print line * matches, just filenames and possibly match counts */ /* grep -c: print [filename:]count, even if count is zero */ if (PRINT_MATCH_COUNTS) { if (print_filename) printf("%s:", cur_file); printf("%d\n", nmatches); } /* grep -L: print just the filename */ if (PRINT_FILES_WITHOUT_MATCHES) { /* nmatches is zero, no need to check it: * we return 1 early if we detected a match * and PRINT_FILES_WITHOUT_MATCHES is set */ puts(cur_file); } return nmatches; } #if ENABLE_FEATURE_CLEAN_UP #define new_grep_list_data(p, m) add_grep_list_data(p, m) static char *add_grep_list_data(char *pattern, int flg_used_mem) #else #define new_grep_list_data(p, m) add_grep_list_data(p) static char *add_grep_list_data(char *pattern) #endif { grep_list_data_t *gl = xzalloc(sizeof(*gl)); gl->pattern = pattern; #if ENABLE_FEATURE_CLEAN_UP gl->flg_mem_alocated_compiled = flg_used_mem; #else /*gl->flg_mem_alocated_compiled = 0;*/ #endif return (char *)gl; } static void load_regexes_from_file(llist_t *fopt) { while (fopt) { char *line; FILE *fp; llist_t *cur = fopt; char *ffile = cur->data; fopt = cur->link; free(cur); fp = xfopen_stdin(ffile); while ((line = xmalloc_fgetline(fp)) != NULL) { llist_add_to(&pattern_head, new_grep_list_data(line, ALLOCATED)); } fclose_if_not_stdin(fp); } } static int FAST_FUNC file_action_grep(const char *filename, struct stat *statbuf UNUSED_PARAM, void* matched, int depth UNUSED_PARAM) { FILE *file = fopen_for_read(filename); if (file == NULL) { if (!SUPPRESS_ERR_MSGS) bb_simple_perror_msg(filename); open_errors = 1; return 0; } cur_file = filename; *(int*)matched += grep_file(file); fclose(file); return 1; } static int grep_dir(const char *dir) { int matched = 0; recursive_action(dir, /* recurse=yes */ ACTION_RECURSE | /* followLinks=no */ /* depthFirst=yes */ ACTION_DEPTHFIRST, /* fileAction= */ file_action_grep, /* dirAction= */ NULL, /* userData= */ &matched, /* depth= */ 0); return matched; } int grep_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; int grep_main(int argc UNUSED_PARAM, char **argv) { FILE *file; int matched; llist_t *fopt = NULL; /* do normal option parsing */ #if ENABLE_FEATURE_GREP_CONTEXT int Copt, opts; /* -H unsets -h; -C unsets -A,-B; -e,-f are lists; * -m,-A,-B,-C have numeric param */ opt_complementary = "H-h:C-AB:e::f::m+:A+:B+:C+"; opts = getopt32(argv, OPTSTR_GREP, &pattern_head, &fopt, &max_matches, &lines_after, &lines_before, &Copt); if (opts & OPT_C) { /* -C unsets prev -A and -B, but following -A or -B may override it */ if (!(opts & OPT_A)) /* not overridden */ lines_after = Copt; if (!(opts & OPT_B)) /* not overridden */ lines_before = Copt; } /* sanity checks */ if (opts & (OPT_c|OPT_q|OPT_l|OPT_L)) { option_mask32 &= ~OPT_n; lines_before = 0; lines_after = 0; } else if (lines_before > 0) { if ((unsigned) lines_before > (unsigned) (INT_MAX / sizeof(long long))) lines_before = INT_MAX / sizeof(long long); /* overflow in (lines_before * sizeof(x)) is prevented (above) */ before_buf = xzalloc(lines_before * sizeof(before_buf[0])); IF_EXTRA_COMPAT(before_buf_size = xzalloc(lines_before * sizeof(before_buf_size[0]));) } #else /* with auto sanity checks */ /* -H unsets -h; -c,-q or -l unset -n; -e,-f are lists; -m N */ opt_complementary = "H-h:c-n:q-n:l-n:e::f::m+"; getopt32(argv, OPTSTR_GREP, &pattern_head, &fopt, &max_matches); #endif invert_search = 
((option_mask32 & OPT_v) != 0); /* 0 | 1 */ { /* convert char **argv to grep_list_data_t */ llist_t *cur; for (cur = pattern_head; cur; cur = cur->link) cur->data = new_grep_list_data(cur->data, 0); } if (option_mask32 & OPT_f) { load_regexes_from_file(fopt); if (!pattern_head) { /* -f EMPTY_FILE? */ /* GNU grep treats it as "nothing matches" */ llist_add_to(&pattern_head, new_grep_list_data((char*) "", 0)); invert_search ^= 1; } } if (ENABLE_FEATURE_GREP_FGREP_ALIAS && applet_name[0] == 'f') option_mask32 |= OPT_F; #if !ENABLE_EXTRA_COMPAT if (!(option_mask32 & (OPT_o | OPT_w))) reflags = REG_NOSUB; #endif if (ENABLE_FEATURE_GREP_EGREP_ALIAS && (applet_name[0] == 'e' || (option_mask32 & OPT_E)) ) { reflags |= REG_EXTENDED; } #if ENABLE_EXTRA_COMPAT else { reflags = RE_SYNTAX_GREP; } #endif if (option_mask32 & OPT_i) { #if !ENABLE_EXTRA_COMPAT reflags |= REG_ICASE; #else int i; case_fold = xmalloc(256); for (i = 0; i < 256; i++) case_fold[i] = (unsigned char)i; for (i = 'a'; i <= 'z'; i++) case_fold[i] = (unsigned char)(i - ('a' - 'A')); #endif } argv += optind; /* if we didn't get a pattern from -e and no command file was specified, * first parameter should be the pattern. no pattern, no worky */ if (pattern_head == NULL) { char *pattern; if (*argv == NULL) bb_show_usage(); pattern = new_grep_list_data(*argv++, 0); llist_add_to(&pattern_head, pattern); } /* argv[0..(argc-1)] should be names of file to grep through. If * there is more than one file to grep, we will print the filenames. */ if (argv[0] && argv[1]) print_filename = 1; /* -H / -h of course override */ if (option_mask32 & OPT_H) print_filename = 1; if (option_mask32 & OPT_h) print_filename = 0; /* If no files were specified, or '-' was specified, take input from * stdin. Otherwise, we grep through all the files specified. */ matched = 0; do { cur_file = *argv; file = stdin; if (!cur_file || LONE_DASH(cur_file)) { cur_file = "(standard input)"; } else { if (option_mask32 & OPT_r) { struct stat st; if (stat(cur_file, &st) == 0 && S_ISDIR(st.st_mode)) { if (!(option_mask32 & OPT_h)) print_filename = 1; matched += grep_dir(cur_file); goto grep_done; } } /* else: fopen(dir) will succeed, but reading won't */ file = fopen_for_read(cur_file); if (file == NULL) { if (!SUPPRESS_ERR_MSGS) bb_simple_perror_msg(cur_file); open_errors = 1; continue; } } matched += grep_file(file); fclose_if_not_stdin(file); grep_done: ; } while (*argv && *++argv); /* destroy all the elments in the pattern list */ if (ENABLE_FEATURE_CLEAN_UP) { while (pattern_head) { llist_t *pattern_head_ptr = pattern_head; grep_list_data_t *gl = (grep_list_data_t *)pattern_head_ptr->data; pattern_head = pattern_head->link; if (gl->flg_mem_alocated_compiled & ALLOCATED) free(gl->pattern); if (gl->flg_mem_alocated_compiled & COMPILED) regfree(&gl->compiled_regex); free(gl); free(pattern_head_ptr); } } /* 0 = success, 1 = failed, 2 = error */ if (open_errors) return 2; return !matched; /* invert return value: 0 = success, 1 = failed */ }
gpl-2.0
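The -w handling inside grep_file() above decides whether a regex hit counts as a whole word by peeking at the characters on either side of the match range. The helper below is a stand-alone sketch of that same boundary test; the function name and test strings are illustrative, not busybox code.

/* sketch only: mirrors busybox grep's -w boundary check, not taken from it */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Return 1 if the match spanning [so, eo) in line is bounded by
 * non-word characters (or by the ends of the line), as grep -w requires. */
static int is_whole_word(const char *line, size_t so, size_t eo)
{
	char c = so ? line[so - 1] : ' ';

	if (isalnum((unsigned char)c) || c == '_')
		return 0;
	c = line[eo];
	return c == '\0' || (!isalnum((unsigned char)c) && c != '_');
}

int main(void)
{
	const char *line = "foobar foo_baz foo";
	const char *pat = "foo";
	const char *p = strstr(line, pat);

	while (p) {
		size_t so = p - line;
		size_t eo = so + strlen(pat);

		printf("offset %2zu: %s\n", so,
		       is_whole_word(line, so, eo) ? "word match" : "substring only");
		p = strstr(p + 1, pat);
	}
	return 0;
}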
Xilinx/glibc
libio/oldiofsetpos.c
16
2106
/* Copyright (C) 1993-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. As a special exception, if you link the code in this file with files compiled with a GNU compiler to produce an executable, that does not cause the resulting executable to be covered by the GNU Lesser General Public License. This exception does not however invalidate any other reasons why the executable file might be covered by the GNU Lesser General Public License. This exception applies to code released by its copyright holders in files containing the exception. */ #include <libioP.h> #include <errno.h> #include <shlib-compat.h> #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_2) int attribute_compat_text_section _IO_old_fsetpos (fp, posp) _IO_FILE *fp; const _IO_fpos_t *posp; { int result; CHECK_FILE (fp, EOF); _IO_acquire_lock (fp); if (_IO_seekpos_unlocked (fp, posp->__pos, _IOS_INPUT|_IOS_OUTPUT) == _IO_pos_BAD) { /* ANSI explicitly requires setting errno to a positive value on failure. */ #ifdef EIO if (errno == 0) __set_errno (EIO); #endif result = EOF; } else result = 0; _IO_release_lock (fp); return result; } #ifdef weak_alias compat_symbol (libc, _IO_old_fsetpos, _IO_fsetpos, GLIBC_2_0); strong_alias (_IO_old_fsetpos, __old_fsetpos) compat_symbol (libc, __old_fsetpos, fsetpos, GLIBC_2_0); #endif #endif
gpl-2.0
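_IO_old_fsetpos above is the glibc-2.0 compatibility entry point behind the ISO C fsetpos(): it returns 0 on success and EOF with errno set on failure. As a reminder of the calling convention it implements, here is a minimal user-level fgetpos/fsetpos round trip; this is ordinary ISO C, not glibc-internal code.

/* plain ISO C usage sketch, not glibc internals */
#include <stdio.h>

int main(void)
{
	FILE *fp = tmpfile();
	fpos_t pos;

	if (fp == NULL)
		return 1;
	fputs("hello", fp);
	if (fgetpos(fp, &pos) != 0) {	/* remember the position after "hello" */
		perror("fgetpos");
		return 1;
	}
	fputs(" world", fp);
	if (fsetpos(fp, &pos) != 0) {	/* seek back to the saved position */
		perror("fsetpos");
		return 1;
	}
	fputs("!!!!!!", fp);		/* overwrite " world" in place */
	fclose(fp);
	return 0;
}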
Exadios/XCSoar-the-library
src/Dialogs/Task/Widgets/KeyholeZoneEditWidget.cpp
16
2446
/* Copyright_License { XCSoar Glide Computer - http://www.xcsoar.org/ Copyright (C) 2000-2016 The XCSoar Project A detailed list of copyright holders can be found in the file "AUTHORS". This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. } */ #include "KeyholeZoneEditWidget.hpp" #include "Engine/Task/ObservationZones/KeyholeZone.hpp" #include "Language/Language.hpp" enum Controls { RADIUS, INNER_RADIUS, ANGLE, }; KeyholeZoneEditWidget::KeyholeZoneEditWidget(KeyholeZone &_oz) :ObservationZoneEditWidget(_oz) {} void KeyholeZoneEditWidget::Prepare(ContainerWindow &parent, const PixelRect &rc) { ObservationZoneEditWidget::Prepare(parent, rc); AddFloat(_("Radius"), _("Radius of the OZ sector."), _T("%.1f %s"), _T("%.1f"), 0.1, 200, 1, true, UnitGroup::DISTANCE, GetObject().GetRadius(), this); AddFloat(_("Inner radius"), _("Inner radius of the OZ sector."), _T("%.1f %s"), _T("%.1f"), 0.1, 100, 1, true, UnitGroup::DISTANCE, GetObject().GetInnerRadius(), this); AddAngle(_("Angle"), nullptr, GetObject().GetSectorAngle(), 10, true, this); } bool KeyholeZoneEditWidget::Save(bool &_changed) { bool changed = false; auto radius = GetObject().GetRadius(); if (SaveValue(RADIUS, UnitGroup::DISTANCE, radius)) { GetObject().SetRadius(radius); changed = true; } auto inner_radius = GetObject().GetInnerRadius(); if (SaveValue(INNER_RADIUS, UnitGroup::DISTANCE, inner_radius)) { GetObject().SetInnerRadius(inner_radius); changed = true; } Angle angle = GetObject().GetSectorAngle(); if (SaveValue(ANGLE, angle)) { GetObject().SetSectorAngle(angle); changed = true; } _changed |= changed; return true; }
gpl-2.0
andreturket/android_kernel_d1_p1
arch/x86/mm/init.c
16
11493
#include <linux/gfp.h> #include <linux/initrd.h> #include <linux/ioport.h> #include <linux/swap.h> #include <linux/memblock.h> #include <asm/cacheflush.h> #include <asm/e820.h> #include <asm/init.h> #include <asm/page.h> #include <asm/page_types.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/system.h> #include <asm/tlbflush.h> #include <asm/tlb.h> #include <asm/proto.h> unsigned long __initdata pgt_buf_start; unsigned long __meminitdata pgt_buf_end; unsigned long __meminitdata pgt_buf_top; int after_bootmem; int direct_gbpages #ifdef CONFIG_DIRECT_GBPAGES = 1 #endif ; struct map_range { unsigned long start; unsigned long end; unsigned page_size_mask; }; /* * First calculate space needed for kernel direct mapping page tables to cover * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB * pages. Then find enough contiguous space for those page tables. */ static void __init find_early_table_space(struct map_range *mr, int nr_range) { int i; unsigned long puds = 0, pmds = 0, ptes = 0, tables; unsigned long start = 0, good_end; unsigned long pgd_extra = 0; phys_addr_t base; for (i = 0; i < nr_range; i++) { unsigned long range, extra; if ((mr[i].end >> PGDIR_SHIFT) - (mr[i].start >> PGDIR_SHIFT)) pgd_extra++; range = mr[i].end - mr[i].start; puds += (range + PUD_SIZE - 1) >> PUD_SHIFT; if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) { extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT); pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT; } else { pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT; } if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) { extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT); #ifdef CONFIG_X86_32 extra += PMD_SIZE; #endif ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; } else { ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT; } } tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE); tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE); tables += (pgd_extra * PAGE_SIZE); #ifdef CONFIG_X86_32 /* for fixmap */ tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE); #endif good_end = max_pfn_mapped << PAGE_SHIFT; base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE); if (!base) panic("Cannot find space for the kernel page tables"); pgt_buf_start = base >> PAGE_SHIFT; pgt_buf_end = pgt_buf_start; pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT); printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n", mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT, (pgt_buf_top << PAGE_SHIFT) - 1); } void __init native_pagetable_reserve(u64 start, u64 end) { memblock_reserve(start, end - start); } #ifdef CONFIG_X86_32 #define NR_RANGE_MR 3 #else /* CONFIG_X86_64 */ #define NR_RANGE_MR 5 #endif static int __meminit save_mr(struct map_range *mr, int nr_range, unsigned long start_pfn, unsigned long end_pfn, unsigned long page_size_mask) { if (start_pfn < end_pfn) { if (nr_range >= NR_RANGE_MR) panic("run out of range for init_memory_mapping\n"); mr[nr_range].start = start_pfn<<PAGE_SHIFT; mr[nr_range].end = end_pfn<<PAGE_SHIFT; mr[nr_range].page_size_mask = page_size_mask; nr_range++; } return nr_range; } /* * Setup the direct mapping of the physical memory at PAGE_OFFSET. * This runs before bootmem is initialized and gets pages directly from * the physical memory. To access them they are temporarily mapped. 
*/ unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned long end) { unsigned long page_size_mask = 0; unsigned long start_pfn, end_pfn; unsigned long ret = 0; unsigned long pos; struct map_range mr[NR_RANGE_MR]; int nr_range, i; int use_pse, use_gbpages; printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end); #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK) /* * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages. * This will simplify cpa(), which otherwise needs to support splitting * large pages into small in interrupt context, etc. */ use_pse = use_gbpages = 0; #else use_pse = cpu_has_pse; use_gbpages = direct_gbpages; #endif /* Enable PSE if available */ if (cpu_has_pse) set_in_cr4(X86_CR4_PSE); /* Enable PGE if available */ if (cpu_has_pge) { set_in_cr4(X86_CR4_PGE); __supported_pte_mask |= _PAGE_GLOBAL; } if (use_gbpages) page_size_mask |= 1 << PG_LEVEL_1G; if (use_pse) page_size_mask |= 1 << PG_LEVEL_2M; memset(mr, 0, sizeof(mr)); nr_range = 0; /* head if not big page alignment ? */ start_pfn = start >> PAGE_SHIFT; pos = start_pfn << PAGE_SHIFT; #ifdef CONFIG_X86_32 /* * Don't use a large page for the first 2/4MB of memory * because there are often fixed size MTRRs in there * and overlapping MTRRs into large pages can cause * slowdowns. */ if (pos == 0) end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT); else end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT); #else /* CONFIG_X86_64 */ end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT); #endif if (end_pfn > (end >> PAGE_SHIFT)) end_pfn = end >> PAGE_SHIFT; if (start_pfn < end_pfn) { nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); pos = end_pfn << PAGE_SHIFT; } /* big page (2M) range */ start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT); #ifdef CONFIG_X86_32 end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT); #else /* CONFIG_X86_64 */ end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT); if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT))) end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)); #endif if (start_pfn < end_pfn) { nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, page_size_mask & (1<<PG_LEVEL_2M)); pos = end_pfn << PAGE_SHIFT; } #ifdef CONFIG_X86_64 /* big page (1G) range */ start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT); end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT); if (start_pfn < end_pfn) { nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, page_size_mask & ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G))); pos = end_pfn << PAGE_SHIFT; } /* tail is not big page (1G) alignment */ start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT); end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT); if (start_pfn < end_pfn) { nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, page_size_mask & (1<<PG_LEVEL_2M)); pos = end_pfn << PAGE_SHIFT; } #endif /* tail is not big page (2M) alignment */ start_pfn = pos>>PAGE_SHIFT; end_pfn = end>>PAGE_SHIFT; nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); /* try to merge same page size and continuous */ for (i = 0; nr_range > 1 && i < nr_range - 1; i++) { unsigned long old_start; if (mr[i].end != mr[i+1].start || mr[i].page_size_mask != mr[i+1].page_size_mask) continue; /* move it */ old_start = mr[i].start; memmove(&mr[i], &mr[i+1], (nr_range - 1 - i) * sizeof(struct map_range)); mr[i--].start = old_start; nr_range--; } for (i = 0; i < nr_range; i++) 
printk(KERN_DEBUG " %010lx - %010lx page %s\n", mr[i].start, mr[i].end, (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":( (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k")); /* * Find space for the kernel direct mapping tables. * * Later we should allocate these tables in the local node of the * memory mapped. Unfortunately this is done currently before the * nodes are discovered. */ if (!after_bootmem) find_early_table_space(mr, nr_range); for (i = 0; i < nr_range; i++) ret = kernel_physical_mapping_init(mr[i].start, mr[i].end, mr[i].page_size_mask); #ifdef CONFIG_X86_32 early_ioremap_page_table_range_init(); load_cr3(swapper_pg_dir); #endif __flush_tlb_all(); /* * Reserve the kernel pagetable pages we used (pgt_buf_start - * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top) * so that they can be reused for other purposes. * * On native it just means calling memblock_reserve, on Xen it also * means marking RW the pagetable pages that we allocated before * but that haven't been used. * * In fact on xen we mark RO the whole range pgt_buf_start - * pgt_buf_top, because we have to make sure that when * init_memory_mapping reaches the pagetable pages area, it maps * RO all the pagetable pages, including the ones that are beyond * pgt_buf_end at that time. */ if (!after_bootmem && pgt_buf_end > pgt_buf_start) x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start), PFN_PHYS(pgt_buf_end)); if (!after_bootmem) early_memtest(start, end); return ret >> PAGE_SHIFT; } /* * devmem_is_allowed() checks to see if /dev/mem access to a certain address * is valid. The argument is a physical page number. * * * On x86, access has to be given to the first megabyte of ram because that area * contains bios code and data regions used by X and dosemu and similar apps. * Access has to be given to non-kernel-ram areas as well, these contain the PCI * mmio resources as well as potential bios/acpi data regions. */ int devmem_is_allowed(unsigned long pagenr) { if (pagenr <= 256) return 1; if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) return 0; if (!page_is_ram(pagenr)) return 1; return 0; } void free_init_pages(char *what, unsigned long begin, unsigned long end) { unsigned long addr; unsigned long begin_aligned, end_aligned; /* Make sure boundaries are page aligned */ begin_aligned = PAGE_ALIGN(begin); end_aligned = end & PAGE_MASK; if (WARN_ON(begin_aligned != begin || end_aligned != end)) { begin = begin_aligned; end = end_aligned; } if (begin >= end) return; addr = begin; /* * If debugging page accesses then do not free this memory but * mark them not present - any buggy init-section access will * create a kernel page fault: */ #ifdef CONFIG_DEBUG_PAGEALLOC printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n", begin, end); set_memory_np(begin, (end - begin) >> PAGE_SHIFT); #else /* * We just marked the kernel text read only above, now that * we are going to free part of that, we need to make that * writeable and non-executable first. 
*/ set_memory_nx(begin, (end - begin) >> PAGE_SHIFT); set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); for (; addr < end; addr += PAGE_SIZE) { ClearPageReserved(virt_to_page(addr)); init_page_count(virt_to_page(addr)); memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); free_page(addr); totalram_pages++; } #endif } void free_initmem(void) { free_init_pages("unused kernel memory", (unsigned long)(&__init_begin), (unsigned long)(&__init_end)); } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { /* * end could be not aligned, and We can not align that, * decompresser could be confused by aligned initrd_end * We already reserve the end partial page before in * - i386_start_kernel() * - x86_64_start_kernel() * - relocate_initrd() * So here We can do PAGE_ALIGN() safely to get partial page to be freed */ free_init_pages("initrd memory", start, PAGE_ALIGN(end)); } #endif
gpl-2.0
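find_early_table_space() above sizes the direct-mapping page tables by counting how many PUD, PMD and PTE entries a physical range needs before reserving that much memory. The user-space sketch below redoes that arithmetic for a 1 GiB range mapped entirely with 4 KiB pages; the shift constants are the usual x86-64 values and the helper name is made up for illustration.

/* back-of-the-envelope sketch, not kernel code */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21
#define PUD_SHIFT  30

static unsigned long entries_for(unsigned long range, unsigned shift)
{
	/* round the range up to whole units of (1 << shift) */
	return (range + (1UL << shift) - 1) >> shift;
}

int main(void)
{
	unsigned long range = 1UL << 30;	/* 1 GiB, all 4 KiB pages */
	unsigned long puds = entries_for(range, PUD_SHIFT);
	unsigned long pmds = entries_for(range, PMD_SHIFT);
	unsigned long ptes = entries_for(range, PAGE_SHIFT);
	unsigned long bytes = (puds + pmds + ptes) * 8;	/* 8-byte entries */

	printf("%lu PUD + %lu PMD + %lu PTE entries -> ~%lu KiB of tables\n",
	       puds, pmds, ptes, bytes >> 10);
	return 0;
}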
ysei/linux-2.4.32-ipod
arch/ppc/kernel/process.c
16
16238
/* * linux/arch/ppc/kernel/process.c * * Derived from "arch/i386/kernel/process.c" * Copyright (C) 1995 Linus Torvalds * * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and * Paul Mackerras (paulus@cs.anu.edu.au) * * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/config.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/smp_lock.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/user.h> #include <linux/elf.h> #include <linux/init.h> #include <linux/prctl.h> #include <asm/pgtable.h> #include <asm/uaccess.h> #include <asm/system.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/mmu.h> #include <asm/prom.h> int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs); extern unsigned long _get_SP(void); struct task_struct *last_task_used_math = NULL; struct task_struct *last_task_used_altivec = NULL; static struct fs_struct init_fs = INIT_FS; static struct files_struct init_files = INIT_FILES; static struct signal_struct init_signals = INIT_SIGNALS; struct mm_struct init_mm = INIT_MM(init_mm); /* this is 16-byte aligned because it has a stack in it */ union task_union __attribute((aligned(16))) init_task_union = { INIT_TASK(init_task_union.task) }; /* only used to get secondary processor up */ struct task_struct *current_set[NR_CPUS] = {&init_task, }; #undef SHOW_TASK_SWITCHES #undef CHECK_STACK #if defined(CHECK_STACK) unsigned long kernel_stack_top(struct task_struct *tsk) { return ((unsigned long)tsk) + sizeof(union task_union); } unsigned long task_top(struct task_struct *tsk) { return ((unsigned long)tsk) + sizeof(struct task_struct); } /* check to make sure the kernel stack is healthy */ int check_stack(struct task_struct *tsk) { unsigned long stack_top = kernel_stack_top(tsk); unsigned long tsk_top = task_top(tsk); int ret = 0; #if 0 /* check thread magic */ if ( tsk->thread.magic != THREAD_MAGIC ) { ret |= 1; printk("thread.magic bad: %08x\n", tsk->thread.magic); } #endif if ( !tsk ) printk("check_stack(): tsk bad tsk %p\n",tsk); /* check if stored ksp is bad */ if ( (tsk->thread.ksp > stack_top) || (tsk->thread.ksp < tsk_top) ) { printk("stack out of bounds: %s/%d\n" " tsk_top %08lx ksp %08lx stack_top %08lx\n", tsk->comm,tsk->pid, tsk_top, tsk->thread.ksp, stack_top); ret |= 2; } /* check if stack ptr RIGHT NOW is bad */ if ( (tsk == current) && ((_get_SP() > stack_top ) || (_get_SP() < tsk_top)) ) { printk("current stack ptr out of bounds: %s/%d\n" " tsk_top %08lx sp %08lx stack_top %08lx\n", current->comm,current->pid, tsk_top, _get_SP(), stack_top); ret |= 4; } #if 0 /* check amount of free stack */ for ( i = (unsigned long *)task_top(tsk) ; i < kernel_stack_top(tsk) ; i++ ) { if ( !i ) printk("check_stack(): i = %p\n", i); if ( *i != 0 ) { /* only notify if it's less than 900 bytes */ if ( (i - (unsigned long *)task_top(tsk)) < 900 ) printk("%d bytes free on stack\n", i - task_top(tsk)); break; } } #endif if (ret) { panic("bad kernel stack"); } return(ret); } #endif /* defined(CHECK_STACK) */ #ifdef CONFIG_ALTIVEC int dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs) { if (regs->msr & MSR_VEC) 
giveup_altivec(current); memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs)); return 1; } void enable_kernel_altivec(void) { #ifdef CONFIG_SMP if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) giveup_altivec(current); else giveup_altivec(NULL); /* just enable AltiVec for kernel - force */ #else giveup_altivec(last_task_used_altivec); #endif /* __SMP __ */ } #endif /* CONFIG_ALTIVEC */ void enable_kernel_fp(void) { #ifdef CONFIG_SMP if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) giveup_fpu(current); else giveup_fpu(NULL); /* just enables FP for kernel */ #else giveup_fpu(last_task_used_math); #endif /* CONFIG_SMP */ } int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs) { if (regs->msr & MSR_FP) giveup_fpu(current); memcpy(fpregs, &current->thread.fpr[0], sizeof(*fpregs)); return 1; } void _switch_to(struct task_struct *prev, struct task_struct *new, struct task_struct **last) { struct thread_struct *new_thread, *old_thread; unsigned long s; __save_flags(s); __cli(); #if CHECK_STACK check_stack(prev); check_stack(new); #endif #ifdef CONFIG_SMP /* avoid complexity of lazy save/restore of fpu * by just saving it every time we switch out if * this task used the fpu during the last quantum. * * If it tries to use the fpu again, it'll trap and * reload its fp regs. So we don't have to do a restore * every switch, just a save. * -- Cort */ if ( prev->thread.regs && (prev->thread.regs->msr & MSR_FP) ) giveup_fpu(prev); #ifdef CONFIG_ALTIVEC /* * If the previous thread used altivec in the last quantum * (thus changing altivec regs) then save them. * We used to check the VRSAVE register but not all apps * set it, so we don't rely on it now (and in fact we need * to save & restore VSCR even if VRSAVE == 0). -- paulus * * On SMP we always save/restore altivec regs just to avoid the * complexity of changing processors. * -- Cort */ if ((prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))) giveup_altivec(prev); #endif /* CONFIG_ALTIVEC */ #endif /* CONFIG_SMP */ current_set[smp_processor_id()] = new; /* Avoid the trap. On smp this this never happens since * we don't set last_task_used_altivec -- Cort */ if (new->thread.regs && last_task_used_altivec == new) new->thread.regs->msr |= MSR_VEC; new_thread = &new->thread; old_thread = &current->thread; *last = _switch(old_thread, new_thread); __restore_flags(s); } void show_regs(struct pt_regs * regs) { int i; printk("NIP: %08lX XER: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx %s\n", regs->nip, regs->xer, regs->link, regs->gpr[1], regs,regs->trap, print_tainted()); printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n", regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0, regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0, regs->msr&MSR_IR ? 1 : 0, regs->msr&MSR_DR ? 1 : 0); #ifdef CONFIG_4xx /* * TRAP 0x800 is the hijacked FPU unavailable exception vector * on 40x used to implement the heavyweight data access * functionality. It is an emulated value (like all trap * vectors) on 440. 
*/ if (regs->trap == 0x300 || regs->trap == 0x600 || regs->trap == 0x800) printk("DEAR: %08lX, ESR: %08lX\n", regs->dar, regs->dsisr); #else if (regs->trap == 0x300 || regs->trap == 0x600) printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr); #endif printk("TASK = %p[%d] '%s' ", current, current->pid, current->comm); printk("Last syscall: %ld ", current->thread.last_syscall); printk("\nlast math %p last altivec %p", last_task_used_math, last_task_used_altivec); #if defined(CONFIG_4xx) && defined(DCRN_PLB0_BEAR) printk("\nPLB0: bear= 0x%8.8x acr= 0x%8.8x besr= 0x%8.8x\n", mfdcr(DCRN_PLB0_BEAR), mfdcr(DCRN_PLB0_ACR), mfdcr(DCRN_PLB0_BESR)); #endif #if defined(CONFIG_4xx) && defined(DCRN_POB0_BEAR) printk("PLB0 to OPB: bear= 0x%8.8x besr0= 0x%8.8x besr1= 0x%8.8x\n", mfdcr(DCRN_POB0_BEAR), mfdcr(DCRN_POB0_BESR0), mfdcr(DCRN_POB0_BESR1)); #endif #ifdef CONFIG_SMP printk(" CPU: %d", current->processor); #endif /* CONFIG_SMP */ printk("\n"); for (i = 0; i < 32; i++) { long r; if ((i % 8) == 0) { printk("GPR%02d: ", i); } if ( __get_user(r, &(regs->gpr[i])) ) goto out; printk("%08lX ", r); if ((i % 8) == 7) { printk("\n"); } } out: print_backtrace((unsigned long *)regs->gpr[1]); } void exit_thread(void) { if (last_task_used_math == current) last_task_used_math = NULL; if (last_task_used_altivec == current) last_task_used_altivec = NULL; } void flush_thread(void) { if (last_task_used_math == current) last_task_used_math = NULL; if (last_task_used_altivec == current) last_task_used_altivec = NULL; } void release_thread(struct task_struct *t) { } /* * Copy a thread.. */ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, unsigned long unused, struct task_struct *p, struct pt_regs *regs) { struct pt_regs *childregs, *kregs; extern void ret_from_fork(void); unsigned long sp = (unsigned long)p + sizeof(union task_union); unsigned long childframe; /* Copy registers */ sp -= sizeof(struct pt_regs); childregs = (struct pt_regs *) sp; *childregs = *regs; if ((childregs->msr & MSR_PR) == 0) { /* for kernel thread, set `current' and stackptr in new task */ childregs->gpr[1] = sp + sizeof(struct pt_regs); childregs->gpr[2] = (unsigned long) p; p->thread.regs = NULL; /* no user register state */ } else p->thread.regs = childregs; childregs->gpr[3] = 0; /* Result from fork() */ sp -= STACK_FRAME_OVERHEAD; childframe = sp; /* * The way this works is that at some point in the future * some task will call _switch to switch to the new task. * That will pop off the stack frame created below and start * the new task running at ret_from_fork. The new task will * do some house keeping and then return from the fork or clone * system call, using the stack frame created above. 
*/ sp -= sizeof(struct pt_regs); kregs = (struct pt_regs *) sp; sp -= STACK_FRAME_OVERHEAD; p->thread.ksp = sp; kregs->nip = (unsigned long)ret_from_fork; /* * copy fpu info - assume lazy fpu switch now always * -- Cort */ if (regs->msr & MSR_FP) { giveup_fpu(current); childregs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1); } memcpy(&p->thread.fpr, &current->thread.fpr, sizeof(p->thread.fpr)); p->thread.fpscr = current->thread.fpscr; #ifdef CONFIG_ALTIVEC /* * copy altiVec info - assume lazy altiVec switch * - kumar */ if (regs->msr & MSR_VEC) giveup_altivec(current); memcpy(&p->thread.vr, &current->thread.vr, sizeof(p->thread.vr)); p->thread.vscr = current->thread.vscr; childregs->msr &= ~MSR_VEC; #endif /* CONFIG_ALTIVEC */ p->thread.last_syscall = -1; return 0; } /* * Set up a thread for executing a new program */ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp) { set_fs(USER_DS); memset(regs->gpr, 0, sizeof(regs->gpr)); memset(&regs->ctr, 0, 5 * sizeof(regs->ctr)); regs->nip = nip; regs->gpr[1] = sp; regs->msr = MSR_USER; if (last_task_used_math == current) last_task_used_math = 0; if (last_task_used_altivec == current) last_task_used_altivec = 0; memset(current->thread.fpr, 0, sizeof(current->thread.fpr)); current->thread.fpscr = 0; #ifdef CONFIG_ALTIVEC memset(current->thread.vr, 0, sizeof(current->thread.vr)); memset(&current->thread.vscr, 0, sizeof(current->thread.vscr)); current->thread.vrsave = 0; current->thread.used_vr = 0; #endif /* CONFIG_ALTIVEC */ } /* * Support for the PR_GET/SET_FPEXC prctl() calls. */ static inline unsigned int __unpack_fe01(unsigned int msr_bits) { return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8); } static inline unsigned int __pack_fe01(unsigned int fpmode) { return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1); } int set_fpexc_mode(struct task_struct *tsk, unsigned int val) { struct pt_regs *regs = tsk->thread.regs; if (val > PR_FP_EXC_PRECISE) return -EINVAL; tsk->thread.fpexc_mode = __pack_fe01(val); if (regs != NULL && (regs->msr & MSR_FP) != 0) regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1)) | tsk->thread.fpexc_mode; return 0; } int get_fpexc_mode(struct task_struct *tsk, unsigned long adr) { unsigned int val; val = __unpack_fe01(tsk->thread.fpexc_mode); return put_user(val, (unsigned int *) adr); } int sys_clone(int p1, int p2, int p3, int p4, int p5, int p6, struct pt_regs *regs) { return do_fork(p1, p2, regs, 0); } int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6, struct pt_regs *regs) { return do_fork(SIGCHLD, regs->gpr[1], regs, 0); } int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6, struct pt_regs *regs) { return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], regs, 0); } int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2, unsigned long a3, unsigned long a4, unsigned long a5, struct pt_regs *regs) { int error; char * filename; filename = getname((char *) a0); error = PTR_ERR(filename); if (IS_ERR(filename)) goto out; if (regs->msr & MSR_FP) giveup_fpu(current); #ifdef CONFIG_ALTIVEC if (regs->msr & MSR_VEC) giveup_altivec(current); #endif /* CONFIG_ALTIVEC */ error = do_execve(filename, (char **) a1, (char **) a2, regs); if (error == 0) current->ptrace &= ~PT_DTRACE; putname(filename); out: return error; } void print_backtrace(unsigned long *sp) { int cnt = 0; unsigned long i; if (sp == NULL) asm("mr %0,1" : "=r" (sp)); printk("Call backtrace: "); while (sp) { if (__get_user( i, &sp[1] )) break; if (cnt++ % 7 == 0) printk("\n"); printk("%08lX ", i); 
if (cnt > 32) break; if (__get_user(sp, (unsigned long **)sp)) break; } printk("\n"); } void show_trace_task(struct task_struct *tsk) { unsigned long stack_top = (unsigned long) tsk + THREAD_SIZE; unsigned long sp, prev_sp; int count = 0; if (tsk == NULL) return; sp = (unsigned long) &tsk->thread.ksp; do { prev_sp = sp; sp = *(unsigned long *)sp; if (sp <= prev_sp || sp >= stack_top || (sp & 3) != 0) break; if (count > 0) printk("[%08lx] ", *(unsigned long *)(sp + 4)); } while (++count < 16); if (count > 1) printk("\n"); } #if 0 /* * Low level print for debugging - Cort */ int __init ll_printk(const char *fmt, ...) { va_list args; char buf[256]; int i; va_start(args, fmt); i=vsprintf(buf,fmt,args); ll_puts(buf); va_end(args); return i; } int lines = 24, cols = 80; int orig_x = 0, orig_y = 0; void puthex(unsigned long val) { unsigned char buf[10]; int i; for (i = 7; i >= 0; i--) { buf[i] = "0123456789ABCDEF"[val & 0x0F]; val >>= 4; } buf[8] = '\0'; prom_print(buf); } void __init ll_puts(const char *s) { int x,y; char *vidmem = (char *)/*(_ISA_MEM_BASE + 0xB8000) */0xD00B8000; char c; extern int mem_init_done; if ( mem_init_done ) /* assume this means we can printk */ { printk(s); return; } #if 0 if ( have_of ) { prom_print(s); return; } #endif /* * can't ll_puts on chrp without openfirmware yet. * vidmem just needs to be setup for it. * -- Cort */ if ( _machine != _MACH_prep ) return; x = orig_x; y = orig_y; while ( ( c = *s++ ) != '\0' ) { if ( c == '\n' ) { x = 0; if ( ++y >= lines ) { /*scroll();*/ /*y--;*/ y = 0; } } else { vidmem [ ( x + cols * y ) * 2 ] = c; if ( ++x >= cols ) { x = 0; if ( ++y >= lines ) { /*scroll();*/ /*y--;*/ y = 0; } } } } orig_x = x; orig_y = y; } #endif /* * These bracket the sleeping functions.. */ extern void scheduling_functions_start_here(void); extern void scheduling_functions_end_here(void); #define first_sched ((unsigned long) scheduling_functions_start_here) #define last_sched ((unsigned long) scheduling_functions_end_here) unsigned long get_wchan(struct task_struct *p) { unsigned long ip, sp; unsigned long stack_page = (unsigned long) p; int count = 0; if (!p || p == current || p->state == TASK_RUNNING) return 0; sp = p->thread.ksp; do { sp = *(unsigned long *)sp; if (sp < stack_page || sp >= stack_page + 8188) return 0; if (count > 0) { ip = *(unsigned long *)(sp + 4); if (ip < first_sched || ip >= last_sched) return ip; } } while (count++ < 16); return 0; }
gpl-2.0
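The PR_GET/SET_FPEXC support near the end of process.c packs a two-bit floating-point exception mode into the MSR's FE0/FE1 bits and back with __pack_fe01/__unpack_fe01. The stand-alone round trip below uses the conventional PowerPC bit positions (FE0 = bit 11, FE1 = bit 8); treat those constants as an assumption here, since the header defining them is not shown above.

/* illustrative sketch; MSR_FE0/MSR_FE1 values assumed, not taken from the file above */
#include <assert.h>
#include <stdio.h>

#define MSR_FE0 (1u << 11)
#define MSR_FE1 (1u << 8)

static unsigned int unpack_fe01(unsigned int msr_bits)
{
	return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
}

static unsigned int pack_fe01(unsigned int fpmode)
{
	return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
}

int main(void)
{
	unsigned int mode;

	for (mode = 0; mode <= 3; mode++) {	/* the four PR_FP_EXC_* modes */
		unsigned int msr = pack_fe01(mode);

		assert(unpack_fe01(msr) == mode);
		printf("fpexc mode %u <-> MSR bits 0x%03x\n", mode, msr);
	}
	return 0;
}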
dorapanda/qemu-2.0.0-with-fm3
target-moxie/translate.c
16
29360
/* * Moxie emulation for qemu: main translation routines. * * Copyright (c) 2009, 2013 Anthony Green * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* For information on the Moxie architecture, see * http://moxielogic.org/wiki */ #include <stdarg.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <inttypes.h> #include <assert.h> #include "cpu.h" #include "exec/exec-all.h" #include "disas/disas.h" #include "tcg-op.h" #include "helper.h" #define GEN_HELPER 1 #include "helper.h" /* This is the state at translation time. */ typedef struct DisasContext { struct TranslationBlock *tb; target_ulong pc, saved_pc; uint32_t opcode; uint32_t fp_status; /* Routine used to access memory */ int memidx; int bstate; target_ulong btarget; int singlestep_enabled; } DisasContext; enum { BS_NONE = 0, /* We go out of the TB without reaching a branch or an * exception condition */ BS_STOP = 1, /* We want to stop translation for any reason */ BS_BRANCH = 2, /* We reached a branch condition */ BS_EXCP = 3, /* We reached an exception condition */ }; static TCGv cpu_pc; static TCGv cpu_gregs[16]; static TCGv_ptr cpu_env; static TCGv cc_a, cc_b; #include "exec/gen-icount.h" #define REG(x) (cpu_gregs[x]) /* Extract the signed 10-bit offset from a 16-bit branch instruction. 
*/ static int extract_branch_offset(int opcode) { return (((signed short)((opcode & ((1 << 10) - 1)) << 6)) >> 6) << 1; } void moxie_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, int flags) { MoxieCPU *cpu = MOXIE_CPU(cs); CPUMoxieState *env = &cpu->env; int i; cpu_fprintf(f, "pc=0x%08x\n", env->pc); cpu_fprintf(f, "$fp=0x%08x $sp=0x%08x $r0=0x%08x $r1=0x%08x\n", env->gregs[0], env->gregs[1], env->gregs[2], env->gregs[3]); for (i = 4; i < 16; i += 4) { cpu_fprintf(f, "$r%d=0x%08x $r%d=0x%08x $r%d=0x%08x $r%d=0x%08x\n", i-2, env->gregs[i], i-1, env->gregs[i + 1], i, env->gregs[i + 2], i+1, env->gregs[i + 3]); } for (i = 4; i < 16; i += 4) { cpu_fprintf(f, "sr%d=0x%08x sr%d=0x%08x sr%d=0x%08x sr%d=0x%08x\n", i-2, env->sregs[i], i-1, env->sregs[i + 1], i, env->sregs[i + 2], i+1, env->sregs[i + 3]); } } void moxie_translate_init(void) { int i; static int done_init; static const char * const gregnames[16] = { "$fp", "$sp", "$r0", "$r1", "$r2", "$r3", "$r4", "$r5", "$r6", "$r7", "$r8", "$r9", "$r10", "$r11", "$r12", "$r13" }; if (done_init) { return; } cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); cpu_pc = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUMoxieState, pc), "$pc"); for (i = 0; i < 16; i++) cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUMoxieState, gregs[i]), gregnames[i]); cc_a = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUMoxieState, cc_a), "cc_a"); cc_b = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUMoxieState, cc_b), "cc_b"); done_init = 1; } static inline void gen_goto_tb(CPUMoxieState *env, DisasContext *ctx, int n, target_ulong dest) { TranslationBlock *tb; tb = ctx->tb; if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) && !ctx->singlestep_enabled) { tcg_gen_goto_tb(n); tcg_gen_movi_i32(cpu_pc, dest); tcg_gen_exit_tb((uintptr_t)tb + n); } else { tcg_gen_movi_i32(cpu_pc, dest); if (ctx->singlestep_enabled) { gen_helper_debug(cpu_env); } tcg_gen_exit_tb(0); } } static int decode_opc(MoxieCPU *cpu, DisasContext *ctx) { CPUMoxieState *env = &cpu->env; /* Local cache for the instruction opcode. */ int opcode; /* Set the default instruction length. */ int length = 2; if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { tcg_gen_debug_insn_start(ctx->pc); } /* Examine the 16-bit opcode. */ opcode = ctx->opcode; /* Decode instruction. */ if (opcode & (1 << 15)) { if (opcode & (1 << 14)) { /* This is a Form 3 instruction. */ int inst = (opcode >> 10 & 0xf); #define BRANCH(cond) \ do { \ int l1 = gen_new_label(); \ tcg_gen_brcond_i32(cond, cc_a, cc_b, l1); \ gen_goto_tb(env, ctx, 1, ctx->pc+2); \ gen_set_label(l1); \ gen_goto_tb(env, ctx, 0, extract_branch_offset(opcode) + ctx->pc+2); \ ctx->bstate = BS_BRANCH; \ } while (0) switch (inst) { case 0x00: /* beq */ BRANCH(TCG_COND_EQ); break; case 0x01: /* bne */ BRANCH(TCG_COND_NE); break; case 0x02: /* blt */ BRANCH(TCG_COND_LT); break; case 0x03: /* bgt */ BRANCH(TCG_COND_GT); break; case 0x04: /* bltu */ BRANCH(TCG_COND_LTU); break; case 0x05: /* bgtu */ BRANCH(TCG_COND_GTU); break; case 0x06: /* bge */ BRANCH(TCG_COND_GE); break; case 0x07: /* ble */ BRANCH(TCG_COND_LE); break; case 0x08: /* bgeu */ BRANCH(TCG_COND_GEU); break; case 0x09: /* bleu */ BRANCH(TCG_COND_LEU); break; default: { TCGv temp = tcg_temp_new_i32(); tcg_gen_movi_i32(cpu_pc, ctx->pc); tcg_gen_movi_i32(temp, MOXIE_EX_BAD); gen_helper_raise_exception(cpu_env, temp); tcg_temp_free_i32(temp); } break; } } else { /* This is a Form 2 instruction. 
*/ int inst = (opcode >> 12 & 0x3); switch (inst) { case 0x00: /* inc */ { int a = (opcode >> 8) & 0xf; unsigned int v = (opcode & 0xff); tcg_gen_addi_i32(REG(a), REG(a), v); } break; case 0x01: /* dec */ { int a = (opcode >> 8) & 0xf; unsigned int v = (opcode & 0xff); tcg_gen_subi_i32(REG(a), REG(a), v); } break; case 0x02: /* gsr */ { int a = (opcode >> 8) & 0xf; unsigned v = (opcode & 0xff); tcg_gen_ld_i32(REG(a), cpu_env, offsetof(CPUMoxieState, sregs[v])); } break; case 0x03: /* ssr */ { int a = (opcode >> 8) & 0xf; unsigned v = (opcode & 0xff); tcg_gen_st_i32(REG(a), cpu_env, offsetof(CPUMoxieState, sregs[v])); } break; default: { TCGv temp = tcg_temp_new_i32(); tcg_gen_movi_i32(cpu_pc, ctx->pc); tcg_gen_movi_i32(temp, MOXIE_EX_BAD); gen_helper_raise_exception(cpu_env, temp); tcg_temp_free_i32(temp); } break; } } } else { /* This is a Form 1 instruction. */ int inst = opcode >> 8; switch (inst) { case 0x00: /* nop */ break; case 0x01: /* ldi.l (immediate) */ { int reg = (opcode >> 4) & 0xf; int val = cpu_ldl_code(env, ctx->pc+2); tcg_gen_movi_i32(REG(reg), val); length = 6; } break; case 0x02: /* mov (register-to-register) */ { int dest = (opcode >> 4) & 0xf; int src = opcode & 0xf; tcg_gen_mov_i32(REG(dest), REG(src)); } break; case 0x03: /* jsra */ { TCGv t1 = tcg_temp_new_i32(); TCGv t2 = tcg_temp_new_i32(); tcg_gen_movi_i32(t1, ctx->pc + 6); /* Make space for the static chain and return address. */ tcg_gen_subi_i32(t2, REG(1), 8); tcg_gen_mov_i32(REG(1), t2); tcg_gen_qemu_st32(t1, REG(1), ctx->memidx); /* Push the current frame pointer. */ tcg_gen_subi_i32(t2, REG(1), 4); tcg_gen_mov_i32(REG(1), t2); tcg_gen_qemu_st32(REG(0), REG(1), ctx->memidx); /* Set the pc and $fp. */ tcg_gen_mov_i32(REG(0), REG(1)); gen_goto_tb(env, ctx, 0, cpu_ldl_code(env, ctx->pc+2)); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); ctx->bstate = BS_BRANCH; length = 6; } break; case 0x04: /* ret */ { TCGv t1 = tcg_temp_new_i32(); /* The new $sp is the old $fp. */ tcg_gen_mov_i32(REG(1), REG(0)); /* Pop the frame pointer. */ tcg_gen_qemu_ld32u(REG(0), REG(1), ctx->memidx); tcg_gen_addi_i32(t1, REG(1), 4); tcg_gen_mov_i32(REG(1), t1); /* Pop the return address and skip over the static chain slot. */ tcg_gen_qemu_ld32u(cpu_pc, REG(1), ctx->memidx); tcg_gen_addi_i32(t1, REG(1), 8); tcg_gen_mov_i32(REG(1), t1); tcg_temp_free_i32(t1); /* Jump... 
*/ tcg_gen_exit_tb(0); ctx->bstate = BS_BRANCH; } break; case 0x05: /* add.l */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_add_i32(REG(a), REG(a), REG(b)); } break; case 0x06: /* push */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv t1 = tcg_temp_new_i32(); tcg_gen_subi_i32(t1, REG(a), 4); tcg_gen_mov_i32(REG(a), t1); tcg_gen_qemu_st32(REG(b), REG(a), ctx->memidx); tcg_temp_free_i32(t1); } break; case 0x07: /* pop */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv t1 = tcg_temp_new_i32(); tcg_gen_qemu_ld32u(REG(b), REG(a), ctx->memidx); tcg_gen_addi_i32(t1, REG(a), 4); tcg_gen_mov_i32(REG(a), t1); tcg_temp_free_i32(t1); } break; case 0x08: /* lda.l */ { int reg = (opcode >> 4) & 0xf; TCGv ptr = tcg_temp_new_i32(); tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_ld32u(REG(reg), ptr, ctx->memidx); tcg_temp_free_i32(ptr); length = 6; } break; case 0x09: /* sta.l */ { int val = (opcode >> 4) & 0xf; TCGv ptr = tcg_temp_new_i32(); tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_st32(REG(val), ptr, ctx->memidx); tcg_temp_free_i32(ptr); length = 6; } break; case 0x0a: /* ld.l (register indirect) */ { int src = opcode & 0xf; int dest = (opcode >> 4) & 0xf; tcg_gen_qemu_ld32u(REG(dest), REG(src), ctx->memidx); } break; case 0x0b: /* st.l */ { int dest = (opcode >> 4) & 0xf; int val = opcode & 0xf; tcg_gen_qemu_st32(REG(val), REG(dest), ctx->memidx); } break; case 0x0c: /* ldo.l */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv t1 = tcg_temp_new_i32(); TCGv t2 = tcg_temp_new_i32(); tcg_gen_addi_i32(t1, REG(b), cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_ld32u(t2, t1, ctx->memidx); tcg_gen_mov_i32(REG(a), t2); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); length = 6; } break; case 0x0d: /* sto.l */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv t1 = tcg_temp_new_i32(); TCGv t2 = tcg_temp_new_i32(); tcg_gen_addi_i32(t1, REG(a), cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_st32(REG(b), t1, ctx->memidx); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); length = 6; } break; case 0x0e: /* cmp */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_mov_i32(cc_a, REG(a)); tcg_gen_mov_i32(cc_b, REG(b)); } break; case 0x19: /* jsr */ { int fnreg = (opcode >> 4) & 0xf; /* Load the stack pointer into T0. */ TCGv t1 = tcg_temp_new_i32(); TCGv t2 = tcg_temp_new_i32(); tcg_gen_movi_i32(t1, ctx->pc+2); /* Make space for the static chain and return address. */ tcg_gen_subi_i32(t2, REG(1), 8); tcg_gen_mov_i32(REG(1), t2); tcg_gen_qemu_st32(t1, REG(1), ctx->memidx); /* Push the current frame pointer. */ tcg_gen_subi_i32(t2, REG(1), 4); tcg_gen_mov_i32(REG(1), t2); tcg_gen_qemu_st32(REG(0), REG(1), ctx->memidx); /* Set the pc and $fp. 
*/ tcg_gen_mov_i32(REG(0), REG(1)); tcg_gen_mov_i32(cpu_pc, REG(fnreg)); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); tcg_gen_exit_tb(0); ctx->bstate = BS_BRANCH; } break; case 0x1a: /* jmpa */ { tcg_gen_movi_i32(cpu_pc, cpu_ldl_code(env, ctx->pc+2)); tcg_gen_exit_tb(0); ctx->bstate = BS_BRANCH; length = 6; } break; case 0x1b: /* ldi.b (immediate) */ { int reg = (opcode >> 4) & 0xf; int val = cpu_ldl_code(env, ctx->pc+2); tcg_gen_movi_i32(REG(reg), val); length = 6; } break; case 0x1c: /* ld.b (register indirect) */ { int src = opcode & 0xf; int dest = (opcode >> 4) & 0xf; tcg_gen_qemu_ld8u(REG(dest), REG(src), ctx->memidx); } break; case 0x1d: /* lda.b */ { int reg = (opcode >> 4) & 0xf; TCGv ptr = tcg_temp_new_i32(); tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_ld8u(REG(reg), ptr, ctx->memidx); tcg_temp_free_i32(ptr); length = 6; } break; case 0x1e: /* st.b */ { int dest = (opcode >> 4) & 0xf; int val = opcode & 0xf; tcg_gen_qemu_st8(REG(val), REG(dest), ctx->memidx); } break; case 0x1f: /* sta.b */ { int val = (opcode >> 4) & 0xf; TCGv ptr = tcg_temp_new_i32(); tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_st8(REG(val), ptr, ctx->memidx); tcg_temp_free_i32(ptr); length = 6; } break; case 0x20: /* ldi.s (immediate) */ { int reg = (opcode >> 4) & 0xf; int val = cpu_ldl_code(env, ctx->pc+2); tcg_gen_movi_i32(REG(reg), val); length = 6; } break; case 0x21: /* ld.s (register indirect) */ { int src = opcode & 0xf; int dest = (opcode >> 4) & 0xf; tcg_gen_qemu_ld16u(REG(dest), REG(src), ctx->memidx); } break; case 0x22: /* lda.s */ { int reg = (opcode >> 4) & 0xf; TCGv ptr = tcg_temp_new_i32(); tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_ld16u(REG(reg), ptr, ctx->memidx); tcg_temp_free_i32(ptr); length = 6; } break; case 0x23: /* st.s */ { int dest = (opcode >> 4) & 0xf; int val = opcode & 0xf; tcg_gen_qemu_st16(REG(val), REG(dest), ctx->memidx); } break; case 0x24: /* sta.s */ { int val = (opcode >> 4) & 0xf; TCGv ptr = tcg_temp_new_i32(); tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_st16(REG(val), ptr, ctx->memidx); tcg_temp_free_i32(ptr); length = 6; } break; case 0x25: /* jmp */ { int reg = (opcode >> 4) & 0xf; tcg_gen_mov_i32(cpu_pc, REG(reg)); tcg_gen_exit_tb(0); ctx->bstate = BS_BRANCH; } break; case 0x26: /* and */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_and_i32(REG(a), REG(a), REG(b)); } break; case 0x27: /* lshr */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv sv = tcg_temp_new_i32(); tcg_gen_andi_i32(sv, REG(b), 0x1f); tcg_gen_shr_i32(REG(a), REG(a), sv); tcg_temp_free_i32(sv); } break; case 0x28: /* ashl */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv sv = tcg_temp_new_i32(); tcg_gen_andi_i32(sv, REG(b), 0x1f); tcg_gen_shl_i32(REG(a), REG(a), sv); tcg_temp_free_i32(sv); } break; case 0x29: /* sub.l */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_sub_i32(REG(a), REG(a), REG(b)); } break; case 0x2a: /* neg */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_neg_i32(REG(a), REG(b)); } break; case 0x2b: /* or */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_or_i32(REG(a), REG(a), REG(b)); } break; case 0x2c: /* not */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_not_i32(REG(a), REG(b)); } break; case 0x2d: /* ashr */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv sv = tcg_temp_new_i32(); tcg_gen_andi_i32(sv, REG(b), 0x1f); tcg_gen_sar_i32(REG(a), REG(a), sv); tcg_temp_free_i32(sv); } 
break; case 0x2e: /* xor */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_xor_i32(REG(a), REG(a), REG(b)); } break; case 0x2f: /* mul.l */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_mul_i32(REG(a), REG(a), REG(b)); } break; case 0x30: /* swi */ { int val = cpu_ldl_code(env, ctx->pc+2); TCGv temp = tcg_temp_new_i32(); tcg_gen_movi_i32(temp, val); tcg_gen_st_i32(temp, cpu_env, offsetof(CPUMoxieState, sregs[3])); tcg_gen_movi_i32(cpu_pc, ctx->pc); tcg_gen_movi_i32(temp, MOXIE_EX_SWI); gen_helper_raise_exception(cpu_env, temp); tcg_temp_free_i32(temp); length = 6; } break; case 0x31: /* div.l */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_movi_i32(cpu_pc, ctx->pc); gen_helper_div(REG(a), cpu_env, REG(a), REG(b)); } break; case 0x32: /* udiv.l */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_movi_i32(cpu_pc, ctx->pc); gen_helper_udiv(REG(a), cpu_env, REG(a), REG(b)); } break; case 0x33: /* mod.l */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_rem_i32(REG(a), REG(a), REG(b)); } break; case 0x34: /* umod.l */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_remu_i32(REG(a), REG(a), REG(b)); } break; case 0x35: /* brk */ { TCGv temp = tcg_temp_new_i32(); tcg_gen_movi_i32(cpu_pc, ctx->pc); tcg_gen_movi_i32(temp, MOXIE_EX_BREAK); gen_helper_raise_exception(cpu_env, temp); tcg_temp_free_i32(temp); } break; case 0x36: /* ldo.b */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv t1 = tcg_temp_new_i32(); TCGv t2 = tcg_temp_new_i32(); tcg_gen_addi_i32(t1, REG(b), cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_ld8u(t2, t1, ctx->memidx); tcg_gen_mov_i32(REG(a), t2); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); length = 6; } break; case 0x37: /* sto.b */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv t1 = tcg_temp_new_i32(); TCGv t2 = tcg_temp_new_i32(); tcg_gen_addi_i32(t1, REG(a), cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_st8(REG(b), t1, ctx->memidx); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); length = 6; } break; case 0x38: /* ldo.s */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv t1 = tcg_temp_new_i32(); TCGv t2 = tcg_temp_new_i32(); tcg_gen_addi_i32(t1, REG(b), cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_ld16u(t2, t1, ctx->memidx); tcg_gen_mov_i32(REG(a), t2); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); length = 6; } break; case 0x39: /* sto.s */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv t1 = tcg_temp_new_i32(); TCGv t2 = tcg_temp_new_i32(); tcg_gen_addi_i32(t1, REG(a), cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_st16(REG(b), t1, ctx->memidx); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); length = 6; } break; default: { TCGv temp = tcg_temp_new_i32(); tcg_gen_movi_i32(cpu_pc, ctx->pc); tcg_gen_movi_i32(temp, MOXIE_EX_BAD); gen_helper_raise_exception(cpu_env, temp); tcg_temp_free_i32(temp); } break; } } return length; } /* generate intermediate code for basic block 'tb'. 
*/ static inline void gen_intermediate_code_internal(MoxieCPU *cpu, TranslationBlock *tb, bool search_pc) { CPUState *cs = CPU(cpu); DisasContext ctx; target_ulong pc_start; uint16_t *gen_opc_end; CPUBreakpoint *bp; int j, lj = -1; CPUMoxieState *env = &cpu->env; int num_insns; pc_start = tb->pc; gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE; ctx.pc = pc_start; ctx.saved_pc = -1; ctx.tb = tb; ctx.memidx = 0; ctx.singlestep_enabled = 0; ctx.bstate = BS_NONE; num_insns = 0; gen_tb_start(); do { if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) { QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { if (ctx.pc == bp->pc) { tcg_gen_movi_i32(cpu_pc, ctx.pc); gen_helper_debug(cpu_env); ctx.bstate = BS_EXCP; goto done_generating; } } } if (search_pc) { j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf; if (lj < j) { lj++; while (lj < j) { tcg_ctx.gen_opc_instr_start[lj++] = 0; } } tcg_ctx.gen_opc_pc[lj] = ctx.pc; tcg_ctx.gen_opc_instr_start[lj] = 1; tcg_ctx.gen_opc_icount[lj] = num_insns; } ctx.opcode = cpu_lduw_code(env, ctx.pc); ctx.pc += decode_opc(cpu, &ctx); num_insns++; if (cs->singlestep_enabled) { break; } if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0) { break; } } while (ctx.bstate == BS_NONE && tcg_ctx.gen_opc_ptr < gen_opc_end); if (cs->singlestep_enabled) { tcg_gen_movi_tl(cpu_pc, ctx.pc); gen_helper_debug(cpu_env); } else { switch (ctx.bstate) { case BS_STOP: case BS_NONE: gen_goto_tb(env, &ctx, 0, ctx.pc); break; case BS_EXCP: tcg_gen_exit_tb(0); break; case BS_BRANCH: default: break; } } done_generating: gen_tb_end(tb, num_insns); *tcg_ctx.gen_opc_ptr = INDEX_op_end; if (search_pc) { j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf; lj++; while (lj <= j) { tcg_ctx.gen_opc_instr_start[lj++] = 0; } } else { tb->size = ctx.pc - pc_start; tb->icount = num_insns; } } void gen_intermediate_code(CPUMoxieState *env, struct TranslationBlock *tb) { gen_intermediate_code_internal(moxie_env_get_cpu(env), tb, false); } void gen_intermediate_code_pc(CPUMoxieState *env, struct TranslationBlock *tb) { gen_intermediate_code_internal(moxie_env_get_cpu(env), tb, true); } void restore_state_to_opc(CPUMoxieState *env, TranslationBlock *tb, int pc_pos) { env->pc = tcg_ctx.gen_opc_pc[pc_pos]; }
gpl-2.0
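extract_branch_offset() near the top of the Moxie translator recovers a signed 10-bit branch displacement (in 16-bit instruction units) from the opcode by shifting the field into a signed short and arithmetic-shifting it back down. The tiny test below exercises that exact expression on a few hand-picked opcode values; the test opcodes themselves are illustrative.

/* stand-alone check of the sign-extension trick; test opcodes are made up */
#include <stdio.h>

static int extract_branch_offset(int opcode)
{
	/* low 10 bits -> top of a signed 16-bit value -> arithmetic shift back,
	 * then double to convert from instruction units to bytes */
	return (((signed short)((opcode & ((1 << 10) - 1)) << 6)) >> 6) << 1;
}

int main(void)
{
	printf("%d\n", extract_branch_offset(0x0005));	/* +5   ->   +10 bytes */
	printf("%d\n", extract_branch_offset(0x03ff));	/* -1   ->    -2 bytes */
	printf("%d\n", extract_branch_offset(0x0200));	/* -512 -> -1024 bytes */
	return 0;
}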
kkiningh/glibc-2.20-tsx-experimental
libio/freopen64.c
16
3055
/* Copyright (C) 1993-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. As a special exception, if you link the code in this file with files compiled with a GNU compiler to produce an executable, that does not cause the resulting executable to be covered by the GNU Lesser General Public License. This exception does not however invalidate any other reasons why the executable file might be covered by the GNU Lesser General Public License. This exception applies to code released by its copyright holders in files containing the exception. */ #include "libioP.h" #include "stdio.h" #include <fcntl.h> #include <stdlib.h> #include <unistd.h> #include <fd_to_filename.h> #include <kernel-features.h> FILE * freopen64 (filename, mode, fp) const char* filename; const char* mode; FILE *fp; { FILE *result; CHECK_FILE (fp, NULL); if (!(fp->_flags & _IO_IS_FILEBUF)) return NULL; _IO_acquire_lock (fp); int fd = _IO_fileno (fp); const char *gfilename = (filename == NULL && fd >= 0 ? fd_to_filename (fd) : filename); fp->_flags2 |= _IO_FLAGS2_NOCLOSE; _IO_file_close_it (fp); _IO_JUMPS ((struct _IO_FILE_plus *) fp) = &_IO_file_jumps; if (_IO_vtable_offset (fp) == 0 && fp->_wide_data != NULL) fp->_wide_data->_wide_vtable = &_IO_wfile_jumps; result = _IO_file_fopen (fp, gfilename, mode, 0); fp->_flags2 &= ~_IO_FLAGS2_NOCLOSE; if (result != NULL) result = __fopen_maybe_mmap (result); if (result != NULL) { /* unbound stream orientation */ result->_mode = 0; if (fd != -1) { #ifdef O_CLOEXEC # ifndef __ASSUME_DUP3 int newfd; if (__have_dup3 < 0) newfd = -1; else newfd = # endif __dup3 (_IO_fileno (result), fd, (result->_flags2 & _IO_FLAGS2_CLOEXEC) != 0 ? O_CLOEXEC : 0); #else # define newfd 1 #endif #ifndef __ASSUME_DUP3 if (newfd < 0) { if (errno == ENOSYS) __have_dup3 = -1; __dup2 (_IO_fileno (result), fd); if ((result->_flags2 & _IO_FLAGS2_CLOEXEC) != 0) __fcntl (fd, F_SETFD, FD_CLOEXEC); } #endif __close (_IO_fileno (result)); _IO_fileno (result) = fd; } } else if (fd != -1) __close (fd); if (filename == NULL) free ((char *) gfilename); _IO_release_lock (fp); return result; }
gpl-2.0
n1koo/eglibc-trusty
crypt/badsalttest.c
16
2368
/* Test program for bad DES salt detection in crypt. Copyright (C) 2012-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #include <stdio.h> #include <unistd.h> #include <sys/mman.h> #include <crypt.h> static const char *tests[][2] = { { "no salt", "" }, { "single char", "/" }, { "first char bad", "!x" }, { "second char bad", "Z%" }, { "both chars bad", ":@" }, { "un$upported algorithm", "$2$" }, { "unsupported_algorithm", "_1" }, { "end of page", NULL } }; static int do_test (void) { int result = 0; struct crypt_data cd; size_t n = sizeof (tests) / sizeof (*tests); size_t pagesize = (size_t) sysconf (_SC_PAGESIZE); char *page; /* Check that crypt won't look at the second character if the first one is invalid. */ page = mmap (NULL, pagesize * 2, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); if (page == MAP_FAILED) { perror ("mmap"); n--; } else { if (mmap (page + pagesize, pagesize, 0, MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0) != page + pagesize) perror ("mmap 2"); page[pagesize - 1] = '*'; tests[n - 1][1] = &page[pagesize - 1]; } for (size_t i = 0; i < n; i++) { if (crypt (tests[i][0], tests[i][1])) { result++; printf ("%s: crypt returned non-NULL with salt \"%s\"\n", tests[i][0], tests[i][1]); } if (crypt_r (tests[i][0], tests[i][1], &cd)) { result++; printf ("%s: crypt_r returned non-NULL with salt \"%s\"\n", tests[i][0], tests[i][1]); } } return result; } #define TIMEOUT 5 #define TEST_FUNCTION do_test () #include "../test-skeleton.c"
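The "end of page" case above plants a salt byte flush against an inaccessible page, so reading even one byte past it faults. A standalone sketch of the same guard-page trick; it uses mprotect() where the test above re-mmaps the second page with no access:

#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
    size_t ps = (size_t) sysconf(_SC_PAGESIZE);
    char *page = mmap(NULL, ps * 2, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (page == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    if (mprotect(page + ps, ps, PROT_NONE) != 0) { /* second page: no access */
        perror("mprotect");
        return 1;
    }
    page[ps - 1] = '*';                  /* last readable byte */
    printf("last byte: %c\n", page[ps - 1]);
    /* reading page[ps] here would SIGSEGV -- exactly what the test relies on */
    return 0;
}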
gpl-2.0
wcang/quagga-mld
isisd/isis_tlv.c
16
35937
/* * IS-IS Rout(e)ing protocol - isis_tlv.c * IS-IS TLV related routines * * Copyright (C) 2001,2002 Sampo Saaristo * Tampere University of Technology * Institute of Communications Engineering * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <zebra.h> #include "log.h" #include "linklist.h" #include "stream.h" #include "memory.h" #include "prefix.h" #include "vty.h" #include "if.h" #include "isisd/dict.h" #include "isisd/isis_constants.h" #include "isisd/isis_common.h" #include "isisd/isis_flags.h" #include "isisd/isis_circuit.h" #include "isisd/isis_tlv.h" #include "isisd/isisd.h" #include "isisd/isis_dynhn.h" #include "isisd/isis_misc.h" #include "isisd/isis_pdu.h" #include "isisd/isis_lsp.h" void free_tlv (void *val) { XFREE (MTYPE_ISIS_TLV, val); return; } /* * Called after parsing of a PDU. There shouldn't be any TLVs left, so this * is only a caution to avoid memory leaks */ void free_tlvs (struct tlvs *tlvs) { if (tlvs->area_addrs) list_delete (tlvs->area_addrs); if (tlvs->is_neighs) list_delete (tlvs->is_neighs); if (tlvs->te_is_neighs) list_delete (tlvs->te_is_neighs); if (tlvs->es_neighs) list_delete (tlvs->es_neighs); if (tlvs->lsp_entries) list_delete (tlvs->lsp_entries); if (tlvs->prefix_neighs) list_delete (tlvs->prefix_neighs); if (tlvs->lan_neighs) list_delete (tlvs->lan_neighs); if (tlvs->ipv4_addrs) list_delete (tlvs->ipv4_addrs); if (tlvs->ipv4_int_reachs) list_delete (tlvs->ipv4_int_reachs); if (tlvs->ipv4_ext_reachs) list_delete (tlvs->ipv4_ext_reachs); if (tlvs->te_ipv4_reachs) list_delete (tlvs->te_ipv4_reachs); #ifdef HAVE_IPV6 if (tlvs->ipv6_addrs) list_delete (tlvs->ipv6_addrs); if (tlvs->ipv6_reachs) list_delete (tlvs->ipv6_reachs); #endif /* HAVE_IPV6 */ memset (tlvs, 0, sizeof (struct tlvs)); return; } /* * Parses the TLVs found in the variable-length part of the PDU. * Caller tells with flags in "expected" which TLVs it is interested in.
*/ int parse_tlvs (char *areatag, u_char * stream, int size, u_int32_t * expected, u_int32_t * found, struct tlvs *tlvs, u_int32_t *auth_tlv_offset) { u_char type, length; struct lan_neigh *lan_nei; struct area_addr *area_addr; struct is_neigh *is_nei; struct te_is_neigh *te_is_nei; struct es_neigh *es_nei; struct lsp_entry *lsp_entry; struct in_addr *ipv4_addr; struct ipv4_reachability *ipv4_reach; struct te_ipv4_reachability *te_ipv4_reach; #ifdef HAVE_IPV6 struct in6_addr *ipv6_addr; struct ipv6_reachability *ipv6_reach; int prefix_octets; #endif /* HAVE_IPV6 */ u_char virtual; int value_len, retval = ISIS_OK; u_char *start = stream, *pnt = stream, *endpnt; *found = 0; memset (tlvs, 0, sizeof (struct tlvs)); while (pnt < stream + size - 2) { type = *pnt; length = *(pnt + 1); pnt += 2; value_len = 0; if (pnt + length > stream + size) { zlog_warn ("ISIS-TLV (%s): TLV (type %d, length %d) exceeds packet " "boundaries", areatag, type, length); retval = ISIS_WARNING; break; } switch (type) { case AREA_ADDRESSES: /* +-------+-------+-------+-------+-------+-------+-------+-------+ * | Address Length | * +-------+-------+-------+-------+-------+-------+-------+-------+ * | Area Address | * +-------+-------+-------+-------+-------+-------+-------+-------+ * : : */ *found |= TLVFLAG_AREA_ADDRS; #ifdef EXTREME_TLV_DEBUG zlog_debug ("TLV Area Adresses len %d", length); #endif /* EXTREME_TLV_DEBUG */ if (*expected & TLVFLAG_AREA_ADDRS) { while (length > value_len) { area_addr = (struct area_addr *) pnt; value_len += area_addr->addr_len + 1; pnt += area_addr->addr_len + 1; if (!tlvs->area_addrs) tlvs->area_addrs = list_new (); listnode_add (tlvs->area_addrs, area_addr); } } else { pnt += length; } break; case IS_NEIGHBOURS: *found |= TLVFLAG_IS_NEIGHS; #ifdef EXTREME_TLV_DEBUG zlog_debug ("ISIS-TLV (%s): IS Neighbours length %d", areatag, length); #endif /* EXTREME_TLV_DEBUG */ if (TLVFLAG_IS_NEIGHS & *expected) { /* +-------+-------+-------+-------+-------+-------+-------+-------+ * | Virtual Flag | * +-------+-------+-------+-------+-------+-------+-------+-------+ */ virtual = *pnt; /* FIXME: what is the use for this? 
*/ pnt++; value_len++; /* +-------+-------+-------+-------+-------+-------+-------+-------+ * | 0 | I/E | Default Metric | * +-------+-------+-------+-------+-------+-------+-------+-------+ * | S | I/E | Delay Metric | * +-------+-------+-------+-------+-------+-------+-------+-------+ * | S | I/E | Expense Metric | * +-------+-------+-------+-------+-------+-------+-------+-------+ * | S | I/E | Error Metric | * +-------+-------+-------+-------+-------+-------+-------+-------+ * | Neighbour ID | * +---------------------------------------------------------------+ * : : */ while (length > value_len) { is_nei = (struct is_neigh *) pnt; value_len += 4 + ISIS_SYS_ID_LEN + 1; pnt += 4 + ISIS_SYS_ID_LEN + 1; if (!tlvs->is_neighs) tlvs->is_neighs = list_new (); listnode_add (tlvs->is_neighs, is_nei); } } else { pnt += length; } break; case TE_IS_NEIGHBOURS: /* +-------+-------+-------+-------+-------+-------+-------+-------+ * | Neighbour ID | 7 * +---------------------------------------------------------------+ * | TE Metric | 3 * +---------------------------------------------------------------+ * | SubTLVs Length | 1 * +---------------------------------------------------------------+ * : : */ *found |= TLVFLAG_TE_IS_NEIGHS; #ifdef EXTREME_TLV_DEBUG zlog_debug ("ISIS-TLV (%s): Extended IS Neighbours length %d", areatag, length); #endif /* EXTREME_TLV_DEBUG */ if (TLVFLAG_TE_IS_NEIGHS & *expected) { while (length > value_len) { te_is_nei = (struct te_is_neigh *) pnt; value_len += 11; pnt += 11; /* FIXME - subtlvs are handled here, for now we skip */ value_len += te_is_nei->sub_tlvs_length; pnt += te_is_nei->sub_tlvs_length; if (!tlvs->te_is_neighs) tlvs->te_is_neighs = list_new (); listnode_add (tlvs->te_is_neighs, te_is_nei); } } else { pnt += length; } break; case ES_NEIGHBOURS: /* +-------+-------+-------+-------+-------+-------+-------+-------+ * | 0 | I/E | Default Metric | * +-------+-------+-------+-------+-------+-------+-------+-------+ * | S | I/E | Delay Metric | * +-------+-------+-------+-------+-------+-------+-------+-------+ * | S | I/E | Expense Metric | * +-------+-------+-------+-------+-------+-------+-------+-------+ * | S | I/E | Error Metric | * +-------+-------+-------+-------+-------+-------+-------+-------+ * | Neighbour ID | * +---------------------------------------------------------------+ * | Neighbour ID | * +---------------------------------------------------------------+ * : : */ #ifdef EXTREME_TLV_DEBUG zlog_debug ("ISIS-TLV (%s): ES Neighbours length %d", areatag, length); #endif /* EXTREME_TLV_DEBUG */ *found |= TLVFLAG_ES_NEIGHS; if (*expected & TLVFLAG_ES_NEIGHS) { es_nei = (struct es_neigh *) pnt; value_len += 4; pnt += 4; while (length > value_len) { /* FIXME FIXME FIXME - add to the list */ /* sys_id->id = pnt; */ value_len += ISIS_SYS_ID_LEN; pnt += ISIS_SYS_ID_LEN; /* if (!es_nei->neigh_ids) es_nei->neigh_ids = sysid; */ } if (!tlvs->es_neighs) tlvs->es_neighs = list_new (); listnode_add (tlvs->es_neighs, es_nei); } else { pnt += length; } break; case LAN_NEIGHBOURS: /* +-------+-------+-------+-------+-------+-------+-------+-------+ * | LAN Address | * +-------+-------+-------+-------+-------+-------+-------+-------+ * : : */ *found |= TLVFLAG_LAN_NEIGHS; #ifdef EXTREME_TLV_DEBUG zlog_debug ("ISIS-TLV (%s): LAN Neigbours length %d", areatag, length); #endif /* EXTREME_TLV_DEBUG */ if (TLVFLAG_LAN_NEIGHS & *expected) { while (length > value_len) { lan_nei = (struct lan_neigh *) pnt; if (!tlvs->lan_neighs) tlvs->lan_neighs = list_new (); listnode_add 
(tlvs->lan_neighs, lan_nei); value_len += ETH_ALEN; pnt += ETH_ALEN; } } else { pnt += length; } break; case PADDING: #ifdef EXTREME_TLV_DEBUG zlog_debug ("TLV padding %d", length); #endif /* EXTREME_TLV_DEBUG */ pnt += length; break; case LSP_ENTRIES: /* +-------+-------+-------+-------+-------+-------+-------+-------+ * | Remaining Lifetime | 2 * +-------+-------+-------+-------+-------+-------+-------+-------+ * | LSP ID | id+2 * +-------+-------+-------+-------+-------+-------+-------+-------+ * | LSP Sequence Number | 4 * +-------+-------+-------+-------+-------+-------+-------+-------+ * | Checksum | 2 * +-------+-------+-------+-------+-------+-------+-------+-------+ */ #ifdef EXTREME_TLV_DEBUG zlog_debug ("ISIS-TLV (%s): LSP Entries length %d", areatag, length); #endif /* EXTREME_TLV_DEBUG */ *found |= TLVFLAG_LSP_ENTRIES; if (TLVFLAG_LSP_ENTRIES & *expected) { while (length > value_len) { lsp_entry = (struct lsp_entry *) pnt; value_len += 10 + ISIS_SYS_ID_LEN; pnt += 10 + ISIS_SYS_ID_LEN; if (!tlvs->lsp_entries) tlvs->lsp_entries = list_new (); listnode_add (tlvs->lsp_entries, lsp_entry); } } else { pnt += length; } break; case CHECKSUM: /* +-------+-------+-------+-------+-------+-------+-------+-------+ * | 16 bit fletcher CHECKSUM | * +-------+-------+-------+-------+-------+-------+-------+-------+ * : : */ *found |= TLVFLAG_CHECKSUM; #ifdef EXTREME_TLV_DEBUG zlog_debug ("ISIS-TLV (%s): Checksum length %d", areatag, length); #endif /* EXTREME_TLV_DEBUG */ if (*expected & TLVFLAG_CHECKSUM) { tlvs->checksum = (struct checksum *) pnt; } pnt += length; break; case PROTOCOLS_SUPPORTED: /* +-------+-------+-------+-------+-------+-------+-------+-------+ * | NLPID | * +-------+-------+-------+-------+-------+-------+-------+-------+ * : : */ *found |= TLVFLAG_NLPID; #ifdef EXTREME_TLV_DEBUG zlog_debug ("ISIS-TLV (%s): Protocols Supported length %d", areatag, length); #endif /* EXTREME_TLV_DEBUG */ if (*expected & TLVFLAG_NLPID) { tlvs->nlpids = (struct nlpids *) (pnt - 1); } pnt += length; break; case IPV4_ADDR: /* +-------+-------+-------+-------+-------+-------+-------+-------+ * + IP version 4 address + 4 * +-------+-------+-------+-------+-------+-------+-------+-------+ * : : */ *found |= TLVFLAG_IPV4_ADDR; #ifdef EXTREME_TLV_DEBUG zlog_debug ("ISIS-TLV (%s): IPv4 Address length %d", areatag, length); #endif /* EXTREME_TLV_DEBUG */ if (*expected & TLVFLAG_IPV4_ADDR) { while (length > value_len) { ipv4_addr = (struct in_addr *) pnt; #ifdef EXTREME_TLV_DEBUG zlog_debug ("ISIS-TLV (%s) : IP ADDR %s, pnt %p", areatag, inet_ntoa (*ipv4_addr), pnt); #endif /* EXTREME_TLV_DEBUG */ if (!tlvs->ipv4_addrs) tlvs->ipv4_addrs = list_new (); listnode_add (tlvs->ipv4_addrs, ipv4_addr); value_len += 4; pnt += 4; } } else { pnt += length; } break; case AUTH_INFO: *found |= TLVFLAG_AUTH_INFO; #ifdef EXTREME_TLV_DEBUG zlog_debug ("ISIS-TLV (%s): IS-IS Authentication Information", areatag); #endif if (*expected & TLVFLAG_AUTH_INFO) { tlvs->auth_info.type = *pnt; if (length == 0) { zlog_warn ("ISIS-TLV (%s): TLV (type %d, length %d) " "incorrect.", areatag, type, length); return ISIS_WARNING; } --length; tlvs->auth_info.len = length; pnt++; memcpy (tlvs->auth_info.passwd, pnt, length); /* Return the authentication tlv pos for later computation * of MD5 (RFC 5304, 2) */ if (auth_tlv_offset) *auth_tlv_offset += (pnt - start - 3); pnt += length; } else { pnt += length; } break; case DYNAMIC_HOSTNAME: *found |= TLVFLAG_DYN_HOSTNAME; #ifdef EXTREME_TLV_DEBUG zlog_debug ("ISIS-TLV (%s): Dynamic Hostname 
length %d", areatag, length); #endif /* EXTREME_TLV_DEBUG */ if (*expected & TLVFLAG_DYN_HOSTNAME) { /* the length is also included in the pointed struct */ tlvs->hostname = (struct hostname *) (pnt - 1); } pnt += length; break; case TE_ROUTER_ID: /* +---------------------------------------------------------------+ * + Router ID + 4 * +---------------------------------------------------------------+ */ *found |= TLVFLAG_TE_ROUTER_ID; #ifdef EXTREME_TLV_DEBUG zlog_debug ("ISIS-TLV (%s): TE Router ID %d", areatag, length); #endif /* EXTREME_TLV_DEBUG */ if (*expected & TLVFLAG_TE_ROUTER_ID) tlvs->router_id = (struct te_router_id *) (pnt); pnt += length; break; case IPV4_INT_REACHABILITY: /* +-------+-------+-------+-------+-------+-------+-------+-------+ * | 0 | I/E | Default Metric | 1 * +-------+-------+-------+-------+-------+-------+-------+-------+ * | S | I/E | Delay Metric | 1 * +-------+-------+-------+-------+-------+-------+-------+-------+ * | S | I/E | Expense Metric | 1 * +-------+-------+-------+-------+-------+-------+-------+-------+ * | S | I/E | Error Metric | 1 * +-------+-------+-------+-------+-------+-------+-------+-------+ * | ip address | 4 * +---------------------------------------------------------------+ * | address mask | 4 * +---------------------------------------------------------------+ * : : */ *found |= TLVFLAG_IPV4_INT_REACHABILITY; #ifdef EXTREME_TLV_DEBUG zlog_debug ("ISIS-TLV (%s): IPv4 internal Reachability length %d", areatag, length); #endif /* EXTREME_TLV_DEBUG */ if (*expected & TLVFLAG_IPV4_INT_REACHABILITY) { while (length > value_len) { ipv4_reach = (struct ipv4_reachability *) pnt; if (!tlvs->ipv4_int_reachs) tlvs->ipv4_int_reachs = list_new (); listnode_add (tlvs->ipv4_int_reachs, ipv4_reach); value_len += 12; pnt += 12; } } else { pnt += length; } break; case IPV4_EXT_REACHABILITY: /* +-------+-------+-------+-------+-------+-------+-------+-------+ * | 0 | I/E | Default Metric | 1 * +-------+-------+-------+-------+-------+-------+-------+-------+ * | S | I/E | Delay Metric | 1 * +-------+-------+-------+-------+-------+-------+-------+-------+ * | S | I/E | Expense Metric | 1 * +-------+-------+-------+-------+-------+-------+-------+-------+ * | S | I/E | Error Metric | 1 * +-------+-------+-------+-------+-------+-------+-------+-------+ * | ip address | 4 * +---------------------------------------------------------------+ * | address mask | 4 * +---------------------------------------------------------------+ * : : */ *found |= TLVFLAG_IPV4_EXT_REACHABILITY; #ifdef EXTREME_TLV_DEBUG zlog_debug ("ISIS-TLV (%s): IPv4 external Reachability length %d", areatag, length); #endif /* EXTREME_TLV_DEBUG */ if (*expected & TLVFLAG_IPV4_EXT_REACHABILITY) { while (length > value_len) { ipv4_reach = (struct ipv4_reachability *) pnt; if (!tlvs->ipv4_ext_reachs) tlvs->ipv4_ext_reachs = list_new (); listnode_add (tlvs->ipv4_ext_reachs, ipv4_reach); value_len += 12; pnt += 12; } } else { pnt += length; } break; case TE_IPV4_REACHABILITY: /* +-------+-------+-------+-------+-------+-------+-------+-------+ * | TE Metric | 4 * +-------+-------+-------+-------+-------+-------+-------+-------+ * | U/D | sTLV? 
| Prefix Mask Len | 1 * +-------+-------+-------+-------+-------+-------+-------+-------+ * | Prefix | 0-4 * +---------------------------------------------------------------+ * | sub tlvs | * +---------------------------------------------------------------+ * : : */ *found |= TLVFLAG_TE_IPV4_REACHABILITY; #ifdef EXTREME_TLV_DEBUG zlog_debug ("ISIS-TLV (%s): IPv4 extended Reachability length %d", areatag, length); #endif /* EXTREME_TLV_DEBUG */ endpnt = pnt + length; if (*expected & TLVFLAG_TE_IPV4_REACHABILITY) { while (length > value_len) { te_ipv4_reach = (struct te_ipv4_reachability *) pnt; if ((te_ipv4_reach->control & 0x3F) > IPV4_MAX_BITLEN) { zlog_warn ("ISIS-TLV (%s): invalid IPv4 extended reach" "ability prefix length %d", areatag, te_ipv4_reach->control & 0x3F); retval = ISIS_WARNING; break; } if (!tlvs->te_ipv4_reachs) tlvs->te_ipv4_reachs = list_new (); listnode_add (tlvs->te_ipv4_reachs, te_ipv4_reach); /* this trickery is permitable since no subtlvs are defined */ value_len += 5 + ((te_ipv4_reach->control & 0x3F) ? ((((te_ipv4_reach->control & 0x3F) - 1) >> 3) + 1) : 0); pnt += 5 + ((te_ipv4_reach->control & 0x3F) ? ((((te_ipv4_reach->control & 0x3F) - 1) >> 3) + 1) : 0); } } pnt = endpnt; break; #ifdef HAVE_IPV6 case IPV6_ADDR: /* +-------+-------+-------+-------+-------+-------+-------+-------+ * + IP version 6 address + 16 * +-------+-------+-------+-------+-------+-------+-------+-------+ * : : */ *found |= TLVFLAG_IPV6_ADDR; #ifdef EXTREME_TLV_DEBUG zlog_debug ("ISIS-TLV (%s): IPv6 Address length %d", areatag, length); #endif /* EXTREME_TLV_DEBUG */ if (*expected & TLVFLAG_IPV6_ADDR) { while (length > value_len) { ipv6_addr = (struct in6_addr *) pnt; if (!tlvs->ipv6_addrs) tlvs->ipv6_addrs = list_new (); listnode_add (tlvs->ipv6_addrs, ipv6_addr); value_len += 16; pnt += 16; } } else { pnt += length; } break; case IPV6_REACHABILITY: /* +-------+-------+-------+-------+-------+-------+-------+-------+ * | Default Metric | 4 * +-------+-------+-------+-------+-------+-------+-------+-------+ * | Control Informantion | * +---------------------------------------------------------------+ * | IPv6 Prefix Length |--+ * +---------------------------------------------------------------+ | * | IPv6 Prefix |<-+ * +---------------------------------------------------------------+ */ *found |= TLVFLAG_IPV6_REACHABILITY; endpnt = pnt + length; if (*expected & TLVFLAG_IPV6_REACHABILITY) { while (length > value_len) { ipv6_reach = (struct ipv6_reachability *) pnt; if (ipv6_reach->prefix_len > IPV6_MAX_BITLEN) { zlog_warn ("ISIS-TLV (%s): invalid IPv6 extended reach" "ability prefix length %d", areatag, ipv6_reach->prefix_len); retval = ISIS_WARNING; break; } prefix_octets = ((ipv6_reach->prefix_len + 7) / 8); value_len += prefix_octets + 6; pnt += prefix_octets + 6; /* FIXME: sub-tlvs */ if (!tlvs->ipv6_reachs) tlvs->ipv6_reachs = list_new (); listnode_add (tlvs->ipv6_reachs, ipv6_reach); } } pnt = endpnt; break; #endif /* HAVE_IPV6 */ case WAY3_HELLO: /* +---------------------------------------------------------------+ * | Adjacency state | 1 * +---------------------------------------------------------------+ * | Extended Local Circuit ID | 4 * +---------------------------------------------------------------+ * | Neighbor System ID (If known) | 0-8 * (probably 6) * +---------------------------------------------------------------+ * | Neighbor Local Circuit ID (If known) | 4 * +---------------------------------------------------------------+ */ *found |= TLVFLAG_3WAY_HELLO; if (*expected 
& TLVFLAG_3WAY_HELLO) { while (length > value_len) { /* FIXME: make this work */ /* Adjacency State (one octet): 0 = Up 1 = Initializing 2 = Down Extended Local Circuit ID (four octets) Neighbor System ID if known (zero to eight octets) Neighbor Extended Local Circuit ID (four octets, if Neighbor System ID is present) */ pnt += length; value_len += length; } } else { pnt += length; } break; case GRACEFUL_RESTART: /* +-------+-------+-------+-------+-------+-------+-------+-------+ * | Reserved | SA | RA | RR | 1 * +-------+-------+-------+-------+-------+-------+-------+-------+ * | Remaining Time | 2 * +---------------------------------------------------------------+ * | Restarting Neighbor ID (If known) | 0-8 * +---------------------------------------------------------------+ */ *found |= TLVFLAG_GRACEFUL_RESTART; if (*expected & TLVFLAG_GRACEFUL_RESTART) { /* FIXME: make this work */ } pnt += length; break; default: zlog_warn ("ISIS-TLV (%s): unsupported TLV type %d, length %d", areatag, type, length); retval = ISIS_WARNING; pnt += length; break; } } return retval; } int add_tlv (u_char tag, u_char len, u_char * value, struct stream *stream) { if ((stream_get_size (stream) - stream_get_endp (stream)) < (((unsigned)len) + 2)) { zlog_warn ("No room for TLV of type %d " "(total size %d available %d required %d)", tag, (int)stream_get_size (stream), (int)(stream_get_size (stream) - stream_get_endp (stream)), len+2); return ISIS_WARNING; } stream_putc (stream, tag); /* TAG */ stream_putc (stream, len); /* LENGTH */ stream_put (stream, value, (int) len); /* VALUE */ #ifdef EXTREME_DEBUG zlog_debug ("Added TLV %d len %d", tag, len); #endif /* EXTREME DEBUG */ return ISIS_OK; } int tlv_add_area_addrs (struct list *area_addrs, struct stream *stream) { struct listnode *node; struct area_addr *area_addr; u_char value[255]; u_char *pos = value; for (ALL_LIST_ELEMENTS_RO (area_addrs, node, area_addr)) { if (pos - value + area_addr->addr_len > 255) goto err; *pos = area_addr->addr_len; pos++; memcpy (pos, area_addr->area_addr, (int) area_addr->addr_len); pos += area_addr->addr_len; } return add_tlv (AREA_ADDRESSES, pos - value, value, stream); err: zlog_warn ("tlv_add_area_addrs(): TLV longer than 255"); return ISIS_WARNING; } int tlv_add_is_neighs (struct list *is_neighs, struct stream *stream) { struct listnode *node; struct is_neigh *is_neigh; u_char value[255]; u_char *pos = value; int retval; *pos = 0; /*is_neigh->virtual; */ pos++; for (ALL_LIST_ELEMENTS_RO (is_neighs, node, is_neigh)) { if (pos - value + IS_NEIGHBOURS_LEN > 255) { retval = add_tlv (IS_NEIGHBOURS, pos - value, value, stream); if (retval != ISIS_OK) return retval; pos = value; } *pos = is_neigh->metrics.metric_default; pos++; *pos = is_neigh->metrics.metric_delay; pos++; *pos = is_neigh->metrics.metric_expense; pos++; *pos = is_neigh->metrics.metric_error; pos++; memcpy (pos, is_neigh->neigh_id, ISIS_SYS_ID_LEN + 1); pos += ISIS_SYS_ID_LEN + 1; } return add_tlv (IS_NEIGHBOURS, pos - value, value, stream); } int tlv_add_te_is_neighs (struct list *te_is_neighs, struct stream *stream) { struct listnode *node; struct te_is_neigh *te_is_neigh; u_char value[255]; u_char *pos = value; int retval; for (ALL_LIST_ELEMENTS_RO (te_is_neighs, node, te_is_neigh)) { /* FIXME: This will be wrong if we are going to add TE sub TLVs. 
*/ if (pos - value + IS_NEIGHBOURS_LEN > 255) { retval = add_tlv (TE_IS_NEIGHBOURS, pos - value, value, stream); if (retval != ISIS_OK) return retval; pos = value; } memcpy (pos, te_is_neigh->neigh_id, ISIS_SYS_ID_LEN + 1); pos += ISIS_SYS_ID_LEN + 1; memcpy (pos, te_is_neigh->te_metric, 3); pos += 3; /* Sub TLVs length. */ *pos = 0; pos++; } return add_tlv (TE_IS_NEIGHBOURS, pos - value, value, stream); } int tlv_add_lan_neighs (struct list *lan_neighs, struct stream *stream) { struct listnode *node; u_char *snpa; u_char value[255]; u_char *pos = value; int retval; for (ALL_LIST_ELEMENTS_RO (lan_neighs, node, snpa)) { if (pos - value + ETH_ALEN > 255) { retval = add_tlv (LAN_NEIGHBOURS, pos - value, value, stream); if (retval != ISIS_OK) return retval; pos = value; } memcpy (pos, snpa, ETH_ALEN); pos += ETH_ALEN; } return add_tlv (LAN_NEIGHBOURS, pos - value, value, stream); } int tlv_add_nlpid (struct nlpids *nlpids, struct stream *stream) { return add_tlv (PROTOCOLS_SUPPORTED, nlpids->count, nlpids->nlpids, stream); } int tlv_add_authinfo (u_char auth_type, u_char auth_len, u_char *auth_value, struct stream *stream) { u_char value[255]; u_char *pos = value; *pos++ = auth_type; memcpy (pos, auth_value, auth_len); return add_tlv (AUTH_INFO, auth_len + 1, value, stream); } int tlv_add_checksum (struct checksum *checksum, struct stream *stream) { u_char value[255]; u_char *pos = value; return add_tlv (CHECKSUM, pos - value, value, stream); } int tlv_add_ip_addrs (struct list *ip_addrs, struct stream *stream) { struct listnode *node; struct prefix_ipv4 *ipv4; u_char value[255]; u_char *pos = value; int retval; for (ALL_LIST_ELEMENTS_RO (ip_addrs, node, ipv4)) { if (pos - value + IPV4_MAX_BYTELEN > 255) { /* RFC 1195 s4.2: only one tuple of 63 allowed. */ zlog_warn ("tlv_add_ip_addrs(): cutting off at 63 IP addresses"); break; } *(u_int32_t *) pos = ipv4->prefix.s_addr; pos += IPV4_MAX_BYTELEN; } return add_tlv (IPV4_ADDR, pos - value, value, stream); } /* Used to add TLV containing just one IPv4 address - either IPv4 address TLV * (in case of LSP) or TE router ID TLV. 
*/ int tlv_add_in_addr (struct in_addr *addr, struct stream *stream, u_char tag) { u_char value[255]; u_char *pos = value; memcpy (pos, addr, IPV4_MAX_BYTELEN); pos += IPV4_MAX_BYTELEN; return add_tlv (tag, pos - value, value, stream); } int tlv_add_dynamic_hostname (struct hostname *hostname, struct stream *stream) { return add_tlv (DYNAMIC_HOSTNAME, hostname->namelen, hostname->name, stream); } int tlv_add_lsp_entries (struct list *lsps, struct stream *stream) { struct listnode *node; struct isis_lsp *lsp; u_char value[255]; u_char *pos = value; int retval; for (ALL_LIST_ELEMENTS_RO (lsps, node, lsp)) { if (pos - value + LSP_ENTRIES_LEN > 255) { retval = add_tlv (LSP_ENTRIES, pos - value, value, stream); if (retval != ISIS_OK) return retval; pos = value; } *((u_int16_t *) pos) = lsp->lsp_header->rem_lifetime; pos += 2; memcpy (pos, lsp->lsp_header->lsp_id, ISIS_SYS_ID_LEN + 2); pos += ISIS_SYS_ID_LEN + 2; *((u_int32_t *) pos) = lsp->lsp_header->seq_num; pos += 4; *((u_int16_t *) pos) = lsp->lsp_header->checksum; pos += 2; } return add_tlv (LSP_ENTRIES, pos - value, value, stream); } int tlv_add_ipv4_reachs (struct list *ipv4_reachs, struct stream *stream) { struct listnode *node; struct ipv4_reachability *reach; u_char value[255]; u_char *pos = value; int retval; for (ALL_LIST_ELEMENTS_RO (ipv4_reachs, node, reach)) { if (pos - value + IPV4_REACH_LEN > 255) { retval = add_tlv (IPV4_INT_REACHABILITY, pos - value, value, stream); if (retval != ISIS_OK) return retval; pos = value; } *pos = reach->metrics.metric_default; pos++; *pos = reach->metrics.metric_delay; pos++; *pos = reach->metrics.metric_expense; pos++; *pos = reach->metrics.metric_error; pos++; *(u_int32_t *) pos = reach->prefix.s_addr; pos += IPV4_MAX_BYTELEN; *(u_int32_t *) pos = reach->mask.s_addr; pos += IPV4_MAX_BYTELEN; } return add_tlv (IPV4_INT_REACHABILITY, pos - value, value, stream); } int tlv_add_te_ipv4_reachs (struct list *te_ipv4_reachs, struct stream *stream) { struct listnode *node; struct te_ipv4_reachability *te_reach; u_char value[255]; u_char *pos = value; u_char prefix_size; int retval; for (ALL_LIST_ELEMENTS_RO (te_ipv4_reachs, node, te_reach)) { prefix_size = ((((te_reach->control & 0x3F) - 1) >> 3) + 1); if (pos - value + (5 + prefix_size) > 255) { retval = add_tlv (TE_IPV4_REACHABILITY, pos - value, value, stream); if (retval != ISIS_OK) return retval; pos = value; } *(u_int32_t *) pos = te_reach->te_metric; pos += 4; *pos = te_reach->control; pos++; memcpy (pos, &te_reach->prefix_start, prefix_size); pos += prefix_size; } return add_tlv (TE_IPV4_REACHABILITY, pos - value, value, stream); } #ifdef HAVE_IPV6 int tlv_add_ipv6_addrs (struct list *ipv6_addrs, struct stream *stream) { struct listnode *node; struct prefix_ipv6 *ipv6; u_char value[255]; u_char *pos = value; int retval; for (ALL_LIST_ELEMENTS_RO (ipv6_addrs, node, ipv6)) { if (pos - value + IPV6_MAX_BYTELEN > 255) { retval = add_tlv (IPV6_ADDR, pos - value, value, stream); if (retval != ISIS_OK) return retval; pos = value; } memcpy (pos, ipv6->prefix.s6_addr, IPV6_MAX_BYTELEN); pos += IPV6_MAX_BYTELEN; } return add_tlv (IPV6_ADDR, pos - value, value, stream); } int tlv_add_ipv6_reachs (struct list *ipv6_reachs, struct stream *stream) { struct listnode *node; struct ipv6_reachability *ip6reach; u_char value[255]; u_char *pos = value; int retval, prefix_octets; for (ALL_LIST_ELEMENTS_RO (ipv6_reachs, node, ip6reach)) { if (pos - value + IPV6_MAX_BYTELEN + 6 > 255) { retval = add_tlv (IPV6_REACHABILITY, pos - value, value, stream); if (retval != 
ISIS_OK) return retval; pos = value; } *(uint32_t *) pos = ip6reach->metric; pos += 4; *pos = ip6reach->control_info; pos++; prefix_octets = ((ip6reach->prefix_len + 7) / 8); *pos = ip6reach->prefix_len; pos++; memcpy (pos, ip6reach->prefix, prefix_octets); pos += prefix_octets; } return add_tlv (IPV6_REACHABILITY, pos - value, value, stream); } #endif /* HAVE_IPV6 */ int tlv_add_padding (struct stream *stream) { int fullpads, i, left; /* * How many full padding TLVs can we add? Each one is 257 bytes: * a 2 byte TLV header plus 255 bytes of zero padding. */ fullpads = (stream_get_size (stream) - stream_get_endp (stream)) / 257; for (i = 0; i < fullpads; i++) { if (!stream_putc (stream, (u_char) PADDING)) /* TAG */ goto err; if (!stream_putc (stream, (u_char) 255)) /* LENGTH */ goto err; stream_put (stream, NULL, 255); /* zero padding */ } left = stream_get_size (stream) - stream_get_endp (stream); if (left < 2) return ISIS_OK; if (left == 2) { stream_putc (stream, PADDING); stream_putc (stream, 0); return ISIS_OK; } stream_putc (stream, PADDING); stream_putc (stream, left - 2); stream_put (stream, NULL, left - 2); return ISIS_OK; err: zlog_warn ("tlv_add_padding(): no room for tlv"); return ISIS_WARNING; }
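Every branch of parse_tlvs() above follows the same type/length/value walk: read a 2-byte header, verify the advertised length against the remaining buffer, then consume the value. A self-contained sketch of that core loop (hypothetical walk_tlvs helper, not quagga API):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static int walk_tlvs(const uint8_t *buf, size_t size)
{
    const uint8_t *pnt = buf, *end = buf + size;

    while (end - pnt >= 2) {
        unsigned type = pnt[0];
        unsigned length = pnt[1];
        pnt += 2;
        if (length > (size_t) (end - pnt)) {  /* TLV overruns the packet */
            fprintf(stderr, "TLV %u exceeds packet boundaries\n", type);
            return -1;
        }
        printf("TLV type %u, length %u\n", type, length);
        pnt += length;                        /* skip (or parse) the value */
    }
    return 0;
}

int main(void)
{
    const uint8_t pkt[] = { 1, 3, 0xaa, 0xbb, 0xcc, 8, 1, 0x42 };
    return walk_tlvs(pkt, sizeof(pkt)) ? 1 : 0;
}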
gpl-2.0
v3best/linux-v3best
drivers/usb/dwc3/dwc3-pci.c
272
5442
/** * dwc3-pci.c - PCI Specific glue layer * * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com * * Authors: Felipe Balbi <balbi@ti.com>, * Sebastian Andrzej Siewior <bigeasy@linutronix.de> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 of * the License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/usb/otg.h> #include <linux/usb/usb_phy_gen_xceiv.h> /* FIXME define these in <linux/pci_ids.h> */ #define PCI_VENDOR_ID_SYNOPSYS 0x16c3 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd #define PCI_DEVICE_ID_INTEL_BYT 0x0f37 #define PCI_DEVICE_ID_INTEL_MRFLD 0x119e struct dwc3_pci { struct device *dev; struct platform_device *dwc3; struct platform_device *usb2_phy; struct platform_device *usb3_phy; }; static int dwc3_pci_register_phys(struct dwc3_pci *glue) { struct usb_phy_gen_xceiv_platform_data pdata; struct platform_device *pdev; int ret; memset(&pdata, 0x00, sizeof(pdata)); pdev = platform_device_alloc("usb_phy_gen_xceiv", 0); if (!pdev) return -ENOMEM; glue->usb2_phy = pdev; pdata.type = USB_PHY_TYPE_USB2; pdata.gpio_reset = -1; ret = platform_device_add_data(glue->usb2_phy, &pdata, sizeof(pdata)); if (ret) goto err1; pdev = platform_device_alloc("usb_phy_gen_xceiv", 1); if (!pdev) { ret = -ENOMEM; goto err1; } glue->usb3_phy = pdev; pdata.type = USB_PHY_TYPE_USB3; ret = platform_device_add_data(glue->usb3_phy, &pdata, sizeof(pdata)); if (ret) goto err2; ret = platform_device_add(glue->usb2_phy); if (ret) goto err2; ret = platform_device_add(glue->usb3_phy); if (ret) goto err3; return 0; err3: platform_device_del(glue->usb2_phy); err2: platform_device_put(glue->usb3_phy); err1: platform_device_put(glue->usb2_phy); return ret; } static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) { struct resource res[2]; struct platform_device *dwc3; struct dwc3_pci *glue; int ret = -ENOMEM; struct device *dev = &pci->dev; glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL); if (!glue) { dev_err(dev, "not enough memory\n"); return -ENOMEM; } glue->dev = dev; ret = pci_enable_device(pci); if (ret) { dev_err(dev, "failed to enable pci device\n"); return -ENODEV; } pci_set_master(pci); ret = dwc3_pci_register_phys(glue); if (ret) { dev_err(dev, "couldn't register PHYs\n"); return ret; } dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO); if (!dwc3) { dev_err(dev, "couldn't allocate dwc3 device\n"); ret = -ENOMEM; goto err1; } memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res)); res[0].start = pci_resource_start(pci, 0); res[0].end = pci_resource_end(pci, 0); res[0].name = "dwc_usb3"; res[0].flags = IORESOURCE_MEM; res[1].start = pci->irq; res[1].name = "dwc_usb3"; res[1].flags = IORESOURCE_IRQ; ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res)); if (ret) { dev_err(dev, "couldn't add resources to dwc3 device\n"); goto err1; } pci_set_drvdata(pci, glue); dma_set_coherent_mask(&dwc3->dev, dev->coherent_dma_mask); dwc3->dev.dma_mask = dev->dma_mask; dwc3->dev.dma_parms = dev->dma_parms; dwc3->dev.parent = dev; glue->dwc3 = dwc3; ret = 
platform_device_add(dwc3); if (ret) { dev_err(dev, "failed to register dwc3 device\n"); goto err3; } return 0; err3: platform_device_put(dwc3); err1: pci_disable_device(pci); return ret; } static void dwc3_pci_remove(struct pci_dev *pci) { struct dwc3_pci *glue = pci_get_drvdata(pci); platform_device_unregister(glue->dwc3); platform_device_unregister(glue->usb2_phy); platform_device_unregister(glue->usb3_phy); pci_disable_device(pci); } static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), }, { } /* Terminating Entry */ }; MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table); #ifdef CONFIG_PM_SLEEP static int dwc3_pci_suspend(struct device *dev) { struct pci_dev *pci = to_pci_dev(dev); pci_disable_device(pci); return 0; } static int dwc3_pci_resume(struct device *dev) { struct pci_dev *pci = to_pci_dev(dev); int ret; ret = pci_enable_device(pci); if (ret) { dev_err(dev, "can't re-enable device --> %d\n", ret); return ret; } pci_set_master(pci); return 0; } #endif /* CONFIG_PM_SLEEP */ static const struct dev_pm_ops dwc3_pci_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_suspend, dwc3_pci_resume) }; static struct pci_driver dwc3_pci_driver = { .name = "dwc3-pci", .id_table = dwc3_pci_id_table, .probe = dwc3_pci_probe, .remove = dwc3_pci_remove, .driver = { .pm = &dwc3_pci_dev_pm_ops, }, }; MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("DesignWare USB3 PCI Glue Layer"); module_pci_driver(dwc3_pci_driver);
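dwc3_pci_probe() above uses the kernel's usual goto-based unwind ladder: resources are acquired in order, and each failure jumps to a label that releases only what was already acquired, in reverse order. A standalone (non-kernel) sketch of the pattern:

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
    char *a, *b;

    a = malloc(64);
    if (!a)
        goto err0;
    b = malloc(64);
    if (!b)
        goto err1;              /* unwinds step 1 only */

    printf("both resources acquired\n");
    free(b);
    free(a);
    return 0;

err1:
    free(a);
err0:
    return -1;
}

int main(void)
{
    return setup() ? 1 : 0;
}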
gpl-2.0
viaembedded/arm-soc
arch/x86/crypto/crc32c-intel_glue.c
528
7456
/* * Using hardware provided CRC32 instruction to accelerate the CRC32 disposal. * CRC32C polynomial:0x1EDC6F41(BE)/0x82F63B78(LE) * CRC32 is a new instruction in Intel SSE4.2, the reference can be found at: * http://www.intel.com/products/processor/manuals/ * Intel(R) 64 and IA-32 Architectures Software Developer's Manual * Volume 2A: Instruction Set Reference, A-M * * Copyright (C) 2008 Intel Corporation * Authors: Austin Zhang <austin_zhang@linux.intel.com> * Kent Liu <kent.liu@intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> #include <linux/kernel.h> #include <crypto/internal/hash.h> #include <asm/cpufeature.h> #include <asm/cpu_device_id.h> #include <asm/fpu/internal.h> #define CHKSUM_BLOCK_SIZE 1 #define CHKSUM_DIGEST_SIZE 4 #define SCALE_F sizeof(unsigned long) #ifdef CONFIG_X86_64 #define REX_PRE "0x48, " #else #define REX_PRE #endif #ifdef CONFIG_X86_64 /* * use carryless multiply version of crc32c when buffer * size is >= 512 (when eager fpu is enabled) or * >= 1024 (when eager fpu is disabled) to account * for fpu state save/restore overhead. */ #define CRC32C_PCL_BREAKEVEN_EAGERFPU 512 #define CRC32C_PCL_BREAKEVEN_NOEAGERFPU 1024 asmlinkage unsigned int crc_pcl(const u8 *buffer, int len, unsigned int crc_init); static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU; #if defined(X86_FEATURE_EAGER_FPU) #define set_pcl_breakeven_point() \ do { \ if (!use_eager_fpu()) \ crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU; \ } while (0) #else #define set_pcl_breakeven_point() \ (crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU) #endif #endif /* CONFIG_X86_64 */ static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length) { while (length--) { __asm__ __volatile__( ".byte 0xf2, 0xf, 0x38, 0xf0, 0xf1" :"=S"(crc) :"0"(crc), "c"(*data) ); data++; } return crc; } static u32 __pure crc32c_intel_le_hw(u32 crc, unsigned char const *p, size_t len) { unsigned int iquotient = len / SCALE_F; unsigned int iremainder = len % SCALE_F; unsigned long *ptmp = (unsigned long *)p; while (iquotient--) { __asm__ __volatile__( ".byte 0xf2, " REX_PRE "0xf, 0x38, 0xf1, 0xf1;" :"=S"(crc) :"0"(crc), "c"(*ptmp) ); ptmp++; } if (iremainder) crc = crc32c_intel_le_hw_byte(crc, (unsigned char *)ptmp, iremainder); return crc; } /* * Setting the seed allows arbitrary accumulators and flexible XOR policy * If your algorithm starts with ~0, then XOR with ~0 before you set * the seed. 
*/ static int crc32c_intel_setkey(struct crypto_shash *hash, const u8 *key, unsigned int keylen) { u32 *mctx = crypto_shash_ctx(hash); if (keylen != sizeof(u32)) { crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } *mctx = le32_to_cpup((__le32 *)key); return 0; } static int crc32c_intel_init(struct shash_desc *desc) { u32 *mctx = crypto_shash_ctx(desc->tfm); u32 *crcp = shash_desc_ctx(desc); *crcp = *mctx; return 0; } static int crc32c_intel_update(struct shash_desc *desc, const u8 *data, unsigned int len) { u32 *crcp = shash_desc_ctx(desc); *crcp = crc32c_intel_le_hw(*crcp, data, len); return 0; } static int __crc32c_intel_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out) { *(__le32 *)out = ~cpu_to_le32(crc32c_intel_le_hw(*crcp, data, len)); return 0; } static int crc32c_intel_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return __crc32c_intel_finup(shash_desc_ctx(desc), data, len, out); } static int crc32c_intel_final(struct shash_desc *desc, u8 *out) { u32 *crcp = shash_desc_ctx(desc); *(__le32 *)out = ~cpu_to_le32p(crcp); return 0; } static int crc32c_intel_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return __crc32c_intel_finup(crypto_shash_ctx(desc->tfm), data, len, out); } static int crc32c_intel_cra_init(struct crypto_tfm *tfm) { u32 *key = crypto_tfm_ctx(tfm); *key = ~0; return 0; } #ifdef CONFIG_X86_64 static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data, unsigned int len) { u32 *crcp = shash_desc_ctx(desc); /* * use faster PCL version if datasize is large enough to * overcome kernel fpu state save/restore overhead */ if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) { kernel_fpu_begin(); *crcp = crc_pcl(data, len, *crcp); kernel_fpu_end(); } else *crcp = crc32c_intel_le_hw(*crcp, data, len); return 0; } static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out) { if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) { kernel_fpu_begin(); *(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp)); kernel_fpu_end(); } else *(__le32 *)out = ~cpu_to_le32(crc32c_intel_le_hw(*crcp, data, len)); return 0; } static int crc32c_pcl_intel_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return __crc32c_pcl_intel_finup(shash_desc_ctx(desc), data, len, out); } static int crc32c_pcl_intel_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return __crc32c_pcl_intel_finup(crypto_shash_ctx(desc->tfm), data, len, out); } #endif /* CONFIG_X86_64 */ static struct shash_alg alg = { .setkey = crc32c_intel_setkey, .init = crc32c_intel_init, .update = crc32c_intel_update, .final = crc32c_intel_final, .finup = crc32c_intel_finup, .digest = crc32c_intel_digest, .descsize = sizeof(u32), .digestsize = CHKSUM_DIGEST_SIZE, .base = { .cra_name = "crc32c", .cra_driver_name = "crc32c-intel", .cra_priority = 200, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_module = THIS_MODULE, .cra_init = crc32c_intel_cra_init, } }; static const struct x86_cpu_id crc32c_cpu_id[] = { X86_FEATURE_MATCH(X86_FEATURE_XMM4_2), {} }; MODULE_DEVICE_TABLE(x86cpu, crc32c_cpu_id); static int __init crc32c_intel_mod_init(void) { if (!x86_match_cpu(crc32c_cpu_id)) return -ENODEV; #ifdef CONFIG_X86_64 if (cpu_has_pclmulqdq) { alg.update = crc32c_pcl_intel_update; alg.finup = crc32c_pcl_intel_finup; alg.digest = crc32c_pcl_intel_digest; set_pcl_breakeven_point(); } #endif return crypto_register_shash(&alg); } static 
void __exit crc32c_intel_mod_fini(void) { crypto_unregister_shash(&alg); } module_init(crc32c_intel_mod_init); module_exit(crc32c_intel_mod_fini); MODULE_AUTHOR("Austin Zhang <austin.zhang@intel.com>, Kent Liu <kent.liu@intel.com>"); MODULE_DESCRIPTION("CRC32c (Castagnoli) optimization using Intel Hardware."); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("crc32c"); MODULE_ALIAS_CRYPTO("crc32c-intel");
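The raw ".byte 0xf2, 0xf, 0x38, 0xf0/0xf1" sequences above hand-encode the SSE4.2 CRC32 instruction for assemblers that predate it; in userspace the same instruction is reachable through compiler intrinsics. A sketch (build with -msse4.2) that mirrors the driver's ~0 seed and final inversion:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <nmmintrin.h>   /* SSE4.2 intrinsics */

static uint32_t crc32c(const uint8_t *p, size_t len)
{
    uint32_t crc = ~0u;              /* same seed as crc32c_intel_cra_init() */
    while (len--)
        crc = _mm_crc32_u8(crc, *p++);
    return ~crc;                     /* same final inversion as _final() */
}

int main(void)
{
    const char msg[] = "123456789";
    /* the well-known CRC32C check value for "123456789" is 0xE3069283 */
    printf("crc32c = 0x%08X\n", crc32c((const uint8_t *) msg, sizeof(msg) - 1));
    return 0;
}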
gpl-2.0
heptalium/rpi-sources-3.16
drivers/watchdog/bcm47xx_wdt.c
528
6340
/* * Watchdog driver for Broadcom BCM47XX * * Copyright (C) 2008 Aleksandar Radovanovic <biblbroks@sezampro.rs> * Copyright (C) 2009 Matthieu CASTET <castet.matthieu@free.fr> * Copyright (C) 2012-2013 Hauke Mehrtens <hauke@hauke-m.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/bcm47xx_wdt.h> #include <linux/bitops.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/platform_device.h> #include <linux/reboot.h> #include <linux/types.h> #include <linux/watchdog.h> #include <linux/timer.h> #include <linux/jiffies.h> #define DRV_NAME "bcm47xx_wdt" #define WDT_DEFAULT_TIME 30 /* seconds */ #define WDT_SOFTTIMER_MAX 255 /* seconds */ #define WDT_SOFTTIMER_THRESHOLD 60 /* seconds */ static int timeout = WDT_DEFAULT_TIME; static bool nowayout = WATCHDOG_NOWAYOUT; module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog time in seconds. (default=" __MODULE_STRING(WDT_DEFAULT_TIME) ")"); module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); static inline struct bcm47xx_wdt *bcm47xx_wdt_get(struct watchdog_device *wdd) { return container_of(wdd, struct bcm47xx_wdt, wdd); } static int bcm47xx_wdt_hard_keepalive(struct watchdog_device *wdd) { struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd); wdt->timer_set_ms(wdt, wdd->timeout * 1000); return 0; } static int bcm47xx_wdt_hard_start(struct watchdog_device *wdd) { return 0; } static int bcm47xx_wdt_hard_stop(struct watchdog_device *wdd) { struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd); wdt->timer_set(wdt, 0); return 0; } static int bcm47xx_wdt_hard_set_timeout(struct watchdog_device *wdd, unsigned int new_time) { struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd); u32 max_timer = wdt->max_timer_ms; if (new_time < 1 || new_time > max_timer / 1000) { pr_warn("timeout value must be 1<=x<=%d, using %d\n", max_timer / 1000, new_time); return -EINVAL; } wdd->timeout = new_time; return 0; } static struct watchdog_ops bcm47xx_wdt_hard_ops = { .owner = THIS_MODULE, .start = bcm47xx_wdt_hard_start, .stop = bcm47xx_wdt_hard_stop, .ping = bcm47xx_wdt_hard_keepalive, .set_timeout = bcm47xx_wdt_hard_set_timeout, }; static void bcm47xx_wdt_soft_timer_tick(unsigned long data) { struct bcm47xx_wdt *wdt = (struct bcm47xx_wdt *)data; u32 next_tick = min(wdt->wdd.timeout * 1000, wdt->max_timer_ms); if (!atomic_dec_and_test(&wdt->soft_ticks)) { wdt->timer_set_ms(wdt, next_tick); mod_timer(&wdt->soft_timer, jiffies + HZ); } else { pr_crit("Watchdog will fire soon!!!\n"); } } static int bcm47xx_wdt_soft_keepalive(struct watchdog_device *wdd) { struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd); atomic_set(&wdt->soft_ticks, wdd->timeout); return 0; } static int bcm47xx_wdt_soft_start(struct watchdog_device *wdd) { struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd); bcm47xx_wdt_soft_keepalive(wdd); bcm47xx_wdt_soft_timer_tick((unsigned long)wdt); return 0; } static int bcm47xx_wdt_soft_stop(struct watchdog_device *wdd) { struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd); del_timer_sync(&wdt->soft_timer); wdt->timer_set(wdt, 0); return 0; } static int bcm47xx_wdt_soft_set_timeout(struct watchdog_device *wdd, unsigned int new_time) { if (new_time < 1 || 
new_time > WDT_SOFTTIMER_MAX) { pr_warn("timeout value must be 1<=x<=%d, using %d\n", WDT_SOFTTIMER_MAX, new_time); return -EINVAL; } wdd->timeout = new_time; return 0; } static const struct watchdog_info bcm47xx_wdt_info = { .identity = DRV_NAME, .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, }; static int bcm47xx_wdt_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { struct bcm47xx_wdt *wdt; wdt = container_of(this, struct bcm47xx_wdt, notifier); if (code == SYS_DOWN || code == SYS_HALT) wdt->wdd.ops->stop(&wdt->wdd); return NOTIFY_DONE; } static struct watchdog_ops bcm47xx_wdt_soft_ops = { .owner = THIS_MODULE, .start = bcm47xx_wdt_soft_start, .stop = bcm47xx_wdt_soft_stop, .ping = bcm47xx_wdt_soft_keepalive, .set_timeout = bcm47xx_wdt_soft_set_timeout, }; static int bcm47xx_wdt_probe(struct platform_device *pdev) { int ret; bool soft; struct bcm47xx_wdt *wdt = dev_get_platdata(&pdev->dev); if (!wdt) return -ENXIO; soft = wdt->max_timer_ms < WDT_SOFTTIMER_THRESHOLD * 1000; if (soft) { wdt->wdd.ops = &bcm47xx_wdt_soft_ops; setup_timer(&wdt->soft_timer, bcm47xx_wdt_soft_timer_tick, (long unsigned int)wdt); } else { wdt->wdd.ops = &bcm47xx_wdt_hard_ops; } wdt->wdd.info = &bcm47xx_wdt_info; wdt->wdd.timeout = WDT_DEFAULT_TIME; ret = wdt->wdd.ops->set_timeout(&wdt->wdd, timeout); if (ret) goto err_timer; watchdog_set_nowayout(&wdt->wdd, nowayout); wdt->notifier.notifier_call = &bcm47xx_wdt_notify_sys; ret = register_reboot_notifier(&wdt->notifier); if (ret) goto err_timer; ret = watchdog_register_device(&wdt->wdd); if (ret) goto err_notifier; dev_info(&pdev->dev, "BCM47xx Watchdog Timer enabled (%d seconds%s%s)\n", timeout, nowayout ? ", nowayout" : "", soft ? ", Software Timer" : ""); return 0; err_notifier: unregister_reboot_notifier(&wdt->notifier); err_timer: if (soft) del_timer_sync(&wdt->soft_timer); return ret; } static int bcm47xx_wdt_remove(struct platform_device *pdev) { struct bcm47xx_wdt *wdt = dev_get_platdata(&pdev->dev); if (!wdt) return -ENXIO; watchdog_unregister_device(&wdt->wdd); unregister_reboot_notifier(&wdt->notifier); return 0; } static struct platform_driver bcm47xx_wdt_driver = { .driver = { .owner = THIS_MODULE, .name = "bcm47xx-wdt", }, .probe = bcm47xx_wdt_probe, .remove = bcm47xx_wdt_remove, }; module_platform_driver(bcm47xx_wdt_driver); MODULE_AUTHOR("Aleksandar Radovanovic"); MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>"); MODULE_DESCRIPTION("Watchdog driver for Broadcom BCM47xx"); MODULE_LICENSE("GPL");
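The userspace counterpart of a driver like this is a daemon that keeps the watchdog fed; if the daemon dies, the timer expires and the board reboots. A sketch using the standard /dev/watchdog interface (the 'V' write is the magic-close handshake matching the WDIOF_MAGICCLOSE flag advertised above):

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
    int fd = open("/dev/watchdog", O_WRONLY);
    if (fd < 0) {
        perror("open /dev/watchdog");
        return 1;
    }
    int timeout = 30;                   /* the driver validates/clamps this */
    ioctl(fd, WDIOC_SETTIMEOUT, &timeout);

    for (int i = 0; i < 5; i++) {
        ioctl(fd, WDIOC_KEEPALIVE, 0);  /* ping well inside the timeout */
        sleep(10);
    }

    if (write(fd, "V", 1) != 1)         /* magic close: disarm on exit */
        perror("magic close");
    close(fd);
    return 0;
}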
gpl-2.0
faux123/Shamu
drivers/gpu/drm/radeon/radeon_fb.c
2064
10487
/* * Copyright © 2007 David Airlie * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * David Airlie */ #include <linux/module.h> #include <linux/slab.h> #include <linux/fb.h> #include <drm/drmP.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include <drm/radeon_drm.h> #include "radeon.h" #include <drm/drm_fb_helper.h> #include <linux/vga_switcheroo.h> /* object hierarchy - this contains a helper + a radeon fb the helper contains a pointer to radeon framebuffer baseclass. */ struct radeon_fbdev { struct drm_fb_helper helper; struct radeon_framebuffer rfb; struct list_head fbdev_list; struct radeon_device *rdev; }; static struct fb_ops radeonfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, .fb_debug_enter = drm_fb_helper_debug_enter, .fb_debug_leave = drm_fb_helper_debug_leave, }; int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) { int aligned = width; int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; int pitch_mask = 0; switch (bpp / 8) { case 1: pitch_mask = align_large ? 255 : 127; break; case 2: pitch_mask = align_large ? 127 : 31; break; case 3: case 4: pitch_mask = align_large ? 
63 : 15; break; } aligned += pitch_mask; aligned &= ~pitch_mask; return aligned; } static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) { struct radeon_bo *rbo = gem_to_radeon_bo(gobj); int ret; ret = radeon_bo_reserve(rbo, false); if (likely(ret == 0)) { radeon_bo_kunmap(rbo); radeon_bo_unpin(rbo); radeon_bo_unreserve(rbo); } drm_gem_object_unreference_unlocked(gobj); } static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **gobj_p) { struct radeon_device *rdev = rfbdev->rdev; struct drm_gem_object *gobj = NULL; struct radeon_bo *rbo = NULL; bool fb_tiled = false; /* useful for testing */ u32 tiling_flags = 0; int ret; int aligned_size, size; int height = mode_cmd->height; u32 bpp, depth; drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp); /* need to align pitch with crtc limits */ mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp, fb_tiled) * ((bpp + 1) / 8); if (rdev->family >= CHIP_R600) height = ALIGN(mode_cmd->height, 8); size = mode_cmd->pitches[0] * height; aligned_size = ALIGN(size, PAGE_SIZE); ret = radeon_gem_object_create(rdev, aligned_size, 0, RADEON_GEM_DOMAIN_VRAM, false, true, &gobj); if (ret) { printk(KERN_ERR "failed to allocate framebuffer (%d)\n", aligned_size); return -ENOMEM; } rbo = gem_to_radeon_bo(gobj); if (fb_tiled) tiling_flags = RADEON_TILING_MACRO; #ifdef __BIG_ENDIAN switch (bpp) { case 32: tiling_flags |= RADEON_TILING_SWAP_32BIT; break; case 16: tiling_flags |= RADEON_TILING_SWAP_16BIT; default: break; } #endif if (tiling_flags) { ret = radeon_bo_set_tiling_flags(rbo, tiling_flags | RADEON_TILING_SURFACE, mode_cmd->pitches[0]); if (ret) dev_err(rdev->dev, "FB failed to set tiling flags\n"); } ret = radeon_bo_reserve(rbo, false); if (unlikely(ret != 0)) goto out_unref; /* Only 27 bit offset for legacy CRTC */ ret = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, ASIC_IS_AVIVO(rdev) ? 
0 : 1 << 27, NULL); if (ret) { radeon_bo_unreserve(rbo); goto out_unref; } if (fb_tiled) radeon_bo_check_tiling(rbo, 0, 0); ret = radeon_bo_kmap(rbo, NULL); radeon_bo_unreserve(rbo); if (ret) { goto out_unref; } *gobj_p = gobj; return 0; out_unref: radeonfb_destroy_pinned_object(gobj); *gobj_p = NULL; return ret; } static int radeonfb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper; struct radeon_device *rdev = rfbdev->rdev; struct fb_info *info; struct drm_framebuffer *fb = NULL; struct drm_mode_fb_cmd2 mode_cmd; struct drm_gem_object *gobj = NULL; struct radeon_bo *rbo = NULL; struct device *device = &rdev->pdev->dev; int ret; unsigned long tmp; mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; /* avivo can't scanout real 24bpp */ if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) sizes->surface_bpp = 32; mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj); if (ret) { DRM_ERROR("failed to create fbcon object %d\n", ret); return ret; } rbo = gem_to_radeon_bo(gobj); /* okay we have an object now allocate the framebuffer */ info = framebuffer_alloc(0, device); if (info == NULL) { ret = -ENOMEM; goto out_unref; } info->par = rfbdev; ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); if (ret) { DRM_ERROR("failed to initalise framebuffer %d\n", ret); goto out_unref; } fb = &rfbdev->rfb.base; /* setup helper */ rfbdev->helper.fb = fb; rfbdev->helper.fbdev = info; memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo)); strcpy(info->fix.id, "radeondrmfb"); drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &radeonfb_ops; tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start; info->fix.smem_start = rdev->mc.aper_base + tmp; info->fix.smem_len = radeon_bo_size(rbo); info->screen_base = rbo->kptr; info->screen_size = radeon_bo_size(rbo); drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); /* setup aperture base/size for vesafb takeover */ info->apertures = alloc_apertures(1); if (!info->apertures) { ret = -ENOMEM; goto out_unref; } info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; info->apertures->ranges[0].size = rdev->mc.aper_size; /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ if (info->screen_base == NULL) { ret = -ENOSPC; goto out_unref; } ret = fb_alloc_cmap(&info->cmap, 256, 0); if (ret) { ret = -ENOMEM; goto out_unref; } DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base); DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo)); DRM_INFO("fb depth is %d\n", fb->depth); DRM_INFO(" pitch is %d\n", fb->pitches[0]); vga_switcheroo_client_fb_set(rdev->ddev->pdev, info); return 0; out_unref: if (rbo) { } if (fb && ret) { drm_gem_object_unreference(gobj); drm_framebuffer_unregister_private(fb); drm_framebuffer_cleanup(fb); kfree(fb); } return ret; } void radeon_fb_output_poll_changed(struct radeon_device *rdev) { drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper); } static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev) { struct fb_info *info; struct radeon_framebuffer *rfb = &rfbdev->rfb; if (rfbdev->helper.fbdev) { info = rfbdev->helper.fbdev; unregister_framebuffer(info); if (info->cmap.len) 
fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } if (rfb->obj) { radeonfb_destroy_pinned_object(rfb->obj); rfb->obj = NULL; } drm_fb_helper_fini(&rfbdev->helper); drm_framebuffer_unregister_private(&rfb->base); drm_framebuffer_cleanup(&rfb->base); return 0; } static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { .gamma_set = radeon_crtc_fb_gamma_set, .gamma_get = radeon_crtc_fb_gamma_get, .fb_probe = radeonfb_create, }; int radeon_fbdev_init(struct radeon_device *rdev) { struct radeon_fbdev *rfbdev; int bpp_sel = 32; int ret; /* select 8 bpp console on RN50 or cards with 32MB of VRAM or less */ if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) bpp_sel = 8; rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL); if (!rfbdev) return -ENOMEM; rfbdev->rdev = rdev; rdev->mode_info.rfbdev = rfbdev; rfbdev->helper.funcs = &radeon_fb_helper_funcs; ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper, rdev->num_crtc, RADEONFB_CONN_LIMIT); if (ret) { kfree(rfbdev); return ret; } drm_fb_helper_single_add_all_connectors(&rfbdev->helper); /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(rdev->ddev); drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); return 0; } void radeon_fbdev_fini(struct radeon_device *rdev) { if (!rdev->mode_info.rfbdev) return; radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev); kfree(rdev->mode_info.rfbdev); rdev->mode_info.rfbdev = NULL; } void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state) { fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state); } int radeon_fbdev_total_size(struct radeon_device *rdev) { struct radeon_bo *robj; int size = 0; robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj); size += radeon_bo_size(robj); return size; } bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) { if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj)) return true; return false; }
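/*
 * A minimal user-space sketch (not part of this driver) of the
 * pitch-alignment idiom used by radeon_align_pitch() above: round the
 * width up to a power-of-two pixel boundary with a mask, then convert
 * pixels to bytes with ((bpp + 1) / 8) so that 15 bpp still occupies
 * 2 bytes per pixel. The 64-pixel boundary is an assumption for
 * illustration; the real value depends on the ASIC family and tiling.
 */
#include <stdio.h>

static unsigned int align_pitch_pixels(unsigned int width, unsigned int align)
{
	unsigned int mask = align - 1;	/* align must be a power of two */

	return (width + mask) & ~mask;	/* round up, as the driver does */
}

int main(void)
{
	unsigned int width = 1366, bpp = 16;
	unsigned int pitch = align_pitch_pixels(width, 64) * ((bpp + 1) / 8);

	printf("aligned pitch: %u bytes\n", pitch);	/* 1408 px * 2 bytes */
	return 0;
}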
gpl-2.0
Fusion-Devices/android_kernel_moto_shamu_old
drivers/xen/xen-pciback/pci_stub.c
2064
38415
/* * PCI Stub Driver - Grabs devices in backend to be exported later * * Ryan Wilson <hap9@epoch.ncsc.mil> * Chris Bookholt <hap10@epoch.ncsc.mil> */ #include <linux/module.h> #include <linux/init.h> #include <linux/rwsem.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/kref.h> #include <linux/pci.h> #include <linux/wait.h> #include <linux/sched.h> #include <linux/atomic.h> #include <xen/events.h> #include <asm/xen/pci.h> #include <asm/xen/hypervisor.h> #include <xen/interface/physdev.h> #include "pciback.h" #include "conf_space.h" #include "conf_space_quirks.h" static char *pci_devs_to_hide; wait_queue_head_t xen_pcibk_aer_wait_queue; /* Add a semaphore to synchronize AER handling with xen_pcibk remove/reconfigure ops; * we want to avoid a xen_pcibk device being removed in the middle of AER ops */ static DECLARE_RWSEM(pcistub_sem); module_param_named(hide, pci_devs_to_hide, charp, 0444); struct pcistub_device_id { struct list_head slot_list; int domain; unsigned char bus; unsigned int devfn; }; static LIST_HEAD(pcistub_device_ids); static DEFINE_SPINLOCK(device_ids_lock); struct pcistub_device { struct kref kref; struct list_head dev_list; spinlock_t lock; struct pci_dev *dev; struct xen_pcibk_device *pdev;/* non-NULL if struct pci_dev is in use */ }; /* Access to pcistub_devices & seized_devices lists and the initialize_devices * flag must be locked with pcistub_devices_lock */ static DEFINE_SPINLOCK(pcistub_devices_lock); static LIST_HEAD(pcistub_devices); /* wait for device_initcall before initializing our devices * (see pcistub_init_devices_late) */ static int initialize_devices; static LIST_HEAD(seized_devices); static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev) { struct pcistub_device *psdev; dev_dbg(&dev->dev, "pcistub_device_alloc\n"); psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC); if (!psdev) return NULL; psdev->dev = pci_dev_get(dev); if (!psdev->dev) { kfree(psdev); return NULL; } kref_init(&psdev->kref); spin_lock_init(&psdev->lock); return psdev; } /* Don't call this directly as it's called by pcistub_device_put */ static void pcistub_device_release(struct kref *kref) { struct pcistub_device *psdev; struct pci_dev *dev; struct xen_pcibk_dev_data *dev_data; psdev = container_of(kref, struct pcistub_device, kref); dev = psdev->dev; dev_data = pci_get_drvdata(dev); dev_dbg(&dev->dev, "pcistub_device_release\n"); xen_unregister_device_domain_owner(dev); /* Call the reset function, which does not take a lock, as this * is called from "unbind", which takes a device_lock mutex.
*/ __pci_reset_function_locked(dev); if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state)) dev_dbg(&dev->dev, "Could not reload PCI state\n"); else pci_restore_state(dev); if (dev->msix_cap) { struct physdev_pci_device ppdev = { .seg = pci_domain_nr(dev->bus), .bus = dev->bus->number, .devfn = dev->devfn }; int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix, &ppdev); if (err) dev_warn(&dev->dev, "MSI-X release failed (%d)\n", err); } /* Disable the device */ xen_pcibk_reset_device(dev); kfree(dev_data); pci_set_drvdata(dev, NULL); /* Clean-up the device */ xen_pcibk_config_free_dyn_fields(dev); xen_pcibk_config_free_dev(dev); dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; pci_dev_put(dev); kfree(psdev); } static inline void pcistub_device_get(struct pcistub_device *psdev) { kref_get(&psdev->kref); } static inline void pcistub_device_put(struct pcistub_device *psdev) { kref_put(&psdev->kref, pcistub_device_release); } static struct pcistub_device *pcistub_device_find(int domain, int bus, int slot, int func) { struct pcistub_device *psdev = NULL; unsigned long flags; spin_lock_irqsave(&pcistub_devices_lock, flags); list_for_each_entry(psdev, &pcistub_devices, dev_list) { if (psdev->dev != NULL && domain == pci_domain_nr(psdev->dev->bus) && bus == psdev->dev->bus->number && slot == PCI_SLOT(psdev->dev->devfn) && func == PCI_FUNC(psdev->dev->devfn)) { pcistub_device_get(psdev); goto out; } } /* didn't find it */ psdev = NULL; out: spin_unlock_irqrestore(&pcistub_devices_lock, flags); return psdev; } static struct pci_dev *pcistub_device_get_pci_dev(struct xen_pcibk_device *pdev, struct pcistub_device *psdev) { struct pci_dev *pci_dev = NULL; unsigned long flags; pcistub_device_get(psdev); spin_lock_irqsave(&psdev->lock, flags); if (!psdev->pdev) { psdev->pdev = pdev; pci_dev = psdev->dev; } spin_unlock_irqrestore(&psdev->lock, flags); if (!pci_dev) pcistub_device_put(psdev); return pci_dev; } struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev, int domain, int bus, int slot, int func) { struct pcistub_device *psdev; struct pci_dev *found_dev = NULL; unsigned long flags; spin_lock_irqsave(&pcistub_devices_lock, flags); list_for_each_entry(psdev, &pcistub_devices, dev_list) { if (psdev->dev != NULL && domain == pci_domain_nr(psdev->dev->bus) && bus == psdev->dev->bus->number && slot == PCI_SLOT(psdev->dev->devfn) && func == PCI_FUNC(psdev->dev->devfn)) { found_dev = pcistub_device_get_pci_dev(pdev, psdev); break; } } spin_unlock_irqrestore(&pcistub_devices_lock, flags); return found_dev; } struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev, struct pci_dev *dev) { struct pcistub_device *psdev; struct pci_dev *found_dev = NULL; unsigned long flags; spin_lock_irqsave(&pcistub_devices_lock, flags); list_for_each_entry(psdev, &pcistub_devices, dev_list) { if (psdev->dev == dev) { found_dev = pcistub_device_get_pci_dev(pdev, psdev); break; } } spin_unlock_irqrestore(&pcistub_devices_lock, flags); return found_dev; } void pcistub_put_pci_dev(struct pci_dev *dev) { struct pcistub_device *psdev, *found_psdev = NULL; unsigned long flags; spin_lock_irqsave(&pcistub_devices_lock, flags); list_for_each_entry(psdev, &pcistub_devices, dev_list) { if (psdev->dev == dev) { found_psdev = psdev; break; } } spin_unlock_irqrestore(&pcistub_devices_lock, flags); if (WARN_ON(!found_psdev)) return; /*hold this lock for avoiding breaking link between * pcistub and xen_pcibk when AER is in processing */ down_write(&pcistub_sem); /* Cleanup our device * (so it's ready for 
the next domain) */ /* This is OK - we are running from workqueue context * and want to inhibit the user from fiddling with 'reset' */ pci_reset_function(dev); pci_restore_state(psdev->dev); /* This disables the device. */ xen_pcibk_reset_device(found_psdev->dev); /* And cleanup up our emulated fields. */ xen_pcibk_config_free_dyn_fields(found_psdev->dev); xen_pcibk_config_reset_dev(found_psdev->dev); xen_unregister_device_domain_owner(found_psdev->dev); spin_lock_irqsave(&found_psdev->lock, flags); found_psdev->pdev = NULL; spin_unlock_irqrestore(&found_psdev->lock, flags); pcistub_device_put(found_psdev); up_write(&pcistub_sem); } static int pcistub_match_one(struct pci_dev *dev, struct pcistub_device_id *pdev_id) { /* Match the specified device by domain, bus, slot, func and also if * any of the device's parent bridges match. */ for (; dev != NULL; dev = dev->bus->self) { if (pci_domain_nr(dev->bus) == pdev_id->domain && dev->bus->number == pdev_id->bus && dev->devfn == pdev_id->devfn) return 1; /* Sometimes topmost bridge links to itself. */ if (dev == dev->bus->self) break; } return 0; } static int pcistub_match(struct pci_dev *dev) { struct pcistub_device_id *pdev_id; unsigned long flags; int found = 0; spin_lock_irqsave(&device_ids_lock, flags); list_for_each_entry(pdev_id, &pcistub_device_ids, slot_list) { if (pcistub_match_one(dev, pdev_id)) { found = 1; break; } } spin_unlock_irqrestore(&device_ids_lock, flags); return found; } static int pcistub_init_device(struct pci_dev *dev) { struct xen_pcibk_dev_data *dev_data; int err = 0; dev_dbg(&dev->dev, "initializing...\n"); /* The PCI backend is not intended to be a module (or to work with * removable PCI devices (yet). If it were, xen_pcibk_config_free() * would need to be called somewhere to free the memory allocated * here and then to call kfree(pci_get_drvdata(psdev->dev)). */ dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]") + strlen(pci_name(dev)) + 1, GFP_ATOMIC); if (!dev_data) { err = -ENOMEM; goto out; } pci_set_drvdata(dev, dev_data); /* * Setup name for fake IRQ handler. It will only be enabled * once the device is turned on by the guest. */ sprintf(dev_data->irq_name, DRV_NAME "[%s]", pci_name(dev)); dev_dbg(&dev->dev, "initializing config\n"); init_waitqueue_head(&xen_pcibk_aer_wait_queue); err = xen_pcibk_config_init_dev(dev); if (err) goto out; /* HACK: Force device (& ACPI) to determine what IRQ it's on - we * must do this here because pcibios_enable_device may specify * the pci device's true irq (and possibly its other resources) * if they differ from what's in the configuration space. * This makes the assumption that the device's resources won't * change after this point (otherwise this code may break!) */ dev_dbg(&dev->dev, "enabling device\n"); err = pci_enable_device(dev); if (err) goto config_release; if (dev->msix_cap) { struct physdev_pci_device ppdev = { .seg = pci_domain_nr(dev->bus), .bus = dev->bus->number, .devfn = dev->devfn }; err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev); if (err) dev_err(&dev->dev, "MSI-X preparation failed (%d)\n", err); } /* We need the device active to save the state. 
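* (pci_save_state() snapshots the live config space, and pci_restore_state() below writes that snapshot back after the function-level reset)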
*/ dev_dbg(&dev->dev, "save state of device\n"); pci_save_state(dev); dev_data->pci_saved_state = pci_store_saved_state(dev); if (!dev_data->pci_saved_state) dev_err(&dev->dev, "Could not store PCI conf saved state!\n"); else { dev_dbg(&dev->dev, "resetting (FLR, D3, etc) the device\n"); __pci_reset_function_locked(dev); pci_restore_state(dev); } /* Now disable the device (this also ensures some private device * data is setup before we export) */ dev_dbg(&dev->dev, "reset device\n"); xen_pcibk_reset_device(dev); dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; return 0; config_release: xen_pcibk_config_free_dev(dev); out: pci_set_drvdata(dev, NULL); kfree(dev_data); return err; } /* * Because some initialization still happens on * devices during fs_initcall, we need to defer * full initialization of our devices until * device_initcall. */ static int __init pcistub_init_devices_late(void) { struct pcistub_device *psdev; unsigned long flags; int err = 0; pr_debug(DRV_NAME ": pcistub_init_devices_late\n"); spin_lock_irqsave(&pcistub_devices_lock, flags); while (!list_empty(&seized_devices)) { psdev = container_of(seized_devices.next, struct pcistub_device, dev_list); list_del(&psdev->dev_list); spin_unlock_irqrestore(&pcistub_devices_lock, flags); err = pcistub_init_device(psdev->dev); if (err) { dev_err(&psdev->dev->dev, "error %d initializing device\n", err); kfree(psdev); psdev = NULL; } spin_lock_irqsave(&pcistub_devices_lock, flags); if (psdev) list_add_tail(&psdev->dev_list, &pcistub_devices); } initialize_devices = 1; spin_unlock_irqrestore(&pcistub_devices_lock, flags); return 0; } static int pcistub_seize(struct pci_dev *dev) { struct pcistub_device *psdev; unsigned long flags; int err = 0; psdev = pcistub_device_alloc(dev); if (!psdev) return -ENOMEM; spin_lock_irqsave(&pcistub_devices_lock, flags); if (initialize_devices) { spin_unlock_irqrestore(&pcistub_devices_lock, flags); /* don't want irqs disabled when calling pcistub_init_device */ err = pcistub_init_device(psdev->dev); spin_lock_irqsave(&pcistub_devices_lock, flags); if (!err) list_add(&psdev->dev_list, &pcistub_devices); } else { dev_dbg(&dev->dev, "deferring initialization\n"); list_add(&psdev->dev_list, &seized_devices); } spin_unlock_irqrestore(&pcistub_devices_lock, flags); if (err) pcistub_device_put(psdev); return err; } static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id) { int err = 0; dev_dbg(&dev->dev, "probing...\n"); if (pcistub_match(dev)) { if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) { dev_err(&dev->dev, "can't export pci devices that " "don't have a normal (0) or bridge (1) " "header type!\n"); err = -ENODEV; goto out; } dev_info(&dev->dev, "seizing device\n"); err = pcistub_seize(dev); } else /* Didn't find the device */ err = -ENODEV; out: return err; } static void pcistub_remove(struct pci_dev *dev) { struct pcistub_device *psdev, *found_psdev = NULL; unsigned long flags; dev_dbg(&dev->dev, "removing\n"); spin_lock_irqsave(&pcistub_devices_lock, flags); xen_pcibk_config_quirk_release(dev); list_for_each_entry(psdev, &pcistub_devices, dev_list) { if (psdev->dev == dev) { found_psdev = psdev; break; } } spin_unlock_irqrestore(&pcistub_devices_lock, flags); if (found_psdev) { dev_dbg(&dev->dev, "found device to remove - in use? %p\n", found_psdev->pdev); if (found_psdev->pdev) { printk(KERN_WARNING DRV_NAME ": ****** removing device " "%s while still in-use! 
******\n", pci_name(found_psdev->dev)); printk(KERN_WARNING DRV_NAME ": ****** driver domain may" " still access this device's i/o resources!\n"); printk(KERN_WARNING DRV_NAME ": ****** shutdown driver " "domain before binding device\n"); printk(KERN_WARNING DRV_NAME ": ****** to other drivers " "or domains\n"); xen_pcibk_release_pci_dev(found_psdev->pdev, found_psdev->dev); } spin_lock_irqsave(&pcistub_devices_lock, flags); list_del(&found_psdev->dev_list); spin_unlock_irqrestore(&pcistub_devices_lock, flags); /* the final put for releasing from the list */ pcistub_device_put(found_psdev); } } static DEFINE_PCI_DEVICE_TABLE(pcistub_ids) = { { .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, {0,}, }; #define PCI_NODENAME_MAX 40 static void kill_domain_by_device(struct pcistub_device *psdev) { struct xenbus_transaction xbt; int err; char nodename[PCI_NODENAME_MAX]; BUG_ON(!psdev); snprintf(nodename, PCI_NODENAME_MAX, "/local/domain/0/backend/pci/%d/0", psdev->pdev->xdev->otherend_id); again: err = xenbus_transaction_start(&xbt); if (err) { dev_err(&psdev->dev->dev, "error %d when start xenbus transaction\n", err); return; } /*PV AER handlers will set this flag*/ xenbus_printf(xbt, nodename, "aerState" , "aerfail"); err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; dev_err(&psdev->dev->dev, "error %d when end xenbus transaction\n", err); return; } } /* For each aer recovery step error_detected, mmio_enabled, etc, front_end and * backend need to have cooperation. In xen_pcibk, those steps will do similar * jobs: send service request and waiting for front_end response. */ static pci_ers_result_t common_process(struct pcistub_device *psdev, pci_channel_state_t state, int aer_cmd, pci_ers_result_t result) { pci_ers_result_t res = result; struct xen_pcie_aer_op *aer_op; int ret; /*with PV AER drivers*/ aer_op = &(psdev->pdev->sh_info->aer_op); aer_op->cmd = aer_cmd ; /*useful for error_detected callback*/ aer_op->err = state; /*pcifront_end BDF*/ ret = xen_pcibk_get_pcifront_dev(psdev->dev, psdev->pdev, &aer_op->domain, &aer_op->bus, &aer_op->devfn); if (!ret) { dev_err(&psdev->dev->dev, DRV_NAME ": failed to get pcifront device\n"); return PCI_ERS_RESULT_NONE; } wmb(); dev_dbg(&psdev->dev->dev, DRV_NAME ": aer_op %x dom %x bus %x devfn %x\n", aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn); /*local flag to mark there's aer request, xen_pcibk callback will use * this flag to judge whether we need to check pci-front give aer * service ack signal */ set_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags); /*It is possible that a pcifront conf_read_write ops request invokes * the callback which cause the spurious execution of wake_up. 
* Yet it is harmless and better than a spinlock here */ set_bit(_XEN_PCIB_active, (unsigned long *)&psdev->pdev->sh_info->flags); wmb(); notify_remote_via_irq(psdev->pdev->evtchn_irq); ret = wait_event_timeout(xen_pcibk_aer_wait_queue, !(test_bit(_XEN_PCIB_active, (unsigned long *) &psdev->pdev->sh_info->flags)), 300*HZ); if (!ret) { if (test_bit(_XEN_PCIB_active, (unsigned long *)&psdev->pdev->sh_info->flags)) { dev_err(&psdev->dev->dev, "pcifront aer process not responding!\n"); clear_bit(_XEN_PCIB_active, (unsigned long *)&psdev->pdev->sh_info->flags); aer_op->err = PCI_ERS_RESULT_NONE; return res; } } clear_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags); if (test_bit(_XEN_PCIF_active, (unsigned long *)&psdev->pdev->sh_info->flags)) { dev_dbg(&psdev->dev->dev, "schedule pci_conf service in " DRV_NAME "\n"); xen_pcibk_test_and_schedule_op(psdev->pdev); } res = (pci_ers_result_t)aer_op->err; return res; } /* * xen_pcibk_slot_reset: it will send the slot_reset request to pcifront in case * of the device driver could provide this service, and then wait for pcifront * ack. * @dev: pointer to PCI devices * return value is used by aer_core do_recovery policy */ static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev) { struct pcistub_device *psdev; pci_ers_result_t result; result = PCI_ERS_RESULT_RECOVERED; dev_dbg(&dev->dev, "xen_pcibk_slot_reset(bus:%x,devfn:%x)\n", dev->bus->number, dev->devfn); down_write(&pcistub_sem); psdev = pcistub_device_find(pci_domain_nr(dev->bus), dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); if (!psdev || !psdev->pdev) { dev_err(&dev->dev, DRV_NAME " device is not found/assigned\n"); goto end; } if (!psdev->pdev->sh_info) { dev_err(&dev->dev, DRV_NAME " device is not connected or owned" " by HVM, kill it\n"); kill_domain_by_device(psdev); goto end; } if (!test_bit(_XEN_PCIB_AERHANDLER, (unsigned long *)&psdev->pdev->sh_info->flags)) { dev_err(&dev->dev, "guest with no AER driver should have been killed\n"); goto end; } result = common_process(psdev, 1, XEN_PCI_OP_aer_slotreset, result); if (result == PCI_ERS_RESULT_NONE || result == PCI_ERS_RESULT_DISCONNECT) { dev_dbg(&dev->dev, "No AER slot_reset service or disconnected!\n"); kill_domain_by_device(psdev); } end: if (psdev) pcistub_device_put(psdev); up_write(&pcistub_sem); return result; } /*xen_pcibk_mmio_enabled: it will send the mmio_enabled request to pcifront * in case of the device driver could provide this service, and then wait * for pcifront ack * @dev: pointer to PCI devices * return value is used by aer_core do_recovery policy */ static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev) { struct pcistub_device *psdev; pci_ers_result_t result; result = PCI_ERS_RESULT_RECOVERED; dev_dbg(&dev->dev, "xen_pcibk_mmio_enabled(bus:%x,devfn:%x)\n", dev->bus->number, dev->devfn); down_write(&pcistub_sem); psdev = pcistub_device_find(pci_domain_nr(dev->bus), dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); if (!psdev || !psdev->pdev) { dev_err(&dev->dev, DRV_NAME " device is not found/assigned\n"); goto end; } if (!psdev->pdev->sh_info) { dev_err(&dev->dev, DRV_NAME " device is not connected or owned" " by HVM, kill it\n"); kill_domain_by_device(psdev); goto end; } if (!test_bit(_XEN_PCIB_AERHANDLER, (unsigned long *)&psdev->pdev->sh_info->flags)) { dev_err(&dev->dev, "guest with no AER driver should have been killed\n"); goto end; } result = common_process(psdev, 1, XEN_PCI_OP_aer_mmio, result); if (result == PCI_ERS_RESULT_NONE || result == 
PCI_ERS_RESULT_DISCONNECT) { dev_dbg(&dev->dev, "No AER mmio_enabled service or disconnected!\n"); kill_domain_by_device(psdev); } end: if (psdev) pcistub_device_put(psdev); up_write(&pcistub_sem); return result; } /*xen_pcibk_error_detected: it will send the error_detected request to pcifront * in case of the device driver could provide this service, and then wait * for pcifront ack. * @dev: pointer to PCI devices * @error: the current PCI connection state * return value is used by aer_core do_recovery policy */ static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev, pci_channel_state_t error) { struct pcistub_device *psdev; pci_ers_result_t result; result = PCI_ERS_RESULT_CAN_RECOVER; dev_dbg(&dev->dev, "xen_pcibk_error_detected(bus:%x,devfn:%x)\n", dev->bus->number, dev->devfn); down_write(&pcistub_sem); psdev = pcistub_device_find(pci_domain_nr(dev->bus), dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); if (!psdev || !psdev->pdev) { dev_err(&dev->dev, DRV_NAME " device is not found/assigned\n"); goto end; } if (!psdev->pdev->sh_info) { dev_err(&dev->dev, DRV_NAME " device is not connected or owned" " by HVM, kill it\n"); kill_domain_by_device(psdev); goto end; } /*Guest owns the device yet no aer handler regiested, kill guest*/ if (!test_bit(_XEN_PCIB_AERHANDLER, (unsigned long *)&psdev->pdev->sh_info->flags)) { dev_dbg(&dev->dev, "guest may have no aer driver, kill it\n"); kill_domain_by_device(psdev); goto end; } result = common_process(psdev, error, XEN_PCI_OP_aer_detected, result); if (result == PCI_ERS_RESULT_NONE || result == PCI_ERS_RESULT_DISCONNECT) { dev_dbg(&dev->dev, "No AER error_detected service or disconnected!\n"); kill_domain_by_device(psdev); } end: if (psdev) pcistub_device_put(psdev); up_write(&pcistub_sem); return result; } /*xen_pcibk_error_resume: it will send the error_resume request to pcifront * in case of the device driver could provide this service, and then wait * for pcifront ack. * @dev: pointer to PCI devices */ static void xen_pcibk_error_resume(struct pci_dev *dev) { struct pcistub_device *psdev; dev_dbg(&dev->dev, "xen_pcibk_error_resume(bus:%x,devfn:%x)\n", dev->bus->number, dev->devfn); down_write(&pcistub_sem); psdev = pcistub_device_find(pci_domain_nr(dev->bus), dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); if (!psdev || !psdev->pdev) { dev_err(&dev->dev, DRV_NAME " device is not found/assigned\n"); goto end; } if (!psdev->pdev->sh_info) { dev_err(&dev->dev, DRV_NAME " device is not connected or owned" " by HVM, kill it\n"); kill_domain_by_device(psdev); goto end; } if (!test_bit(_XEN_PCIB_AERHANDLER, (unsigned long *)&psdev->pdev->sh_info->flags)) { dev_err(&dev->dev, "guest with no AER driver should have been killed\n"); kill_domain_by_device(psdev); goto end; } common_process(psdev, 1, XEN_PCI_OP_aer_resume, PCI_ERS_RESULT_RECOVERED); end: if (psdev) pcistub_device_put(psdev); up_write(&pcistub_sem); return; } /*add xen_pcibk AER handling*/ static const struct pci_error_handlers xen_pcibk_error_handler = { .error_detected = xen_pcibk_error_detected, .mmio_enabled = xen_pcibk_mmio_enabled, .slot_reset = xen_pcibk_slot_reset, .resume = xen_pcibk_error_resume, }; /* * Note: There is no MODULE_DEVICE_TABLE entry here because this isn't * for a normal device. I don't want it to be loaded automatically. */ static struct pci_driver xen_pcibk_pci_driver = { /* The name should be xen_pciback, but until the tools are updated * we will keep it as pciback. 
*/ .name = "pciback", .id_table = pcistub_ids, .probe = pcistub_probe, .remove = pcistub_remove, .err_handler = &xen_pcibk_error_handler, }; static inline int str_to_slot(const char *buf, int *domain, int *bus, int *slot, int *func) { int parsed = 0; switch (sscanf(buf, " %x:%x:%x.%x %n", domain, bus, slot, func, &parsed)) { case 3: *func = -1; sscanf(buf, " %x:%x:%x.* %n", domain, bus, slot, &parsed); break; case 2: *slot = *func = -1; sscanf(buf, " %x:%x:*.* %n", domain, bus, &parsed); break; } if (parsed && !buf[parsed]) return 0; /* try again without domain */ *domain = 0; switch (sscanf(buf, " %x:%x.%x %n", bus, slot, func, &parsed)) { case 2: *func = -1; sscanf(buf, " %x:%x.* %n", bus, slot, &parsed); break; case 1: *slot = *func = -1; sscanf(buf, " %x:*.* %n", bus, &parsed); break; } if (parsed && !buf[parsed]) return 0; return -EINVAL; } static inline int str_to_quirk(const char *buf, int *domain, int *bus, int *slot, int *func, int *reg, int *size, int *mask) { int parsed = 0; sscanf(buf, " %x:%x:%x.%x-%x:%x:%x %n", domain, bus, slot, func, reg, size, mask, &parsed); if (parsed && !buf[parsed]) return 0; /* try again without domain */ *domain = 0; sscanf(buf, " %x:%x.%x-%x:%x:%x %n", bus, slot, func, reg, size, mask, &parsed); if (parsed && !buf[parsed]) return 0; return -EINVAL; } static int pcistub_device_id_add(int domain, int bus, int slot, int func) { struct pcistub_device_id *pci_dev_id; unsigned long flags; int rc = 0, devfn = PCI_DEVFN(slot, func); if (slot < 0) { for (slot = 0; !rc && slot < 32; ++slot) rc = pcistub_device_id_add(domain, bus, slot, func); return rc; } if (func < 0) { for (func = 0; !rc && func < 8; ++func) rc = pcistub_device_id_add(domain, bus, slot, func); return rc; } if (( #if !defined(MODULE) /* pci_domains_supported is not being exported */ \ || !defined(CONFIG_PCI_DOMAINS) !pci_domains_supported ? 
domain : #endif domain < 0 || domain > 0xffff) || bus < 0 || bus > 0xff || PCI_SLOT(devfn) != slot || PCI_FUNC(devfn) != func) return -EINVAL; pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL); if (!pci_dev_id) return -ENOMEM; pci_dev_id->domain = domain; pci_dev_id->bus = bus; pci_dev_id->devfn = devfn; pr_debug(DRV_NAME ": wants to seize %04x:%02x:%02x.%d\n", domain, bus, slot, func); spin_lock_irqsave(&device_ids_lock, flags); list_add_tail(&pci_dev_id->slot_list, &pcistub_device_ids); spin_unlock_irqrestore(&device_ids_lock, flags); return 0; } static int pcistub_device_id_remove(int domain, int bus, int slot, int func) { struct pcistub_device_id *pci_dev_id, *t; int err = -ENOENT; unsigned long flags; spin_lock_irqsave(&device_ids_lock, flags); list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids, slot_list) { if (pci_dev_id->domain == domain && pci_dev_id->bus == bus && (slot < 0 || PCI_SLOT(pci_dev_id->devfn) == slot) && (func < 0 || PCI_FUNC(pci_dev_id->devfn) == func)) { /* Don't break; here because it's possible the same * slot could be in the list more than once */ list_del(&pci_dev_id->slot_list); kfree(pci_dev_id); err = 0; pr_debug(DRV_NAME ": removed %04x:%02x:%02x.%d from " "seize list\n", domain, bus, slot, func); } } spin_unlock_irqrestore(&device_ids_lock, flags); return err; } static int pcistub_reg_add(int domain, int bus, int slot, int func, unsigned int reg, unsigned int size, unsigned int mask) { int err = 0; struct pcistub_device *psdev; struct pci_dev *dev; struct config_field *field; if (reg > 0xfff || (size < 4 && (mask >> (size * 8)))) return -EINVAL; psdev = pcistub_device_find(domain, bus, slot, func); if (!psdev) { err = -ENODEV; goto out; } dev = psdev->dev; field = kzalloc(sizeof(*field), GFP_ATOMIC); if (!field) { err = -ENOMEM; goto out; } field->offset = reg; field->size = size; field->mask = mask; field->init = NULL; field->reset = NULL; field->release = NULL; field->clean = xen_pcibk_config_field_free; err = xen_pcibk_config_quirks_add_field(dev, field); if (err) kfree(field); out: if (psdev) pcistub_device_put(psdev); return err; } static ssize_t pcistub_slot_add(struct device_driver *drv, const char *buf, size_t count) { int domain, bus, slot, func; int err; err = str_to_slot(buf, &domain, &bus, &slot, &func); if (err) goto out; err = pcistub_device_id_add(domain, bus, slot, func); out: if (!err) err = count; return err; } static DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add); static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf, size_t count) { int domain, bus, slot, func; int err; err = str_to_slot(buf, &domain, &bus, &slot, &func); if (err) goto out; err = pcistub_device_id_remove(domain, bus, slot, func); out: if (!err) err = count; return err; } static DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove); static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf) { struct pcistub_device_id *pci_dev_id; size_t count = 0; unsigned long flags; spin_lock_irqsave(&device_ids_lock, flags); list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) { if (count >= PAGE_SIZE) break; count += scnprintf(buf + count, PAGE_SIZE - count, "%04x:%02x:%02x.%d\n", pci_dev_id->domain, pci_dev_id->bus, PCI_SLOT(pci_dev_id->devfn), PCI_FUNC(pci_dev_id->devfn)); } spin_unlock_irqrestore(&device_ids_lock, flags); return count; } static DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL); static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf) { struct pcistub_device *psdev; 
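/* dump one line per seized device: name, fake-IRQ handler on/off, ack mode, and how many interrupts it has handled */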
struct xen_pcibk_dev_data *dev_data; size_t count = 0; unsigned long flags; spin_lock_irqsave(&pcistub_devices_lock, flags); list_for_each_entry(psdev, &pcistub_devices, dev_list) { if (count >= PAGE_SIZE) break; if (!psdev->dev) continue; dev_data = pci_get_drvdata(psdev->dev); if (!dev_data) continue; count += scnprintf(buf + count, PAGE_SIZE - count, "%s:%s:%sing:%ld\n", pci_name(psdev->dev), dev_data->isr_on ? "on" : "off", dev_data->ack_intr ? "ack" : "not ack", dev_data->handled); } spin_unlock_irqrestore(&pcistub_devices_lock, flags); return count; } static DRIVER_ATTR(irq_handlers, S_IRUSR, pcistub_irq_handler_show, NULL); static ssize_t pcistub_irq_handler_switch(struct device_driver *drv, const char *buf, size_t count) { struct pcistub_device *psdev; struct xen_pcibk_dev_data *dev_data; int domain, bus, slot, func; int err = -ENOENT; err = str_to_slot(buf, &domain, &bus, &slot, &func); if (err) return err; psdev = pcistub_device_find(domain, bus, slot, func); if (!psdev) goto out; dev_data = pci_get_drvdata(psdev->dev); if (!dev_data) goto out; dev_dbg(&psdev->dev->dev, "%s fake irq handler: %d->%d\n", dev_data->irq_name, dev_data->isr_on, !dev_data->isr_on); dev_data->isr_on = !(dev_data->isr_on); if (dev_data->isr_on) dev_data->ack_intr = 1; out: if (psdev) pcistub_device_put(psdev); if (!err) err = count; return err; } static DRIVER_ATTR(irq_handler_state, S_IWUSR, NULL, pcistub_irq_handler_switch); static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf, size_t count) { int domain, bus, slot, func, reg, size, mask; int err; err = str_to_quirk(buf, &domain, &bus, &slot, &func, &reg, &size, &mask); if (err) goto out; err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask); out: if (!err) err = count; return err; } static ssize_t pcistub_quirk_show(struct device_driver *drv, char *buf) { int count = 0; unsigned long flags; struct xen_pcibk_config_quirk *quirk; struct xen_pcibk_dev_data *dev_data; const struct config_field *field; const struct config_field_entry *cfg_entry; spin_lock_irqsave(&device_ids_lock, flags); list_for_each_entry(quirk, &xen_pcibk_quirks, quirks_list) { if (count >= PAGE_SIZE) goto out; count += scnprintf(buf + count, PAGE_SIZE - count, "%02x:%02x.%01x\n\t%04x:%04x:%04x:%04x\n", quirk->pdev->bus->number, PCI_SLOT(quirk->pdev->devfn), PCI_FUNC(quirk->pdev->devfn), quirk->devid.vendor, quirk->devid.device, quirk->devid.subvendor, quirk->devid.subdevice); dev_data = pci_get_drvdata(quirk->pdev); list_for_each_entry(cfg_entry, &dev_data->config_fields, list) { field = cfg_entry->field; if (count >= PAGE_SIZE) goto out; count += scnprintf(buf + count, PAGE_SIZE - count, "\t\t%08x:%01x:%08x\n", cfg_entry->base_offset + field->offset, field->size, field->mask); } } out: spin_unlock_irqrestore(&device_ids_lock, flags); return count; } static DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show, pcistub_quirk_add); static ssize_t permissive_add(struct device_driver *drv, const char *buf, size_t count) { int domain, bus, slot, func; int err; struct pcistub_device *psdev; struct xen_pcibk_dev_data *dev_data; err = str_to_slot(buf, &domain, &bus, &slot, &func); if (err) goto out; psdev = pcistub_device_find(domain, bus, slot, func); if (!psdev) { err = -ENODEV; goto out; } dev_data = pci_get_drvdata(psdev->dev); /* the driver data for a device should never be null at this point */ if (!dev_data) { err = -ENXIO; goto release; } if (!dev_data->permissive) { dev_data->permissive = 1; /* Let user know that what they're doing could be 
unsafe */ dev_warn(&psdev->dev->dev, "enabling permissive mode " "configuration space accesses!\n"); dev_warn(&psdev->dev->dev, "permissive mode is potentially unsafe!\n"); } release: pcistub_device_put(psdev); out: if (!err) err = count; return err; } static ssize_t permissive_show(struct device_driver *drv, char *buf) { struct pcistub_device *psdev; struct xen_pcibk_dev_data *dev_data; size_t count = 0; unsigned long flags; spin_lock_irqsave(&pcistub_devices_lock, flags); list_for_each_entry(psdev, &pcistub_devices, dev_list) { if (count >= PAGE_SIZE) break; if (!psdev->dev) continue; dev_data = pci_get_drvdata(psdev->dev); if (!dev_data || !dev_data->permissive) continue; count += scnprintf(buf + count, PAGE_SIZE - count, "%s\n", pci_name(psdev->dev)); } spin_unlock_irqrestore(&pcistub_devices_lock, flags); return count; } static DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show, permissive_add); static void pcistub_exit(void) { driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_new_slot); driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_remove_slot); driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_slots); driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_quirks); driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_permissive); driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_irq_handlers); driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_irq_handler_state); pci_unregister_driver(&xen_pcibk_pci_driver); } static int __init pcistub_init(void) { int pos = 0; int err = 0; int domain, bus, slot, func; int parsed; if (pci_devs_to_hide && *pci_devs_to_hide) { do { parsed = 0; err = sscanf(pci_devs_to_hide + pos, " (%x:%x:%x.%x) %n", &domain, &bus, &slot, &func, &parsed); switch (err) { case 3: func = -1; sscanf(pci_devs_to_hide + pos, " (%x:%x:%x.*) %n", &domain, &bus, &slot, &parsed); break; case 2: slot = func = -1; sscanf(pci_devs_to_hide + pos, " (%x:%x:*.*) %n", &domain, &bus, &parsed); break; } if (!parsed) { domain = 0; err = sscanf(pci_devs_to_hide + pos, " (%x:%x.%x) %n", &bus, &slot, &func, &parsed); switch (err) { case 2: func = -1; sscanf(pci_devs_to_hide + pos, " (%x:%x.*) %n", &bus, &slot, &parsed); break; case 1: slot = func = -1; sscanf(pci_devs_to_hide + pos, " (%x:*.*) %n", &bus, &parsed); break; } } if (parsed <= 0) goto parse_error; err = pcistub_device_id_add(domain, bus, slot, func); if (err) goto out; pos += parsed; } while (pci_devs_to_hide[pos]); } /* If we're the first PCI Device Driver to register, we're the * first one to get offered PCI devices as they become * available (and thus we can be the first to grab them) */ err = pci_register_driver(&xen_pcibk_pci_driver); if (err < 0) goto out; err = driver_create_file(&xen_pcibk_pci_driver.driver, &driver_attr_new_slot); if (!err) err = driver_create_file(&xen_pcibk_pci_driver.driver, &driver_attr_remove_slot); if (!err) err = driver_create_file(&xen_pcibk_pci_driver.driver, &driver_attr_slots); if (!err) err = driver_create_file(&xen_pcibk_pci_driver.driver, &driver_attr_quirks); if (!err) err = driver_create_file(&xen_pcibk_pci_driver.driver, &driver_attr_permissive); if (!err) err = driver_create_file(&xen_pcibk_pci_driver.driver, &driver_attr_irq_handlers); if (!err) err = driver_create_file(&xen_pcibk_pci_driver.driver, &driver_attr_irq_handler_state); if (err) pcistub_exit(); out: return err; parse_error: printk(KERN_ERR DRV_NAME ": Error parsing pci_devs_to_hide at \"%s\"\n", pci_devs_to_hide + pos); return -EINVAL; } 
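/*
 * A standalone user-space sketch (not part of this module) of the "hide="
 * syntax pcistub_init() parses above. Each entry is a parenthesised BDF,
 * with an optional domain and '*' wildcards for slot and function, e.g.
 * hide=(0000:03:00.0)(04:1f.*). The sscanf() pattern below is the same
 * " (%x:%x:%x.%x) %n" form used by the driver.
 */
#include <stdio.h>

static int parse_one(const char *s, int *dom, int *bus, int *slot, int *func)
{
	int parsed = 0;

	/* %n records how many bytes were consumed, as in pcistub_init() */
	if (sscanf(s, " (%x:%x:%x.%x) %n", dom, bus, slot, func, &parsed) == 4
	    && parsed)
		return parsed;
	return 0;
}

int main(void)
{
	int d, b, s, f;

	if (parse_one("(0000:03:00.0)", &d, &b, &s, &f))
		printf("%04x:%02x:%02x.%d\n", d, b, s, f);
	return 0;
}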
#ifndef MODULE /* * fs_initcall happens before device_initcall * so xen_pcibk *should* get called first (b/c we * want to suck up any device before other drivers * get a chance by being the first pci device * driver to register) */ fs_initcall(pcistub_init); #endif static int __init xen_pcibk_init(void) { int err; if (!xen_initial_domain()) return -ENODEV; err = xen_pcibk_config_init(); if (err) return err; #ifdef MODULE err = pcistub_init(); if (err < 0) return err; #endif pcistub_init_devices_late(); err = xen_pcibk_xenbus_register(); if (err) pcistub_exit(); return err; } static void __exit xen_pcibk_cleanup(void) { xen_pcibk_xenbus_unregister(); pcistub_exit(); } module_init(xen_pcibk_init); module_exit(xen_pcibk_cleanup); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("xen-backend:pci");
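/*
 * A minimal user-space sketch (not part of this module) of the kref
 * lifetime pattern pcistub_device relies on: pcistub_device_alloc()
 * starts at one reference, every lookup (pcistub_device_find) takes
 * another, and the release callback runs exactly once when the last
 * reference is dropped. A plain counter stands in for the kernel's
 * struct kref, which is atomic.
 */
#include <stdio.h>
#include <stdlib.h>

struct kref { int refcount; };

static void kref_init(struct kref *k) { k->refcount = 1; }
static void kref_get(struct kref *k) { k->refcount++; }
static void kref_put(struct kref *k, void (*release)(struct kref *k))
{
	if (--k->refcount == 0)
		release(k);
}

struct mydev {
	struct kref kref;	/* first member, so the cast below is safe */
	int id;
};

static void mydev_release(struct kref *k)
{
	struct mydev *d = (struct mydev *)k;	/* poor man's container_of */

	printf("releasing device %d\n", d->id);
	free(d);
}

int main(void)
{
	struct mydev *d = malloc(sizeof(*d));

	if (!d)
		return 1;
	kref_init(&d->kref);			/* as in pcistub_device_alloc() */
	d->id = 7;
	kref_get(&d->kref);			/* as in pcistub_device_find() */
	kref_put(&d->kref, mydev_release);	/* lookup user is done */
	kref_put(&d->kref, mydev_release);	/* final put frees the device */
	return 0;
}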
gpl-2.0
Nihhaar/android_kernel_xiaomi_mocha
net/irda/ircomm/ircomm_tty_attach.c
2320
28140
/********************************************************************* * * Filename: ircomm_tty_attach.c * Version: * Description: Code for attaching the serial driver to IrCOMM * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Sat Jun 5 17:42:00 1999 * Modified at: Tue Jan 4 14:20:49 2000 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved. * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA * ********************************************************************/ #include <linux/init.h> #include <linux/sched.h> #include <net/irda/irda.h> #include <net/irda/irlmp.h> #include <net/irda/iriap.h> #include <net/irda/irttp.h> #include <net/irda/irias_object.h> #include <net/irda/parameters.h> #include <net/irda/ircomm_core.h> #include <net/irda/ircomm_param.h> #include <net/irda/ircomm_event.h> #include <net/irda/ircomm_tty.h> #include <net/irda/ircomm_tty_attach.h> static void ircomm_tty_ias_register(struct ircomm_tty_cb *self); static void ircomm_tty_discovery_indication(discinfo_t *discovery, DISCOVERY_MODE mode, void *priv); static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id, struct ias_value *value, void *priv); static void ircomm_tty_start_watchdog_timer(struct ircomm_tty_cb *self, int timeout); static void ircomm_tty_watchdog_timer_expired(void *data); static int ircomm_tty_state_idle(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info); static int ircomm_tty_state_search(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info); static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info); static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info); static int ircomm_tty_state_setup(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info); static int ircomm_tty_state_ready(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info); const char *const ircomm_tty_state[] = { "IRCOMM_TTY_IDLE", "IRCOMM_TTY_SEARCH", "IRCOMM_TTY_QUERY_PARAMETERS", "IRCOMM_TTY_QUERY_LSAP_SEL", "IRCOMM_TTY_SETUP", "IRCOMM_TTY_READY", "*** ERROR *** ", }; #ifdef CONFIG_IRDA_DEBUG static const char *const ircomm_tty_event[] = { "IRCOMM_TTY_ATTACH_CABLE", "IRCOMM_TTY_DETACH_CABLE", "IRCOMM_TTY_DATA_REQUEST", "IRCOMM_TTY_DATA_INDICATION", "IRCOMM_TTY_DISCOVERY_REQUEST", "IRCOMM_TTY_DISCOVERY_INDICATION", "IRCOMM_TTY_CONNECT_CONFIRM", "IRCOMM_TTY_CONNECT_INDICATION", "IRCOMM_TTY_DISCONNECT_REQUEST", "IRCOMM_TTY_DISCONNECT_INDICATION", "IRCOMM_TTY_WD_TIMER_EXPIRED", 
"IRCOMM_TTY_GOT_PARAMETERS", "IRCOMM_TTY_GOT_LSAPSEL", "*** ERROR ****", }; #endif /* CONFIG_IRDA_DEBUG */ static int (*state[])(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) = { ircomm_tty_state_idle, ircomm_tty_state_search, ircomm_tty_state_query_parameters, ircomm_tty_state_query_lsap_sel, ircomm_tty_state_setup, ircomm_tty_state_ready, }; /* * Function ircomm_tty_attach_cable (driver) * * Try to attach cable (IrCOMM link). This function will only return * when the link has been connected, or if an error condition occurs. * If success, the return value is the resulting service type. */ int ircomm_tty_attach_cable(struct ircomm_tty_cb *self) { struct tty_struct *tty; IRDA_DEBUG(0, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); /* Check if somebody has already connected to us */ if (ircomm_is_connected(self->ircomm)) { IRDA_DEBUG(0, "%s(), already connected!\n", __func__ ); return 0; } /* Make sure nobody tries to write before the link is up */ tty = tty_port_tty_get(&self->port); if (tty) { tty->hw_stopped = 1; tty_kref_put(tty); } ircomm_tty_ias_register(self); ircomm_tty_do_event(self, IRCOMM_TTY_ATTACH_CABLE, NULL, NULL); return 0; } /* * Function ircomm_detach_cable (driver) * * Detach cable, or cable has been detached by peer * */ void ircomm_tty_detach_cable(struct ircomm_tty_cb *self) { IRDA_DEBUG(0, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); del_timer(&self->watchdog_timer); /* Remove discovery handler */ if (self->ckey) { irlmp_unregister_client(self->ckey); self->ckey = NULL; } /* Remove IrCOMM hint bits */ if (self->skey) { irlmp_unregister_service(self->skey); self->skey = NULL; } if (self->iriap) { iriap_close(self->iriap); self->iriap = NULL; } /* Remove LM-IAS object */ if (self->obj) { irias_delete_object(self->obj); self->obj = NULL; } ircomm_tty_do_event(self, IRCOMM_TTY_DETACH_CABLE, NULL, NULL); /* Reset some values */ self->daddr = self->saddr = 0; self->dlsap_sel = self->slsap_sel = 0; memset(&self->settings, 0, sizeof(struct ircomm_params)); } /* * Function ircomm_tty_ias_register (self) * * Register with LM-IAS depending on which service type we are * */ static void ircomm_tty_ias_register(struct ircomm_tty_cb *self) { __u8 oct_seq[6]; __u16 hints; IRDA_DEBUG(0, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); /* Compute hint bits based on service */ hints = irlmp_service_to_hint(S_COMM); if (self->service_type & IRCOMM_3_WIRE_RAW) hints |= irlmp_service_to_hint(S_PRINTER); /* Advertise IrCOMM hint bit in discovery */ if (!self->skey) self->skey = irlmp_register_service(hints); /* Set up a discovery handler */ if (!self->ckey) self->ckey = irlmp_register_client(hints, ircomm_tty_discovery_indication, NULL, (void *) self); /* If already done, no need to do it again */ if (self->obj) return; if (self->service_type & IRCOMM_3_WIRE_RAW) { /* Register IrLPT with LM-IAS */ self->obj = irias_new_object("IrLPT", IAS_IRLPT_ID); irias_add_integer_attrib(self->obj, "IrDA:IrLMP:LsapSel", self->slsap_sel, IAS_KERNEL_ATTR); } else { /* Register IrCOMM with LM-IAS */ self->obj = irias_new_object("IrDA:IrCOMM", IAS_IRCOMM_ID); irias_add_integer_attrib(self->obj, "IrDA:TinyTP:LsapSel", self->slsap_sel, IAS_KERNEL_ATTR); /* Code the parameters into the buffer */ irda_param_pack(oct_seq, "bbbbbb", IRCOMM_SERVICE_TYPE, 1, 
self->service_type, IRCOMM_PORT_TYPE, 1, IRCOMM_SERIAL); /* Register parameters with LM-IAS */ irias_add_octseq_attrib(self->obj, "Parameters", oct_seq, 6, IAS_KERNEL_ATTR); } irias_insert_object(self->obj); } /* * Function ircomm_tty_ias_unregister (self) * * Remove our IAS object and client hook while connected. * */ static void ircomm_tty_ias_unregister(struct ircomm_tty_cb *self) { /* Remove LM-IAS object now so it is not reused. * IrCOMM deals very poorly with multiple incoming connections. * It should looks a lot more like IrNET, and "dup" a server TSAP * to the application TSAP (based on various rules). * This is a cheap workaround allowing multiple clients to * connect to us. It will not always work. * Each IrCOMM socket has an IAS entry. Incoming connection will * pick the first one found. So, when we are fully connected, * we remove our IAS entries so that the next IAS entry is used. * We do that for *both* client and server, because a server * can also create client instances. * Jean II */ if (self->obj) { irias_delete_object(self->obj); self->obj = NULL; } #if 0 /* Remove discovery handler. * While we are connected, we no longer need to receive * discovery events. This would be the case if there is * multiple IrLAP interfaces. Jean II */ if (self->ckey) { irlmp_unregister_client(self->ckey); self->ckey = NULL; } #endif } /* * Function ircomm_send_initial_parameters (self) * * Send initial parameters to the remote IrCOMM device. These parameters * must be sent before any data. */ int ircomm_tty_send_initial_parameters(struct ircomm_tty_cb *self) { IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); if (self->service_type & IRCOMM_3_WIRE_RAW) return 0; /* * Set default values, but only if the application for some reason * haven't set them already */ IRDA_DEBUG(2, "%s(), data-rate = %d\n", __func__ , self->settings.data_rate); if (!self->settings.data_rate) self->settings.data_rate = 9600; IRDA_DEBUG(2, "%s(), data-format = %d\n", __func__ , self->settings.data_format); if (!self->settings.data_format) self->settings.data_format = IRCOMM_WSIZE_8; /* 8N1 */ IRDA_DEBUG(2, "%s(), flow-control = %d\n", __func__ , self->settings.flow_control); /*self->settings.flow_control = IRCOMM_RTS_CTS_IN|IRCOMM_RTS_CTS_OUT;*/ /* Do not set delta values for the initial parameters */ self->settings.dte = IRCOMM_DTR | IRCOMM_RTS; /* Only send service type parameter when we are the client */ if (self->client) ircomm_param_request(self, IRCOMM_SERVICE_TYPE, FALSE); ircomm_param_request(self, IRCOMM_DATA_RATE, FALSE); ircomm_param_request(self, IRCOMM_DATA_FORMAT, FALSE); /* For a 3 wire service, we just flush the last parameter and return */ if (self->settings.service_type == IRCOMM_3_WIRE) { ircomm_param_request(self, IRCOMM_FLOW_CONTROL, TRUE); return 0; } /* Only 9-wire service types continue here */ ircomm_param_request(self, IRCOMM_FLOW_CONTROL, FALSE); #if 0 ircomm_param_request(self, IRCOMM_XON_XOFF, FALSE); ircomm_param_request(self, IRCOMM_ENQ_ACK, FALSE); #endif /* Notify peer that we are ready to receive data */ ircomm_param_request(self, IRCOMM_DTE, TRUE); return 0; } /* * Function ircomm_tty_discovery_indication (discovery) * * Remote device is discovered, try query the remote IAS to see which * device it is, and which services it has. 
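* (the IAS query itself is issued by the state machine in response to the IRCOMM_TTY_DISCOVERY_INDICATION event posted below)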
* */ static void ircomm_tty_discovery_indication(discinfo_t *discovery, DISCOVERY_MODE mode, void *priv) { struct ircomm_tty_cb *self; struct ircomm_tty_info info; IRDA_DEBUG(2, "%s()\n", __func__ ); /* Important note : * We need to drop all passive discoveries. * The LSAP management of IrComm is deficient and doesn't deal * with the case of two instance connecting to each other * simultaneously (it will deadlock in LMP). * The proper fix would be to use the same technique as in IrNET, * to have one server socket and separate instances for the * connecting/connected socket. * The workaround is to drop passive discovery, which drastically * reduce the probability of this happening. * Jean II */ if(mode == DISCOVERY_PASSIVE) return; info.daddr = discovery->daddr; info.saddr = discovery->saddr; self = priv; ircomm_tty_do_event(self, IRCOMM_TTY_DISCOVERY_INDICATION, NULL, &info); } /* * Function ircomm_tty_disconnect_indication (instance, sap, reason, skb) * * Link disconnected * */ void ircomm_tty_disconnect_indication(void *instance, void *sap, LM_REASON reason, struct sk_buff *skb) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; struct tty_struct *tty; IRDA_DEBUG(2, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); tty = tty_port_tty_get(&self->port); if (!tty) return; /* This will stop control data transfers */ self->flow = FLOW_STOP; /* Stop data transfers */ tty->hw_stopped = 1; ircomm_tty_do_event(self, IRCOMM_TTY_DISCONNECT_INDICATION, NULL, NULL); tty_kref_put(tty); } /* * Function ircomm_tty_getvalue_confirm (result, obj_id, value, priv) * * Got result from the IAS query we make * */ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id, struct ias_value *value, void *priv) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) priv; IRDA_DEBUG(2, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); /* We probably don't need to make any more queries */ iriap_close(self->iriap); self->iriap = NULL; /* Check if request succeeded */ if (result != IAS_SUCCESS) { IRDA_DEBUG(4, "%s(), got NULL value!\n", __func__ ); return; } switch (value->type) { case IAS_OCT_SEQ: IRDA_DEBUG(2, "%s(), got octet sequence\n", __func__ ); irda_param_extract_all(self, value->t.oct_seq, value->len, &ircomm_param_info); ircomm_tty_do_event(self, IRCOMM_TTY_GOT_PARAMETERS, NULL, NULL); break; case IAS_INTEGER: /* Got LSAP selector */ IRDA_DEBUG(2, "%s(), got lsapsel = %d\n", __func__ , value->t.integer); if (value->t.integer == -1) { IRDA_DEBUG(0, "%s(), invalid value!\n", __func__ ); } else self->dlsap_sel = value->t.integer; ircomm_tty_do_event(self, IRCOMM_TTY_GOT_LSAPSEL, NULL, NULL); break; case IAS_MISSING: IRDA_DEBUG(0, "%s(), got IAS_MISSING\n", __func__ ); break; default: IRDA_DEBUG(0, "%s(), got unknown type!\n", __func__ ); break; } irias_delete_value(value); } /* * Function ircomm_tty_connect_confirm (instance, sap, qos, max_sdu_size, skb) * * Connection confirmed * */ void ircomm_tty_connect_confirm(void *instance, void *sap, struct qos_info *qos, __u32 max_data_size, __u8 max_header_size, struct sk_buff *skb) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; IRDA_DEBUG(2, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); self->client = TRUE; self->max_data_size = max_data_size; self->max_header_size = max_header_size; self->flow = FLOW_START; ircomm_tty_do_event(self, 
IRCOMM_TTY_CONNECT_CONFIRM, NULL, NULL); /* No need to kfree_skb - see ircomm_ttp_connect_confirm() */ } /* * Function ircomm_tty_connect_indication (instance, sap, qos, max_sdu_size, * skb) * * We have been discovered, and the remote device requests a connection! * */ void ircomm_tty_connect_indication(void *instance, void *sap, struct qos_info *qos, __u32 max_data_size, __u8 max_header_size, struct sk_buff *skb) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; int clen; IRDA_DEBUG(2, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); self->client = FALSE; self->max_data_size = max_data_size; self->max_header_size = max_header_size; self->flow = FLOW_START; clen = skb->data[0]; if (clen) irda_param_extract_all(self, skb->data+1, IRDA_MIN(skb->len, clen), &ircomm_param_info); ircomm_tty_do_event(self, IRCOMM_TTY_CONNECT_INDICATION, NULL, NULL); /* No need to kfree_skb - see ircomm_ttp_connect_indication() */ } /* * Function ircomm_tty_link_established (self) * * Called when the IrCOMM link is established * */ void ircomm_tty_link_established(struct ircomm_tty_cb *self) { struct tty_struct *tty; IRDA_DEBUG(2, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); tty = tty_port_tty_get(&self->port); if (!tty) return; del_timer(&self->watchdog_timer); /* * IrCOMM link is now up, and if we are not using hardware * flow-control, then declare the hardware as running. Otherwise we * will have to wait for the peer device (DCE) to raise the CTS * line. */ if (tty_port_cts_enabled(&self->port) && ((self->settings.dce & IRCOMM_CTS) == 0)) { IRDA_DEBUG(0, "%s(), waiting for CTS ...\n", __func__ ); goto put; } else { IRDA_DEBUG(1, "%s(), starting hardware!\n", __func__ ); tty->hw_stopped = 0; /* Wake up processes blocked on open */ wake_up_interruptible(&self->port.open_wait); } schedule_work(&self->tqueue); put: tty_kref_put(tty); } /* * Function ircomm_tty_start_watchdog_timer (self, timeout) * * Start the watchdog timer. This timer is used to make sure that any * connection attempt is successful, and if not, we will retry after * the timeout */ static void ircomm_tty_start_watchdog_timer(struct ircomm_tty_cb *self, int timeout) { IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); irda_start_timer(&self->watchdog_timer, timeout, (void *) self, ircomm_tty_watchdog_timer_expired); } /* * Function ircomm_tty_watchdog_timer_expired (data) * * Called when the connect procedure has taken too much time.
* */ static void ircomm_tty_watchdog_timer_expired(void *data) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) data; IRDA_DEBUG(2, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); ircomm_tty_do_event(self, IRCOMM_TTY_WD_TIMER_EXPIRED, NULL, NULL); } /* * Function ircomm_tty_do_event (self, event, skb) * * Process event * */ int ircomm_tty_do_event(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) { IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , ircomm_tty_state[self->state], ircomm_tty_event[event]); return (*state[self->state])(self, event, skb, info); } /* * Function ircomm_tty_next_state (self, state) * * Switch state * */ static inline void ircomm_tty_next_state(struct ircomm_tty_cb *self, IRCOMM_TTY_STATE state) { /* IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); IRDA_DEBUG(2, "%s: next state=%s, service type=%d\n", __func__ , ircomm_tty_state[self->state], self->service_type); */ self->state = state; } /* * Function ircomm_tty_state_idle (self, event, skb, info) * * Just hanging around * */ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) { int ret = 0; IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , ircomm_tty_state[self->state], ircomm_tty_event[event]); switch (event) { case IRCOMM_TTY_ATTACH_CABLE: /* Try to discover any remote devices */ ircomm_tty_start_watchdog_timer(self, 3*HZ); ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH); irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS); break; case IRCOMM_TTY_DISCOVERY_INDICATION: self->daddr = info->daddr; self->saddr = info->saddr; if (self->iriap) { IRDA_WARNING("%s(), busy with a previous query\n", __func__); return -EBUSY; } self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self, ircomm_tty_getvalue_confirm); iriap_getvaluebyclass_request(self->iriap, self->saddr, self->daddr, "IrDA:IrCOMM", "Parameters"); ircomm_tty_start_watchdog_timer(self, 3*HZ); ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_PARAMETERS); break; case IRCOMM_TTY_CONNECT_INDICATION: del_timer(&self->watchdog_timer); /* Accept connection */ ircomm_connect_response(self->ircomm, NULL); ircomm_tty_next_state(self, IRCOMM_TTY_READY); break; case IRCOMM_TTY_WD_TIMER_EXPIRED: /* Just stay idle */ break; case IRCOMM_TTY_DETACH_CABLE: ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); break; default: IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , ircomm_tty_event[event]); ret = -EINVAL; } return ret; } /* * Function ircomm_tty_state_search (self, event, skb, info) * * Trying to discover an IrCOMM device * */ static int ircomm_tty_state_search(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) { int ret = 0; IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , ircomm_tty_state[self->state], ircomm_tty_event[event]); switch (event) { case IRCOMM_TTY_DISCOVERY_INDICATION: self->daddr = info->daddr; self->saddr = info->saddr; if (self->iriap) { IRDA_WARNING("%s(), busy with a previous query\n", __func__); return -EBUSY; } self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self, ircomm_tty_getvalue_confirm); if (self->service_type == IRCOMM_3_WIRE_RAW) { iriap_getvaluebyclass_request(self->iriap, self->saddr, self->daddr, "IrLPT", "IrDA:IrLMP:LsapSel"); 
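/* 3-wire raw (IrLPT) peers expose no Parameters object, so skip parameter negotiation and resolve the peer's LSAP selector directly */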
ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_LSAP_SEL); } else { iriap_getvaluebyclass_request(self->iriap, self->saddr, self->daddr, "IrDA:IrCOMM", "Parameters"); ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_PARAMETERS); } ircomm_tty_start_watchdog_timer(self, 3*HZ); break; case IRCOMM_TTY_CONNECT_INDICATION: del_timer(&self->watchdog_timer); ircomm_tty_ias_unregister(self); /* Accept connection */ ircomm_connect_response(self->ircomm, NULL); ircomm_tty_next_state(self, IRCOMM_TTY_READY); break; case IRCOMM_TTY_WD_TIMER_EXPIRED: #if 1 /* Give up */ #else /* Try to discover any remote devices */ ircomm_tty_start_watchdog_timer(self, 3*HZ); irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS); #endif break; case IRCOMM_TTY_DETACH_CABLE: ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); break; default: IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , ircomm_tty_event[event]); ret = -EINVAL; } return ret; } /* * Function ircomm_tty_state_query (self, event, skb, info) * * Querying the remote LM-IAS for IrCOMM parameters * */ static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) { int ret = 0; IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , ircomm_tty_state[self->state], ircomm_tty_event[event]); switch (event) { case IRCOMM_TTY_GOT_PARAMETERS: if (self->iriap) { IRDA_WARNING("%s(), busy with a previous query\n", __func__); return -EBUSY; } self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self, ircomm_tty_getvalue_confirm); iriap_getvaluebyclass_request(self->iriap, self->saddr, self->daddr, "IrDA:IrCOMM", "IrDA:TinyTP:LsapSel"); ircomm_tty_start_watchdog_timer(self, 3*HZ); ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_LSAP_SEL); break; case IRCOMM_TTY_WD_TIMER_EXPIRED: /* Go back to search mode */ ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH); ircomm_tty_start_watchdog_timer(self, 3*HZ); break; case IRCOMM_TTY_CONNECT_INDICATION: del_timer(&self->watchdog_timer); ircomm_tty_ias_unregister(self); /* Accept connection */ ircomm_connect_response(self->ircomm, NULL); ircomm_tty_next_state(self, IRCOMM_TTY_READY); break; case IRCOMM_TTY_DETACH_CABLE: ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); break; default: IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , ircomm_tty_event[event]); ret = -EINVAL; } return ret; } /* * Function ircomm_tty_state_query_lsap_sel (self, event, skb, info) * * Query remote LM-IAS for the LSAP selector which we can connect to * */ static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) { int ret = 0; IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , ircomm_tty_state[self->state], ircomm_tty_event[event]); switch (event) { case IRCOMM_TTY_GOT_LSAPSEL: /* Connect to remote device */ ret = ircomm_connect_request(self->ircomm, self->dlsap_sel, self->saddr, self->daddr, NULL, self->service_type); ircomm_tty_start_watchdog_timer(self, 3*HZ); ircomm_tty_next_state(self, IRCOMM_TTY_SETUP); break; case IRCOMM_TTY_WD_TIMER_EXPIRED: /* Go back to search mode */ ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH); ircomm_tty_start_watchdog_timer(self, 3*HZ); break; case IRCOMM_TTY_CONNECT_INDICATION: del_timer(&self->watchdog_timer); ircomm_tty_ias_unregister(self); /* Accept connection */ ircomm_connect_response(self->ircomm, NULL); ircomm_tty_next_state(self, IRCOMM_TTY_READY); break; case IRCOMM_TTY_DETACH_CABLE: ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); break; default: IRDA_DEBUG(2, "%s(), 
unknown event: %s\n", __func__ , ircomm_tty_event[event]); ret = -EINVAL; } return ret; } /* * Function ircomm_tty_state_setup (self, event, skb, info) * * Trying to connect * */ static int ircomm_tty_state_setup(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) { int ret = 0; IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , ircomm_tty_state[self->state], ircomm_tty_event[event]); switch (event) { case IRCOMM_TTY_CONNECT_CONFIRM: del_timer(&self->watchdog_timer); ircomm_tty_ias_unregister(self); /* * Send initial parameters. This will also send out queued * parameters waiting for the connection to come up */ ircomm_tty_send_initial_parameters(self); ircomm_tty_link_established(self); ircomm_tty_next_state(self, IRCOMM_TTY_READY); break; case IRCOMM_TTY_CONNECT_INDICATION: del_timer(&self->watchdog_timer); ircomm_tty_ias_unregister(self); /* Accept connection */ ircomm_connect_response(self->ircomm, NULL); ircomm_tty_next_state(self, IRCOMM_TTY_READY); break; case IRCOMM_TTY_WD_TIMER_EXPIRED: /* Go back to search mode */ ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH); ircomm_tty_start_watchdog_timer(self, 3*HZ); break; case IRCOMM_TTY_DETACH_CABLE: /* ircomm_disconnect_request(self->ircomm, NULL); */ ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); break; default: IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , ircomm_tty_event[event]); ret = -EINVAL; } return ret; } /* * Function ircomm_tty_state_ready (self, event, skb, info) * * IrCOMM is now connected * */ static int ircomm_tty_state_ready(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) { int ret = 0; switch (event) { case IRCOMM_TTY_DATA_REQUEST: ret = ircomm_data_request(self->ircomm, skb); break; case IRCOMM_TTY_DETACH_CABLE: ircomm_disconnect_request(self->ircomm, NULL); ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); break; case IRCOMM_TTY_DISCONNECT_INDICATION: ircomm_tty_ias_register(self); ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH); ircomm_tty_start_watchdog_timer(self, 3*HZ); if (self->port.flags & ASYNC_CHECK_CD) { /* Drop carrier */ self->settings.dce = IRCOMM_DELTA_CD; ircomm_tty_check_modem_status(self); } else { IRDA_DEBUG(0, "%s(), hanging up!\n", __func__ ); tty_port_tty_hangup(&self->port, false); } break; default: IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , ircomm_tty_event[event]); ret = -EINVAL; } return ret; }
gpl-2.0
faux123/private_msm8660_ics
arch/x86/kernel/signal.c
2320
22291
/* * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs * * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes * 2000-2002 x86-64 support by Andi Kleen */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/ptrace.h> #include <linux/tracehook.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/personality.h> #include <linux/uaccess.h> #include <linux/user-return-notifier.h> #include <asm/processor.h> #include <asm/ucontext.h> #include <asm/i387.h> #include <asm/vdso.h> #include <asm/mce.h> #ifdef CONFIG_X86_64 #include <asm/proto.h> #include <asm/ia32_unistd.h> #endif /* CONFIG_X86_64 */ #include <asm/syscall.h> #include <asm/syscalls.h> #include <asm/sigframe.h> #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) #define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \ X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \ X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \ X86_EFLAGS_CF) #ifdef CONFIG_X86_32 # define FIX_EFLAGS (__FIX_EFLAGS | X86_EFLAGS_RF) #else # define FIX_EFLAGS __FIX_EFLAGS #endif #define COPY(x) do { \ get_user_ex(regs->x, &sc->x); \ } while (0) #define GET_SEG(seg) ({ \ unsigned short tmp; \ get_user_ex(tmp, &sc->seg); \ tmp; \ }) #define COPY_SEG(seg) do { \ regs->seg = GET_SEG(seg); \ } while (0) #define COPY_SEG_CPL3(seg) do { \ regs->seg = GET_SEG(seg) | 3; \ } while (0) static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned long *pax) { void __user *buf; unsigned int tmpflags; unsigned int err = 0; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; get_user_try { #ifdef CONFIG_X86_32 set_user_gs(regs, GET_SEG(gs)); COPY_SEG(fs); COPY_SEG(es); COPY_SEG(ds); #endif /* CONFIG_X86_32 */ COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); COPY(dx); COPY(cx); COPY(ip); #ifdef CONFIG_X86_64 COPY(r8); COPY(r9); COPY(r10); COPY(r11); COPY(r12); COPY(r13); COPY(r14); COPY(r15); #endif /* CONFIG_X86_64 */ #ifdef CONFIG_X86_32 COPY_SEG_CPL3(cs); COPY_SEG_CPL3(ss); #else /* !CONFIG_X86_32 */ /* Kernel saves and restores only the CS segment register on signals, * which is the bare minimum needed to allow mixed 32/64-bit code. * App's signal handler can save/restore other segments if needed. 
*/ COPY_SEG_CPL3(cs); #endif /* CONFIG_X86_32 */ get_user_ex(tmpflags, &sc->flags); regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); regs->orig_ax = -1; /* disable syscall checks */ get_user_ex(buf, &sc->fpstate); err |= restore_i387_xstate(buf); get_user_ex(*pax, &sc->ax); } get_user_catch(err); return err; } static int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, struct pt_regs *regs, unsigned long mask) { int err = 0; put_user_try { #ifdef CONFIG_X86_32 put_user_ex(get_user_gs(regs), (unsigned int __user *)&sc->gs); put_user_ex(regs->fs, (unsigned int __user *)&sc->fs); put_user_ex(regs->es, (unsigned int __user *)&sc->es); put_user_ex(regs->ds, (unsigned int __user *)&sc->ds); #endif /* CONFIG_X86_32 */ put_user_ex(regs->di, &sc->di); put_user_ex(regs->si, &sc->si); put_user_ex(regs->bp, &sc->bp); put_user_ex(regs->sp, &sc->sp); put_user_ex(regs->bx, &sc->bx); put_user_ex(regs->dx, &sc->dx); put_user_ex(regs->cx, &sc->cx); put_user_ex(regs->ax, &sc->ax); #ifdef CONFIG_X86_64 put_user_ex(regs->r8, &sc->r8); put_user_ex(regs->r9, &sc->r9); put_user_ex(regs->r10, &sc->r10); put_user_ex(regs->r11, &sc->r11); put_user_ex(regs->r12, &sc->r12); put_user_ex(regs->r13, &sc->r13); put_user_ex(regs->r14, &sc->r14); put_user_ex(regs->r15, &sc->r15); #endif /* CONFIG_X86_64 */ put_user_ex(current->thread.trap_no, &sc->trapno); put_user_ex(current->thread.error_code, &sc->err); put_user_ex(regs->ip, &sc->ip); #ifdef CONFIG_X86_32 put_user_ex(regs->cs, (unsigned int __user *)&sc->cs); put_user_ex(regs->flags, &sc->flags); put_user_ex(regs->sp, &sc->sp_at_signal); put_user_ex(regs->ss, (unsigned int __user *)&sc->ss); #else /* !CONFIG_X86_32 */ put_user_ex(regs->flags, &sc->flags); put_user_ex(regs->cs, &sc->cs); put_user_ex(0, &sc->gs); put_user_ex(0, &sc->fs); #endif /* CONFIG_X86_32 */ put_user_ex(fpstate, &sc->fpstate); /* non-iBCS2 extensions.. */ put_user_ex(mask, &sc->oldmask); put_user_ex(current->thread.cr2, &sc->cr2); } put_user_catch(err); return err; } /* * Set up a signal frame. */ /* * Determine which stack to use.. */ static unsigned long align_sigframe(unsigned long sp) { #ifdef CONFIG_X86_32 /* * Align the stack pointer according to the i386 ABI, * i.e. so that on function entry ((sp + 4) & 15) == 0. */ sp = ((sp + 4) & -16ul) - 4; #else /* !CONFIG_X86_32 */ sp = round_down(sp, 16) - 8; #endif return sp; } static inline void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, void __user **fpstate) { /* Default to using normal stack */ unsigned long sp = regs->sp; int onsigstack = on_sig_stack(sp); #ifdef CONFIG_X86_64 /* redzone */ sp -= 128; #endif /* CONFIG_X86_64 */ if (!onsigstack) { /* This is the X/Open sanctioned signal stack switching. */ if (ka->sa.sa_flags & SA_ONSTACK) { if (current->sas_ss_size) sp = current->sas_ss_sp + current->sas_ss_size; } else { #ifdef CONFIG_X86_32 /* This is the legacy signal stack switching. */ if ((regs->ss & 0xffff) != __USER_DS && !(ka->sa.sa_flags & SA_RESTORER) && ka->sa.sa_restorer) sp = (unsigned long) ka->sa.sa_restorer; #endif /* CONFIG_X86_32 */ } } if (used_math()) { sp -= sig_xstate_size; #ifdef CONFIG_X86_64 sp = round_down(sp, 64); #endif /* CONFIG_X86_64 */ *fpstate = (void __user *)sp; } sp = align_sigframe(sp - frame_size); /* * If we are on the alternate signal stack and would overflow it, don't. * Return an always-bogus address instead so we will die with SIGSEGV. 
*/ if (onsigstack && !likely(on_sig_stack(sp))) return (void __user *)-1L; /* save i387 state */ if (used_math() && save_i387_xstate(*fpstate) < 0) return (void __user *)-1L; return (void __user *)sp; } #ifdef CONFIG_X86_32 static const struct { u16 poplmovl; u32 val; u16 int80; } __attribute__((packed)) retcode = { 0xb858, /* popl %eax; movl $..., %eax */ __NR_sigreturn, 0x80cd, /* int $0x80 */ }; static const struct { u8 movl; u32 val; u16 int80; u8 pad; } __attribute__((packed)) rt_retcode = { 0xb8, /* movl $..., %eax */ __NR_rt_sigreturn, 0x80cd, /* int $0x80 */ 0 }; static int __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs) { struct sigframe __user *frame; void __user *restorer; int err = 0; void __user *fpstate = NULL; frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) return -EFAULT; if (__put_user(sig, &frame->sig)) return -EFAULT; if (setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0])) return -EFAULT; if (_NSIG_WORDS > 1) { if (__copy_to_user(&frame->extramask, &set->sig[1], sizeof(frame->extramask))) return -EFAULT; } if (current->mm->context.vdso) restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); else restorer = &frame->retcode; if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; /* Set up to return from userspace. */ err |= __put_user(restorer, &frame->pretcode); /* * This is popl %eax ; movl $__NR_sigreturn, %eax ; int $0x80 * * WE DO NOT USE IT ANY MORE! It's only left here for historical * reasons and because gdb uses it as a signature to notice * signal handler stack frames. */ err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode); if (err) return -EFAULT; /* Set up registers for signal handler */ regs->sp = (unsigned long)frame; regs->ip = (unsigned long)ka->sa.sa_handler; regs->ax = (unsigned long)sig; regs->dx = 0; regs->cx = 0; regs->ds = __USER_DS; regs->es = __USER_DS; regs->ss = __USER_DS; regs->cs = __USER_CS; return 0; } static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; void __user *restorer; int err = 0; void __user *fpstate = NULL; frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) return -EFAULT; put_user_try { put_user_ex(sig, &frame->sig); put_user_ex(&frame->info, &frame->pinfo); put_user_ex(&frame->uc, &frame->puc); err |= copy_siginfo_to_user(&frame->info, info); /* Create the ucontext. */ if (cpu_has_xsave) put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags); else put_user_ex(0, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_link); put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); put_user_ex(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags); put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate, regs, set->sig[0]); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); /* Set up to return from userspace. */ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; put_user_ex(restorer, &frame->pretcode); /* * This is movl $__NR_rt_sigreturn, %ax ; int $0x80 * * WE DO NOT USE IT ANY MORE! It's only left here for historical * reasons and because gdb uses it as a signature to notice * signal handler stack frames. 
*/ put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode); } put_user_catch(err); if (err) return -EFAULT; /* Set up registers for signal handler */ regs->sp = (unsigned long)frame; regs->ip = (unsigned long)ka->sa.sa_handler; regs->ax = (unsigned long)sig; regs->dx = (unsigned long)&frame->info; regs->cx = (unsigned long)&frame->uc; regs->ds = __USER_DS; regs->es = __USER_DS; regs->ss = __USER_DS; regs->cs = __USER_CS; return 0; } #else /* !CONFIG_X86_32 */ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; void __user *fp = NULL; int err = 0; struct task_struct *me = current; frame = get_sigframe(ka, regs, sizeof(struct rt_sigframe), &fp); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) return -EFAULT; if (ka->sa.sa_flags & SA_SIGINFO) { if (copy_siginfo_to_user(&frame->info, info)) return -EFAULT; } put_user_try { /* Create the ucontext. */ if (cpu_has_xsave) put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags); else put_user_ex(0, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_link); put_user_ex(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); put_user_ex(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags); put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); /* Set up to return from userspace. If provided, use a stub already in userspace. */ /* x86-64 should always use SA_RESTORER. */ if (ka->sa.sa_flags & SA_RESTORER) { put_user_ex(ka->sa.sa_restorer, &frame->pretcode); } else { /* could use a vstub here */ err |= -EFAULT; } } put_user_catch(err); if (err) return -EFAULT; /* Set up registers for signal handler */ regs->di = sig; /* In case the signal handler was declared without prototypes */ regs->ax = 0; /* This also works for non SA_SIGINFO handlers because they expect the next argument after the signal number on the stack. */ regs->si = (unsigned long)&frame->info; regs->dx = (unsigned long)&frame->uc; regs->ip = (unsigned long) ka->sa.sa_handler; regs->sp = (unsigned long)frame; /* Set up the CS register to run signal handlers in 64-bit mode, even if the handler happens to be interrupting 32-bit code. */ regs->cs = __USER_CS; return 0; } #endif /* CONFIG_X86_32 */ #ifdef CONFIG_X86_32 /* * Atomically swap in the new signal mask, and wait for a signal. */ asmlinkage int sys_sigsuspend(int history0, int history1, old_sigset_t mask) { mask &= _BLOCKABLE; spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; siginitset(&current->blocked, mask); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); current->state = TASK_INTERRUPTIBLE; schedule(); set_restore_sigmask(); return -ERESTARTNOHAND; } asmlinkage int sys_sigaction(int sig, const struct old_sigaction __user *act, struct old_sigaction __user *oact) { struct k_sigaction new_ka, old_ka; int ret = 0; if (act) { old_sigset_t mask; if (!access_ok(VERIFY_READ, act, sizeof(*act))) return -EFAULT; get_user_try { get_user_ex(new_ka.sa.sa_handler, &act->sa_handler); get_user_ex(new_ka.sa.sa_flags, &act->sa_flags); get_user_ex(mask, &act->sa_mask); get_user_ex(new_ka.sa.sa_restorer, &act->sa_restorer); } get_user_catch(ret); if (ret) return -EFAULT; siginitset(&new_ka.sa.sa_mask, mask); } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); if (!ret && oact) { if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact))) return -EFAULT; put_user_try { put_user_ex(old_ka.sa.sa_handler, &oact->sa_handler); put_user_ex(old_ka.sa.sa_flags, &oact->sa_flags); put_user_ex(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); put_user_ex(old_ka.sa.sa_restorer, &oact->sa_restorer); } put_user_catch(ret); if (ret) return -EFAULT; } return ret; } #endif /* CONFIG_X86_32 */ long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, struct pt_regs *regs) { return do_sigaltstack(uss, uoss, regs->sp); } /* * Do a signal return; undo the signal stack. */ #ifdef CONFIG_X86_32 unsigned long sys_sigreturn(struct pt_regs *regs) { struct sigframe __user *frame; unsigned long ax; sigset_t set; frame = (struct sigframe __user *)(regs->sp - 8); if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__get_user(set.sig[0], &frame->sc.oldmask) || (_NSIG_WORDS > 1 && __copy_from_user(&set.sig[1], &frame->extramask, sizeof(frame->extramask)))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); current->blocked = set; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); if (restore_sigcontext(regs, &frame->sc, &ax)) goto badframe; return ax; badframe: signal_fault(regs, frame, "sigreturn"); return 0; } #endif /* CONFIG_X86_32 */ long sys_rt_sigreturn(struct pt_regs *regs) { struct rt_sigframe __user *frame; unsigned long ax; sigset_t set; frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long)); if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) goto badframe; if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT) goto badframe; return ax; badframe: signal_fault(regs, frame, "rt_sigreturn"); return 0; } /* * OK, we're invoking a handler: */ static int signr_convert(int sig) { #ifdef CONFIG_X86_32 struct thread_info *info = current_thread_info(); if (info->exec_domain && info->exec_domain->signal_invmap && sig < 32) return info->exec_domain->signal_invmap[sig]; #endif /* CONFIG_X86_32 */ return sig; } #ifdef CONFIG_X86_32 #define is_ia32 1 #define ia32_setup_frame __setup_frame #define ia32_setup_rt_frame __setup_rt_frame #else /* !CONFIG_X86_32 */ #ifdef CONFIG_IA32_EMULATION #define is_ia32 test_thread_flag(TIF_IA32) #else /* !CONFIG_IA32_EMULATION */ #define is_ia32 0 #endif /* CONFIG_IA32_EMULATION */ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs); int ia32_setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs); #endif /* CONFIG_X86_32 */ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { int usig = signr_convert(sig); int ret; /* Set up the stack frame */ if (is_ia32) { if (ka->sa.sa_flags & SA_SIGINFO) ret = ia32_setup_rt_frame(usig, ka, info, set, regs); else ret = ia32_setup_frame(usig, ka, set, regs); } else ret = __setup_rt_frame(sig, ka, info, set, regs); if (ret) { force_sigsegv(sig, current); return -EFAULT; } return ret; } static int handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) { sigset_t blocked; int ret; /* Are we from a system call? 
*/ if (syscall_get_nr(current, regs) >= 0) { /* If so, check system call restarting.. */ switch (syscall_get_error(current, regs)) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: regs->ax = -EINTR; break; case -ERESTARTSYS: if (!(ka->sa.sa_flags & SA_RESTART)) { regs->ax = -EINTR; break; } /* fallthrough */ case -ERESTARTNOINTR: regs->ax = regs->orig_ax; regs->ip -= 2; break; } } /* * If TF is set due to a debugger (TIF_FORCED_TF), clear the TF * flag so that register information in the sigcontext is correct. */ if (unlikely(regs->flags & X86_EFLAGS_TF) && likely(test_and_clear_thread_flag(TIF_FORCED_TF))) regs->flags &= ~X86_EFLAGS_TF; ret = setup_rt_frame(sig, ka, info, oldset, regs); if (ret) return ret; #ifdef CONFIG_X86_64 /* * This has nothing to do with segment registers, * despite the name. This magic affects uaccess.h * macros' behavior. Reset it to the normal setting. */ set_fs(USER_DS); #endif /* * Clear the direction flag as per the ABI for function entry. */ regs->flags &= ~X86_EFLAGS_DF; /* * Clear TF when entering the signal handler, but * notify any tracer that was single-stepping it. * The tracer may want to single-step inside the * handler too. */ regs->flags &= ~X86_EFLAGS_TF; sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NODEFER)) sigaddset(&blocked, sig); set_current_blocked(&blocked); tracehook_signal_handler(sig, info, ka, regs, test_thread_flag(TIF_SINGLESTEP)); return 0; } #ifdef CONFIG_X86_32 #define NR_restart_syscall __NR_restart_syscall #else /* !CONFIG_X86_32 */ #define NR_restart_syscall \ test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall #endif /* CONFIG_X86_32 */ /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. */ static void do_signal(struct pt_regs *regs) { struct k_sigaction ka; siginfo_t info; int signr; sigset_t *oldset; /* * We want the common case to go fast, which is why we may in certain * cases get here from kernel mode. Just return without doing anything * if so. * X86_32: vm86 regs switched out by assembly code before reaching * here, so testing against kernel CS suffices. */ if (!user_mode(regs)) return; if (current_thread_info()->status & TS_RESTORE_SIGMASK) oldset = &current->saved_sigmask; else oldset = &current->blocked; signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Whee! Actually deliver the signal. */ if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { /* * A signal was successfully delivered; the saved * sigmask will have been stored in the signal frame, * and will be restored by sigreturn, so we can simply * clear the TS_RESTORE_SIGMASK flag. */ current_thread_info()->status &= ~TS_RESTORE_SIGMASK; } return; } /* Did we come from a system call? */ if (syscall_get_nr(current, regs) >= 0) { /* Restart the system call - no handlers present */ switch (syscall_get_error(current, regs)) { case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: regs->ax = regs->orig_ax; regs->ip -= 2; break; case -ERESTART_RESTARTBLOCK: regs->ax = NR_restart_syscall; regs->ip -= 2; break; } } /* * If there's no signal to deliver, we just put the saved sigmask * back. 
*/ if (current_thread_info()->status & TS_RESTORE_SIGMASK) { current_thread_info()->status &= ~TS_RESTORE_SIGMASK; sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); } } /* * notification of userspace execution resumption * - triggered by the TIF_WORK_MASK flags */ void do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) { #ifdef CONFIG_X86_MCE /* notify userspace of pending MCEs */ if (thread_info_flags & _TIF_MCE_NOTIFY) mce_notify_process(); #endif /* CONFIG_X86_64 && CONFIG_X86_MCE */ /* deal with pending signal delivery */ if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); if (current->replacement_session_keyring) key_replace_session_keyring(); } if (thread_info_flags & _TIF_USER_RETURN_NOTIFY) fire_user_return_notifiers(); #ifdef CONFIG_X86_32 clear_thread_flag(TIF_IRET); #endif /* CONFIG_X86_32 */ } void signal_fault(struct pt_regs *regs, void __user *frame, char *where) { struct task_struct *me = current; if (show_unhandled_signals && printk_ratelimit()) { printk("%s" "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG, me->comm, me->pid, where, frame, regs->ip, regs->sp, regs->orig_ax); print_vma_addr(" in ", regs->ip); printk(KERN_CONT "\n"); } force_sig(SIGSEGV, me); }
gpl-2.0
pio-masaki/at100-kernel
arch/arm/mach-s5p64x0/cpu.c
2832
4427
/* linux/arch/arm/mach-s5p64x0/cpu.c * * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. * http://www.samsung.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/sysdev.h> #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <asm/proc-fns.h> #include <asm/irq.h> #include <mach/hardware.h> #include <mach/map.h> #include <mach/regs-clock.h> #include <plat/regs-serial.h> #include <plat/cpu.h> #include <plat/devs.h> #include <plat/clock.h> #include <plat/s5p6440.h> #include <plat/s5p6450.h> #include <plat/adc-core.h> /* Initial IO mappings */ static struct map_desc s5p64x0_iodesc[] __initdata = { { .virtual = (unsigned long)S5P_VA_GPIO, .pfn = __phys_to_pfn(S5P64X0_PA_GPIO), .length = SZ_4K, .type = MT_DEVICE, }, { .virtual = (unsigned long)VA_VIC0, .pfn = __phys_to_pfn(S5P64X0_PA_VIC0), .length = SZ_16K, .type = MT_DEVICE, }, { .virtual = (unsigned long)VA_VIC1, .pfn = __phys_to_pfn(S5P64X0_PA_VIC1), .length = SZ_16K, .type = MT_DEVICE, }, }; static struct map_desc s5p6440_iodesc[] __initdata = { { .virtual = (unsigned long)S3C_VA_UART, .pfn = __phys_to_pfn(S5P6440_PA_UART(0)), .length = SZ_4K, .type = MT_DEVICE, }, }; static struct map_desc s5p6450_iodesc[] __initdata = { { .virtual = (unsigned long)S3C_VA_UART, .pfn = __phys_to_pfn(S5P6450_PA_UART(0)), .length = SZ_512K, .type = MT_DEVICE, }, { .virtual = (unsigned long)S3C_VA_UART + SZ_512K, .pfn = __phys_to_pfn(S5P6450_PA_UART(5)), .length = SZ_4K, .type = MT_DEVICE, }, }; static void s5p64x0_idle(void) { unsigned long val; if (!need_resched()) { val = __raw_readl(S5P64X0_PWR_CFG); val &= ~(0x3 << 5); val |= (0x1 << 5); __raw_writel(val, S5P64X0_PWR_CFG); cpu_do_idle(); } local_irq_enable(); } /* * s5p64x0_map_io * * register the standard CPU IO areas */ void __init s5p6440_map_io(void) { /* initialize any device information early */ s3c_adc_setname("s3c64xx-adc"); iotable_init(s5p64x0_iodesc, ARRAY_SIZE(s5p64x0_iodesc)); iotable_init(s5p6440_iodesc, ARRAY_SIZE(s5p6440_iodesc)); } void __init s5p6450_map_io(void) { /* initialize any device information early */ s3c_adc_setname("s3c64xx-adc"); iotable_init(s5p64x0_iodesc, ARRAY_SIZE(s5p64x0_iodesc)); iotable_init(s5p6450_iodesc, ARRAY_SIZE(s5p6450_iodesc)); } /* * s5p64x0_init_clocks * * register and setup the CPU clocks */ void __init s5p6440_init_clocks(int xtal) { printk(KERN_DEBUG "%s: initializing clocks\n", __func__); s3c24xx_register_baseclocks(xtal); s5p_register_clocks(xtal); s5p6440_register_clocks(); s5p6440_setup_clocks(); } void __init s5p6450_init_clocks(int xtal) { printk(KERN_DEBUG "%s: initializing clocks\n", __func__); s3c24xx_register_baseclocks(xtal); s5p_register_clocks(xtal); s5p6450_register_clocks(); s5p6450_setup_clocks(); } /* * s5p64x0_init_irq * * register the CPU interrupts */ void __init s5p6440_init_irq(void) { /* S5P6440 supports 2 VIC */ u32 vic[2]; /* * VIC0 is missing IRQ_VIC0[3, 4, 8, 10, (12-22)] * VIC1 is missing IRQ VIC1[1, 3, 4, 10, 11, 12, 14, 15, 22] */ vic[0] = 0xff800ae7; vic[1] = 0xffbf23e5; s5p_init_irq(vic, ARRAY_SIZE(vic)); } void __init s5p6450_init_irq(void) { /* 
S5P6450 supports only 2 VIC */ u32 vic[2]; /* * VIC0 is missing IRQ_VIC0[(13-15), (21-22)] * VIC1 is missing IRQ VIC1[12, 14, 23] */ vic[0] = 0xff9f1fff; vic[1] = 0xff7fafff; s5p_init_irq(vic, ARRAY_SIZE(vic)); } struct sysdev_class s5p64x0_sysclass = { .name = "s5p64x0-core", }; static struct sys_device s5p64x0_sysdev = { .cls = &s5p64x0_sysclass, }; static int __init s5p64x0_core_init(void) { return sysdev_class_register(&s5p64x0_sysclass); } core_initcall(s5p64x0_core_init); int __init s5p64x0_init(void) { printk(KERN_INFO "S5P64X0(S5P6440/S5P6450): Initializing architecture\n"); /* set idle function */ pm_idle = s5p64x0_idle; return sysdev_register(&s5p64x0_sysdev); }
gpl-2.0
zales/RamosW17pro-kernel-common
drivers/infiniband/hw/ipath/ipath_mad.c
4112
46434
/* * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <rdma/ib_smi.h> #include "ipath_kernel.h" #include "ipath_verbs.h" #include "ipath_common.h" #define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004) #define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008) #define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C) #define IB_SMP_INVALID_FIELD cpu_to_be16(0x001C) static int reply(struct ib_smp *smp) { /* * The verbs framework will handle the directed/LID route * packet changes. */ smp->method = IB_MGMT_METHOD_GET_RESP; if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) smp->status |= IB_SMP_DIRECTION; return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; } static int recv_subn_get_nodedescription(struct ib_smp *smp, struct ib_device *ibdev) { if (smp->attr_mod) smp->status |= IB_SMP_INVALID_FIELD; memcpy(smp->data, ibdev->node_desc, sizeof(smp->data)); return reply(smp); } struct nodeinfo { u8 base_version; u8 class_version; u8 node_type; u8 num_ports; __be64 sys_guid; __be64 node_guid; __be64 port_guid; __be16 partition_cap; __be16 device_id; __be32 revision; u8 local_port_num; u8 vendor_id[3]; } __attribute__ ((packed)); static int recv_subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { struct nodeinfo *nip = (struct nodeinfo *)&smp->data; struct ipath_devdata *dd = to_idev(ibdev)->dd; u32 vendor, majrev, minrev; /* GUID 0 is illegal */ if (smp->attr_mod || (dd->ipath_guid == 0)) smp->status |= IB_SMP_INVALID_FIELD; nip->base_version = 1; nip->class_version = 1; nip->node_type = 1; /* channel adapter */ /* * XXX The num_ports value will need a layer function to get * the value if we ever have more than one IB port on a chip. * We will also need to get the GUID for the port. 
*/ nip->num_ports = ibdev->phys_port_cnt; /* This is already in network order */ nip->sys_guid = to_idev(ibdev)->sys_image_guid; nip->node_guid = dd->ipath_guid; nip->port_guid = dd->ipath_guid; nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd)); nip->device_id = cpu_to_be16(dd->ipath_deviceid); majrev = dd->ipath_majrev; minrev = dd->ipath_minrev; nip->revision = cpu_to_be32((majrev << 16) | minrev); nip->local_port_num = port; vendor = dd->ipath_vendorid; nip->vendor_id[0] = IPATH_SRC_OUI_1; nip->vendor_id[1] = IPATH_SRC_OUI_2; nip->vendor_id[2] = IPATH_SRC_OUI_3; return reply(smp); } static int recv_subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev) { u32 startgx = 8 * be32_to_cpu(smp->attr_mod); __be64 *p = (__be64 *) smp->data; /* 32 blocks of 8 64-bit GUIDs per block */ memset(smp->data, 0, sizeof(smp->data)); /* * We only support one GUID for now. If this changes, the * portinfo.guid_cap field needs to be updated too. */ if (startgx == 0) { __be64 g = to_idev(ibdev)->dd->ipath_guid; if (g == 0) /* GUID 0 is illegal */ smp->status |= IB_SMP_INVALID_FIELD; else /* The first is a copy of the read-only HW GUID. */ *p = g; } else smp->status |= IB_SMP_INVALID_FIELD; return reply(smp); } static void set_link_width_enabled(struct ipath_devdata *dd, u32 w) { (void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, w); } static void set_link_speed_enabled(struct ipath_devdata *dd, u32 s) { (void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, s); } static int get_overrunthreshold(struct ipath_devdata *dd) { return (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) & INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK; } /** * set_overrunthreshold - set the overrun threshold * @dd: the infinipath device * @n: the new threshold * * Note that this will only take effect when the link state changes. */ static int set_overrunthreshold(struct ipath_devdata *dd, unsigned n) { unsigned v; v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) & INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK; if (v != n) { dd->ipath_ibcctrl &= ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT); dd->ipath_ibcctrl |= (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT; ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, dd->ipath_ibcctrl); } return 0; } static int get_phyerrthreshold(struct ipath_devdata *dd) { return (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) & INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK; } /** * set_phyerrthreshold - set the physical error threshold * @dd: the infinipath device * @n: the new threshold * * Note that this will only take effect when the link state changes. */ static int set_phyerrthreshold(struct ipath_devdata *dd, unsigned n) { unsigned v; v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) & INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK; if (v != n) { dd->ipath_ibcctrl &= ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT); dd->ipath_ibcctrl |= (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT; ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, dd->ipath_ibcctrl); } return 0; } /** * get_linkdowndefaultstate - get the default linkdown state * @dd: the infinipath device * * Returns zero if the default is POLL, 1 if the default is SLEEP. 
*/ static int get_linkdowndefaultstate(struct ipath_devdata *dd) { return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE); } static int recv_subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { struct ipath_ibdev *dev; struct ipath_devdata *dd; struct ib_port_info *pip = (struct ib_port_info *)smp->data; u16 lid; u8 ibcstat; u8 mtu; int ret; if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) { smp->status |= IB_SMP_INVALID_FIELD; ret = reply(smp); goto bail; } dev = to_idev(ibdev); dd = dev->dd; /* Clear all fields. Only set the non-zero fields. */ memset(smp->data, 0, sizeof(smp->data)); /* Only return the mkey if the protection field allows it. */ if (smp->method == IB_MGMT_METHOD_SET || dev->mkey == smp->mkey || dev->mkeyprot == 0) pip->mkey = dev->mkey; pip->gid_prefix = dev->gid_prefix; lid = dd->ipath_lid; pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE; pip->sm_lid = cpu_to_be16(dev->sm_lid); pip->cap_mask = cpu_to_be32(dev->port_cap_flags); /* pip->diag_code; */ pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period); pip->local_port_num = port; pip->link_width_enabled = dd->ipath_link_width_enabled; pip->link_width_supported = dd->ipath_link_width_supported; pip->link_width_active = dd->ipath_link_width_active; pip->linkspeed_portstate = dd->ipath_link_speed_supported << 4; ibcstat = dd->ipath_lastibcstat; /* map LinkState to IB portinfo values. */ pip->linkspeed_portstate |= ipath_ib_linkstate(dd, ibcstat) + 1; pip->portphysstate_linkdown = (ipath_cvt_physportstate[ibcstat & dd->ibcs_lts_mask] << 4) | (get_linkdowndefaultstate(dd) ? 1 : 2); pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dd->ipath_lmc; pip->linkspeedactive_enabled = (dd->ipath_link_speed_active << 4) | dd->ipath_link_speed_enabled; switch (dd->ipath_ibmtu) { case 4096: mtu = IB_MTU_4096; break; case 2048: mtu = IB_MTU_2048; break; case 1024: mtu = IB_MTU_1024; break; case 512: mtu = IB_MTU_512; break; case 256: mtu = IB_MTU_256; break; default: /* oops, something is wrong */ mtu = IB_MTU_2048; break; } pip->neighbormtu_mastersmsl = (mtu << 4) | dev->sm_sl; pip->vlcap_inittype = 0x10; /* VLCap = VL0, InitType = 0 */ pip->vl_high_limit = dev->vl_high_limit; /* pip->vl_arb_high_cap; // only one VL */ /* pip->vl_arb_low_cap; // only one VL */ /* InitTypeReply = 0 */ /* our mtu cap depends on whether 4K MTU enabled or not */ pip->inittypereply_mtucap = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048; /* HCAs ignore VLStallCount and HOQLife */ /* pip->vlstallcnt_hoqlife; */ pip->operationalvl_pei_peo_fpi_fpo = 0x10; /* OVLs = 1 */ pip->mkey_violations = cpu_to_be16(dev->mkey_violations); /* P_KeyViolations are counted by hardware. */ pip->pkey_violations = cpu_to_be16((ipath_get_cr_errpkey(dd) - dev->z_pkey_violations) & 0xFFFF); pip->qkey_violations = cpu_to_be16(dev->qkey_violations); /* Only the hardware GUID is supported for now */ pip->guid_cap = 1; pip->clientrereg_resv_subnetto = dev->subnet_timeout; /* 32.768 usec. 
response time (guessing) */ pip->resv_resptimevalue = 3; pip->localphyerrors_overrunerrors = (get_phyerrthreshold(dd) << 4) | get_overrunthreshold(dd); /* pip->max_credit_hint; */ if (dev->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) { u32 v; v = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LINKLATENCY); pip->link_roundtrip_latency[0] = v >> 16; pip->link_roundtrip_latency[1] = v >> 8; pip->link_roundtrip_latency[2] = v; } ret = reply(smp); bail: return ret; } /** * get_pkeys - return the PKEY table for port 0 * @dd: the infinipath device * @pkeys: the pkey table is placed here */ static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys) { /* always a kernel port, no locking needed */ struct ipath_portdata *pd = dd->ipath_pd[0]; memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys)); return 0; } static int recv_subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev) { u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff); u16 *p = (u16 *) smp->data; __be16 *q = (__be16 *) smp->data; /* 64 blocks of 32 16-bit P_Key entries */ memset(smp->data, 0, sizeof(smp->data)); if (startpx == 0) { struct ipath_ibdev *dev = to_idev(ibdev); unsigned i, n = ipath_get_npkeys(dev->dd); get_pkeys(dev->dd, p); for (i = 0; i < n; i++) q[i] = cpu_to_be16(p[i]); } else smp->status |= IB_SMP_INVALID_FIELD; return reply(smp); } static int recv_subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev) { /* The only GUID we support is the first read-only entry. */ return recv_subn_get_guidinfo(smp, ibdev); } /** * set_linkdowndefaultstate - set the default linkdown state * @dd: the infinipath device * @sleep: the new state * * Note that this will only take effect when the link state changes. */ static int set_linkdowndefaultstate(struct ipath_devdata *dd, int sleep) { if (sleep) dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE; else dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE; ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, dd->ipath_ibcctrl); return 0; } /** * recv_subn_set_portinfo - set port information * @smp: the incoming SM packet * @ibdev: the infiniband device * @port: the port on the device * * Set Portinfo (see ch. 14.2.5.6). */ static int recv_subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { struct ib_port_info *pip = (struct ib_port_info *)smp->data; struct ib_event event; struct ipath_ibdev *dev; struct ipath_devdata *dd; char clientrereg = 0; u16 lid, smlid; u8 lwe; u8 lse; u8 state; u16 lstate; u32 mtu; int ret, ore; if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) goto err; dev = to_idev(ibdev); dd = dev->dd; event.device = ibdev; event.element.port_num = port; dev->mkey = pip->mkey; dev->gid_prefix = pip->gid_prefix; dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period); lid = be16_to_cpu(pip->lid); if (dd->ipath_lid != lid || dd->ipath_lmc != (pip->mkeyprot_resv_lmc & 7)) { /* Must be a valid unicast LID address. */ if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE) goto err; ipath_set_lid(dd, lid, pip->mkeyprot_resv_lmc & 7); event.event = IB_EVENT_LID_CHANGE; ib_dispatch_event(&event); } smlid = be16_to_cpu(pip->sm_lid); if (smlid != dev->sm_lid) { /* Must be a valid unicast LID address. */ if (smlid == 0 || smlid >= IPATH_MULTICAST_LID_BASE) goto err; dev->sm_lid = smlid; event.event = IB_EVENT_SM_CHANGE; ib_dispatch_event(&event); } /* Allow 1x or 4x to be set (see 14.2.6.6). 
*/ lwe = pip->link_width_enabled; if (lwe) { if (lwe == 0xFF) lwe = dd->ipath_link_width_supported; else if (lwe >= 16 || (lwe & ~dd->ipath_link_width_supported)) goto err; set_link_width_enabled(dd, lwe); } /* Allow 2.5 or 5.0 Gbs. */ lse = pip->linkspeedactive_enabled & 0xF; if (lse) { if (lse == 15) lse = dd->ipath_link_speed_supported; else if (lse >= 8 || (lse & ~dd->ipath_link_speed_supported)) goto err; set_link_speed_enabled(dd, lse); } /* Set link down default state. */ switch (pip->portphysstate_linkdown & 0xF) { case 0: /* NOP */ break; case 1: /* SLEEP */ if (set_linkdowndefaultstate(dd, 1)) goto err; break; case 2: /* POLL */ if (set_linkdowndefaultstate(dd, 0)) goto err; break; default: goto err; } dev->mkeyprot = pip->mkeyprot_resv_lmc >> 6; dev->vl_high_limit = pip->vl_high_limit; switch ((pip->neighbormtu_mastersmsl >> 4) & 0xF) { case IB_MTU_256: mtu = 256; break; case IB_MTU_512: mtu = 512; break; case IB_MTU_1024: mtu = 1024; break; case IB_MTU_2048: mtu = 2048; break; case IB_MTU_4096: if (!ipath_mtu4096) goto err; mtu = 4096; break; default: /* XXX We have already partially updated our state! */ goto err; } ipath_set_mtu(dd, mtu); dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF; /* We only support VL0 */ if (((pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF) > 1) goto err; if (pip->mkey_violations == 0) dev->mkey_violations = 0; /* * Hardware counter can't be reset so snapshot and subtract * later. */ if (pip->pkey_violations == 0) dev->z_pkey_violations = ipath_get_cr_errpkey(dd); if (pip->qkey_violations == 0) dev->qkey_violations = 0; ore = pip->localphyerrors_overrunerrors; if (set_phyerrthreshold(dd, (ore >> 4) & 0xF)) goto err; if (set_overrunthreshold(dd, (ore & 0xF))) goto err; dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; if (pip->clientrereg_resv_subnetto & 0x80) { clientrereg = 1; event.event = IB_EVENT_CLIENT_REREGISTER; ib_dispatch_event(&event); } /* * Do the port state change now that the other link parameters * have been set. * Changing the port physical state only makes sense if the link * is down or is being set to down. */ state = pip->linkspeed_portstate & 0xF; lstate = (pip->portphysstate_linkdown >> 4) & 0xF; if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP)) goto err; /* * Only state changes of DOWN, ARM, and ACTIVE are valid * and must be in the correct state to take effect (see 7.2.6). */ switch (state) { case IB_PORT_NOP: if (lstate == 0) break; /* FALLTHROUGH */ case IB_PORT_DOWN: if (lstate == 0) lstate = IPATH_IB_LINKDOWN_ONLY; else if (lstate == 1) lstate = IPATH_IB_LINKDOWN_SLEEP; else if (lstate == 2) lstate = IPATH_IB_LINKDOWN; else if (lstate == 3) lstate = IPATH_IB_LINKDOWN_DISABLE; else goto err; ipath_set_linkstate(dd, lstate); if (lstate == IPATH_IB_LINKDOWN_DISABLE) { ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; goto done; } ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED | IPATH_LINKACTIVE, 1000); break; case IB_PORT_ARMED: ipath_set_linkstate(dd, IPATH_IB_LINKARM); break; case IB_PORT_ACTIVE: ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE); break; default: /* XXX We have already partially updated our state! 
*/ goto err; } ret = recv_subn_get_portinfo(smp, ibdev, port); if (clientrereg) pip->clientrereg_resv_subnetto |= 0x80; goto done; err: smp->status |= IB_SMP_INVALID_FIELD; ret = recv_subn_get_portinfo(smp, ibdev, port); done: return ret; } /** * rm_pkey - decrecment the reference count for the given PKEY * @dd: the infinipath device * @key: the PKEY index * * Return true if this was the last reference and the hardware table entry * needs to be changed. */ static int rm_pkey(struct ipath_devdata *dd, u16 key) { int i; int ret; for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { if (dd->ipath_pkeys[i] != key) continue; if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) { dd->ipath_pkeys[i] = 0; ret = 1; goto bail; } break; } ret = 0; bail: return ret; } /** * add_pkey - add the given PKEY to the hardware table * @dd: the infinipath device * @key: the PKEY * * Return an error code if unable to add the entry, zero if no change, * or 1 if the hardware PKEY register needs to be updated. */ static int add_pkey(struct ipath_devdata *dd, u16 key) { int i; u16 lkey = key & 0x7FFF; int any = 0; int ret; if (lkey == 0x7FFF) { ret = 0; goto bail; } /* Look for an empty slot or a matching PKEY. */ for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { if (!dd->ipath_pkeys[i]) { any++; continue; } /* If it matches exactly, try to increment the ref count */ if (dd->ipath_pkeys[i] == key) { if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) { ret = 0; goto bail; } /* Lost the race. Look for an empty slot below. */ atomic_dec(&dd->ipath_pkeyrefs[i]); any++; } /* * It makes no sense to have both the limited and unlimited * PKEY set at the same time since the unlimited one will * disable the limited one. */ if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) { ret = -EEXIST; goto bail; } } if (!any) { ret = -EBUSY; goto bail; } for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { if (!dd->ipath_pkeys[i] && atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) { /* for ipathstats, etc. */ ipath_stats.sps_pkeys[i] = lkey; dd->ipath_pkeys[i] = key; ret = 1; goto bail; } } ret = -EBUSY; bail: return ret; } /** * set_pkeys - set the PKEY table for port 0 * @dd: the infinipath device * @pkeys: the PKEY table */ static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys) { struct ipath_portdata *pd; int i; int changed = 0; /* always a kernel port, no locking needed */ pd = dd->ipath_pd[0]; for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) { u16 key = pkeys[i]; u16 okey = pd->port_pkeys[i]; if (key == okey) continue; /* * The value of this PKEY table entry is changing. * Remove the old entry in the hardware's array of PKEYs. 
*/ if (okey & 0x7FFF) changed |= rm_pkey(dd, okey); if (key & 0x7FFF) { int ret = add_pkey(dd, key); if (ret < 0) key = 0; else changed |= ret; } pd->port_pkeys[i] = key; } if (changed) { u64 pkey; pkey = (u64) dd->ipath_pkeys[0] | ((u64) dd->ipath_pkeys[1] << 16) | ((u64) dd->ipath_pkeys[2] << 32) | ((u64) dd->ipath_pkeys[3] << 48); ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n", (unsigned long long) pkey); ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey, pkey); } return 0; } static int recv_subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev) { u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff); __be16 *p = (__be16 *) smp->data; u16 *q = (u16 *) smp->data; struct ipath_ibdev *dev = to_idev(ibdev); unsigned i, n = ipath_get_npkeys(dev->dd); for (i = 0; i < n; i++) q[i] = be16_to_cpu(p[i]); if (startpx != 0 || set_pkeys(dev->dd, q) != 0) smp->status |= IB_SMP_INVALID_FIELD; return recv_subn_get_pkeytable(smp, ibdev); } #define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001) #define IB_PMA_PORT_SAMPLES_CONTROL cpu_to_be16(0x0010) #define IB_PMA_PORT_SAMPLES_RESULT cpu_to_be16(0x0011) #define IB_PMA_PORT_COUNTERS cpu_to_be16(0x0012) #define IB_PMA_PORT_COUNTERS_EXT cpu_to_be16(0x001D) #define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E) struct ib_perf { u8 base_version; u8 mgmt_class; u8 class_version; u8 method; __be16 status; __be16 unused; __be64 tid; __be16 attr_id; __be16 resv; __be32 attr_mod; u8 reserved[40]; u8 data[192]; } __attribute__ ((packed)); struct ib_pma_classportinfo { u8 base_version; u8 class_version; __be16 cap_mask; u8 reserved[3]; u8 resp_time_value; /* only lower 5 bits */ union ib_gid redirect_gid; __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */ __be16 redirect_lid; __be16 redirect_pkey; __be32 redirect_qp; /* only lower 24 bits */ __be32 redirect_qkey; union ib_gid trap_gid; __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */ __be16 trap_lid; __be16 trap_pkey; __be32 trap_hl_qp; /* 8, 24 bits respectively */ __be32 trap_qkey; } __attribute__ ((packed)); struct ib_pma_portsamplescontrol { u8 opcode; u8 port_select; u8 tick; u8 counter_width; /* only lower 3 bits */ __be32 counter_mask0_9; /* 2, 10 * 3, bits */ __be16 counter_mask10_14; /* 1, 5 * 3, bits */ u8 sample_mechanisms; u8 sample_status; /* only lower 2 bits */ __be64 option_mask; __be64 vendor_mask; __be32 sample_start; __be32 sample_interval; __be16 tag; __be16 counter_select[15]; } __attribute__ ((packed)); struct ib_pma_portsamplesresult { __be16 tag; __be16 sample_status; /* only lower 2 bits */ __be32 counter[15]; } __attribute__ ((packed)); struct ib_pma_portsamplesresult_ext { __be16 tag; __be16 sample_status; /* only lower 2 bits */ __be32 extended_width; /* only upper 2 bits */ __be64 counter[15]; } __attribute__ ((packed)); struct ib_pma_portcounters { u8 reserved; u8 port_select; __be16 counter_select; __be16 symbol_error_counter; u8 link_error_recovery_counter; u8 link_downed_counter; __be16 port_rcv_errors; __be16 port_rcv_remphys_errors; __be16 port_rcv_switch_relay_errors; __be16 port_xmit_discards; u8 port_xmit_constraint_errors; u8 port_rcv_constraint_errors; u8 reserved1; u8 lli_ebor_errors; /* 4, 4, bits */ __be16 reserved2; __be16 vl15_dropped; __be32 port_xmit_data; __be32 port_rcv_data; __be32 port_xmit_packets; __be32 port_rcv_packets; } __attribute__ ((packed)); #define IB_PMA_SEL_SYMBOL_ERROR cpu_to_be16(0x0001) #define IB_PMA_SEL_LINK_ERROR_RECOVERY cpu_to_be16(0x0002) #define IB_PMA_SEL_LINK_DOWNED cpu_to_be16(0x0004) #define 
IB_PMA_SEL_PORT_RCV_ERRORS cpu_to_be16(0x0008) #define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS cpu_to_be16(0x0010) #define IB_PMA_SEL_PORT_XMIT_DISCARDS cpu_to_be16(0x0040) #define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200) #define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS cpu_to_be16(0x0400) #define IB_PMA_SEL_PORT_VL15_DROPPED cpu_to_be16(0x0800) #define IB_PMA_SEL_PORT_XMIT_DATA cpu_to_be16(0x1000) #define IB_PMA_SEL_PORT_RCV_DATA cpu_to_be16(0x2000) #define IB_PMA_SEL_PORT_XMIT_PACKETS cpu_to_be16(0x4000) #define IB_PMA_SEL_PORT_RCV_PACKETS cpu_to_be16(0x8000) struct ib_pma_portcounters_ext { u8 reserved; u8 port_select; __be16 counter_select; __be32 reserved1; __be64 port_xmit_data; __be64 port_rcv_data; __be64 port_xmit_packets; __be64 port_rcv_packets; __be64 port_unicast_xmit_packets; __be64 port_unicast_rcv_packets; __be64 port_multicast_xmit_packets; __be64 port_multicast_rcv_packets; } __attribute__ ((packed)); #define IB_PMA_SELX_PORT_XMIT_DATA cpu_to_be16(0x0001) #define IB_PMA_SELX_PORT_RCV_DATA cpu_to_be16(0x0002) #define IB_PMA_SELX_PORT_XMIT_PACKETS cpu_to_be16(0x0004) #define IB_PMA_SELX_PORT_RCV_PACKETS cpu_to_be16(0x0008) #define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS cpu_to_be16(0x0010) #define IB_PMA_SELX_PORT_UNI_RCV_PACKETS cpu_to_be16(0x0020) #define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS cpu_to_be16(0x0040) #define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS cpu_to_be16(0x0080) static int recv_pma_get_classportinfo(struct ib_perf *pmp) { struct ib_pma_classportinfo *p = (struct ib_pma_classportinfo *)pmp->data; memset(pmp->data, 0, sizeof(pmp->data)); if (pmp->attr_mod != 0) pmp->status |= IB_SMP_INVALID_FIELD; /* Indicate AllPortSelect is valid (only one port anyway) */ p->cap_mask = cpu_to_be16(1 << 8); p->base_version = 1; p->class_version = 1; /* * Expected response time is 4.096 usec. * 2^18 == 1.073741824 * sec. */ p->resp_time_value = 18; return reply((struct ib_smp *) pmp); } /* * The PortSamplesControl.CounterMasks field is an array of 3 bit fields * which specify the N'th counter's capabilities. See ch. 16.1.3.2. * We support 5 counters which only count the mandatory quantities. */ #define COUNTER_MASK(q, n) (q << ((9 - n) * 3)) #define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \ COUNTER_MASK(1, 1) | \ COUNTER_MASK(1, 2) | \ COUNTER_MASK(1, 3) | \ COUNTER_MASK(1, 4)) static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portsamplescontrol *p = (struct ib_pma_portsamplescontrol *)pmp->data; struct ipath_ibdev *dev = to_idev(ibdev); struct ipath_cregs const *crp = dev->dd->ipath_cregs; unsigned long flags; u8 port_select = p->port_select; memset(pmp->data, 0, sizeof(pmp->data)); p->port_select = port_select; if (pmp->attr_mod != 0 || (port_select != port && port_select != 0xFF)) pmp->status |= IB_SMP_INVALID_FIELD; /* * Ticks are 10x the link transfer period which for 2.5Gbs is 4 * nsec. 0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec. Sample * intervals are counted in ticks. Since we use Linux timers, that * count in jiffies, we can't sample for less than 1000 ticks if HZ * == 1000 (4000 ticks if HZ is 250). link_speed_active returns 2 for * DDR, 1 for SDR, set the tick to 1 for DDR, 0 for SDR on chips that * have hardware support for delaying packets. */ if (crp->cr_psstat) p->tick = dev->dd->ipath_link_speed_active - 1; else p->tick = 250; /* 1 usec. 
*/ p->counter_width = 4; /* 32 bit counters */ p->counter_mask0_9 = COUNTER_MASK0_9; spin_lock_irqsave(&dev->pending_lock, flags); if (crp->cr_psstat) p->sample_status = ipath_read_creg32(dev->dd, crp->cr_psstat); else p->sample_status = dev->pma_sample_status; p->sample_start = cpu_to_be32(dev->pma_sample_start); p->sample_interval = cpu_to_be32(dev->pma_sample_interval); p->tag = cpu_to_be16(dev->pma_tag); p->counter_select[0] = dev->pma_counter_select[0]; p->counter_select[1] = dev->pma_counter_select[1]; p->counter_select[2] = dev->pma_counter_select[2]; p->counter_select[3] = dev->pma_counter_select[3]; p->counter_select[4] = dev->pma_counter_select[4]; spin_unlock_irqrestore(&dev->pending_lock, flags); return reply((struct ib_smp *) pmp); } static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portsamplescontrol *p = (struct ib_pma_portsamplescontrol *)pmp->data; struct ipath_ibdev *dev = to_idev(ibdev); struct ipath_cregs const *crp = dev->dd->ipath_cregs; unsigned long flags; u8 status; int ret; if (pmp->attr_mod != 0 || (p->port_select != port && p->port_select != 0xFF)) { pmp->status |= IB_SMP_INVALID_FIELD; ret = reply((struct ib_smp *) pmp); goto bail; } spin_lock_irqsave(&dev->pending_lock, flags); if (crp->cr_psstat) status = ipath_read_creg32(dev->dd, crp->cr_psstat); else status = dev->pma_sample_status; if (status == IB_PMA_SAMPLE_STATUS_DONE) { dev->pma_sample_start = be32_to_cpu(p->sample_start); dev->pma_sample_interval = be32_to_cpu(p->sample_interval); dev->pma_tag = be16_to_cpu(p->tag); dev->pma_counter_select[0] = p->counter_select[0]; dev->pma_counter_select[1] = p->counter_select[1]; dev->pma_counter_select[2] = p->counter_select[2]; dev->pma_counter_select[3] = p->counter_select[3]; dev->pma_counter_select[4] = p->counter_select[4]; if (crp->cr_psstat) { ipath_write_creg(dev->dd, crp->cr_psinterval, dev->pma_sample_interval); ipath_write_creg(dev->dd, crp->cr_psstart, dev->pma_sample_start); } else dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED; } spin_unlock_irqrestore(&dev->pending_lock, flags); ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port); bail: return ret; } static u64 get_counter(struct ipath_ibdev *dev, struct ipath_cregs const *crp, __be16 sel) { u64 ret; switch (sel) { case IB_PMA_PORT_XMIT_DATA: ret = (crp->cr_psxmitdatacount) ? ipath_read_creg32(dev->dd, crp->cr_psxmitdatacount) : dev->ipath_sword; break; case IB_PMA_PORT_RCV_DATA: ret = (crp->cr_psrcvdatacount) ? ipath_read_creg32(dev->dd, crp->cr_psrcvdatacount) : dev->ipath_rword; break; case IB_PMA_PORT_XMIT_PKTS: ret = (crp->cr_psxmitpktscount) ? ipath_read_creg32(dev->dd, crp->cr_psxmitpktscount) : dev->ipath_spkts; break; case IB_PMA_PORT_RCV_PKTS: ret = (crp->cr_psrcvpktscount) ? ipath_read_creg32(dev->dd, crp->cr_psrcvpktscount) : dev->ipath_rpkts; break; case IB_PMA_PORT_XMIT_WAIT: ret = (crp->cr_psxmitwaitcount) ? 
ipath_read_creg32(dev->dd, crp->cr_psxmitwaitcount) : dev->ipath_xmit_wait; break; default: ret = 0; } return ret; } static int recv_pma_get_portsamplesresult(struct ib_perf *pmp, struct ib_device *ibdev) { struct ib_pma_portsamplesresult *p = (struct ib_pma_portsamplesresult *)pmp->data; struct ipath_ibdev *dev = to_idev(ibdev); struct ipath_cregs const *crp = dev->dd->ipath_cregs; u8 status; int i; memset(pmp->data, 0, sizeof(pmp->data)); p->tag = cpu_to_be16(dev->pma_tag); if (crp->cr_psstat) status = ipath_read_creg32(dev->dd, crp->cr_psstat); else status = dev->pma_sample_status; p->sample_status = cpu_to_be16(status); for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++) p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 : cpu_to_be32( get_counter(dev, crp, dev->pma_counter_select[i])); return reply((struct ib_smp *) pmp); } static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp, struct ib_device *ibdev) { struct ib_pma_portsamplesresult_ext *p = (struct ib_pma_portsamplesresult_ext *)pmp->data; struct ipath_ibdev *dev = to_idev(ibdev); struct ipath_cregs const *crp = dev->dd->ipath_cregs; u8 status; int i; memset(pmp->data, 0, sizeof(pmp->data)); p->tag = cpu_to_be16(dev->pma_tag); if (crp->cr_psstat) status = ipath_read_creg32(dev->dd, crp->cr_psstat); else status = dev->pma_sample_status; p->sample_status = cpu_to_be16(status); /* 64 bits */ p->extended_width = cpu_to_be32(0x80000000); for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++) p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 : cpu_to_be64( get_counter(dev, crp, dev->pma_counter_select[i])); return reply((struct ib_smp *) pmp); } static int recv_pma_get_portcounters(struct ib_perf *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) pmp->data; struct ipath_ibdev *dev = to_idev(ibdev); struct ipath_verbs_counters cntrs; u8 port_select = p->port_select; ipath_get_counters(dev->dd, &cntrs); /* Adjust counters for any resets done. 
*/ cntrs.symbol_error_counter -= dev->z_symbol_error_counter; cntrs.link_error_recovery_counter -= dev->z_link_error_recovery_counter; cntrs.link_downed_counter -= dev->z_link_downed_counter; cntrs.port_rcv_errors += dev->rcv_errors; cntrs.port_rcv_errors -= dev->z_port_rcv_errors; cntrs.port_rcv_remphys_errors -= dev->z_port_rcv_remphys_errors; cntrs.port_xmit_discards -= dev->z_port_xmit_discards; cntrs.port_xmit_data -= dev->z_port_xmit_data; cntrs.port_rcv_data -= dev->z_port_rcv_data; cntrs.port_xmit_packets -= dev->z_port_xmit_packets; cntrs.port_rcv_packets -= dev->z_port_rcv_packets; cntrs.local_link_integrity_errors -= dev->z_local_link_integrity_errors; cntrs.excessive_buffer_overrun_errors -= dev->z_excessive_buffer_overrun_errors; cntrs.vl15_dropped -= dev->z_vl15_dropped; cntrs.vl15_dropped += dev->n_vl15_dropped; memset(pmp->data, 0, sizeof(pmp->data)); p->port_select = port_select; if (pmp->attr_mod != 0 || (port_select != port && port_select != 0xFF)) pmp->status |= IB_SMP_INVALID_FIELD; if (cntrs.symbol_error_counter > 0xFFFFUL) p->symbol_error_counter = cpu_to_be16(0xFFFF); else p->symbol_error_counter = cpu_to_be16((u16)cntrs.symbol_error_counter); if (cntrs.link_error_recovery_counter > 0xFFUL) p->link_error_recovery_counter = 0xFF; else p->link_error_recovery_counter = (u8)cntrs.link_error_recovery_counter; if (cntrs.link_downed_counter > 0xFFUL) p->link_downed_counter = 0xFF; else p->link_downed_counter = (u8)cntrs.link_downed_counter; if (cntrs.port_rcv_errors > 0xFFFFUL) p->port_rcv_errors = cpu_to_be16(0xFFFF); else p->port_rcv_errors = cpu_to_be16((u16) cntrs.port_rcv_errors); if (cntrs.port_rcv_remphys_errors > 0xFFFFUL) p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF); else p->port_rcv_remphys_errors = cpu_to_be16((u16)cntrs.port_rcv_remphys_errors); if (cntrs.port_xmit_discards > 0xFFFFUL) p->port_xmit_discards = cpu_to_be16(0xFFFF); else p->port_xmit_discards = cpu_to_be16((u16)cntrs.port_xmit_discards); if (cntrs.local_link_integrity_errors > 0xFUL) cntrs.local_link_integrity_errors = 0xFUL; if (cntrs.excessive_buffer_overrun_errors > 0xFUL) cntrs.excessive_buffer_overrun_errors = 0xFUL; p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) | cntrs.excessive_buffer_overrun_errors; if (cntrs.vl15_dropped > 0xFFFFUL) p->vl15_dropped = cpu_to_be16(0xFFFF); else p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped); if (cntrs.port_xmit_data > 0xFFFFFFFFUL) p->port_xmit_data = cpu_to_be32(0xFFFFFFFF); else p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data); if (cntrs.port_rcv_data > 0xFFFFFFFFUL) p->port_rcv_data = cpu_to_be32(0xFFFFFFFF); else p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data); if (cntrs.port_xmit_packets > 0xFFFFFFFFUL) p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF); else p->port_xmit_packets = cpu_to_be32((u32)cntrs.port_xmit_packets); if (cntrs.port_rcv_packets > 0xFFFFFFFFUL) p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF); else p->port_rcv_packets = cpu_to_be32((u32) cntrs.port_rcv_packets); return reply((struct ib_smp *) pmp); } static int recv_pma_get_portcounters_ext(struct ib_perf *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portcounters_ext *p = (struct ib_pma_portcounters_ext *)pmp->data; struct ipath_ibdev *dev = to_idev(ibdev); u64 swords, rwords, spkts, rpkts, xwait; u8 port_select = p->port_select; ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts, &rpkts, &xwait); /* Adjust counters for any resets done. 
*/ swords -= dev->z_port_xmit_data; rwords -= dev->z_port_rcv_data; spkts -= dev->z_port_xmit_packets; rpkts -= dev->z_port_rcv_packets; memset(pmp->data, 0, sizeof(pmp->data)); p->port_select = port_select; if (pmp->attr_mod != 0 || (port_select != port && port_select != 0xFF)) pmp->status |= IB_SMP_INVALID_FIELD; p->port_xmit_data = cpu_to_be64(swords); p->port_rcv_data = cpu_to_be64(rwords); p->port_xmit_packets = cpu_to_be64(spkts); p->port_rcv_packets = cpu_to_be64(rpkts); p->port_unicast_xmit_packets = cpu_to_be64(dev->n_unicast_xmit); p->port_unicast_rcv_packets = cpu_to_be64(dev->n_unicast_rcv); p->port_multicast_xmit_packets = cpu_to_be64(dev->n_multicast_xmit); p->port_multicast_rcv_packets = cpu_to_be64(dev->n_multicast_rcv); return reply((struct ib_smp *) pmp); } static int recv_pma_set_portcounters(struct ib_perf *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) pmp->data; struct ipath_ibdev *dev = to_idev(ibdev); struct ipath_verbs_counters cntrs; /* * Since the HW doesn't support clearing counters, we save the * current count and subtract it from future responses. */ ipath_get_counters(dev->dd, &cntrs); if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR) dev->z_symbol_error_counter = cntrs.symbol_error_counter; if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY) dev->z_link_error_recovery_counter = cntrs.link_error_recovery_counter; if (p->counter_select & IB_PMA_SEL_LINK_DOWNED) dev->z_link_downed_counter = cntrs.link_downed_counter; if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS) dev->z_port_rcv_errors = cntrs.port_rcv_errors + dev->rcv_errors; if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS) dev->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors; if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS) dev->z_port_xmit_discards = cntrs.port_xmit_discards; if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS) dev->z_local_link_integrity_errors = cntrs.local_link_integrity_errors; if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS) dev->z_excessive_buffer_overrun_errors = cntrs.excessive_buffer_overrun_errors; if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) { dev->n_vl15_dropped = 0; dev->z_vl15_dropped = cntrs.vl15_dropped; } if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA) dev->z_port_xmit_data = cntrs.port_xmit_data; if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA) dev->z_port_rcv_data = cntrs.port_rcv_data; if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS) dev->z_port_xmit_packets = cntrs.port_xmit_packets; if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS) dev->z_port_rcv_packets = cntrs.port_rcv_packets; return recv_pma_get_portcounters(pmp, ibdev, port); } static int recv_pma_set_portcounters_ext(struct ib_perf *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) pmp->data; struct ipath_ibdev *dev = to_idev(ibdev); u64 swords, rwords, spkts, rpkts, xwait; ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts, &rpkts, &xwait); if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA) dev->z_port_xmit_data = swords; if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA) dev->z_port_rcv_data = rwords; if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS) dev->z_port_xmit_packets = spkts; if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS) dev->z_port_rcv_packets = rpkts; if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS) dev->n_unicast_xmit = 0; if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS) 
dev->n_unicast_rcv = 0; if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS) dev->n_multicast_xmit = 0; if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS) dev->n_multicast_rcv = 0; return recv_pma_get_portcounters_ext(pmp, ibdev, port); } static int process_subn(struct ib_device *ibdev, int mad_flags, u8 port_num, struct ib_mad *in_mad, struct ib_mad *out_mad) { struct ib_smp *smp = (struct ib_smp *)out_mad; struct ipath_ibdev *dev = to_idev(ibdev); int ret; *out_mad = *in_mad; if (smp->class_version != 1) { smp->status |= IB_SMP_UNSUP_VERSION; ret = reply(smp); goto bail; } /* Is the mkey in the process of expiring? */ if (dev->mkey_lease_timeout && time_after_eq(jiffies, dev->mkey_lease_timeout)) { /* Clear timeout and mkey protection field. */ dev->mkey_lease_timeout = 0; dev->mkeyprot = 0; } /* * M_Key checking depends on * Portinfo:M_Key_protect_bits */ if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && dev->mkey != 0 && dev->mkey != smp->mkey && (smp->method == IB_MGMT_METHOD_SET || (smp->method == IB_MGMT_METHOD_GET && dev->mkeyprot >= 2))) { if (dev->mkey_violations != 0xFFFF) ++dev->mkey_violations; if (dev->mkey_lease_timeout || dev->mkey_lease_period == 0) { ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; goto bail; } dev->mkey_lease_timeout = jiffies + dev->mkey_lease_period * HZ; /* Future: Generate a trap notice. */ ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; goto bail; } else if (dev->mkey_lease_timeout) dev->mkey_lease_timeout = 0; switch (smp->method) { case IB_MGMT_METHOD_GET: switch (smp->attr_id) { case IB_SMP_ATTR_NODE_DESC: ret = recv_subn_get_nodedescription(smp, ibdev); goto bail; case IB_SMP_ATTR_NODE_INFO: ret = recv_subn_get_nodeinfo(smp, ibdev, port_num); goto bail; case IB_SMP_ATTR_GUID_INFO: ret = recv_subn_get_guidinfo(smp, ibdev); goto bail; case IB_SMP_ATTR_PORT_INFO: ret = recv_subn_get_portinfo(smp, ibdev, port_num); goto bail; case IB_SMP_ATTR_PKEY_TABLE: ret = recv_subn_get_pkeytable(smp, ibdev); goto bail; case IB_SMP_ATTR_SM_INFO: if (dev->port_cap_flags & IB_PORT_SM_DISABLED) { ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; goto bail; } if (dev->port_cap_flags & IB_PORT_SM) { ret = IB_MAD_RESULT_SUCCESS; goto bail; } /* FALLTHROUGH */ default: smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply(smp); goto bail; } case IB_MGMT_METHOD_SET: switch (smp->attr_id) { case IB_SMP_ATTR_GUID_INFO: ret = recv_subn_set_guidinfo(smp, ibdev); goto bail; case IB_SMP_ATTR_PORT_INFO: ret = recv_subn_set_portinfo(smp, ibdev, port_num); goto bail; case IB_SMP_ATTR_PKEY_TABLE: ret = recv_subn_set_pkeytable(smp, ibdev); goto bail; case IB_SMP_ATTR_SM_INFO: if (dev->port_cap_flags & IB_PORT_SM_DISABLED) { ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; goto bail; } if (dev->port_cap_flags & IB_PORT_SM) { ret = IB_MAD_RESULT_SUCCESS; goto bail; } /* FALLTHROUGH */ default: smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply(smp); goto bail; } case IB_MGMT_METHOD_TRAP: case IB_MGMT_METHOD_REPORT: case IB_MGMT_METHOD_REPORT_RESP: case IB_MGMT_METHOD_TRAP_REPRESS: case IB_MGMT_METHOD_GET_RESP: /* * The ib_mad module will call us to process responses * before checking for other consumers. * Just tell the caller to process it normally. 
*/ ret = IB_MAD_RESULT_SUCCESS; goto bail; default: smp->status |= IB_SMP_UNSUP_METHOD; ret = reply(smp); } bail: return ret; } static int process_perf(struct ib_device *ibdev, u8 port_num, struct ib_mad *in_mad, struct ib_mad *out_mad) { struct ib_perf *pmp = (struct ib_perf *)out_mad; int ret; *out_mad = *in_mad; if (pmp->class_version != 1) { pmp->status |= IB_SMP_UNSUP_VERSION; ret = reply((struct ib_smp *) pmp); goto bail; } switch (pmp->method) { case IB_MGMT_METHOD_GET: switch (pmp->attr_id) { case IB_PMA_CLASS_PORT_INFO: ret = recv_pma_get_classportinfo(pmp); goto bail; case IB_PMA_PORT_SAMPLES_CONTROL: ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port_num); goto bail; case IB_PMA_PORT_SAMPLES_RESULT: ret = recv_pma_get_portsamplesresult(pmp, ibdev); goto bail; case IB_PMA_PORT_SAMPLES_RESULT_EXT: ret = recv_pma_get_portsamplesresult_ext(pmp, ibdev); goto bail; case IB_PMA_PORT_COUNTERS: ret = recv_pma_get_portcounters(pmp, ibdev, port_num); goto bail; case IB_PMA_PORT_COUNTERS_EXT: ret = recv_pma_get_portcounters_ext(pmp, ibdev, port_num); goto bail; default: pmp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_smp *) pmp); goto bail; } case IB_MGMT_METHOD_SET: switch (pmp->attr_id) { case IB_PMA_PORT_SAMPLES_CONTROL: ret = recv_pma_set_portsamplescontrol(pmp, ibdev, port_num); goto bail; case IB_PMA_PORT_COUNTERS: ret = recv_pma_set_portcounters(pmp, ibdev, port_num); goto bail; case IB_PMA_PORT_COUNTERS_EXT: ret = recv_pma_set_portcounters_ext(pmp, ibdev, port_num); goto bail; default: pmp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_smp *) pmp); goto bail; } case IB_MGMT_METHOD_GET_RESP: /* * The ib_mad module will call us to process responses * before checking for other consumers. * Just tell the caller to process it normally. */ ret = IB_MAD_RESULT_SUCCESS; goto bail; default: pmp->status |= IB_SMP_UNSUP_METHOD; ret = reply((struct ib_smp *) pmp); } bail: return ret; } /** * ipath_process_mad - process an incoming MAD packet * @ibdev: the infiniband device this packet came in on * @mad_flags: MAD flags * @port_num: the port number this packet came in on * @in_wc: the work completion entry for this packet * @in_grh: the global route header for this packet * @in_mad: the incoming MAD * @out_mad: any outgoing MAD reply * * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not * interested in processing. * * Note that the verbs framework has already done the MAD sanity checks, * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE * MADs. * * This is called by the ib_mad module. */ int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh, struct ib_mad *in_mad, struct ib_mad *out_mad) { int ret; switch (in_mad->mad_hdr.mgmt_class) { case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: case IB_MGMT_CLASS_SUBN_LID_ROUTED: ret = process_subn(ibdev, mad_flags, port_num, in_mad, out_mad); goto bail; case IB_MGMT_CLASS_PERF_MGMT: ret = process_perf(ibdev, port_num, in_mad, out_mad); goto bail; default: ret = IB_MAD_RESULT_SUCCESS; } bail: return ret; }
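/*
 * Illustrative sketch, not part of ipath_mad.c: the driver above notes that
 * the hardware cannot clear its counters, so a PerfMgt "Set" only records a
 * baseline and later "Get" replies report the delta, saturated to the width
 * of the wire field.  The names below (sw_counter, pma_clear, pma_read16)
 * are hypothetical and exist only for this standalone example.
 */
#include <stdint.h>

struct sw_counter {
	uint64_t raw;		/* monotonically increasing hardware count */
	uint64_t baseline;	/* snapshot taken when the counter was "cleared" */
};

/* "Clear": remember the current raw value instead of resetting the hardware. */
static inline void pma_clear(struct sw_counter *c)
{
	c->baseline = c->raw;
}

/* "Read": report the change since the last clear, saturated to 16 bits. */
static inline uint16_t pma_read16(const struct sw_counter *c)
{
	uint64_t delta = c->raw - c->baseline;

	return delta > 0xFFFFULL ? 0xFFFF : (uint16_t)delta;
}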
gpl-2.0
zyrgit/linux-yocto-3.10-work
drivers/usb/gadget/functions.c
4368
2271
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/usb/composite.h>

static LIST_HEAD(func_list);
static DEFINE_MUTEX(func_lock);

static struct usb_function_instance *try_get_usb_function_instance(const char *name)
{
	struct usb_function_driver *fd;
	struct usb_function_instance *fi;

	fi = ERR_PTR(-ENOENT);
	mutex_lock(&func_lock);
	list_for_each_entry(fd, &func_list, list) {
		if (strcmp(name, fd->name))
			continue;

		if (!try_module_get(fd->mod)) {
			fi = ERR_PTR(-EBUSY);
			break;
		}
		fi = fd->alloc_inst();
		if (IS_ERR(fi))
			module_put(fd->mod);
		else
			fi->fd = fd;
		break;
	}
	mutex_unlock(&func_lock);
	return fi;
}

struct usb_function_instance *usb_get_function_instance(const char *name)
{
	struct usb_function_instance *fi;
	int ret;

	fi = try_get_usb_function_instance(name);
	if (!IS_ERR(fi))
		return fi;
	ret = PTR_ERR(fi);
	if (ret != -ENOENT)
		return fi;
	ret = request_module("usbfunc:%s", name);
	if (ret < 0)
		return ERR_PTR(ret);
	return try_get_usb_function_instance(name);
}
EXPORT_SYMBOL_GPL(usb_get_function_instance);

struct usb_function *usb_get_function(struct usb_function_instance *fi)
{
	struct usb_function *f;

	f = fi->fd->alloc_func(fi);
	if (IS_ERR(f))
		return f;
	f->fi = fi;
	return f;
}
EXPORT_SYMBOL_GPL(usb_get_function);

void usb_put_function_instance(struct usb_function_instance *fi)
{
	struct module *mod;

	if (!fi)
		return;

	mod = fi->fd->mod;
	fi->free_func_inst(fi);
	module_put(mod);
}
EXPORT_SYMBOL_GPL(usb_put_function_instance);

void usb_put_function(struct usb_function *f)
{
	if (!f)
		return;

	f->free_func(f);
}
EXPORT_SYMBOL_GPL(usb_put_function);

int usb_function_register(struct usb_function_driver *newf)
{
	struct usb_function_driver *fd;
	int ret;

	ret = -EEXIST;
	mutex_lock(&func_lock);
	list_for_each_entry(fd, &func_list, list) {
		if (!strcmp(fd->name, newf->name))
			goto out;
	}
	ret = 0;
	list_add_tail(&newf->list, &func_list);
out:
	mutex_unlock(&func_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(usb_function_register);

void usb_function_unregister(struct usb_function_driver *fd)
{
	mutex_lock(&func_lock);
	list_del(&fd->list);
	mutex_unlock(&func_lock);
}
EXPORT_SYMBOL_GPL(usb_function_unregister);
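/*
 * Illustrative usage sketch, not taken from this file or from any in-tree
 * gadget: how a composite gadget might obtain and release a function through
 * the registry above.  The function name "acm" and the simplified error
 * handling are assumptions made purely for the example; usb_add_function()
 * is the standard composite-framework helper for attaching a function to a
 * configuration.
 */
#include <linux/err.h>
#include <linux/usb/composite.h>

static int example_bind_one_function(struct usb_configuration *c)
{
	struct usb_function_instance *fi;
	struct usb_function *f;
	int ret;

	fi = usb_get_function_instance("acm");	/* may request_module() */
	if (IS_ERR(fi))
		return PTR_ERR(fi);

	f = usb_get_function(fi);
	if (IS_ERR(f)) {
		usb_put_function_instance(fi);
		return PTR_ERR(f);
	}

	ret = usb_add_function(c, f);
	if (ret) {
		usb_put_function(f);
		usb_put_function_instance(fi);
	}
	return ret;
}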
gpl-2.0
AOSPXS/kernel_sony_msm8x60
drivers/nubus/nubus.c
5136
27007
/* * Macintosh Nubus Interface Code * * Originally by Alan Cox * * Mostly rewritten by David Huggins-Daines, C. Scott Ananian, * and others. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nubus.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/slab.h> #include <asm/setup.h> #include <asm/page.h> #include <asm/hwtest.h> #include <linux/proc_fs.h> #include <asm/mac_via.h> #include <asm/mac_oss.h> extern void via_nubus_init(void); extern void oss_nubus_init(void); /* Constants */ /* This is, of course, the size in bytelanes, rather than the size in actual bytes */ #define FORMAT_BLOCK_SIZE 20 #define ROM_DIR_OFFSET 0x24 #define NUBUS_TEST_PATTERN 0x5A932BC7 /* Define this if you like to live dangerously - it is known not to work on pretty much every machine except the Quadra 630 and the LC III. */ #undef I_WANT_TO_PROBE_SLOT_ZERO /* This sometimes helps combat failure to boot */ #undef TRY_TO_DODGE_WSOD /* Globals */ struct nubus_dev* nubus_devices; struct nubus_board* nubus_boards; /* Meaning of "bytelanes": The card ROM may appear on any or all bytes of each long word in NuBus memory. The low 4 bits of the "map" value found in the format block (at the top of the slot address space, as well as at the top of the MacOS ROM) tells us which bytelanes, i.e. which byte offsets within each longword, are valid. Thus: A map of 0x0f, as found in the MacOS ROM, means that all bytelanes are valid. A map of 0xf0 means that no bytelanes are valid (We pray that we will never encounter this, but stranger things have happened) A map of 0xe1 means that only the MSB of each long word is actually part of the card ROM. (We hope to never encounter NuBus on a little-endian machine. Again, stranger things have happened) A map of 0x78 means that only the LSB of each long word is valid. Etcetera, etcetera. Hopefully this clears up some confusion over what the following code actually does. */ static inline int not_useful(void *p, int map) { unsigned long pv=(unsigned long)p; pv &= 3; if(map & (1<<pv)) return 0; return 1; } static unsigned long nubus_get_rom(unsigned char **ptr, int len, int map) { /* This will hold the result */ unsigned long v = 0; unsigned char *p = *ptr; while(len) { v <<= 8; while(not_useful(p,map)) p++; v |= *p++; len--; } *ptr = p; return v; } static void nubus_rewind(unsigned char **ptr, int len, int map) { unsigned char *p=*ptr; /* Sanity check */ if(len > 65536) printk(KERN_ERR "rewind of 0x%08x!\n", len); while(len) { do { p--; } while(not_useful(p, map)); len--; } *ptr=p; } static void nubus_advance(unsigned char **ptr, int len, int map) { unsigned char *p = *ptr; if(len>65536) printk(KERN_ERR "advance of 0x%08x!\n", len); while(len) { while(not_useful(p,map)) p++; p++; len--; } *ptr = p; } static void nubus_move(unsigned char **ptr, int len, int map) { if(len > 0) nubus_advance(ptr, len, map); else if(len < 0) nubus_rewind(ptr, -len, map); } /* Now, functions to read the sResource tree */ /* Each sResource entry consists of a 1-byte ID and a 3-byte data field. If that data field contains an offset, then obviously we have to expand it from a 24-bit signed number to a 32-bit signed number. */ static inline long nubus_expand32(long foo) { if(foo & 0x00800000) /* 24bit negative */ foo |= 0xFF000000; return foo; } static inline void *nubus_rom_addr(int slot) { /* * Returns the first byte after the card. 
We then walk * backwards to get the lane register and the config */ return (void *)(0xF1000000+(slot<<24)); } static unsigned char *nubus_dirptr(const struct nubus_dirent *nd) { unsigned char *p = nd->base; /* Essentially, just step over the bytelanes using whatever offset we might have found */ nubus_move(&p, nubus_expand32(nd->data), nd->mask); /* And return the value */ return p; } /* These two are for pulling resource data blocks (i.e. stuff that's pointed to with offsets) out of the card ROM. */ void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent* dirent, int len) { unsigned char *t = (unsigned char *)dest; unsigned char *p = nubus_dirptr(dirent); while(len) { *t++ = nubus_get_rom(&p, 1, dirent->mask); len--; } } EXPORT_SYMBOL(nubus_get_rsrc_mem); void nubus_get_rsrc_str(void *dest, const struct nubus_dirent* dirent, int len) { unsigned char *t=(unsigned char *)dest; unsigned char *p = nubus_dirptr(dirent); while(len) { *t = nubus_get_rom(&p, 1, dirent->mask); if(!*t++) break; len--; } } EXPORT_SYMBOL(nubus_get_rsrc_str); int nubus_get_root_dir(const struct nubus_board* board, struct nubus_dir* dir) { dir->ptr = dir->base = board->directory; dir->done = 0; dir->mask = board->lanes; return 0; } EXPORT_SYMBOL(nubus_get_root_dir); /* This is a slyly renamed version of the above */ int nubus_get_func_dir(const struct nubus_dev* dev, struct nubus_dir* dir) { dir->ptr = dir->base = dev->directory; dir->done = 0; dir->mask = dev->board->lanes; return 0; } EXPORT_SYMBOL(nubus_get_func_dir); int nubus_get_board_dir(const struct nubus_board* board, struct nubus_dir* dir) { struct nubus_dirent ent; dir->ptr = dir->base = board->directory; dir->done = 0; dir->mask = board->lanes; /* Now dereference it (the first directory is always the board directory) */ if (nubus_readdir(dir, &ent) == -1) return -1; if (nubus_get_subdir(&ent, dir) == -1) return -1; return 0; } EXPORT_SYMBOL(nubus_get_board_dir); int nubus_get_subdir(const struct nubus_dirent *ent, struct nubus_dir *dir) { dir->ptr = dir->base = nubus_dirptr(ent); dir->done = 0; dir->mask = ent->mask; return 0; } EXPORT_SYMBOL(nubus_get_subdir); int nubus_readdir(struct nubus_dir *nd, struct nubus_dirent *ent) { u32 resid; if (nd->done) return -1; /* Do this first, otherwise nubus_rewind & co are off by 4 */ ent->base = nd->ptr; /* This moves nd->ptr forward */ resid = nubus_get_rom(&nd->ptr, 4, nd->mask); /* EOL marker, as per the Apple docs */ if((resid&0xff000000) == 0xff000000) { /* Mark it as done */ nd->done = 1; return -1; } /* First byte is the resource ID */ ent->type = resid >> 24; /* Low 3 bytes might contain data (or might not) */ ent->data = resid & 0xffffff; ent->mask = nd->mask; return 0; } EXPORT_SYMBOL(nubus_readdir); int nubus_rewinddir(struct nubus_dir* dir) { dir->ptr = dir->base; return 0; } EXPORT_SYMBOL(nubus_rewinddir); /* Driver interface functions, more or less like in pci.c */ struct nubus_dev* nubus_find_device(unsigned short category, unsigned short type, unsigned short dr_hw, unsigned short dr_sw, const struct nubus_dev* from) { struct nubus_dev* itor = from ? from->next : nubus_devices; while (itor) { if (itor->category == category && itor->type == type && itor->dr_hw == dr_hw && itor->dr_sw == dr_sw) return itor; itor = itor->next; } return NULL; } EXPORT_SYMBOL(nubus_find_device); struct nubus_dev* nubus_find_type(unsigned short category, unsigned short type, const struct nubus_dev* from) { struct nubus_dev* itor = from ? 
from->next : nubus_devices; while (itor) { if (itor->category == category && itor->type == type) return itor; itor = itor->next; } return NULL; } EXPORT_SYMBOL(nubus_find_type); struct nubus_dev* nubus_find_slot(unsigned int slot, const struct nubus_dev* from) { struct nubus_dev* itor = from ? from->next : nubus_devices; while (itor) { if (itor->board->slot == slot) return itor; itor = itor->next; } return NULL; } EXPORT_SYMBOL(nubus_find_slot); int nubus_find_rsrc(struct nubus_dir* dir, unsigned char rsrc_type, struct nubus_dirent* ent) { while (nubus_readdir(dir, ent) != -1) { if (ent->type == rsrc_type) return 0; } return -1; } EXPORT_SYMBOL(nubus_find_rsrc); /* Initialization functions - decide which slots contain stuff worth looking at, and print out lots and lots of information from the resource blocks. */ /* FIXME: A lot of this stuff will eventually be useful after initialization, for intelligently probing Ethernet and video chips, among other things. The rest of it should go in the /proc code. For now, we just use it to give verbose boot logs. */ static int __init nubus_show_display_resource(struct nubus_dev* dev, const struct nubus_dirent* ent) { switch (ent->type) { case NUBUS_RESID_GAMMADIR: printk(KERN_INFO " gamma directory offset: 0x%06x\n", ent->data); break; case 0x0080 ... 0x0085: printk(KERN_INFO " mode %02X info offset: 0x%06x\n", ent->type, ent->data); break; default: printk(KERN_INFO " unknown resource %02X, data 0x%06x\n", ent->type, ent->data); } return 0; } static int __init nubus_show_network_resource(struct nubus_dev* dev, const struct nubus_dirent* ent) { switch (ent->type) { case NUBUS_RESID_MAC_ADDRESS: { char addr[6]; int i; nubus_get_rsrc_mem(addr, ent, 6); printk(KERN_INFO " MAC address: "); for (i = 0; i < 6; i++) printk("%02x%s", addr[i] & 0xff, i == 5 ? 
"" : ":"); printk("\n"); break; } default: printk(KERN_INFO " unknown resource %02X, data 0x%06x\n", ent->type, ent->data); } return 0; } static int __init nubus_show_cpu_resource(struct nubus_dev* dev, const struct nubus_dirent* ent) { switch (ent->type) { case NUBUS_RESID_MEMINFO: { unsigned long meminfo[2]; nubus_get_rsrc_mem(&meminfo, ent, 8); printk(KERN_INFO " memory: [ 0x%08lx 0x%08lx ]\n", meminfo[0], meminfo[1]); break; } case NUBUS_RESID_ROMINFO: { unsigned long rominfo[2]; nubus_get_rsrc_mem(&rominfo, ent, 8); printk(KERN_INFO " ROM: [ 0x%08lx 0x%08lx ]\n", rominfo[0], rominfo[1]); break; } default: printk(KERN_INFO " unknown resource %02X, data 0x%06x\n", ent->type, ent->data); } return 0; } static int __init nubus_show_private_resource(struct nubus_dev* dev, const struct nubus_dirent* ent) { switch (dev->category) { case NUBUS_CAT_DISPLAY: nubus_show_display_resource(dev, ent); break; case NUBUS_CAT_NETWORK: nubus_show_network_resource(dev, ent); break; case NUBUS_CAT_CPU: nubus_show_cpu_resource(dev, ent); break; default: printk(KERN_INFO " unknown resource %02X, data 0x%06x\n", ent->type, ent->data); } return 0; } static struct nubus_dev* __init nubus_get_functional_resource(struct nubus_board* board, int slot, const struct nubus_dirent* parent) { struct nubus_dir dir; struct nubus_dirent ent; struct nubus_dev* dev; printk(KERN_INFO " Function 0x%02x:\n", parent->type); nubus_get_subdir(parent, &dir); /* Apple seems to have botched the ROM on the IIx */ if (slot == 0 && (unsigned long)dir.base % 2) dir.base += 1; if (console_loglevel >= 10) printk(KERN_DEBUG "nubus_get_functional_resource: parent is 0x%p, dir is 0x%p\n", parent->base, dir.base); /* Actually we should probably panic if this fails */ if ((dev = kzalloc(sizeof(*dev), GFP_ATOMIC)) == NULL) return NULL; dev->resid = parent->type; dev->directory = dir.base; dev->board = board; while (nubus_readdir(&dir, &ent) != -1) { switch(ent.type) { case NUBUS_RESID_TYPE: { unsigned short nbtdata[4]; nubus_get_rsrc_mem(nbtdata, &ent, 8); dev->category = nbtdata[0]; dev->type = nbtdata[1]; dev->dr_sw = nbtdata[2]; dev->dr_hw = nbtdata[3]; printk(KERN_INFO " type: [cat 0x%x type 0x%x hw 0x%x sw 0x%x]\n", nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]); break; } case NUBUS_RESID_NAME: { nubus_get_rsrc_str(dev->name, &ent, 64); printk(KERN_INFO " name: %s\n", dev->name); break; } case NUBUS_RESID_DRVRDIR: { /* MacOS driver. If we were NetBSD we might use this :-) */ struct nubus_dir drvr_dir; struct nubus_dirent drvr_ent; nubus_get_subdir(&ent, &drvr_dir); nubus_readdir(&drvr_dir, &drvr_ent); dev->driver = nubus_dirptr(&drvr_ent); printk(KERN_INFO " driver at: 0x%p\n", dev->driver); break; } case NUBUS_RESID_MINOR_BASEOS: /* We will need this in order to support multiple framebuffers. It might be handy for Ethernet as well */ nubus_get_rsrc_mem(&dev->iobase, &ent, 4); printk(KERN_INFO " memory offset: 0x%08lx\n", dev->iobase); break; case NUBUS_RESID_MINOR_LENGTH: /* Ditto */ nubus_get_rsrc_mem(&dev->iosize, &ent, 4); printk(KERN_INFO " memory length: 0x%08lx\n", dev->iosize); break; case NUBUS_RESID_FLAGS: dev->flags = ent.data; printk(KERN_INFO " flags: 0x%06x\n", dev->flags); break; case NUBUS_RESID_HWDEVID: dev->hwdevid = ent.data; printk(KERN_INFO " hwdevid: 0x%06x\n", dev->hwdevid); break; default: /* Local/Private resources have their own function */ nubus_show_private_resource(dev, &ent); } } return dev; } /* This is cool. 
*/ static int __init nubus_get_vidnames(struct nubus_board* board, const struct nubus_dirent* parent) { struct nubus_dir dir; struct nubus_dirent ent; /* FIXME: obviously we want to put this in a header file soon */ struct vidmode { u32 size; /* Don't know what this is yet */ u16 id; /* Longest one I've seen so far is 26 characters */ char name[32]; }; printk(KERN_INFO " video modes supported:\n"); nubus_get_subdir(parent, &dir); if (console_loglevel >= 10) printk(KERN_DEBUG "nubus_get_vidnames: parent is 0x%p, dir is 0x%p\n", parent->base, dir.base); while(nubus_readdir(&dir, &ent) != -1) { struct vidmode mode; u32 size; /* First get the length */ nubus_get_rsrc_mem(&size, &ent, 4); /* Now clobber the whole thing */ if (size > sizeof(mode) - 1) size = sizeof(mode) - 1; memset(&mode, 0, sizeof(mode)); nubus_get_rsrc_mem(&mode, &ent, size); printk (KERN_INFO " %02X: (%02X) %s\n", ent.type, mode.id, mode.name); } return 0; } /* This is *really* cool. */ static int __init nubus_get_icon(struct nubus_board* board, const struct nubus_dirent* ent) { /* Should be 32x32 if my memory serves me correctly */ unsigned char icon[128]; int x, y; nubus_get_rsrc_mem(&icon, ent, 128); printk(KERN_INFO " icon:\n"); /* We should actually plot these somewhere in the framebuffer init. This is just to demonstrate that they do, in fact, exist */ for (y = 0; y < 32; y++) { printk(KERN_INFO " "); for (x = 0; x < 32; x++) { if (icon[y*4 + x/8] & (0x80 >> (x%8))) printk("*"); else printk(" "); } printk("\n"); } return 0; } static int __init nubus_get_vendorinfo(struct nubus_board* board, const struct nubus_dirent* parent) { struct nubus_dir dir; struct nubus_dirent ent; static char* vendor_fields[6] = {"ID", "serial", "revision", "part", "date", "unknown field"}; printk(KERN_INFO " vendor info:\n"); nubus_get_subdir(parent, &dir); if (console_loglevel >= 10) printk(KERN_DEBUG "nubus_get_vendorinfo: parent is 0x%p, dir is 0x%p\n", parent->base, dir.base); while(nubus_readdir(&dir, &ent) != -1) { char name[64]; /* These are all strings, we think */ nubus_get_rsrc_str(name, &ent, 64); if (ent.type > 5) ent.type = 5; printk(KERN_INFO " %s: %s\n", vendor_fields[ent.type-1], name); } return 0; } static int __init nubus_get_board_resource(struct nubus_board* board, int slot, const struct nubus_dirent* parent) { struct nubus_dir dir; struct nubus_dirent ent; nubus_get_subdir(parent, &dir); if (console_loglevel >= 10) printk(KERN_DEBUG "nubus_get_board_resource: parent is 0x%p, dir is 0x%p\n", parent->base, dir.base); while(nubus_readdir(&dir, &ent) != -1) { switch (ent.type) { case NUBUS_RESID_TYPE: { unsigned short nbtdata[4]; /* This type is always the same, and is not useful except insofar as it tells us that we really are looking at a board resource. 
*/ nubus_get_rsrc_mem(nbtdata, &ent, 8); printk(KERN_INFO " type: [cat 0x%x type 0x%x hw 0x%x sw 0x%x]\n", nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]); if (nbtdata[0] != 1 || nbtdata[1] != 0 || nbtdata[2] != 0 || nbtdata[3] != 0) printk(KERN_ERR "this sResource is not a board resource!\n"); break; } case NUBUS_RESID_NAME: nubus_get_rsrc_str(board->name, &ent, 64); printk(KERN_INFO " name: %s\n", board->name); break; case NUBUS_RESID_ICON: nubus_get_icon(board, &ent); break; case NUBUS_RESID_BOARDID: printk(KERN_INFO " board id: 0x%x\n", ent.data); break; case NUBUS_RESID_PRIMARYINIT: printk(KERN_INFO " primary init offset: 0x%06x\n", ent.data); break; case NUBUS_RESID_VENDORINFO: nubus_get_vendorinfo(board, &ent); break; case NUBUS_RESID_FLAGS: printk(KERN_INFO " flags: 0x%06x\n", ent.data); break; case NUBUS_RESID_HWDEVID: printk(KERN_INFO " hwdevid: 0x%06x\n", ent.data); break; case NUBUS_RESID_SECONDINIT: printk(KERN_INFO " secondary init offset: 0x%06x\n", ent.data); break; /* WTF isn't this in the functional resources? */ case NUBUS_RESID_VIDNAMES: nubus_get_vidnames(board, &ent); break; /* Same goes for this */ case NUBUS_RESID_VIDMODES: printk(KERN_INFO " video mode parameter directory offset: 0x%06x\n", ent.data); break; default: printk(KERN_INFO " unknown resource %02X, data 0x%06x\n", ent.type, ent.data); } } return 0; } /* Attempt to bypass the somewhat non-obvious arrangement of sResources in the motherboard ROM */ static void __init nubus_find_rom_dir(struct nubus_board* board) { unsigned char* rp; unsigned char* romdir; struct nubus_dir dir; struct nubus_dirent ent; /* Check for the extra directory just under the format block */ rp = board->fblock; nubus_rewind(&rp, 4, board->lanes); if (nubus_get_rom(&rp, 4, board->lanes) != NUBUS_TEST_PATTERN) { /* OK, the ROM was telling the truth */ board->directory = board->fblock; nubus_move(&board->directory, nubus_expand32(board->doffset), board->lanes); return; } /* On "slot zero", you have to walk down a few more directories to get to the equivalent of a real card's root directory. We don't know what they were smoking when they came up with this. */ romdir = nubus_rom_addr(board->slot); nubus_rewind(&romdir, ROM_DIR_OFFSET, board->lanes); dir.base = dir.ptr = romdir; dir.done = 0; dir.mask = board->lanes; /* This one points to an "Unknown Macintosh" directory */ if (nubus_readdir(&dir, &ent) == -1) goto badrom; if (console_loglevel >= 10) printk(KERN_INFO "nubus_get_rom_dir: entry %02x %06x\n", ent.type, ent.data); /* This one takes us to where we want to go. */ if (nubus_readdir(&dir, &ent) == -1) goto badrom; if (console_loglevel >= 10) printk(KERN_DEBUG "nubus_get_rom_dir: entry %02x %06x\n", ent.type, ent.data); nubus_get_subdir(&ent, &dir); /* Resource ID 01, also an "Unknown Macintosh" */ if (nubus_readdir(&dir, &ent) == -1) goto badrom; if (console_loglevel >= 10) printk(KERN_DEBUG "nubus_get_rom_dir: entry %02x %06x\n", ent.type, ent.data); /* FIXME: the first one is *not* always the right one. We suspect this has something to do with the ROM revision. "The HORROR ROM" (LC-series) uses 0x7e, while "The HORROR Continues" (Q630) uses 0x7b. The DAFB Macs evidently use something else. Please run "Slots" on your Mac (see include/linux/nubus.h for where to get this program) and tell us where the 'SiDirPtr' for Slot 0 is. If you feel brave, you should also use MacsBug to walk down the ROM directories like this function does and try to find the path to that address... 
*/ if (nubus_readdir(&dir, &ent) == -1) goto badrom; if (console_loglevel >= 10) printk(KERN_DEBUG "nubus_get_rom_dir: entry %02x %06x\n", ent.type, ent.data); /* Bwahahahaha... */ nubus_get_subdir(&ent, &dir); board->directory = dir.base; return; /* Even more evil laughter... */ badrom: board->directory = board->fblock; nubus_move(&board->directory, nubus_expand32(board->doffset), board->lanes); printk(KERN_ERR "nubus_get_rom_dir: ROM weirdness! Notify the developers...\n"); } /* Add a board (might be many devices) to the list */ static struct nubus_board* __init nubus_add_board(int slot, int bytelanes) { struct nubus_board* board; struct nubus_board** boardp; unsigned char *rp; unsigned long dpat; struct nubus_dir dir; struct nubus_dirent ent; /* Move to the start of the format block */ rp = nubus_rom_addr(slot); nubus_rewind(&rp, FORMAT_BLOCK_SIZE, bytelanes); /* Actually we should probably panic if this fails */ if ((board = kzalloc(sizeof(*board), GFP_ATOMIC)) == NULL) return NULL; board->fblock = rp; /* Dump the format block for debugging purposes */ if (console_loglevel >= 10) { int i; printk(KERN_DEBUG "Slot %X, format block at 0x%p\n", slot, rp); printk(KERN_DEBUG "Format block: "); for (i = 0; i < FORMAT_BLOCK_SIZE; i += 4) { unsigned short foo, bar; foo = nubus_get_rom(&rp, 2, bytelanes); bar = nubus_get_rom(&rp, 2, bytelanes); printk("%04x %04x ", foo, bar); } printk("\n"); rp = board->fblock; } board->slot = slot; board->slot_addr = (unsigned long) nubus_slot_addr(slot); board->doffset = nubus_get_rom(&rp, 4, bytelanes); /* rom_length is *supposed* to be the total length of the * ROM. In practice it is the "amount of ROM used to compute * the CRC." So some jokers decide to set it to zero and * set the crc to zero so they don't have to do any math. * See the Performa 460 ROM, for example. Those Apple "engineers". */ board->rom_length = nubus_get_rom(&rp, 4, bytelanes); board->crc = nubus_get_rom(&rp, 4, bytelanes); board->rev = nubus_get_rom(&rp, 1, bytelanes); board->format = nubus_get_rom(&rp,1, bytelanes); board->lanes = bytelanes; /* Directory offset should be small and negative... */ if(!(board->doffset & 0x00FF0000)) printk(KERN_WARNING "Dodgy doffset!\n"); dpat = nubus_get_rom(&rp, 4, bytelanes); if(dpat != NUBUS_TEST_PATTERN) printk(KERN_WARNING "Wrong test pattern %08lx!\n", dpat); /* * I wonder how the CRC is meant to work - * any takers ? * CSA: According to MAC docs, not all cards pass the CRC anyway, * since the initial Macintosh ROM releases skipped the check. */ /* Attempt to work around slot zero weirdness */ nubus_find_rom_dir(board); nubus_get_root_dir(board, &dir); /* We're ready to rock */ printk(KERN_INFO "Slot %X:\n", slot); /* Each slot should have one board resource and any number of functional resources. So we'll fill in some fields in the struct nubus_board from the board resource, then walk down the list of functional resources, spinning out a nubus_dev for each of them. */ if (nubus_readdir(&dir, &ent) == -1) { /* We can't have this! */ printk(KERN_ERR "Board resource not found!\n"); return NULL; } else { printk(KERN_INFO " Board resource:\n"); nubus_get_board_resource(board, slot, &ent); } /* Aaaarrrrgghh! The LC III motherboard has *two* board resources. I have no idea WTF to do about this. 
*/ while (nubus_readdir(&dir, &ent) != -1) { struct nubus_dev* dev; struct nubus_dev** devp; dev = nubus_get_functional_resource(board, slot, &ent); if (dev == NULL) continue; /* We zeroed this out above */ if (board->first_dev == NULL) board->first_dev = dev; /* Put it on the global NuBus device chain. Keep entries in order. */ for (devp=&nubus_devices; *devp!=NULL; devp=&((*devp)->next)) /* spin */; *devp = dev; dev->next = NULL; } /* Put it on the global NuBus board chain. Keep entries in order. */ for (boardp=&nubus_boards; *boardp!=NULL; boardp=&((*boardp)->next)) /* spin */; *boardp = board; board->next = NULL; return board; } void __init nubus_probe_slot(int slot) { unsigned char dp; unsigned char* rp; int i; rp = nubus_rom_addr(slot); for(i = 4; i; i--) { unsigned long flags; int card_present; rp--; local_irq_save(flags); card_present = hwreg_present(rp); local_irq_restore(flags); if (!card_present) continue; printk(KERN_DEBUG "Now probing slot %X at %p\n", slot, rp); dp = *rp; if(dp == 0) continue; /* The last byte of the format block consists of two nybbles which are "mirror images" of each other. These show us the valid bytelanes */ if ((((dp>>4) ^ dp) & 0x0F) != 0x0F) continue; /* Check that this value is actually *on* one of the bytelanes it claims are valid! */ if ((dp & 0x0F) >= (1<<i)) continue; /* Looks promising. Let's put it on the list. */ nubus_add_board(slot, dp); return; } } #if defined(CONFIG_PROC_FS) /* /proc/nubus stuff */ static int sprint_nubus_board(struct nubus_board* board, char* ptr, int len) { if(len < 100) return -1; sprintf(ptr, "Slot %X: %s\n", board->slot, board->name); return strlen(ptr); } static int nubus_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { int nprinted, len, begin = 0; int size = PAGE_SIZE; struct nubus_board* board; len = sprintf(page, "Nubus devices found:\n"); /* Walk the list of NuBus boards */ for (board = nubus_boards; board != NULL; board = board->next) { nprinted = sprint_nubus_board(board, page + len, size - len); if (nprinted < 0) break; len += nprinted; if (len+begin < off) { begin += len; len = 0; } if (len+begin >= off+count) break; } if (len+begin < off) *eof = 1; off -= begin; *start = page + off; len -= off; if (len>count) len = count; if (len<0) len = 0; return len; } #endif void __init nubus_scan_bus(void) { int slot; /* This might not work on your machine */ #ifdef I_WANT_TO_PROBE_SLOT_ZERO nubus_probe_slot(0); #endif for(slot = 9; slot < 15; slot++) { nubus_probe_slot(slot); } } static int __init nubus_init(void) { if (!MACH_IS_MAC) return 0; /* Initialize the NuBus interrupts */ if (oss_present) { oss_nubus_init(); } else { via_nubus_init(); } #ifdef TRY_TO_DODGE_WSOD /* Rogue Ethernet interrupts can kill the machine if we don't do this. Obviously this is bogus. Hopefully the local VIA gurus can fix the real cause of the problem. */ mdelay(1000); #endif /* And probe */ printk("NuBus: Scanning NuBus slots.\n"); nubus_devices = NULL; nubus_boards = NULL; nubus_scan_bus(); #ifdef CONFIG_PROC_FS create_proc_read_entry("nubus", 0, NULL, nubus_read_proc, NULL); nubus_proc_init(); #endif return 0; } subsys_initcall(nubus_init);
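/*
 * Illustrative sketch, not part of nubus.c: the "bytelane" convention the
 * driver explains above.  The low four bits of the map say which byte
 * offsets within each longword actually carry ROM data; bytes on invalid
 * lanes are skipped while a value is assembled.  This standalone copy
 * mirrors the logic of not_useful()/nubus_get_rom() with hypothetical names.
 */
#include <stdint.h>

static int lane_valid(const uint8_t *p, int map)
{
	return map & (1 << ((uintptr_t)p & 3));
}

static unsigned long rom_read(const uint8_t **ptr, int len, int map)
{
	unsigned long v = 0;
	const uint8_t *p = *ptr;

	while (len--) {
		while (!lane_valid(p, map))
			p++;			/* skip lanes not wired to the ROM */
		v = (v << 8) | *p++;		/* big-endian assembly, as on NuBus */
	}
	*ptr = p;
	return v;
}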
gpl-2.0
MattCrystal/HTCOneLinaro
sound/pcmcia/vx/vxp_mixer.c
14608
4301
/* * Driver for Digigram VXpocket soundcards * * VX-pocket mixer * * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <sound/core.h> #include <sound/control.h> #include <sound/tlv.h> #include "vxpocket.h" #define MIC_LEVEL_MIN 0 #define MIC_LEVEL_MAX 8 /* * mic level control (for VXPocket) */ static int vx_mic_level_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = MIC_LEVEL_MAX; return 0; } static int vx_mic_level_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct vx_core *_chip = snd_kcontrol_chip(kcontrol); struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip; ucontrol->value.integer.value[0] = chip->mic_level; return 0; } static int vx_mic_level_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct vx_core *_chip = snd_kcontrol_chip(kcontrol); struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip; unsigned int val = ucontrol->value.integer.value[0]; if (val > MIC_LEVEL_MAX) return -EINVAL; mutex_lock(&_chip->mixer_mutex); if (chip->mic_level != ucontrol->value.integer.value[0]) { vx_set_mic_level(_chip, ucontrol->value.integer.value[0]); chip->mic_level = ucontrol->value.integer.value[0]; mutex_unlock(&_chip->mixer_mutex); return 1; } mutex_unlock(&_chip->mixer_mutex); return 0; } static const DECLARE_TLV_DB_SCALE(db_scale_mic, -21, 3, 0); static struct snd_kcontrol_new vx_control_mic_level = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Mic Capture Volume", .info = vx_mic_level_info, .get = vx_mic_level_get, .put = vx_mic_level_put, .tlv = { .p = db_scale_mic }, }; /* * mic boost level control (for VXP440) */ #define vx_mic_boost_info snd_ctl_boolean_mono_info static int vx_mic_boost_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct vx_core *_chip = snd_kcontrol_chip(kcontrol); struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip; ucontrol->value.integer.value[0] = chip->mic_level; return 0; } static int vx_mic_boost_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct vx_core *_chip = snd_kcontrol_chip(kcontrol); struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip; int val = !!ucontrol->value.integer.value[0]; mutex_lock(&_chip->mixer_mutex); if (chip->mic_level != val) { vx_set_mic_boost(_chip, val); chip->mic_level = val; mutex_unlock(&_chip->mixer_mutex); return 1; } mutex_unlock(&_chip->mixer_mutex); return 0; } static struct snd_kcontrol_new vx_control_mic_boost = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Mic Boost", .info = vx_mic_boost_info, .get = vx_mic_boost_get, .put = vx_mic_boost_put, }; int vxp_add_mic_controls(struct 
vx_core *_chip) { struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip; int err; /* mute input levels */ chip->mic_level = 0; switch (_chip->type) { case VX_TYPE_VXPOCKET: vx_set_mic_level(_chip, 0); break; case VX_TYPE_VXP440: vx_set_mic_boost(_chip, 0); break; } /* mic level */ switch (_chip->type) { case VX_TYPE_VXPOCKET: if ((err = snd_ctl_add(_chip->card, snd_ctl_new1(&vx_control_mic_level, chip))) < 0) return err; break; case VX_TYPE_VXP440: if ((err = snd_ctl_add(_chip->card, snd_ctl_new1(&vx_control_mic_boost, chip))) < 0) return err; break; } return 0; }
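/*
 * Illustrative sketch, not part of vxp_mixer.c: the ALSA "put" callback
 * convention the controls above follow - return 1 when the value really
 * changed (so the core sends a notification), 0 when it did not, and a
 * negative value on error, all while holding the mixer lock.  The structure
 * and names below are generic stand-ins, not driver symbols.
 */
#include <pthread.h>

struct example_mixer {
	pthread_mutex_t lock;
	int level;			/* cached copy of the hardware setting */
};

static int example_put_level(struct example_mixer *m, int val, int max)
{
	int changed = 0;

	if (val < 0 || val > max)
		return -1;		/* out of range, like the -EINVAL above */

	pthread_mutex_lock(&m->lock);
	if (m->level != val) {
		/* a real driver would program the hardware before caching */
		m->level = val;
		changed = 1;
	}
	pthread_mutex_unlock(&m->lock);
	return changed;
}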
gpl-2.0
JesusFreke/nook-linux-kernel
fs/9p/vfs_inode.c
17
25755
/* * linux/fs/9p/vfs_inode.c * * This file contains vfs inode ops for the 9P2000 protocol. * * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com> * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to: * Free Software Foundation * 51 Franklin Street, Fifth Floor * Boston, MA 02111-1301 USA * */ #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/pagemap.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/inet.h> #include <linux/namei.h> #include <linux/idr.h> #include <linux/sched.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include "v9fs.h" #include "v9fs_vfs.h" #include "fid.h" static const struct inode_operations v9fs_dir_inode_operations; static const struct inode_operations v9fs_dir_inode_operations_ext; static const struct inode_operations v9fs_file_inode_operations; static const struct inode_operations v9fs_symlink_inode_operations; /** * unixmode2p9mode - convert unix mode bits to plan 9 * @v9ses: v9fs session information * @mode: mode to convert * */ static int unixmode2p9mode(struct v9fs_session_info *v9ses, int mode) { int res; res = mode & 0777; if (S_ISDIR(mode)) res |= P9_DMDIR; if (v9fs_extended(v9ses)) { if (S_ISLNK(mode)) res |= P9_DMSYMLINK; if (v9ses->nodev == 0) { if (S_ISSOCK(mode)) res |= P9_DMSOCKET; if (S_ISFIFO(mode)) res |= P9_DMNAMEDPIPE; if (S_ISBLK(mode)) res |= P9_DMDEVICE; if (S_ISCHR(mode)) res |= P9_DMDEVICE; } if ((mode & S_ISUID) == S_ISUID) res |= P9_DMSETUID; if ((mode & S_ISGID) == S_ISGID) res |= P9_DMSETGID; if ((mode & S_ISVTX) == S_ISVTX) res |= P9_DMSETVTX; if ((mode & P9_DMLINK)) res |= P9_DMLINK; } return res; } /** * p9mode2unixmode- convert plan9 mode bits to unix mode bits * @v9ses: v9fs session information * @mode: mode to convert * */ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode) { int res; res = mode & 0777; if ((mode & P9_DMDIR) == P9_DMDIR) res |= S_IFDIR; else if ((mode & P9_DMSYMLINK) && (v9fs_extended(v9ses))) res |= S_IFLNK; else if ((mode & P9_DMSOCKET) && (v9fs_extended(v9ses)) && (v9ses->nodev == 0)) res |= S_IFSOCK; else if ((mode & P9_DMNAMEDPIPE) && (v9fs_extended(v9ses)) && (v9ses->nodev == 0)) res |= S_IFIFO; else if ((mode & P9_DMDEVICE) && (v9fs_extended(v9ses)) && (v9ses->nodev == 0)) res |= S_IFBLK; else res |= S_IFREG; if (v9fs_extended(v9ses)) { if ((mode & P9_DMSETUID) == P9_DMSETUID) res |= S_ISUID; if ((mode & P9_DMSETGID) == P9_DMSETGID) res |= S_ISGID; if ((mode & P9_DMSETVTX) == P9_DMSETVTX) res |= S_ISVTX; } return res; } /** * v9fs_uflags2omode- convert posix open flags to plan 9 mode bits * @uflags: flags to convert * @extended: if .u extensions are active */ int v9fs_uflags2omode(int uflags, int extended) { int ret; ret = 0; switch (uflags&3) { default: case O_RDONLY: ret = P9_OREAD; break; case O_WRONLY: ret = P9_OWRITE; break; case O_RDWR: ret = P9_ORDWR; break; } if (uflags & O_TRUNC) ret |= P9_OTRUNC; if (extended) { if (uflags & O_EXCL) ret |= P9_OEXCL; 
if (uflags & O_APPEND) ret |= P9_OAPPEND; } return ret; } /** * v9fs_blank_wstat - helper function to setup a 9P stat structure * @v9ses: 9P session info (for determining extended mode) * @wstat: structure to initialize * */ static void v9fs_blank_wstat(struct p9_wstat *wstat) { wstat->type = ~0; wstat->dev = ~0; wstat->qid.type = ~0; wstat->qid.version = ~0; *((long long *)&wstat->qid.path) = ~0; wstat->mode = ~0; wstat->atime = ~0; wstat->mtime = ~0; wstat->length = ~0; wstat->name = NULL; wstat->uid = NULL; wstat->gid = NULL; wstat->muid = NULL; wstat->n_uid = ~0; wstat->n_gid = ~0; wstat->n_muid = ~0; wstat->extension = NULL; } /** * v9fs_get_inode - helper function to setup an inode * @sb: superblock * @mode: mode to setup inode with * */ struct inode *v9fs_get_inode(struct super_block *sb, int mode) { struct inode *inode; struct v9fs_session_info *v9ses = sb->s_fs_info; P9_DPRINTK(P9_DEBUG_VFS, "super block: %p mode: %o\n", sb, mode); inode = new_inode(sb); if (inode) { inode->i_mode = mode; inode->i_uid = current->fsuid; inode->i_gid = current->fsgid; inode->i_blocks = 0; inode->i_rdev = 0; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inode->i_mapping->a_ops = &v9fs_addr_operations; switch (mode & S_IFMT) { case S_IFIFO: case S_IFBLK: case S_IFCHR: case S_IFSOCK: if (!v9fs_extended(v9ses)) { P9_DPRINTK(P9_DEBUG_ERROR, "special files without extended mode\n"); return ERR_PTR(-EINVAL); } init_special_inode(inode, inode->i_mode, inode->i_rdev); break; case S_IFREG: inode->i_op = &v9fs_file_inode_operations; inode->i_fop = &v9fs_file_operations; break; case S_IFLNK: if (!v9fs_extended(v9ses)) { P9_DPRINTK(P9_DEBUG_ERROR, "extended modes used w/o 9P2000.u\n"); return ERR_PTR(-EINVAL); } inode->i_op = &v9fs_symlink_inode_operations; break; case S_IFDIR: inc_nlink(inode); if (v9fs_extended(v9ses)) inode->i_op = &v9fs_dir_inode_operations_ext; else inode->i_op = &v9fs_dir_inode_operations; inode->i_fop = &v9fs_dir_operations; break; default: P9_DPRINTK(P9_DEBUG_ERROR, "BAD mode 0x%x S_IFMT 0x%x\n", mode, mode & S_IFMT); return ERR_PTR(-EINVAL); } } else { P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n"); return ERR_PTR(-ENOMEM); } return inode; } /* static struct v9fs_fid* v9fs_clone_walk(struct v9fs_session_info *v9ses, u32 fid, struct dentry *dentry) { int err; int nfid; struct v9fs_fid *ret; struct v9fs_fcall *fcall; nfid = v9fs_get_idpool(&v9ses->fidpool); if (nfid < 0) { eprintk(KERN_WARNING, "no free fids available\n"); return ERR_PTR(-ENOSPC); } err = v9fs_t_walk(v9ses, fid, nfid, (char *) dentry->d_name.name, &fcall); if (err < 0) { if (fcall && fcall->id == RWALK) goto clunk_fid; PRINT_FCALL_ERROR("walk error", fcall); v9fs_put_idpool(nfid, &v9ses->fidpool); goto error; } kfree(fcall); fcall = NULL; ret = v9fs_fid_create(v9ses, nfid); if (!ret) { err = -ENOMEM; goto clunk_fid; } err = v9fs_fid_insert(ret, dentry); if (err < 0) { v9fs_fid_destroy(ret); goto clunk_fid; } return ret; clunk_fid: v9fs_t_clunk(v9ses, nfid); error: kfree(fcall); return ERR_PTR(err); } */ /** * v9fs_inode_from_fid - populate an inode by issuing a attribute request * @v9ses: session information * @fid: fid to issue attribute request for * @sb: superblock on which to create inode * */ static struct inode * v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, struct super_block *sb) { int err, umode; struct inode *ret; struct p9_stat *st; ret = NULL; st = p9_client_stat(fid); if (IS_ERR(st)) { err = PTR_ERR(st); st = NULL; goto error; } umode = 
p9mode2unixmode(v9ses, st->mode); ret = v9fs_get_inode(sb, umode); if (IS_ERR(ret)) { err = PTR_ERR(ret); ret = NULL; goto error; } v9fs_stat2inode(st, ret, sb); ret->i_ino = v9fs_qid2ino(&st->qid); kfree(st); return ret; error: kfree(st); if (ret) iput(ret); return ERR_PTR(err); } /** * v9fs_remove - helper function to remove files and directories * @dir: directory inode that is being deleted * @file: dentry that is being deleted * @rmdir: removing a directory * */ static int v9fs_remove(struct inode *dir, struct dentry *file, int rmdir) { struct inode *file_inode; struct v9fs_session_info *v9ses; struct p9_fid *v9fid; P9_DPRINTK(P9_DEBUG_VFS, "inode: %p dentry: %p rmdir: %d\n", dir, file, rmdir); file_inode = file->d_inode; v9ses = v9fs_inode2v9ses(file_inode); v9fid = v9fs_fid_clone(file); if (IS_ERR(v9fid)) return PTR_ERR(v9fid); return p9_client_remove(v9fid); } static int v9fs_open_created(struct inode *inode, struct file *file) { return 0; } /** * v9fs_create - Create a file * @v9ses: session information * @dir: directory that dentry is being created in * @dentry: dentry that is being created * @perm: create permissions * @mode: open mode * @extension: 9p2000.u extension string to support devices, etc. * */ static struct p9_fid * v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir, struct dentry *dentry, char *extension, u32 perm, u8 mode) { int err; char *name; struct p9_fid *dfid, *ofid, *fid; struct inode *inode; err = 0; ofid = NULL; fid = NULL; name = (char *) dentry->d_name.name; dfid = v9fs_fid_clone(dentry->d_parent); if (IS_ERR(dfid)) { err = PTR_ERR(dfid); dfid = NULL; goto error; } /* clone a fid to use for creation */ ofid = p9_client_walk(dfid, 0, NULL, 1); if (IS_ERR(ofid)) { err = PTR_ERR(ofid); ofid = NULL; goto error; } err = p9_client_fcreate(ofid, name, perm, mode, extension); if (err < 0) goto error; /* now walk from the parent so we can get unopened fid */ fid = p9_client_walk(dfid, 1, &name, 0); if (IS_ERR(fid)) { err = PTR_ERR(fid); fid = NULL; goto error; } else dfid = NULL; /* instantiate inode and assign the unopened fid to the dentry */ inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto error; } if (v9ses->cache) dentry->d_op = &v9fs_cached_dentry_operations; else dentry->d_op = &v9fs_dentry_operations; d_instantiate(dentry, inode); v9fs_fid_add(dentry, fid); return ofid; error: if (dfid) p9_client_clunk(dfid); if (ofid) p9_client_clunk(ofid); if (fid) p9_client_clunk(fid); return ERR_PTR(err); } /** * v9fs_vfs_create - VFS hook to create files * @dir: directory inode that is being created * @dentry: dentry that is being deleted * @mode: create permissions * @nd: path information * */ static int v9fs_vfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd) { int err; u32 perm; int flags; struct v9fs_session_info *v9ses; struct p9_fid *fid; struct file *filp; err = 0; fid = NULL; v9ses = v9fs_inode2v9ses(dir); perm = unixmode2p9mode(v9ses, mode); if (nd && nd->flags & LOOKUP_OPEN) flags = nd->intent.open.flags - 1; else flags = O_RDWR; fid = v9fs_create(v9ses, dir, dentry, NULL, perm, v9fs_uflags2omode(flags, v9fs_extended(v9ses))); if (IS_ERR(fid)) { err = PTR_ERR(fid); fid = NULL; goto error; } /* if we are opening a file, assign the open fid to the file */ if (nd && nd->flags & LOOKUP_OPEN) { filp = lookup_instantiate_filp(nd, dentry, v9fs_open_created); if (IS_ERR(filp)) { err = PTR_ERR(filp); goto error; } filp->private_data = fid; } else p9_client_clunk(fid); 
return 0; error: if (fid) p9_client_clunk(fid); return err; } /** * v9fs_vfs_mkdir - VFS mkdir hook to create a directory * @dir: inode that is being unlinked * @dentry: dentry that is being unlinked * @mode: mode for new directory * */ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) { int err; u32 perm; struct v9fs_session_info *v9ses; struct p9_fid *fid; P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name); err = 0; v9ses = v9fs_inode2v9ses(dir); perm = unixmode2p9mode(v9ses, mode | S_IFDIR); fid = v9fs_create(v9ses, dir, dentry, NULL, perm, P9_OREAD); if (IS_ERR(fid)) { err = PTR_ERR(fid); fid = NULL; } if (fid) p9_client_clunk(fid); return err; } /** * v9fs_vfs_lookup - VFS lookup hook to "walk" to a new inode * @dir: inode that is being walked from * @dentry: dentry that is being walked to? * @nameidata: path data * */ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nameidata) { struct super_block *sb; struct v9fs_session_info *v9ses; struct p9_fid *dfid, *fid; struct inode *inode; char *name; int result = 0; P9_DPRINTK(P9_DEBUG_VFS, "dir: %p dentry: (%s) %p nameidata: %p\n", dir, dentry->d_name.name, dentry, nameidata); sb = dir->i_sb; v9ses = v9fs_inode2v9ses(dir); dfid = v9fs_fid_lookup(dentry->d_parent); if (IS_ERR(dfid)) return ERR_CAST(dfid); name = (char *) dentry->d_name.name; fid = p9_client_walk(dfid, 1, &name, 1); if (IS_ERR(fid)) { result = PTR_ERR(fid); if (result == -ENOENT) { d_add(dentry, NULL); return NULL; } return ERR_PTR(result); } inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb); if (IS_ERR(inode)) { result = PTR_ERR(inode); inode = NULL; goto error; } result = v9fs_fid_add(dentry, fid); if (result < 0) goto error; if ((fid->qid.version) && (v9ses->cache)) dentry->d_op = &v9fs_cached_dentry_operations; else dentry->d_op = &v9fs_dentry_operations; d_add(dentry, inode); return NULL; error: p9_client_clunk(fid); return ERR_PTR(result); } /** * v9fs_vfs_unlink - VFS unlink hook to delete an inode * @i: inode that is being unlinked * @d: dentry that is being unlinked * */ static int v9fs_vfs_unlink(struct inode *i, struct dentry *d) { return v9fs_remove(i, d, 0); } /** * v9fs_vfs_rmdir - VFS unlink hook to delete a directory * @i: inode that is being unlinked * @d: dentry that is being unlinked * */ static int v9fs_vfs_rmdir(struct inode *i, struct dentry *d) { return v9fs_remove(i, d, 1); } /** * v9fs_vfs_rename - VFS hook to rename an inode * @old_dir: old dir inode * @old_dentry: old dentry * @new_dir: new dir inode * @new_dentry: new dentry * */ static int v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct inode *old_inode; struct v9fs_session_info *v9ses; struct p9_fid *oldfid; struct p9_fid *olddirfid; struct p9_fid *newdirfid; struct p9_wstat wstat; int retval; P9_DPRINTK(P9_DEBUG_VFS, "\n"); retval = 0; old_inode = old_dentry->d_inode; v9ses = v9fs_inode2v9ses(old_inode); oldfid = v9fs_fid_lookup(old_dentry); if (IS_ERR(oldfid)) return PTR_ERR(oldfid); olddirfid = v9fs_fid_clone(old_dentry->d_parent); if (IS_ERR(olddirfid)) { retval = PTR_ERR(olddirfid); goto done; } newdirfid = v9fs_fid_clone(new_dentry->d_parent); if (IS_ERR(newdirfid)) { retval = PTR_ERR(newdirfid); goto clunk_olddir; } /* 9P can only handle file rename in the same directory */ if (memcmp(&olddirfid->qid, &newdirfid->qid, sizeof(newdirfid->qid))) { P9_DPRINTK(P9_DEBUG_ERROR, "old dir and new dir are different\n"); retval = -EXDEV; 
goto clunk_newdir; } v9fs_blank_wstat(&wstat); wstat.muid = v9ses->uname; wstat.name = (char *) new_dentry->d_name.name; retval = p9_client_wstat(oldfid, &wstat); clunk_newdir: p9_client_clunk(newdirfid); clunk_olddir: p9_client_clunk(olddirfid); done: return retval; } /** * v9fs_vfs_getattr - retrieve file metadata * @mnt: mount information * @dentry: file to get attributes on * @stat: metadata structure to populate * */ static int v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { int err; struct v9fs_session_info *v9ses; struct p9_fid *fid; struct p9_stat *st; P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry); err = -EPERM; v9ses = v9fs_inode2v9ses(dentry->d_inode); if (v9ses->cache == CACHE_LOOSE) return simple_getattr(mnt, dentry, stat); fid = v9fs_fid_lookup(dentry); if (IS_ERR(fid)) return PTR_ERR(fid); st = p9_client_stat(fid); if (IS_ERR(st)) return PTR_ERR(st); v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb); generic_fillattr(dentry->d_inode, stat); kfree(st); return 0; } /** * v9fs_vfs_setattr - set file metadata * @dentry: file whose metadata to set * @iattr: metadata assignment structure * */ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr) { int retval; struct v9fs_session_info *v9ses; struct p9_fid *fid; struct p9_wstat wstat; P9_DPRINTK(P9_DEBUG_VFS, "\n"); retval = -EPERM; v9ses = v9fs_inode2v9ses(dentry->d_inode); fid = v9fs_fid_lookup(dentry); if(IS_ERR(fid)) return PTR_ERR(fid); v9fs_blank_wstat(&wstat); if (iattr->ia_valid & ATTR_MODE) wstat.mode = unixmode2p9mode(v9ses, iattr->ia_mode); if (iattr->ia_valid & ATTR_MTIME) wstat.mtime = iattr->ia_mtime.tv_sec; if (iattr->ia_valid & ATTR_ATIME) wstat.atime = iattr->ia_atime.tv_sec; if (iattr->ia_valid & ATTR_SIZE) wstat.length = iattr->ia_size; if (v9fs_extended(v9ses)) { if (iattr->ia_valid & ATTR_UID) wstat.n_uid = iattr->ia_uid; if (iattr->ia_valid & ATTR_GID) wstat.n_gid = iattr->ia_gid; } retval = p9_client_wstat(fid, &wstat); if (retval >= 0) retval = inode_setattr(dentry->d_inode, iattr); return retval; } /** * v9fs_stat2inode - populate an inode structure with mistat info * @stat: Plan 9 metadata (mistat) structure * @inode: inode to populate * @sb: superblock of filesystem * */ void v9fs_stat2inode(struct p9_stat *stat, struct inode *inode, struct super_block *sb) { int n; char ext[32]; struct v9fs_session_info *v9ses = sb->s_fs_info; inode->i_nlink = 1; inode->i_atime.tv_sec = stat->atime; inode->i_mtime.tv_sec = stat->mtime; inode->i_ctime.tv_sec = stat->mtime; inode->i_uid = v9ses->dfltuid; inode->i_gid = v9ses->dfltgid; if (v9fs_extended(v9ses)) { inode->i_uid = stat->n_uid; inode->i_gid = stat->n_gid; } inode->i_mode = p9mode2unixmode(v9ses, stat->mode); if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode))) { char type = 0; int major = -1; int minor = -1; n = stat->extension.len; if (n > sizeof(ext)-1) n = sizeof(ext)-1; memmove(ext, stat->extension.str, n); ext[n] = 0; sscanf(ext, "%c %u %u", &type, &major, &minor); switch (type) { case 'c': inode->i_mode &= ~S_IFBLK; inode->i_mode |= S_IFCHR; break; case 'b': break; default: P9_DPRINTK(P9_DEBUG_ERROR, "Unknown special type %c (%.*s)\n", type, stat->extension.len, stat->extension.str); }; inode->i_rdev = MKDEV(major, minor); } else inode->i_rdev = 0; inode->i_size = stat->length; /* not real number of blocks, but 512 byte ones ... 
*/ inode->i_blocks = (inode->i_size + 512 - 1) >> 9; } /** * v9fs_qid2ino - convert qid into inode number * @qid: qid to hash * * BUG: potential for inode number collisions? */ ino_t v9fs_qid2ino(struct p9_qid *qid) { u64 path = qid->path + 2; ino_t i = 0; if (sizeof(ino_t) == sizeof(path)) memcpy(&i, &path, sizeof(ino_t)); else i = (ino_t) (path ^ (path >> 32)); return i; } /** * v9fs_readlink - read a symlink's location (internal version) * @dentry: dentry for symlink * @buffer: buffer to load symlink location into * @buflen: length of buffer * */ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen) { int retval; struct v9fs_session_info *v9ses; struct p9_fid *fid; struct p9_stat *st; P9_DPRINTK(P9_DEBUG_VFS, " %s\n", dentry->d_name.name); retval = -EPERM; v9ses = v9fs_inode2v9ses(dentry->d_inode); fid = v9fs_fid_lookup(dentry); if (IS_ERR(fid)) return PTR_ERR(fid); if (!v9fs_extended(v9ses)) return -EBADF; st = p9_client_stat(fid); if (IS_ERR(st)) return PTR_ERR(st); if (!(st->mode & P9_DMSYMLINK)) { retval = -EINVAL; goto done; } /* copy extension buffer into buffer */ if (st->extension.len < buflen) buflen = st->extension.len + 1; memmove(buffer, st->extension.str, buflen - 1); buffer[buflen-1] = 0; P9_DPRINTK(P9_DEBUG_VFS, "%s -> %.*s (%s)\n", dentry->d_name.name, st->extension.len, st->extension.str, buffer); retval = buflen; done: kfree(st); return retval; } /** * v9fs_vfs_readlink - read a symlink's location * @dentry: dentry for symlink * @buffer: buffer to load symlink location into * @buflen: length of buffer * */ static int v9fs_vfs_readlink(struct dentry *dentry, char __user * buffer, int buflen) { int retval; int ret; char *link = __getname(); if (unlikely(!link)) return -ENOMEM; if (buflen > PATH_MAX) buflen = PATH_MAX; P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_iname, dentry); retval = v9fs_readlink(dentry, link, buflen); if (retval > 0) { if ((ret = copy_to_user(buffer, link, retval)) != 0) { P9_DPRINTK(P9_DEBUG_ERROR, "problem copying to user: %d\n", ret); retval = ret; } } __putname(link); return retval; } /** * v9fs_vfs_follow_link - follow a symlink path * @dentry: dentry for symlink * @nd: nameidata * */ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd) { int len = 0; char *link = __getname(); P9_DPRINTK(P9_DEBUG_VFS, "%s n", dentry->d_name.name); if (!link) link = ERR_PTR(-ENOMEM); else { len = v9fs_readlink(dentry, link, PATH_MAX); if (len < 0) { __putname(link); link = ERR_PTR(len); } else link[len] = 0; } nd_set_link(nd, link); return NULL; } /** * v9fs_vfs_put_link - release a symlink path * @dentry: dentry for symlink * @nd: nameidata * @p: unused * */ static void v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p) { char *s = nd_get_link(nd); P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name, s); if (!IS_ERR(s)) __putname(s); } /** * v9fs_vfs_mkspecial - create a special file * @dir: inode to create special file in * @dentry: dentry to create * @mode: mode to create special file * @extension: 9p2000.u format extension string representing special file * */ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry, int mode, const char *extension) { u32 perm; struct v9fs_session_info *v9ses; struct p9_fid *fid; v9ses = v9fs_inode2v9ses(dir); if (!v9fs_extended(v9ses)) { P9_DPRINTK(P9_DEBUG_ERROR, "not extended\n"); return -EPERM; } perm = unixmode2p9mode(v9ses, mode); fid = v9fs_create(v9ses, dir, dentry, (char *) extension, perm, P9_OREAD); if (IS_ERR(fid)) 
return PTR_ERR(fid); p9_client_clunk(fid); return 0; } /** * v9fs_vfs_symlink - helper function to create symlinks * @dir: directory inode containing symlink * @dentry: dentry for symlink * @symname: symlink data * * See Also: 9P2000.u RFC for more information * */ static int v9fs_vfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { P9_DPRINTK(P9_DEBUG_VFS, " %lu,%s,%s\n", dir->i_ino, dentry->d_name.name, symname); return v9fs_vfs_mkspecial(dir, dentry, S_IFLNK, symname); } /** * v9fs_vfs_link - create a hardlink * @old_dentry: dentry for file to link to * @dir: inode destination for new link * @dentry: dentry for link * */ static int v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { int retval; struct p9_fid *oldfid; char *name; P9_DPRINTK(P9_DEBUG_VFS, " %lu,%s,%s\n", dir->i_ino, dentry->d_name.name, old_dentry->d_name.name); oldfid = v9fs_fid_clone(old_dentry); if (IS_ERR(oldfid)) return PTR_ERR(oldfid); name = __getname(); if (unlikely(!name)) { retval = -ENOMEM; goto clunk_fid; } sprintf(name, "%d\n", oldfid->fid); retval = v9fs_vfs_mkspecial(dir, dentry, P9_DMLINK, name); __putname(name); clunk_fid: p9_client_clunk(oldfid); return retval; } /** * v9fs_vfs_mknod - create a special file * @dir: inode destination for new link * @dentry: dentry for file * @mode: mode for creation * @rdev: device associated with special file * */ static int v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) { int retval; char *name; P9_DPRINTK(P9_DEBUG_VFS, " %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino, dentry->d_name.name, mode, MAJOR(rdev), MINOR(rdev)); if (!new_valid_dev(rdev)) return -EINVAL; name = __getname(); if (!name) return -ENOMEM; /* build extension */ if (S_ISBLK(mode)) sprintf(name, "b %u %u", MAJOR(rdev), MINOR(rdev)); else if (S_ISCHR(mode)) sprintf(name, "c %u %u", MAJOR(rdev), MINOR(rdev)); else if (S_ISFIFO(mode)) *name = 0; else { __putname(name); return -EINVAL; } retval = v9fs_vfs_mkspecial(dir, dentry, mode, name); __putname(name); return retval; } static const struct inode_operations v9fs_dir_inode_operations_ext = { .create = v9fs_vfs_create, .lookup = v9fs_vfs_lookup, .symlink = v9fs_vfs_symlink, .link = v9fs_vfs_link, .unlink = v9fs_vfs_unlink, .mkdir = v9fs_vfs_mkdir, .rmdir = v9fs_vfs_rmdir, .mknod = v9fs_vfs_mknod, .rename = v9fs_vfs_rename, .readlink = v9fs_vfs_readlink, .getattr = v9fs_vfs_getattr, .setattr = v9fs_vfs_setattr, }; static const struct inode_operations v9fs_dir_inode_operations = { .create = v9fs_vfs_create, .lookup = v9fs_vfs_lookup, .unlink = v9fs_vfs_unlink, .mkdir = v9fs_vfs_mkdir, .rmdir = v9fs_vfs_rmdir, .mknod = v9fs_vfs_mknod, .rename = v9fs_vfs_rename, .getattr = v9fs_vfs_getattr, .setattr = v9fs_vfs_setattr, }; static const struct inode_operations v9fs_file_inode_operations = { .getattr = v9fs_vfs_getattr, .setattr = v9fs_vfs_setattr, }; static const struct inode_operations v9fs_symlink_inode_operations = { .readlink = v9fs_vfs_readlink, .follow_link = v9fs_vfs_follow_link, .put_link = v9fs_vfs_put_link, .getattr = v9fs_vfs_getattr, .setattr = v9fs_vfs_setattr, };
gpl-2.0
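A note on the mode-translation pair in the file above: unixmode2p9mode() and p9mode2unixmode() pass the rwx permission bits (0777) through unchanged and remap only the file-type and set-id bits between the POSIX S_IF*/S_IS* encoding and the Plan 9 DM* flag bits. A minimal userspace sketch of that round trip, using hypothetical DM_* stand-ins for the real P9_DM* flags from <net/9p/9p.h>:

#include <stdio.h>
#include <sys/stat.h>

/* Hypothetical stand-ins for the P9_DM* flags; values are illustrative only. */
#define DM_DIR     0x80000000u
#define DM_SYMLINK 0x02000000u

/* Minimal analogue of unixmode2p9mode(): keep rwx bits, map the type. */
static unsigned int unix_to_p9(mode_t mode)
{
	unsigned int res = mode & 0777;

	if (S_ISDIR(mode))
		res |= DM_DIR;
	else if (S_ISLNK(mode))
		res |= DM_SYMLINK;
	return res;
}

/* Minimal analogue of p9mode2unixmode(): a regular file is the fallback. */
static mode_t p9_to_unix(unsigned int p9mode)
{
	mode_t res = p9mode & 0777;

	if (p9mode & DM_DIR)
		res |= S_IFDIR;
	else if (p9mode & DM_SYMLINK)
		res |= S_IFLNK;
	else
		res |= S_IFREG;
	return res;
}

int main(void)
{
	mode_t m = S_IFDIR | 0755;
	unsigned int p9 = unix_to_p9(m);

	printf("0%o -> 0x%x -> 0%o\n", (unsigned int)m, p9,
	       (unsigned int)p9_to_unix(p9));
	return 0;
}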
botioni/aml_linux_kernel
drivers/amlogic/bluetooth/bt-device.c
17
5394
/* * * arch/arm/mach-meson/bcm-bt.c * * Copyright (C) 2010 AMLOGIC, INC. * * License terms: GNU General Public License (GPL) version 2 * Platform machine definition. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/leds.h> #include <linux/gpio.h> #include <linux/rfkill.h> #include <linux/bt-device.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #ifdef CONFIG_HAS_EARLYSUSPEND #include <linux/earlysuspend.h> static struct early_suspend bt_early_suspend; #endif extern struct bt_dev_data bt_dev; void rfkill_switch_all(enum rfkill_type type, bool blocked); #if 0 static unsigned long bt_baud; extern int get_baud(int line); extern void set_baud(int line, unsigned long newbaud); static struct delayed_work btwork; static void bt_reset_workqueue(struct work_struct *work) { struct hci_dev *hdev; printk("bt reset\n"); if( hdev = hci_dev_get(0)){ hci_resume_dev(hdev); } } #endif static int bt_set_block(void *data, bool blocked) { pr_info("BT_RADIO going: %s\n", blocked ? "off" : "on"); if (!blocked) { pr_info("BCM_BT: going ON\n"); if (NULL != bt_dev.bt_dev_on) { bt_dev.bt_dev_on(); } } else { pr_info("BCM_BT: going OFF\n"); if (NULL != bt_dev.bt_dev_off) { bt_dev.bt_dev_off(); } } return 0; } static const struct rfkill_ops bt_rfkill_ops = { .set_block = bt_set_block, }; static int bt_earlysuspend(struct platform_device *pdev, pm_message_t state) { #if 0 struct hci_dev *hdev; pr_info("BCM_BT: going early suspend\n"); if( hdev = hci_dev_get(0)){ if (NULL != bt_dev.bt_dev_suspend) { bt_dev.bt_dev_suspend(); } } #endif return 0; } static int bt_lateresume(struct platform_device *pdev) { #if 0 struct hci_dev *hdev; pr_info("BCM_BT: going later resume\n"); if( hdev = hci_dev_get(0)){ if (NULL != bt_dev.bt_dev_resume) { bt_dev.bt_dev_resume(); } /* when call the hci_dev_open after hci_dev_close, the bt will be restart */ //hci_dev_open(0); } #endif return 0; } static int bt_suspend(struct platform_device *pdev, pm_message_t state) { #if 0 struct hci_dev *hdev; pr_info("BCM_BT: going suspend\n"); if( hdev = hci_dev_get(0)){ if (NULL != bt_dev.bt_dev_off) { bt_dev.bt_dev_off(); } /* if we do not power off bt , we should restore uart baud */ //bt_baud = get_baud(1); } #endif return 0; } static int bt_resume(struct platform_device *pdev) { #if 0 struct hci_dev *hdev; pr_info("BCM_BT: going resume\n"); if( hdev = hci_dev_get(0)){ if (NULL != bt_dev.bt_dev_on) { bt_dev.bt_dev_on(); } //set_baud(1, bt_baud); //hci_dev_close(0); schedule_delayed_work(&btwork, 100); } #endif return 0; } static int __init bt_probe(struct platform_device *pdev) { int rc = 0; struct rfkill *bt_rfk; #if 0 INIT_DELAYED_WORK(&btwork, bt_reset_workqueue); #endif /* default to bluetooth off */ //rfkill_switch_all(RFKILL_TYPE_BLUETOOTH, 1); if (NULL != bt_dev.bt_dev_off) { bt_dev.bt_dev_off(); } bt_rfk = rfkill_alloc("bt-dev", &pdev->dev, RFKILL_TYPE_BLUETOOTH, &bt_rfkill_ops, NULL); if (!bt_rfk) { printk("rfk alloc fail\n"); rc = -ENOMEM; goto err_rfk_alloc; } /* if not set false, the bt_set_block will call when rfkill class resume */ rfkill_init_sw_state(bt_rfk, false); //we want to reset bt when system resume rc = rfkill_register(bt_rfk); if (rc){ printk("rfkill_register fail\n"); goto err_rfkill; } platform_set_drvdata(pdev, bt_rfk); #ifdef CONFIG_HAS_EARLYSUSPEND bt_early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB; bt_early_suspend.suspend = bt_earlysuspend; bt_early_suspend.resume = bt_lateresume; 
bt_early_suspend.param = pdev; register_early_suspend(&bt_early_suspend); #endif return 0; err_rfkill: rfkill_destroy(bt_rfk); err_rfk_alloc: return rc; } static int bt_remove(struct platform_device *pdev) { struct rfkill *rfk = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); if (rfk) { rfkill_unregister(rfk); rfkill_destroy(rfk); } rfk = NULL; return 0; } static struct platform_driver bt_driver = { .driver = { .name = "bt-dev", }, .probe = bt_probe, .remove = bt_remove, .suspend = bt_suspend, .resume = bt_resume, }; static int __init bt_init(void) { printk("amlogic rfkill init\n"); if (NULL != bt_dev.bt_dev_init) { bt_dev.bt_dev_init(); } return platform_driver_register(&bt_driver); } static void __exit bt_exit(void) { platform_driver_unregister(&bt_driver); } module_init(bt_init); module_exit(bt_exit); MODULE_DESCRIPTION("bt rfkill"); MODULE_AUTHOR(""); MODULE_LICENSE("GPL");
gpl-2.0
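One behavioural detail in bt_set_block() above is easy to miss: the rfkill core hands the driver a soft-block flag, so blocked == true means the radio must be powered off and blocked == false means it may be powered on. A small userspace sketch of that inverted convention, with a stand-in callback in place of the real rfkill_ops hook:

#include <stdbool.h>
#include <stdio.h>

/* Toy radio state; the real driver toggles power via the bt_dev callbacks. */
struct fake_radio {
	bool powered;
};

/* Mirrors the bt_set_block() convention: blocked -> off, unblocked -> on. */
static int fake_set_block(void *data, bool blocked)
{
	struct fake_radio *r = data;

	r->powered = !blocked;
	printf("radio going %s\n", blocked ? "off" : "on");
	return 0;
}

int main(void)
{
	struct fake_radio r = { .powered = false };

	fake_set_block(&r, false);	/* unblock -> power on  */
	fake_set_block(&r, true);	/* block   -> power off */
	return 0;
}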
IngenicSemiconductor/KERNEL-WARRIOR
drivers/net/wireless/rtl818x/rtl8188eu/os_dep/osdep_service.c
17
37111
/****************************************************************************** * * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * ******************************************************************************/ #define _OSDEP_SERVICE_C_ #include <drv_conf.h> #include <osdep_service.h> #include <drv_types.h> #include <recv_osdep.h> #ifdef PLATFORM_LINUX #include <linux/vmalloc.h> #endif #ifdef PLATFORM_FREEBSD #include <sys/malloc.h> #include <sys/time.h> #endif /* PLATFORM_FREEBSD */ #ifdef RTK_DMP_PLATFORM #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,12)) #include <linux/pageremap.h> #endif #endif #define RT_TAG '1178' #ifdef DBG_MEMORY_LEAK #ifdef PLATFORM_LINUX #include <asm/atomic.h> atomic_t _malloc_cnt = ATOMIC_INIT(0); atomic_t _malloc_size = ATOMIC_INIT(0); #endif #endif /* DBG_MEMORY_LEAK */ #if defined(PLATFORM_LINUX) /* * Translate the OS dependent @param error_code to OS independent RTW_STATUS_CODE * @return: one of RTW_STATUS_CODE */ inline int RTW_STATUS_CODE(int error_code){ if(error_code >=0) return _SUCCESS; switch(error_code) { //case -ETIMEDOUT: // return RTW_STATUS_TIMEDOUT; default: return _FAIL; } } #else inline int RTW_STATUS_CODE(int error_code){ return error_code; } #endif u32 rtw_atoi(u8* s) { int num=0,flag=0; int i; for(i=0;i<=strlen(s);i++) { if(s[i] >= '0' && s[i] <= '9') num = num * 10 + s[i] -'0'; else if(s[0] == '-' && i==0) flag =1; else break; } if(flag == 1) num = num * -1; return(num); } inline u8* _rtw_vmalloc(u32 sz) { u8 *pbuf; #ifdef PLATFORM_LINUX pbuf = vmalloc(sz); #endif #ifdef PLATFORM_FREEBSD pbuf = malloc(sz,M_DEVBUF,M_NOWAIT); #endif #ifdef PLATFORM_WINDOWS NdisAllocateMemoryWithTag(&pbuf,sz, RT_TAG); #endif #ifdef DBG_MEMORY_LEAK #ifdef PLATFORM_LINUX if ( pbuf != NULL) { atomic_inc(&_malloc_cnt); atomic_add(sz, &_malloc_size); } #endif #endif /* DBG_MEMORY_LEAK */ return pbuf; } inline u8* _rtw_zvmalloc(u32 sz) { u8 *pbuf; #ifdef PLATFORM_LINUX pbuf = _rtw_vmalloc(sz); if (pbuf != NULL) memset(pbuf, 0, sz); #endif #ifdef PLATFORM_FREEBSD pbuf = malloc(sz,M_DEVBUF,M_ZERO|M_NOWAIT); #endif #ifdef PLATFORM_WINDOWS NdisAllocateMemoryWithTag(&pbuf,sz, RT_TAG); if (pbuf != NULL) NdisFillMemory(pbuf, sz, 0); #endif return pbuf; } inline void _rtw_vmfree(u8 *pbuf, u32 sz) { #ifdef PLATFORM_LINUX vfree(pbuf); #endif #ifdef PLATFORM_FREEBSD free(pbuf,M_DEVBUF); #endif #ifdef PLATFORM_WINDOWS NdisFreeMemory(pbuf,sz, 0); #endif #ifdef DBG_MEMORY_LEAK #ifdef PLATFORM_LINUX atomic_dec(&_malloc_cnt); atomic_sub(sz, &_malloc_size); #endif #endif /* DBG_MEMORY_LEAK */ } u8* _rtw_malloc(u32 sz) { u8 *pbuf=NULL; #ifdef PLATFORM_LINUX #ifdef RTK_DMP_PLATFORM if(sz > 0x4000) pbuf = (u8 *)dvr_malloc(sz); else #endif pbuf = kmalloc(sz,in_interrupt() ? 
GFP_ATOMIC : GFP_KERNEL); #endif #ifdef PLATFORM_FREEBSD pbuf = malloc(sz,M_DEVBUF,M_NOWAIT); #endif #ifdef PLATFORM_WINDOWS NdisAllocateMemoryWithTag(&pbuf,sz, RT_TAG); #endif #ifdef DBG_MEMORY_LEAK #ifdef PLATFORM_LINUX if ( pbuf != NULL) { atomic_inc(&_malloc_cnt); atomic_add(sz, &_malloc_size); } #endif #endif /* DBG_MEMORY_LEAK */ return pbuf; } u8* _rtw_zmalloc(u32 sz) { #ifdef PLATFORM_FREEBSD return malloc(sz,M_DEVBUF,M_ZERO|M_NOWAIT); #else // PLATFORM_FREEBSD u8 *pbuf = _rtw_malloc(sz); if (pbuf != NULL) { #ifdef PLATFORM_LINUX memset(pbuf, 0, sz); #endif #ifdef PLATFORM_WINDOWS NdisFillMemory(pbuf, sz, 0); #endif } return pbuf; #endif // PLATFORM_FREEBSD } void _rtw_mfree(u8 *pbuf, u32 sz) { #ifdef PLATFORM_LINUX #ifdef RTK_DMP_PLATFORM if(sz > 0x4000) dvr_free(pbuf); else #endif kfree(pbuf); #endif #ifdef PLATFORM_FREEBSD free(pbuf,M_DEVBUF); #endif #ifdef PLATFORM_WINDOWS NdisFreeMemory(pbuf,sz, 0); #endif #ifdef DBG_MEMORY_LEAK #ifdef PLATFORM_LINUX atomic_dec(&_malloc_cnt); atomic_sub(sz, &_malloc_size); #endif #endif /* DBG_MEMORY_LEAK */ } #ifdef DBG_MEM_ALLOC struct rtw_dbg_mem_stat { ATOMIC_T vir_alloc; // the memory bytes we allocate now ATOMIC_T vir_peak; // the peak memory bytes we allocate ATOMIC_T vir_alloc_err; // the error times we fail to allocate memory ATOMIC_T phy_alloc; ATOMIC_T phy_peak; ATOMIC_T phy_alloc_err; ATOMIC_T tx_alloc; ATOMIC_T tx_peak; ATOMIC_T tx_alloc_err; ATOMIC_T rx_alloc; ATOMIC_T rx_peak; ATOMIC_T rx_alloc_err; } rtw_dbg_mem_stat; void rtw_dump_mem_stat (void) { int vir_alloc, vir_peak, vir_alloc_err, phy_alloc, phy_peak, phy_alloc_err; int tx_alloc, tx_peak, tx_alloc_err, rx_alloc, rx_peak, rx_alloc_err; vir_alloc=ATOMIC_READ(&rtw_dbg_mem_stat.vir_alloc); vir_peak=ATOMIC_READ(&rtw_dbg_mem_stat.vir_peak); vir_alloc_err=ATOMIC_READ(&rtw_dbg_mem_stat.vir_alloc_err); phy_alloc=ATOMIC_READ(&rtw_dbg_mem_stat.phy_alloc); phy_peak=ATOMIC_READ(&rtw_dbg_mem_stat.phy_peak); phy_alloc_err=ATOMIC_READ(&rtw_dbg_mem_stat.phy_alloc_err); tx_alloc=ATOMIC_READ(&rtw_dbg_mem_stat.tx_alloc); tx_peak=ATOMIC_READ(&rtw_dbg_mem_stat.tx_peak); tx_alloc_err=ATOMIC_READ(&rtw_dbg_mem_stat.tx_alloc_err); rx_alloc=ATOMIC_READ(&rtw_dbg_mem_stat.rx_alloc); rx_peak=ATOMIC_READ(&rtw_dbg_mem_stat.rx_peak); rx_alloc_err=ATOMIC_READ(&rtw_dbg_mem_stat.rx_alloc_err); DBG_871X( "vir_alloc:%d, vir_peak:%d, vir_alloc_err:%d\n" "phy_alloc:%d, phy_peak:%d, phy_alloc_err:%d\n" "tx_alloc:%d, tx_peak:%d, tx_alloc_err:%d\n" "rx_alloc:%d, rx_peak:%d, rx_alloc_err:%d\n" , vir_alloc, vir_peak, vir_alloc_err , phy_alloc, phy_peak, phy_alloc_err , tx_alloc, tx_peak, tx_alloc_err , rx_alloc, rx_peak, rx_alloc_err ); } void rtw_update_mem_stat(u8 flag, u32 sz) { static u32 update_time = 0; int peak, alloc; if(!update_time) { ATOMIC_SET(&rtw_dbg_mem_stat.vir_alloc,0); ATOMIC_SET(&rtw_dbg_mem_stat.vir_peak,0); ATOMIC_SET(&rtw_dbg_mem_stat.vir_alloc_err,0); ATOMIC_SET(&rtw_dbg_mem_stat.phy_alloc,0); ATOMIC_SET(&rtw_dbg_mem_stat.phy_peak,0); ATOMIC_SET(&rtw_dbg_mem_stat.phy_alloc_err,0); } switch(flag) { case MEM_STAT_VIR_ALLOC_SUCCESS: alloc = ATOMIC_ADD_RETURN(&rtw_dbg_mem_stat.vir_alloc, sz); peak=ATOMIC_READ(&rtw_dbg_mem_stat.vir_peak); if (peak<alloc) ATOMIC_SET(&rtw_dbg_mem_stat.vir_peak, alloc); break; case MEM_STAT_VIR_ALLOC_FAIL: ATOMIC_INC(&rtw_dbg_mem_stat.vir_alloc_err); break; case MEM_STAT_VIR_FREE: alloc = ATOMIC_SUB_RETURN(&rtw_dbg_mem_stat.vir_alloc, sz); break; case MEM_STAT_PHY_ALLOC_SUCCESS: alloc = ATOMIC_ADD_RETURN(&rtw_dbg_mem_stat.phy_alloc, sz); 
peak=ATOMIC_READ(&rtw_dbg_mem_stat.phy_peak); if (peak<alloc) ATOMIC_SET(&rtw_dbg_mem_stat.phy_peak, alloc); break; case MEM_STAT_PHY_ALLOC_FAIL: ATOMIC_INC(&rtw_dbg_mem_stat.phy_alloc_err); break; case MEM_STAT_PHY_FREE: alloc = ATOMIC_SUB_RETURN(&rtw_dbg_mem_stat.phy_alloc, sz); break; case MEM_STAT_TX_ALLOC_SUCCESS: alloc = ATOMIC_ADD_RETURN(&rtw_dbg_mem_stat.tx_alloc, sz); peak=ATOMIC_READ(&rtw_dbg_mem_stat.tx_peak); if (peak<alloc) ATOMIC_SET(&rtw_dbg_mem_stat.tx_peak, alloc); break; case MEM_STAT_TX_ALLOC_FAIL: ATOMIC_INC(&rtw_dbg_mem_stat.tx_alloc_err); break; case MEM_STAT_TX_FREE: alloc = ATOMIC_SUB_RETURN(&rtw_dbg_mem_stat.tx_alloc, sz); break; case MEM_STAT_RX_ALLOC_SUCCESS: alloc = ATOMIC_ADD_RETURN(&rtw_dbg_mem_stat.rx_alloc, sz); peak=ATOMIC_READ(&rtw_dbg_mem_stat.rx_peak); if (peak<alloc) ATOMIC_SET(&rtw_dbg_mem_stat.rx_peak, alloc); break; case MEM_STAT_RX_ALLOC_FAIL: ATOMIC_INC(&rtw_dbg_mem_stat.rx_alloc_err); break; case MEM_STAT_RX_FREE: alloc = ATOMIC_SUB_RETURN(&rtw_dbg_mem_stat.rx_alloc, sz); break; }; if (rtw_get_passing_time_ms(update_time) > 5000) { rtw_dump_mem_stat(); update_time=rtw_get_current_time(); } } inline u8* dbg_rtw_vmalloc(u32 sz, const char *func, int line) { u8 *p; //DBG_871X("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz)); p=_rtw_vmalloc((sz)); rtw_update_mem_stat( p ? MEM_STAT_VIR_ALLOC_SUCCESS : MEM_STAT_VIR_ALLOC_FAIL , sz ); return p; } inline u8* dbg_rtw_zvmalloc(u32 sz, const char *func, int line) { u8 *p; //DBG_871X("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz)); p=_rtw_zvmalloc((sz)); rtw_update_mem_stat( p ? MEM_STAT_VIR_ALLOC_SUCCESS : MEM_STAT_VIR_ALLOC_FAIL , sz ); return p; } inline void dbg_rtw_vmfree(u8 *pbuf, u32 sz, const char *func, int line) { //DBG_871X("DBG_MEM_ALLOC %s:%d %s(%p,%d)\n", func, line, __FUNCTION__, (pbuf), (sz)); _rtw_vmfree((pbuf), (sz)); rtw_update_mem_stat( MEM_STAT_VIR_FREE , sz ); } inline u8* dbg_rtw_malloc(u32 sz, const char *func, int line) { u8 *p; if((sz)>4096) DBG_871X("DBG_MEM_ALLOC !!!!!!!!!!!!!! %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz)); p=_rtw_malloc((sz)); rtw_update_mem_stat( p ? MEM_STAT_PHY_ALLOC_SUCCESS : MEM_STAT_PHY_ALLOC_FAIL , sz ); return p; } inline u8* dbg_rtw_zmalloc(u32 sz, const char *func, int line) { u8 *p; if((sz)>4096) DBG_871X("DBG_MEM_ALLOC !!!!!!!!!!!!!! %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz)); p = _rtw_zmalloc((sz)); rtw_update_mem_stat( p ? MEM_STAT_PHY_ALLOC_SUCCESS : MEM_STAT_PHY_ALLOC_FAIL , sz ); return p; } inline void dbg_rtw_mfree(u8 *pbuf, u32 sz, const char *func, int line) { if((sz)>4096) DBG_871X("DBG_MEM_ALLOC !!!!!!!!!!!!!! %s:%d %s(%p,%d)\n", func, line, __FUNCTION__, (pbuf), (sz)); _rtw_mfree((pbuf), (sz)); rtw_update_mem_stat( MEM_STAT_PHY_FREE , sz ); } #endif void* rtw_malloc2d(int h, int w, int size) { int j; void **a = (void **) rtw_zmalloc( h*sizeof(void *) + h*w*size ); if(a == NULL) { DBG_871X("%s: alloc memory fail!\n", __FUNCTION__); return NULL; } for( j=0; j<h; j++ ) a[j] = ((char *)(a+h)) + j*w*size; return a; } void rtw_mfree2d(void *pbuf, int h, int w, int size) { rtw_mfree((u8 *)pbuf, h*sizeof(void*) + w*h*size); } void _rtw_memcpy(void* dst, void* src, u32 sz) { #if defined (PLATFORM_LINUX)|| defined (PLATFORM_FREEBSD) memcpy(dst, src, sz); #endif #ifdef PLATFORM_WINDOWS NdisMoveMemory(dst, src, sz); #endif } int _rtw_memcmp(void *dst, void *src, u32 sz) { #if defined (PLATFORM_LINUX)|| defined (PLATFORM_FREEBSD) //under Linux/GNU/GLibc, the return value of memcmp for two same mem. 
chunk is 0 if (!(memcmp(dst, src, sz))) return _TRUE; else return _FALSE; #endif #ifdef PLATFORM_WINDOWS //under Windows, the return value of NdisEqualMemory for two same mem. chunk is 1 if (NdisEqualMemory (dst, src, sz)) return _TRUE; else return _FALSE; #endif } void _rtw_memset(void *pbuf, int c, u32 sz) { #if defined (PLATFORM_LINUX)|| defined (PLATFORM_FREEBSD) memset(pbuf, c, sz); #endif #ifdef PLATFORM_WINDOWS #if 0 NdisZeroMemory(pbuf, sz); if (c != 0) memset(pbuf, c, sz); #else NdisFillMemory(pbuf, sz, c); #endif #endif } #ifdef PLATFORM_FREEBSD static inline void __list_add(_list *pnew, _list *pprev, _list *pnext) { pnext->prev = pnew; pnew->next = pnext; pnew->prev = pprev; pprev->next = pnew; } //review again struct sk_buff * dev_alloc_skb(unsigned int size) { struct sk_buff *skb=NULL; u8 *data=NULL; //skb = (struct sk_buff *)_rtw_zmalloc(sizeof(struct sk_buff)); // for skb->len, etc. skb = (struct sk_buff *)_rtw_malloc(sizeof(struct sk_buff)); if(!skb) goto out; data = _rtw_malloc(size); if(!data) goto nodata; skb->head = (unsigned char*)data; skb->data = (unsigned char*)data; skb->tail = (unsigned char*)data; skb->end = (unsigned char*)data + size; skb->len = 0; //printf("%s()-%d: skb=%p, skb->head = %p\n", __FUNCTION__, __LINE__, skb, skb->head); out: return skb; nodata: _rtw_mfree((u8 *)skb, sizeof(struct sk_buff)); skb = NULL; goto out; } void dev_kfree_skb_any(struct sk_buff *skb) { //printf("%s()-%d: skb->head = %p\n", __FUNCTION__, __LINE__, skb->head); if(skb->head) _rtw_mfree(skb->head, 0); //printf("%s()-%d: skb = %p\n", __FUNCTION__, __LINE__, skb); if(skb) _rtw_mfree((u8 *)skb, 0); } struct sk_buff *skb_clone(const struct sk_buff *skb) { return NULL; } #endif void _rtw_init_listhead(_list *list) { #ifdef PLATFORM_LINUX INIT_LIST_HEAD(list); #endif #ifdef PLATFORM_FREEBSD list->next = list; list->prev = list; #endif #ifdef PLATFORM_WINDOWS NdisInitializeListHead(list); #endif } /* For the following list_xxx operations, caller must guarantee the atomic context. Otherwise, there will be racing condition. 
*/ u32 rtw_is_list_empty(_list *phead) { #ifdef PLATFORM_LINUX if (list_empty(phead)) return _TRUE; else return _FALSE; #endif #ifdef PLATFORM_FREEBSD if (phead->next == phead) return _TRUE; else return _FALSE; #endif #ifdef PLATFORM_WINDOWS if (IsListEmpty(phead)) return _TRUE; else return _FALSE; #endif } void rtw_list_insert_head(_list *plist, _list *phead) { #ifdef PLATFORM_LINUX list_add(plist, phead); #endif #ifdef PLATFORM_FREEBSD __list_add(plist, phead, phead->next); #endif #ifdef PLATFORM_WINDOWS InsertHeadList(phead, plist); #endif } void rtw_list_insert_tail(_list *plist, _list *phead) { #ifdef PLATFORM_LINUX list_add_tail(plist, phead); #endif #ifdef PLATFORM_FREEBSD __list_add(plist, phead->prev, phead); #endif #ifdef PLATFORM_WINDOWS InsertTailList(phead, plist); #endif } /* Caller must check if the list is empty before calling rtw_list_delete */ void _rtw_init_sema(_sema *sema, int init_val) { #ifdef PLATFORM_LINUX sema_init(sema, init_val); #endif #ifdef PLATFORM_FREEBSD sema_init(sema, init_val, "rtw_drv"); #endif #ifdef PLATFORM_OS_XP KeInitializeSemaphore(sema, init_val, SEMA_UPBND); // count=0; #endif #ifdef PLATFORM_OS_CE if(*sema == NULL) *sema = CreateSemaphore(NULL, init_val, SEMA_UPBND, NULL); #endif } void _rtw_free_sema(_sema *sema) { #ifdef PLATFORM_FREEBSD sema_destroy(sema); #endif #ifdef PLATFORM_OS_CE CloseHandle(*sema); #endif } void _rtw_up_sema(_sema *sema) { #ifdef PLATFORM_LINUX up(sema); #endif #ifdef PLATFORM_FREEBSD sema_post(sema); #endif #ifdef PLATFORM_OS_XP KeReleaseSemaphore(sema, IO_NETWORK_INCREMENT, 1, FALSE ); #endif #ifdef PLATFORM_OS_CE ReleaseSemaphore(*sema, 1, NULL ); #endif } u32 _rtw_down_sema(_sema *sema) { #ifdef PLATFORM_LINUX if (down_interruptible(sema)) return _FAIL; else return _SUCCESS; #endif #ifdef PLATFORM_FREEBSD sema_wait(sema); return _SUCCESS; #endif #ifdef PLATFORM_OS_XP if(STATUS_SUCCESS == KeWaitForSingleObject(sema, Executive, KernelMode, TRUE, NULL)) return _SUCCESS; else return _FAIL; #endif #ifdef PLATFORM_OS_CE if(WAIT_OBJECT_0 == WaitForSingleObject(*sema, INFINITE )) return _SUCCESS; else return _FAIL; #endif } void _rtw_mutex_init(_mutex *pmutex) { #ifdef PLATFORM_LINUX #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) mutex_init(pmutex); #else init_MUTEX(pmutex); #endif #endif #ifdef PLATFORM_FREEBSD mtx_init(pmutex, "", NULL, MTX_DEF|MTX_RECURSE); #endif #ifdef PLATFORM_OS_XP KeInitializeMutex(pmutex, 0); #endif #ifdef PLATFORM_OS_CE *pmutex = CreateMutex( NULL, _FALSE, NULL); #endif } void _rtw_mutex_free(_mutex *pmutex); void _rtw_mutex_free(_mutex *pmutex) { #ifdef PLATFORM_LINUX #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) mutex_destroy(pmutex); #else #endif #ifdef PLATFORM_FREEBSD sema_destroy(pmutex); #endif #endif #ifdef PLATFORM_OS_XP #endif #ifdef PLATFORM_OS_CE #endif } void _rtw_spinlock_init(_lock *plock) { #ifdef PLATFORM_LINUX spin_lock_init(plock); #endif #ifdef PLATFORM_FREEBSD mtx_init(plock, "", NULL, MTX_DEF|MTX_RECURSE); #endif #ifdef PLATFORM_WINDOWS NdisAllocateSpinLock(plock); #endif } void _rtw_spinlock_free(_lock *plock) { #ifdef PLATFORM_FREEBSD mtx_destroy(plock); #endif #ifdef PLATFORM_WINDOWS NdisFreeSpinLock(plock); #endif } #ifdef PLATFORM_FREEBSD extern PADAPTER prtw_lock; void rtw_mtx_lock(_lock *plock){ if(prtw_lock){ mtx_lock(&prtw_lock->glock); } else{ printf("%s prtw_lock==NULL",__FUNCTION__); } } void rtw_mtx_unlock(_lock *plock){ if(prtw_lock){ mtx_unlock(&prtw_lock->glock); } else{ printf("%s prtw_lock==NULL",__FUNCTION__); } } #endif //PLATFORM_FREEBSD void 
_rtw_spinlock(_lock *plock) { #ifdef PLATFORM_LINUX spin_lock(plock); #endif #ifdef PLATFORM_FREEBSD mtx_lock(plock); #endif #ifdef PLATFORM_WINDOWS NdisAcquireSpinLock(plock); #endif } void _rtw_spinunlock(_lock *plock) { #ifdef PLATFORM_LINUX spin_unlock(plock); #endif #ifdef PLATFORM_FREEBSD mtx_unlock(plock); #endif #ifdef PLATFORM_WINDOWS NdisReleaseSpinLock(plock); #endif } void _rtw_spinlock_ex(_lock *plock) { #ifdef PLATFORM_LINUX spin_lock(plock); #endif #ifdef PLATFORM_FREEBSD mtx_lock(plock); #endif #ifdef PLATFORM_WINDOWS NdisDprAcquireSpinLock(plock); #endif } void _rtw_spinunlock_ex(_lock *plock) { #ifdef PLATFORM_LINUX spin_unlock(plock); #endif #ifdef PLATFORM_FREEBSD mtx_unlock(plock); #endif #ifdef PLATFORM_WINDOWS NdisDprReleaseSpinLock(plock); #endif } void _rtw_init_queue(_queue *pqueue) { _rtw_init_listhead(&(pqueue->queue)); _rtw_spinlock_init(&(pqueue->lock)); } u32 _rtw_queue_empty(_queue *pqueue) { return (rtw_is_list_empty(&(pqueue->queue))); } u32 rtw_end_of_queue_search(_list *head, _list *plist) { if (head == plist) return _TRUE; else return _FALSE; } u32 rtw_get_current_time(void) { #ifdef PLATFORM_LINUX return jiffies; #endif #ifdef PLATFORM_FREEBSD struct timeval tvp; getmicrotime(&tvp); return tvp.tv_sec; #endif #ifdef PLATFORM_WINDOWS LARGE_INTEGER SystemTime; NdisGetCurrentSystemTime(&SystemTime); return (u32)(SystemTime.LowPart);// count of 100-nanosecond intervals #endif } inline u32 rtw_systime_to_ms(u32 systime) { #ifdef PLATFORM_LINUX return systime * 1000 / HZ; #endif #ifdef PLATFORM_FREEBSD return systime * 1000; #endif #ifdef PLATFORM_WINDOWS return systime / 10000 ; #endif } inline u32 rtw_ms_to_systime(u32 ms) { #ifdef PLATFORM_LINUX return ms * HZ / 1000; #endif #ifdef PLATFORM_FREEBSD return ms /1000; #endif #ifdef PLATFORM_WINDOWS return ms / 10000 ; #endif } // the input parameter start use the same unit as returned by rtw_get_current_time inline s32 rtw_get_passing_time_ms(u32 start) { #ifdef PLATFORM_LINUX return rtw_systime_to_ms(jiffies-start); #endif #ifdef PLATFORM_FREEBSD return rtw_systime_to_ms(rtw_get_current_time()); #endif #ifdef PLATFORM_WINDOWS LARGE_INTEGER SystemTime; NdisGetCurrentSystemTime(&SystemTime); return rtw_systime_to_ms((u32)(SystemTime.LowPart) - start) ; #endif } inline s32 rtw_get_time_interval_ms(u32 start, u32 end) { #ifdef PLATFORM_LINUX return rtw_systime_to_ms(end-start); #endif #ifdef PLATFORM_FREEBSD return rtw_systime_to_ms(rtw_get_current_time()); #endif #ifdef PLATFORM_WINDOWS return rtw_systime_to_ms(end-start); #endif } void rtw_sleep_schedulable(int ms) { #ifdef PLATFORM_LINUX u32 delta; delta = (ms * HZ)/1000;//(ms) if (delta == 0) { delta = 1;// 1 ms } set_current_state(TASK_INTERRUPTIBLE); if (schedule_timeout(delta) != 0) { return ; } return; #endif #ifdef PLATFORM_FREEBSD DELAY(ms*1000); return ; #endif #ifdef PLATFORM_WINDOWS NdisMSleep(ms*1000); //(us)*1000=(ms) #endif } void rtw_msleep_os(int ms) { #ifdef PLATFORM_LINUX msleep((unsigned int)ms); #endif #ifdef PLATFORM_FREEBSD //Delay for delay microseconds DELAY(ms*1000); return ; #endif #ifdef PLATFORM_WINDOWS NdisMSleep(ms*1000); //(us)*1000=(ms) #endif } void rtw_usleep_os(int us) { #ifdef PLATFORM_LINUX // msleep((unsigned int)us); if ( 1 < (us/1000) ) msleep(1); else msleep( (us/1000) + 1); #endif #ifdef PLATFORM_FREEBSD //Delay for delay microseconds DELAY(us); return ; #endif #ifdef PLATFORM_WINDOWS NdisMSleep(us); //(us) #endif } #ifdef DBG_DELAY_OS void _rtw_mdelay_os(int ms, const char *func, const int line) { #if 0 if(ms>10) 
DBG_871X("%s:%d %s(%d)\n", func, line, __FUNCTION__, ms); rtw_msleep_os(ms); return; #endif DBG_871X("%s:%d %s(%d)\n", func, line, __FUNCTION__, ms); #if defined(PLATFORM_LINUX) mdelay((unsigned long)ms); #elif defined(PLATFORM_WINDOWS) NdisStallExecution(ms*1000); //(us)*1000=(ms) #endif } void _rtw_udelay_os(int us, const char *func, const int line) { #if 0 if(us > 1000) { DBG_871X("%s:%d %s(%d)\n", func, line, __FUNCTION__, us); rtw_usleep_os(us); return; } #endif DBG_871X("%s:%d %s(%d)\n", func, line, __FUNCTION__, us); #if defined(PLATFORM_LINUX) udelay((unsigned long)us); #elif defined(PLATFORM_WINDOWS) NdisStallExecution(us); //(us) #endif } #else void rtw_mdelay_os(int ms) { #ifdef PLATFORM_LINUX mdelay((unsigned long)ms); #endif #ifdef PLATFORM_FREEBSD DELAY(ms*1000); return ; #endif #ifdef PLATFORM_WINDOWS NdisStallExecution(ms*1000); //(us)*1000=(ms) #endif } void rtw_udelay_os(int us) { #ifdef PLATFORM_LINUX udelay((unsigned long)us); #endif #ifdef PLATFORM_FREEBSD //Delay for delay microseconds DELAY(us); return ; #endif #ifdef PLATFORM_WINDOWS NdisStallExecution(us); //(us) #endif } #endif void rtw_yield_os() { #ifdef PLATFORM_LINUX yield(); #endif #ifdef PLATFORM_FREEBSD yield(); #endif #ifdef PLATFORM_WINDOWS SwitchToThread(); #endif } #define RTW_SUSPEND_LOCK_NAME "rtw_wifi" #ifdef CONFIG_WAKELOCK static struct wake_lock rtw_suspend_lock; #elif defined(CONFIG_ANDROID_POWER) static android_suspend_lock_t rtw_suspend_lock ={ .name = RTW_SUSPEND_LOCK_NAME }; #endif inline void rtw_suspend_lock_init() { #ifdef CONFIG_WAKELOCK wake_lock_init(&rtw_suspend_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_LOCK_NAME); #elif defined(CONFIG_ANDROID_POWER) android_init_suspend_lock(&rtw_suspend_lock); #endif } inline void rtw_suspend_lock_uninit() { #ifdef CONFIG_WAKELOCK wake_lock_destroy(&rtw_suspend_lock); #elif defined(CONFIG_ANDROID_POWER) android_uninit_suspend_lock(&rtw_suspend_lock); #endif } inline void rtw_lock_suspend() { #ifdef CONFIG_WAKELOCK wake_lock(&rtw_suspend_lock); #elif defined(CONFIG_ANDROID_POWER) android_lock_suspend(&rtw_suspend_lock); #endif #if defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER) //DBG_871X("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); #endif } inline void rtw_unlock_suspend() { #ifdef CONFIG_WAKELOCK wake_unlock(&rtw_suspend_lock); #elif defined(CONFIG_ANDROID_POWER) android_unlock_suspend(&rtw_suspend_lock); #endif #if defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER) //DBG_871X("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); #endif } #ifdef CONFIG_WOWLAN inline void rtw_lock_suspend_timeout(long timeout) { #ifdef CONFIG_WAKELOCK wake_lock_timeout(&rtw_suspend_lock, timeout); #elif defined(CONFIG_ANDROID_POWER) android_lock_suspend_auto_expire(&rtw_suspend_lock, timeout); #endif } #endif //CONFIG_WOWLAN inline void ATOMIC_SET(ATOMIC_T *v, int i) { #ifdef PLATFORM_LINUX atomic_set(v,i); #elif defined(PLATFORM_WINDOWS) *v=i;// other choice???? #elif defined(PLATFORM_FREEBSD) atomic_set_int(v,i); #endif } inline int ATOMIC_READ(ATOMIC_T *v) { #ifdef PLATFORM_LINUX return atomic_read(v); #elif defined(PLATFORM_WINDOWS) return *v; // other choice???? 
#elif defined(PLATFORM_FREEBSD) return atomic_load_acq_32(v); #endif } inline void ATOMIC_ADD(ATOMIC_T *v, int i) { #ifdef PLATFORM_LINUX atomic_add(i,v); #elif defined(PLATFORM_WINDOWS) InterlockedAdd(v,i); #elif defined(PLATFORM_FREEBSD) atomic_add_int(v,i); #endif } inline void ATOMIC_SUB(ATOMIC_T *v, int i) { #ifdef PLATFORM_LINUX atomic_sub(i,v); #elif defined(PLATFORM_WINDOWS) InterlockedAdd(v,-i); #elif defined(PLATFORM_FREEBSD) atomic_subtract_int(v,i); #endif } inline void ATOMIC_INC(ATOMIC_T *v) { #ifdef PLATFORM_LINUX atomic_inc(v); #elif defined(PLATFORM_WINDOWS) InterlockedIncrement(v); #elif defined(PLATFORM_FREEBSD) atomic_add_int(v,1); #endif } inline void ATOMIC_DEC(ATOMIC_T *v) { #ifdef PLATFORM_LINUX atomic_dec(v); #elif defined(PLATFORM_WINDOWS) InterlockedDecrement(v); #elif defined(PLATFORM_FREEBSD) atomic_subtract_int(v,1); #endif } inline int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i) { #ifdef PLATFORM_LINUX return atomic_add_return(i,v); #elif defined(PLATFORM_WINDOWS) return InterlockedAdd(v,i); #elif defined(PLATFORM_FREEBSD) atomic_add_int(v,i); return atomic_load_acq_32(v); #endif } inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i) { #ifdef PLATFORM_LINUX return atomic_sub_return(i,v); #elif defined(PLATFORM_WINDOWS) return InterlockedAdd(v,-i); #elif defined(PLATFORM_FREEBSD) atomic_subtract_int(v,i); return atomic_load_acq_32(v); #endif } inline int ATOMIC_INC_RETURN(ATOMIC_T *v) { #ifdef PLATFORM_LINUX return atomic_inc_return(v); #elif defined(PLATFORM_WINDOWS) return InterlockedIncrement(v); #elif defined(PLATFORM_FREEBSD) atomic_add_int(v,1); return atomic_load_acq_32(v); #endif } inline int ATOMIC_DEC_RETURN(ATOMIC_T *v) { #ifdef PLATFORM_LINUX return atomic_dec_return(v); #elif defined(PLATFORM_WINDOWS) return InterlockedDecrement(v); #elif defined(PLATFORM_FREEBSD) atomic_subtract_int(v,1); return atomic_load_acq_32(v); #endif } #ifdef PLATFORM_LINUX /* * Open a file with the specific @param path, @param flag, @param mode * @param fpp the pointer of struct file pointer to get struct file pointer while file opening is success * @param path the path of the file to open * @param flag file operation flags, please refer to linux document * @param mode please refer to linux document * @return Linux specific error code */ static int openFile(struct file **fpp, char *path, int flag, int mode) { struct file *fp; fp=filp_open(path, flag, mode); if(IS_ERR(fp)) { *fpp=NULL; return PTR_ERR(fp); } else { *fpp=fp; return 0; } } /* * Close the file with the specific @param fp * @param fp the pointer of struct file to close * @return always 0 */ static int closeFile(struct file *fp) { filp_close(fp,NULL); return 0; } static int readFile(struct file *fp,char *buf,int len) { int rlen=0, sum=0; if (!fp->f_op || !fp->f_op->read) return -EPERM; while(sum<len) { rlen=fp->f_op->read(fp,buf+sum,len-sum, &fp->f_pos); if(rlen>0) sum+=rlen; else if(0 != rlen) return rlen; else break; } return sum; } static int writeFile(struct file *fp,char *buf,int len) { int wlen=0, sum=0; if (!fp->f_op || !fp->f_op->write) return -EPERM; while(sum<len) { wlen=fp->f_op->write(fp,buf+sum,len-sum, &fp->f_pos); if(wlen>0) sum+=wlen; else if(0 != wlen) return wlen; else break; } return sum; } /* * Test if the specifi @param path is a file and readable * @param path the path of the file to test * @return Linux specific error code */ static int isFileReadable(char *path) { struct file *fp; int ret = 0; mm_segment_t oldfs; char buf; fp=filp_open(path, O_RDONLY, 0); if(IS_ERR(fp)) { ret = PTR_ERR(fp); } else 
{ oldfs = get_fs(); set_fs(get_ds()); if(1!=readFile(fp, &buf, 1)) ret = PTR_ERR(fp); set_fs(oldfs); filp_close(fp,NULL); } return ret; } /* * Open the file with @param path and retrive the file content into memory starting from @param buf for @param sz at most * @param path the path of the file to open and read * @param buf the starting address of the buffer to store file content * @param sz how many bytes to read at most * @return the byte we've read, or Linux specific error code */ static int retriveFromFile(char *path, u8* buf, u32 sz) { int ret =-1; mm_segment_t oldfs; struct file *fp; if(path && buf) { if( 0 == (ret=openFile(&fp,path, O_RDONLY, 0)) ){ DBG_871X("%s openFile path:%s fp=%p\n",__FUNCTION__, path ,fp); oldfs = get_fs(); set_fs(get_ds()); ret=readFile(fp, buf, sz); set_fs(oldfs); closeFile(fp); DBG_871X("%s readFile, ret:%d\n",__FUNCTION__, ret); } else { DBG_871X("%s openFile path:%s Fail, ret:%d\n",__FUNCTION__, path, ret); } } else { DBG_871X("%s NULL pointer\n",__FUNCTION__); ret = -EINVAL; } return ret; } /* * Open the file with @param path and wirte @param sz byte of data starting from @param buf into the file * @param path the path of the file to open and write * @param buf the starting address of the data to write into file * @param sz how many bytes to write at most * @return the byte we've written, or Linux specific error code */ static int storeToFile(char *path, u8* buf, u32 sz) { int ret =0; mm_segment_t oldfs; struct file *fp; if(path && buf) { if( 0 == (ret=openFile(&fp, path, O_CREAT|O_WRONLY, 0666)) ) { DBG_871X("%s openFile path:%s fp=%p\n",__FUNCTION__, path ,fp); oldfs = get_fs(); set_fs(get_ds()); ret=writeFile(fp, buf, sz); set_fs(oldfs); closeFile(fp); DBG_871X("%s writeFile, ret:%d\n",__FUNCTION__, ret); } else { DBG_871X("%s openFile path:%s Fail, ret:%d\n",__FUNCTION__, path, ret); } } else { DBG_871X("%s NULL pointer\n",__FUNCTION__); ret = -EINVAL; } return ret; } #endif //PLATFORM_LINUX /* * Test if the specifi @param path is a file and readable * @param path the path of the file to test * @return _TRUE or _FALSE */ int rtw_is_file_readable(char *path) { #ifdef PLATFORM_LINUX if(isFileReadable(path) == 0) return _TRUE; else return _FALSE; #else //Todo... return _FALSE; #endif } /* * Open the file with @param path and retrive the file content into memory starting from @param buf for @param sz at most * @param path the path of the file to open and read * @param buf the starting address of the buffer to store file content * @param sz how many bytes to read at most * @return the byte we've read */ int rtw_retrive_from_file(char *path, u8* buf, u32 sz) { #ifdef PLATFORM_LINUX int ret =retriveFromFile(path, buf, sz); return ret>=0?ret:0; #else //Todo... return 0; #endif } /* * Open the file with @param path and wirte @param sz byte of data starting from @param buf into the file * @param path the path of the file to open and write * @param buf the starting address of the data to write into file * @param sz how many bytes to write at most * @return the byte we've written */ int rtw_store_to_file(char *path, u8* buf, u32 sz) { #ifdef PLATFORM_LINUX int ret =storeToFile(path, buf, sz); return ret>=0?ret:0; #else //Todo... 
return 0; #endif } #if 1 //#ifdef MEM_ALLOC_REFINE_ADAPTOR #ifdef PLATFORM_LINUX struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv) { struct net_device *pnetdev; struct rtw_netdev_priv_indicator *pnpi; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4); #else pnetdev = alloc_etherdev(sizeof(struct rtw_netdev_priv_indicator)); #endif if (!pnetdev) goto RETURN; pnpi = netdev_priv(pnetdev); pnpi->priv=old_priv; pnpi->sizeof_priv=sizeof_priv; RETURN: return pnetdev; } struct net_device *rtw_alloc_etherdev(int sizeof_priv) { struct net_device *pnetdev; struct rtw_netdev_priv_indicator *pnpi; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4); #else pnetdev = alloc_etherdev(sizeof(struct rtw_netdev_priv_indicator)); #endif if (!pnetdev) goto RETURN; pnpi = netdev_priv(pnetdev); pnpi->priv = rtw_zvmalloc(sizeof_priv); if (!pnpi->priv) { free_netdev(pnetdev); pnetdev = NULL; goto RETURN; } pnpi->sizeof_priv=sizeof_priv; RETURN: return pnetdev; } void rtw_free_netdev(struct net_device * netdev) { struct rtw_netdev_priv_indicator *pnpi; if(!netdev) goto RETURN; pnpi = netdev_priv(netdev); if(!pnpi->priv) goto RETURN; rtw_vmfree(pnpi->priv, pnpi->sizeof_priv); free_netdev(netdev); RETURN: return; } /* * Jeff: this function should be called under ioctl (rtnl_lock is accquired) while * LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) */ int rtw_change_ifname(_adapter *padapter, const char *ifname) { struct net_device *pnetdev; struct net_device *cur_pnetdev = padapter->pnetdev; struct rereg_nd_name_data *rereg_priv; int ret; if(!padapter) goto error; rereg_priv = &padapter->rereg_nd_name_priv; //free the old_pnetdev if(rereg_priv->old_pnetdev) { free_netdev(rereg_priv->old_pnetdev); rereg_priv->old_pnetdev = NULL; } #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) if(!rtnl_is_locked()) unregister_netdev(cur_pnetdev); else #endif unregister_netdevice(cur_pnetdev); rtw_proc_remove_one(cur_pnetdev); rereg_priv->old_pnetdev=cur_pnetdev; pnetdev = rtw_init_netdev(padapter); if (!pnetdev) { ret = -1; goto error; } SET_NETDEV_DEV(pnetdev, dvobj_to_dev(adapter_to_dvobj(padapter))); rtw_init_netdev_name(pnetdev, ifname); _rtw_memcpy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr, ETH_ALEN); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) if(!rtnl_is_locked()) ret = register_netdev(pnetdev); else #endif ret = register_netdevice(pnetdev); if ( ret != 0) { RT_TRACE(_module_hci_intfs_c_,_drv_err_,("register_netdev() failed\n")); goto error; } rtw_proc_init_one(pnetdev); return 0; error: return -1; } #endif #endif //MEM_ALLOC_REFINE_ADAPTOR #ifdef PLATFORM_FREEBSD /* * Copy a buffer from userspace and write into kernel address * space. * * This emulation just calls the FreeBSD copyin function (to * copy data from user space buffer into a kernel space buffer) * and is designed to be used with the above io_write_wrapper. * * This function should return the number of bytes not copied. * I.e. success results in a zero value. * Negative error values are not returned. 
*/ unsigned long copy_from_user(void *to, const void *from, unsigned long n) { if ( copyin(from, to, n) != 0 ) { /* Any errors will be treated as a failure to copy any of the requested bytes */ return n; } return 0; } unsigned long copy_to_user(void *to, const void *from, unsigned long n) { if ( copyout(from, to, n) != 0 ) { /* Any errors will be treated as a failure to copy any of the requested bytes */ return n; } return 0; } /* * The usb_register and usb_deregister functions are used to register * usb drivers with the usb subsystem. In this compatibility layer * emulation a list of drivers (struct usb_driver) is maintained * and is used for probing/attaching etc. * * usb_register and usb_deregister simply call these functions. */ int usb_register(struct usb_driver *driver) { rtw_usb_linux_register(driver); return 0; } int usb_deregister(struct usb_driver *driver) { rtw_usb_linux_deregister(driver); return 0; } void module_init_exit_wrapper(void *arg) { int (*func)(void) = arg; func(); return; } #endif //PLATFORM_FREEBSD #ifdef CONFIG_PLATFORM_SPRD #ifdef do_div #undef do_div #endif #include <asm-generic/div64.h> #endif u64 rtw_modular64(u64 x, u64 y) { #ifdef PLATFORM_LINUX return do_div(x, y); #elif defined(PLATFORM_WINDOWS) return (x % y); #elif defined(PLATFORM_FREEBSD) return (x %y); #endif } u64 rtw_division64(u64 x, u64 y) { #ifdef PLATFORM_LINUX do_div(x, y); return x; #elif defined(PLATFORM_WINDOWS) return (x / y); #elif defined(PLATFORM_FREEBSD) return (x / y); #endif } void rtw_buf_free(u8 **buf, u32 *buf_len) { u32 ori_len; if (!buf || !buf_len) return; ori_len = *buf_len; if (*buf) { *buf_len = 0; _rtw_mfree(*buf, *buf_len); *buf = NULL; } } void rtw_buf_update(u8 **buf, u32 *buf_len, u8 *src, u32 src_len) { u32 ori_len = 0, dup_len = 0; u8 *ori = NULL; u8 *dup = NULL; if (!buf || !buf_len) return; if (!src || !src_len) goto keep_ori; /* duplicate src */ dup = rtw_malloc(src_len); if (dup) { dup_len = src_len; _rtw_memcpy(dup, src, dup_len); } keep_ori: ori = *buf; ori_len = *buf_len; /* replace buf with dup */ *buf_len = 0; *buf = dup; *buf_len = dup_len; /* free ori */ if (ori && ori_len > 0) _rtw_mfree(ori, ori_len); }
gpl-2.0
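The rtw_buf_update() helper near the end of the file above follows a duplicate-then-swap pattern: the new contents are copied into a fresh allocation first, the caller-visible pointer is then switched over, and only afterwards is the old buffer released (if duplication fails, the buffer simply ends up empty). A plain userspace sketch of the same pattern, using malloc()/free() in place of the rtw_* wrappers:

#include <stdlib.h>
#include <string.h>

/* Userspace analogue of rtw_buf_update(): duplicate src, swap, free old. */
static void buf_update(unsigned char **buf, size_t *buf_len,
		       const unsigned char *src, size_t src_len)
{
	unsigned char *dup = NULL;
	size_t dup_len = 0;
	unsigned char *old;
	size_t old_len;

	if (!buf || !buf_len)
		return;

	if (src && src_len) {
		dup = malloc(src_len);
		if (dup) {
			memcpy(dup, src, src_len);
			dup_len = src_len;
		}
	}

	old = *buf;
	old_len = *buf_len;

	*buf = dup;		/* on allocation failure the buffer becomes empty */
	*buf_len = dup_len;

	if (old && old_len)	/* release the previous contents last */
		free(old);
}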
neuesleben123/rt-thread
bsp/efm32/Libraries/CMSIS/DSP_Lib/Source/StatisticsFunctions/arm_std_q15.c
17
6762
/* ---------------------------------------------------------------------- * Copyright (C) 2010 ARM Limited. All rights reserved. * * $Date: 15. February 2012 * $Revision: V1.1.0 * * Project: CMSIS DSP Library * Title: arm_std_q15.c * * Description: Standard deviation of an array of Q15 type. * * Target Processor: Cortex-M4/Cortex-M3/Cortex-M0 * * Version 1.1.0 2012/02/15 * Updated with more optimizations, bug fixes and minor API changes. * * Version 1.0.10 2011/7/15 * Big Endian support added and Merged M0 and M3/M4 Source code. * * Version 1.0.3 2010/11/29 * Re-organized the CMSIS folders and updated documentation. * * Version 1.0.2 2010/11/11 * Documentation updated. * * Version 1.0.1 2010/10/05 * Production release and review comments incorporated. * * Version 1.0.0 2010/09/20 * Production release and review comments incorporated. * -------------------------------------------------------------------- */ #include "arm_math.h" /** * @ingroup groupStats */ /** * @addtogroup STD * @{ */ /** * @brief Standard deviation of the elements of a Q15 vector. * @param[in] *pSrc points to the input vector * @param[in] blockSize length of the input vector * @param[out] *pResult standard deviation value returned here * @return none. * * @details * <b>Scaling and Overflow Behavior:</b> * * \par * The function is implemented using a 64-bit internal accumulator. * The input is represented in 1.15 format. * Intermediate multiplication yields a 2.30 format, and this * result is added without saturation to a 64-bit accumulator in 34.30 format. * With 33 guard bits in the accumulator, there is no risk of overflow, and the * full precision of the intermediate multiplication is preserved. * Finally, the 34.30 result is truncated to 34.15 format by discarding the lower * 15 bits, and then saturated to yield a result in 1.15 format. */ void arm_std_q15( q15_t * pSrc, uint32_t blockSize, q15_t * pResult) { q31_t sum = 0; /* Accumulator */ q31_t meanOfSquares, squareOfMean; /* square of mean and mean of square */ q15_t mean; /* mean */ uint32_t blkCnt; /* loop counter */ q15_t t; /* Temporary variable */ q63_t sumOfSquares = 0; /* Accumulator */ #ifndef ARM_MATH_CM0 /* Run the below code for Cortex-M4 and Cortex-M3 */ q31_t in; /* input value */ q15_t in1; /* input value */ /*loop Unrolling */ blkCnt = blockSize >> 2u; /* First part of the processing with loop unrolling. Compute 4 outputs at a time. ** a second loop below computes the remaining 1 to 3 samples. */ while(blkCnt > 0u) { /* C = (A[0] * A[0] + A[1] * A[1] + ... + A[blockSize-1] * A[blockSize-1]) */ /* Compute Sum of squares of the input samples * and then store the result in a temporary variable, sum. */ in = *__SIMD32(pSrc)++; sum += ((in << 16) >> 16); sum += (in >> 16); sumOfSquares = __SMLALD(in, in, sumOfSquares); in = *__SIMD32(pSrc)++; sum += ((in << 16) >> 16); sum += (in >> 16); sumOfSquares = __SMLALD(in, in, sumOfSquares); /* Decrement the loop counter */ blkCnt--; } /* If the blockSize is not a multiple of 4, compute any remaining output samples here. ** No loop unrolling is used. */ blkCnt = blockSize % 0x4u; while(blkCnt > 0u) { /* C = (A[0] * A[0] + A[1] * A[1] + ... + A[blockSize-1] * A[blockSize-1]) */ /* Compute Sum of squares of the input samples * and then store the result in a temporary variable, sum. 
*/ in1 = *pSrc++; sumOfSquares = __SMLALD(in1, in1, sumOfSquares); sum += in1; /* Decrement the loop counter */ blkCnt--; } /* Compute Mean of squares of the input samples * and then store the result in a temporary variable, meanOfSquares. */ t = (q15_t) ((1.0 / (blockSize - 1)) * 16384LL); sumOfSquares = __SSAT((sumOfSquares >> 15u), 16u); meanOfSquares = (q31_t) ((sumOfSquares * t) >> 14u); /* Compute mean of all input values */ t = (q15_t) ((1.0 / (blockSize * (blockSize - 1))) * 32768LL); mean = (q15_t) __SSAT(sum, 16u); /* Compute square of mean */ squareOfMean = ((q31_t) mean * mean) >> 15; squareOfMean = (q31_t) (((q63_t) squareOfMean * t) >> 15); /* mean of the squares minus the square of the mean. */ in1 = (q15_t) (meanOfSquares - squareOfMean); /* Compute standard deviation and store the result to the destination */ arm_sqrt_q15(in1, pResult); #else /* Run the below code for Cortex-M0 */ q15_t in; /* input value */ /* Loop over blockSize number of values */ blkCnt = blockSize; while(blkCnt > 0u) { /* C = (A[0] * A[0] + A[1] * A[1] + ... + A[blockSize-1] * A[blockSize-1]) */ /* Compute Sum of squares of the input samples * and then store the result in a temporary variable, sumOfSquares. */ in = *pSrc++; sumOfSquares += (in * in); /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) */ /* Compute sum of all input values and then store the result in a temporary variable, sum. */ sum += in; /* Decrement the loop counter */ blkCnt--; } /* Compute Mean of squares of the input samples * and then store the result in a temporary variable, meanOfSquares. */ t = (q15_t) ((1.0 / (blockSize - 1)) * 16384LL); sumOfSquares = __SSAT((sumOfSquares >> 15u), 16u); meanOfSquares = (q31_t) ((sumOfSquares * t) >> 14u); /* Compute mean of all input values */ mean = (q15_t) __SSAT(sum, 16u); /* Compute square of mean of the input samples * and then store the result in a temporary variable, squareOfMean.*/ t = (q15_t) ((1.0 / (blockSize * (blockSize - 1))) * 32768LL); squareOfMean = ((q31_t) mean * mean) >> 15; squareOfMean = (q31_t) (((q63_t) squareOfMean * t) >> 15); /* mean of the squares minus the square of the mean. */ in = (q15_t) (meanOfSquares - squareOfMean); /* Compute standard deviation and store the result to the destination */ arm_sqrt_q15(in, pResult); #endif /* #ifndef ARM_MATH_CM0 */ } /** * @} end of STD group */
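/*
 * A plain floating-point cross-check (not part of CMSIS-DSP, added here only
 * as an illustration): it evaluates the same quantity the fixed-point code
 * above computes, the unbiased sample standard deviation
 * sqrt((sum(x^2) - (sum(x))^2 / N) / (N - 1)), with each Q15 sample mapped to
 * [-1, 1) by dividing by 2^15. Useful for validating arm_std_q15() on small
 * test vectors.
 */
#include <math.h>

static float ref_std_q15(const q15_t *pSrc, uint32_t blockSize)
{
  double sum = 0.0;      /* running sum of samples */
  double sumSq = 0.0;    /* running sum of squared samples */
  uint32_t i;

  for (i = 0u; i < blockSize; i++)
  {
    double x = (double) pSrc[i] / 32768.0;   /* Q15 -> real value */
    sum += x;
    sumSq += x * x;
  }

  /* unbiased sample variance, then square root */
  return (float) sqrt((sumSq - (sum * sum) / blockSize) / (blockSize - 1.0));
}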
gpl-2.0
rickyzhang82/linux-odroid-u2
drivers/media/video/samsung/fimc/fimc_capture.c
17
68804
/* linux/drivers/media/video/samsung/fimc_capture.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * V4L2 Capture device support file for Samsung Camera Interface (FIMC) driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/slab.h> #include <linux/bootmem.h> #include <linux/string.h> #include <linux/platform_device.h> #include <linux/videodev2.h> #include <linux/videodev2_samsung.h> #include <linux/clk.h> #include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/uaccess.h> #include <plat/media.h> #include <plat/clock.h> #include <plat/fimc.h> #include <linux/delay.h> #include <asm/cacheflush.h> #include "fimc.h" static const struct v4l2_fmtdesc capture_fmts[] = { { .index = 0, .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .flags = FORMAT_FLAGS_PACKED, .description = "RGB-5-6-5", .pixelformat = V4L2_PIX_FMT_RGB565, }, { .index = 1, .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .flags = FORMAT_FLAGS_PACKED, .description = "RGB-8-8-8, unpacked 24 bpp", .pixelformat = V4L2_PIX_FMT_RGB32, }, { .index = 2, .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .flags = FORMAT_FLAGS_PACKED, .description = "YUV 4:2:2 packed, YCbYCr", .pixelformat = V4L2_PIX_FMT_YUYV, }, { .index = 3, .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .flags = FORMAT_FLAGS_PACKED, .description = "YUV 4:2:2 packed, CbYCrY", .pixelformat = V4L2_PIX_FMT_UYVY, }, { .index = 4, .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .flags = FORMAT_FLAGS_PACKED, .description = "YUV 4:2:2 packed, CrYCbY", .pixelformat = V4L2_PIX_FMT_VYUY, }, { .index = 5, .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .flags = FORMAT_FLAGS_PACKED, .description = "YUV 4:2:2 packed, YCrYCb", .pixelformat = V4L2_PIX_FMT_YVYU, }, { .index = 6, .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .flags = FORMAT_FLAGS_PLANAR, .description = "YUV 4:2:2 planar, Y/Cb/Cr", .pixelformat = V4L2_PIX_FMT_YUV422P, }, { .index = 7, .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .flags = FORMAT_FLAGS_PLANAR, .description = "YUV 4:2:0 planar, Y/CbCr", .pixelformat = V4L2_PIX_FMT_NV12, }, { .index = 8, .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .flags = FORMAT_FLAGS_PLANAR, .description = "YUV 4:2:0 planar, Y/CbCr, Tiled", .pixelformat = V4L2_PIX_FMT_NV12T, }, { .index = 9, .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .flags = FORMAT_FLAGS_PLANAR, .description = "YUV 4:2:0 planar, Y/CrCb", .pixelformat = V4L2_PIX_FMT_NV21, }, { .index = 10, .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .flags = FORMAT_FLAGS_PLANAR, .description = "YUV 4:2:2 planar, Y/CbCr", .pixelformat = V4L2_PIX_FMT_NV16, }, { .index = 11, .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .flags = FORMAT_FLAGS_PLANAR, .description = "YUV 4:2:2 planar, Y/CrCb", .pixelformat = V4L2_PIX_FMT_NV61, }, { .index = 12, .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .flags = FORMAT_FLAGS_PLANAR, .description = "YUV 4:2:0 planar, Y/Cb/Cr", .pixelformat = V4L2_PIX_FMT_YUV420, }, { .index = 13, .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .flags = FORMAT_FLAGS_PLANAR, .description = "YUV 4:2:0 planar, Y/Cr/Cb", .pixelformat = V4L2_PIX_FMT_YVU420, }, { .index = 14, .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .description = "JPEG encoded data", .pixelformat = V4L2_PIX_FMT_JPEG, }, }; static const struct v4l2_queryctrl fimc_controls[] = { { .id = V4L2_CID_ROTATION, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Roataion", .minimum = 0, .maximum = 270, .step = 90, .default_value = 0, }, { .id = V4L2_CID_HFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = 
"Horizontal Flip", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, { .id = V4L2_CID_VFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Vertical Flip", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, { .id = V4L2_CID_PADDR_Y, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Physical address Y", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, .flags = V4L2_CTRL_FLAG_READ_ONLY, }, { .id = V4L2_CID_PADDR_CB, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Physical address Cb", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, .flags = V4L2_CTRL_FLAG_READ_ONLY, }, { .id = V4L2_CID_PADDR_CR, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Physical address Cr", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, .flags = V4L2_CTRL_FLAG_READ_ONLY, }, { .id = V4L2_CID_PADDR_CBCR, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Physical address CbCr", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, .flags = V4L2_CTRL_FLAG_READ_ONLY, }, { .id = V4L2_CID_CACHEABLE, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Cacheable", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, }; #ifndef CONFIG_VIDEO_FIMC_MIPI void s3c_csis_start(int csis_id, int lanes, int settle, \ int align, int width, int height, int pixel_format) {} void s3c_csis_stop(int csis_id) {} void s3c_csis_enable_pktdata(int csis_id, bool enable) {} #endif static int fimc_init_camera(struct fimc_control *ctrl) { struct fimc_global *fimc = get_fimc_dev(); struct s3c_platform_fimc *pdata; struct s3c_platform_camera *cam; int ret = 0, retry_cnt = 0; #if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME)) struct platform_device *pdev = to_platform_device(ctrl->dev); #endif pdata = to_fimc_plat(ctrl->dev); cam = ctrl->cam; /* do nothing if already initialized */ if (ctrl->cam->initialized) return 0; #if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME)) if (ctrl->power_status == FIMC_POWER_OFF) pm_runtime_get_sync(&pdev->dev); #endif /* * WriteBack mode doesn't need to set clock and power, * but it needs to set source width, height depend on LCD resolution. */ if ((cam->id == CAMERA_WB) || (cam->id == CAMERA_WB_B)) { ret = s3cfb_direct_ioctl(0, S3CFB_GET_LCD_WIDTH, (unsigned long)&cam->width); if (ret) { fimc_err("fail to get LCD size\n"); #if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME)) pm_runtime_put_sync(&pdev->dev); #endif return ret; } ret = s3cfb_direct_ioctl(0, S3CFB_GET_LCD_HEIGHT, (unsigned long)&cam->height); if (ret) { fimc_err("fail to get LCD size\n"); #if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME)) pm_runtime_put_sync(&pdev->dev); #endif return ret; } cam->window.width = cam->width; cam->window.height = cam->height; cam->initialized = 1; return 0; } retry: /* set rate for mclk */ if ((clk_get_rate(cam->clk)) && (fimc->mclk_status == CAM_MCLK_OFF)) { clk_set_rate(cam->clk, cam->clk_rate); clk_enable(cam->clk); fimc->mclk_status = CAM_MCLK_ON; fimc_info1("clock for camera: %d\n", cam->clk_rate); } /* enable camera power if needed */ if (cam->cam_power) { ret = cam->cam_power(1); if (unlikely(ret < 0)) fimc_err("\nfail to power on\n"); } /* "0" argument means preview init for s5k4ea */ ret = v4l2_subdev_call(cam->sd, core, init, 0); /* Retry camera power-up if first i2c fails. 
*/ if (unlikely(ret < 0)) { if (cam->cam_power) cam->cam_power(0); if (fimc->mclk_status == CAM_MCLK_ON) { clk_disable(ctrl->cam->clk); fimc->mclk_status = CAM_MCLK_OFF; } // if (retry_cnt++ < 3) { // msleep(100); // fimc_err("Retry power on(%d/3)\n\n", retry_cnt); // goto retry; // } cam->initialized = 0; } else { cam->initialized = 1; } return ret; } static int fimc_camera_get_jpeg_memsize(struct fimc_control *ctrl) { int ret = 0; struct v4l2_control cam_ctrl; cam_ctrl.id = V4L2_CID_CAM_JPEG_MEMSIZE; ret = v4l2_subdev_call(ctrl->cam->sd, core, g_ctrl, &cam_ctrl); if (ret < 0) { fimc_err("%s: Subdev doesn't support JEPG encoding.\n", \ __func__); return 0; } return cam_ctrl.value; } static int fimc_capture_scaler_info(struct fimc_control *ctrl) { struct fimc_scaler *sc = &ctrl->sc; struct v4l2_rect *window = &ctrl->cam->window; int tx, ty, sx, sy; struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev); int rot = 0; if (!ctrl->cam->use_isp) { sx = window->width; sy = window->height; } else { sx = ctrl->is.fmt.width; sy = ctrl->is.fmt.height; } sc->real_width = sx; sc->real_height = sy; rot = fimc_mapping_rot_flip(ctrl->cap->rotate, ctrl->cap->flip); if (rot & FIMC_ROT) { tx = ctrl->cap->fmt.height; ty = ctrl->cap->fmt.width; } else { tx = ctrl->cap->fmt.width; ty = ctrl->cap->fmt.height; } fimc_dbg("%s: CamOut (%d, %d), TargetOut (%d, %d)\n", __func__, sx, sy, tx, ty); if (sx <= 0 || sy <= 0) { fimc_err("%s: invalid source size\n", __func__); return -EINVAL; } if (tx <= 0 || ty <= 0) { fimc_err("%s: invalid target size\n", __func__); return -EINVAL; } fimc_get_scaler_factor(sx, tx, &sc->pre_hratio, &sc->hfactor); fimc_get_scaler_factor(sy, ty, &sc->pre_vratio, &sc->vfactor); if (sx == sy) { if (sx*10/tx >= 15 && sx*10/tx < 20) { sc->pre_hratio = 2; sc->hfactor = 1; } if (sy*10/ty >= 15 && sy*10/ty < 20) { sc->pre_vratio = 2; sc->vfactor = 1; } } sc->pre_dst_width = sx / sc->pre_hratio; sc->pre_dst_height = sy / sc->pre_vratio; if (pdata->hw_ver >= 0x50) { sc->main_hratio = (sx << 14) / (tx << sc->hfactor); sc->main_vratio = (sy << 14) / (ty << sc->vfactor); } else { sc->main_hratio = (sx << 8) / (tx << sc->hfactor); sc->main_vratio = (sy << 8) / (ty << sc->vfactor); } sc->scaleup_h = (tx >= sx) ? 1 : 0; sc->scaleup_v = (ty >= sy) ? 
1 : 0; return 0; } static int fimc_capture_change_scaler_info(struct fimc_control *ctrl) { struct fimc_scaler *sc = &ctrl->sc; struct v4l2_rect *window = &ctrl->cam->window; int tx, ty, sx, sy; struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev); int rot = 0; if (!ctrl->cam->use_isp) { sx = window->width; sy = window->height; } else { sx = ctrl->is.zoom_in_width; sy = ctrl->is.zoom_in_height; } sc->real_width = sx; sc->real_height = sy; rot = fimc_mapping_rot_flip(ctrl->cap->rotate, ctrl->cap->flip); if (rot & FIMC_ROT) { tx = ctrl->cap->fmt.height; ty = ctrl->cap->fmt.width; } else { tx = ctrl->cap->fmt.width; ty = ctrl->cap->fmt.height; } fimc_dbg("%s: CamOut (%d, %d), TargetOut (%d, %d)\n", __func__, sx, sy, tx, ty); if (sx <= 0 || sy <= 0) { fimc_err("%s: invalid source size\n", __func__); return -EINVAL; } if (tx <= 0 || ty <= 0) { fimc_err("%s: invalid target size\n", __func__); return -EINVAL; } fimc_get_scaler_factor(sx, tx, &sc->pre_hratio, &sc->hfactor); fimc_get_scaler_factor(sy, ty, &sc->pre_vratio, &sc->vfactor); sc->pre_dst_width = sx / sc->pre_hratio; sc->pre_dst_height = sy / sc->pre_vratio; if (pdata->hw_ver >= 0x50) { sc->main_hratio = (sx << 14) / (tx << sc->hfactor); sc->main_vratio = (sy << 14) / (ty << sc->vfactor); } else { sc->main_hratio = (sx << 8) / (tx << sc->hfactor); sc->main_vratio = (sy << 8) / (ty << sc->vfactor); } sc->scaleup_h = (tx >= sx) ? 1 : 0; sc->scaleup_v = (ty >= sy) ? 1 : 0; return 0; } int fimc_start_zoom_capture(struct fimc_control *ctrl) { fimc_dbg("%s\n", __func__); fimc_hwset_start_scaler(ctrl); fimc_hwset_enable_capture(ctrl, ctrl->sc.bypass); fimc_hwset_disable_frame_end_irq(ctrl); return 0; } int fimc_stop_zoom_capture(struct fimc_control *ctrl) { fimc_dbg("%s\n", __func__); if (!ctrl->cam) { fimc_err("%s: No capture device.\n", __func__); return -ENODEV; } if (!ctrl->cap) { fimc_err("%s: No cappure format.\n", __func__); return -ENODEV; } if (ctrl->cap->lastirq) { fimc_hwset_enable_lastirq(ctrl); fimc_hwset_disable_capture(ctrl); fimc_hwset_disable_lastirq(ctrl); } else { fimc_hwset_disable_capture(ctrl); fimc_hwset_enable_frame_end_irq(ctrl); } fimc_hwset_stop_scaler(ctrl); return 0; } static int fimc_add_inqueue(struct fimc_control *ctrl, int i) { struct fimc_capinfo *cap = ctrl->cap; struct fimc_buf_set *tmp_buf; struct list_head *count; /* PINGPONG_2ADDR_MODE Only */ list_for_each(count, &cap->inq) { tmp_buf = list_entry(count, struct fimc_buf_set, list); /* skip list_add_tail if already buffer is in cap->inq list*/ if (tmp_buf->id == i) return 0; } list_add_tail(&cap->bufs[i].list, &cap->inq); return 0; } static int fimc_add_outqueue(struct fimc_control *ctrl, int i) { struct fimc_capinfo *cap = ctrl->cap; struct fimc_buf_set *buf; unsigned int mask = 0x2; /* PINGPONG_2ADDR_MODE Only */ /* pair_buf_index stands for pair index of i. 
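	   Computed as (i ^ mask) with mask = 0x2, so each of FIMC's four
	   hardware buffer slots is paired with the one two positions away: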
(0<->2) (1<->3) */ int pair_buf_index = (i^mask); /* FIMC have 4 h/w registers */ if (i < 0 || i >= FIMC_PHYBUFS) { fimc_err("%s: invalid queue index : %d\n", __func__, i); return -ENOENT; } if (list_empty(&cap->inq)) return -ENOENT; buf = list_first_entry(&cap->inq, struct fimc_buf_set, list); /* pair index buffer should be allocated first */ cap->outq[pair_buf_index] = buf->id; fimc_hwset_output_address(ctrl, buf, pair_buf_index); cap->outq[i] = buf->id; fimc_hwset_output_address(ctrl, buf, i); list_del(&buf->list); return 0; } int fimc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; int ret = 0; fimc_dbg("%s\n", __func__); /* WriteBack doesn't have subdev_call */ if ((ctrl->cam->id == CAMERA_WB) || (ctrl->cam->id == CAMERA_WB_B)) return 0; mutex_lock(&ctrl->v4l2_lock); ret = v4l2_subdev_call(ctrl->cam->sd, video, g_parm, a); mutex_unlock(&ctrl->v4l2_lock); return ret; } int fimc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev); int ret = 0; int new_fps = a->parm.capture.timeperframe.denominator / a->parm.capture.timeperframe.numerator; fimc_info2("%s fimc%d, %d\n", __func__, ctrl->id, new_fps); /* WriteBack doesn't have subdev_call */ if ((ctrl->cam->id == CAMERA_WB) || (ctrl->cam->id == CAMERA_WB_B)) return 0; mutex_lock(&ctrl->v4l2_lock); if (ctrl->cam->sd && fimc_cam_use) ret = v4l2_subdev_call(ctrl->cam->sd, video, s_parm, a); else if (ctrl->cam->use_isp) ret = v4l2_subdev_call(ctrl->is.sd, video, s_parm, a); mutex_unlock(&ctrl->v4l2_lock); return ret; } /* Enumerate controls */ int fimc_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qc) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; int i, ret; fimc_dbg("%s\n", __func__); for (i = 0; i < ARRAY_SIZE(fimc_controls); i++) { if (fimc_controls[i].id == qc->id) { memcpy(qc, &fimc_controls[i], sizeof(struct v4l2_queryctrl)); return 0; } } mutex_lock(&ctrl->v4l2_lock); ret = v4l2_subdev_call(ctrl->cam->sd, core, queryctrl, qc); mutex_unlock(&ctrl->v4l2_lock); return ret; } /* Menu control items */ int fimc_querymenu(struct file *file, void *fh, struct v4l2_querymenu *qm) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; int ret = 0; fimc_dbg("%s\n", __func__); mutex_lock(&ctrl->v4l2_lock); ret = v4l2_subdev_call(ctrl->cam->sd, core, querymenu, qm); mutex_unlock(&ctrl->v4l2_lock); return ret; } int fimc_enum_input(struct file *file, void *fh, struct v4l2_input *inp) { struct fimc_global *fimc = get_fimc_dev(); struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; fimc_dbg("%s: index %d\n", __func__, inp->index); if (inp->index >= FIMC_MAXCAMS) { fimc_err("%s: invalid input index, received = %d\n", __func__, inp->index); return -EINVAL; } if (!fimc->camera_isvalid[inp->index]) return -EINVAL; mutex_lock(&ctrl->v4l2_lock); if (fimc->camera[inp->index]->use_isp && !(fimc->camera[inp->index]->info)) strcpy(inp->name, "ISP Camera"); else strcpy(inp->name, fimc->camera[inp->index]->info->type); inp->type = V4L2_INPUT_TYPE_CAMERA; mutex_unlock(&ctrl->v4l2_lock); return 0; } int fimc_g_input(struct file *file, void *fh, unsigned int *i) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; struct fimc_global *fimc = get_fimc_dev(); /* In case of isueing g_input before s_input */ if (!ctrl->cam) { fimc_err("no camera device selected yet. 
do VIDIOC_S_INPUT first\n"); return -ENODEV; } mutex_lock(&ctrl->v4l2_lock); *i = (unsigned int) fimc->active_camera; mutex_unlock(&ctrl->v4l2_lock); fimc_dbg("%s: index %d\n", __func__, *i); return 0; } int fimc_release_subdev(struct fimc_control *ctrl) { struct fimc_global *fimc = get_fimc_dev(); struct i2c_client *client; struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev); int ret; if (ctrl->cam->sd && fimc_cam_use) { fimc_dbg("%s called\n", __func__); /* WriteBack doesn't need clock setting */ if ((ctrl->cam->id == CAMERA_WB) || (ctrl->cam->id == CAMERA_WB_B)) { ctrl->cam->initialized = 0; ctrl->cam = NULL; fimc->active_camera = -1; return 0; } client = v4l2_get_subdevdata(ctrl->cam->sd); i2c_unregister_device(client); ctrl->cam->sd = NULL; if (ctrl->cam->cam_power) ctrl->cam->cam_power(0); /* shutdown the MCLK */ if (fimc->mclk_status == CAM_MCLK_ON) { clk_disable(ctrl->cam->clk); fimc->mclk_status = CAM_MCLK_OFF; } ctrl->cam->initialized = 0; ctrl->cam = NULL; fimc->active_camera = -1; } if (ctrl->flite_sd && fimc_cam_use) { ret = v4l2_subdev_call(ctrl->flite_sd, core, s_power, 0); if (ret) fimc_err("s_power failed: %d", ret); ctrl->flite_sd = NULL; } return 0; } static int fimc_configure_subdev(struct fimc_control *ctrl) { struct i2c_adapter *i2c_adap; struct i2c_board_info *i2c_info; struct v4l2_subdev *sd; unsigned short addr; char *name; int ret = 0; i2c_adap = i2c_get_adapter(ctrl->cam->i2c_busnum); if (!i2c_adap) { fimc_err("subdev i2c_adapter missing-skip registration\n"); return -ENODEV; } i2c_info = ctrl->cam->info; if (!i2c_info) { fimc_err("%s: subdev i2c board info missing\n", __func__); return -ENODEV; } name = i2c_info->type; if (!name) { fimc_err("subdev i2c driver name missing-skip registration\n"); return -ENODEV; } addr = i2c_info->addr; if (!addr) { fimc_err("subdev i2c address missing-skip registration\n"); return -ENODEV; } /* * NOTE: first time subdev being registered, * s_config is called and try to initialize subdev device * but in this point, we are not giving MCLK and power to subdev * so nothing happens but pass platform data through */ sd = v4l2_i2c_new_subdev_board(&ctrl->v4l2_dev, i2c_adap, i2c_info, &addr); if (!sd) { fimc_err("%s: v4l2 subdev board registering failed\n", __func__); } /* Assign subdev to proper camera device pointer */ ctrl->cam->sd = sd; if (!ctrl->cam->initialized) { ret = fimc_init_camera(ctrl); if (ret < 0) { fimc_err("%s: fail to initialize subdev\n", __func__); return ret; } } return 0; } static int flite_register_callback(struct device *dev, void *p) { struct v4l2_subdev **sd_list = p; struct v4l2_subdev *sd = NULL; sd = dev_get_drvdata(dev); if (sd) { struct platform_device *pdev = v4l2_get_subdev_hostdata(sd); *(sd_list + pdev->id) = sd; } return 0; /* non-zero value stops iteration */ } static struct v4l2_subdev *exynos_flite_get_subdev(int id) { const char *module_name = "exynos-fimc-lite"; struct device_driver *drv; struct v4l2_subdev *sd[FLITE_MAX_NUM] = {NULL,}; int ret; drv = driver_find(module_name, &platform_bus_type); if (!drv) { request_module(module_name); drv = driver_find(module_name, &platform_bus_type); } if (!drv) return ERR_PTR(-ENODEV); ret = driver_for_each_device(drv, NULL, &sd[0], flite_register_callback); put_driver(drv); return ret ? 
NULL : sd[id]; } int fimc_subdev_attatch(struct fimc_control *ctrl) { int ret = 0; struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev); ctrl->flite_sd = exynos_flite_get_subdev(ctrl->cam->flite_id); if (IS_ERR_OR_NULL(ctrl->flite_sd)) { ctrl->flite_sd = NULL; return PTR_ERR(ctrl->flite_sd); } else { if (fimc_cam_use) { ret = v4l2_subdev_call(ctrl->flite_sd, core, s_power, 1); if (ret) fimc_err("s_power failed: %d", ret); } } return 0; } static int fimc_is_register_callback(struct device *dev, void *p) { struct v4l2_subdev **sd = p; *sd = dev_get_drvdata(dev); if (!*sd) return -EINVAL; return 0; /* non-zero value stops iteration */ } int fimc_is_release_subdev(struct fimc_control *ctrl) { int ret; struct fimc_global *fimc = get_fimc_dev(); struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev); if (ctrl->is.sd && ctrl->cam && fimc_cam_use) { if (ctrl->cam->cam_power) ctrl->cam->cam_power(0); /* shutdown the MCLK */ if (fimc->mclk_status == CAM_MCLK_ON) { clk_disable(ctrl->cam->clk); fimc->mclk_status = CAM_MCLK_OFF; } ret = v4l2_subdev_call(ctrl->is.sd, core, s_power, 0); if (ret < 0) { fimc_dbg("FIMC-IS init failed"); return -ENODEV; } v4l2_device_unregister_subdev(ctrl->is.sd); ctrl->is.sd = NULL; ctrl->cam->initialized = 0; ctrl->cam = NULL; fimc->active_camera = -1; } else if (ctrl->is.sd && ctrl->cam) { v4l2_device_unregister_subdev(ctrl->is.sd); ctrl->is.sd = NULL; ctrl->cam->initialized = 0; ctrl->cam = NULL; fimc->active_camera = -1; } return 0; } static struct v4l2_subdev *fimc_is_get_subdev(int id) { const char *module_name = "exynos4-fimc-is"; struct device_driver *drv; struct v4l2_subdev *sd = NULL; int ret; drv = driver_find(module_name, &platform_bus_type); if (!drv) { request_module(module_name); drv = driver_find(module_name, &platform_bus_type); } if (!drv) return ERR_PTR(-ENODEV); ret = driver_for_each_device(drv, NULL, &sd, fimc_is_register_callback); put_driver(drv); return ret ? NULL : sd; } static int fimc_is_init_cam(struct fimc_control *ctrl) { struct fimc_global *fimc = get_fimc_dev(); struct s3c_platform_camera *cam; int ret = 0; #if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME)) struct platform_device *pdev = to_platform_device(ctrl->dev); #endif cam = ctrl->cam; /* Do noting if already initialized */ if (ctrl->cam->initialized) return 0; #if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME)) if (ctrl->power_status == FIMC_POWER_OFF) pm_runtime_get_sync(&pdev->dev); #endif /* set rate for mclk */ if ((clk_get_rate(cam->clk)) && (fimc->mclk_status == CAM_MCLK_OFF)) { clk_set_rate(cam->clk, cam->clk_rate); clk_enable(cam->clk); fimc->mclk_status = CAM_MCLK_ON; fimc_info1("clock for camera (FIMC-IS): %d\n", cam->clk_rate); } /* enable camera power if needed */ if (cam->cam_power) { ret = cam->cam_power(1); if (unlikely(ret < 0)) fimc_err("\nfail to power on\n"); } cam->initialized = 1; return ret; } int fimc_s_input(struct file *file, void *fh, unsigned int i) { struct fimc_global *fimc = get_fimc_dev(); struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev); int ret = 0; fimc_dbg("%s: index %d\n", __func__, i); if (i >= FIMC_MAXCAMS) { fimc_err("%s: invalid input index\n", __func__); return -EINVAL; } if (!fimc->camera_isvalid[i]) return -EINVAL; if (fimc->camera[i]->sd && fimc_cam_use) { fimc_err("%s: Camera already in use.\n", __func__); return -EBUSY; } mutex_lock(&ctrl->v4l2_lock); /* If ctrl->cam is not NULL, there is one subdev already registered. 
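	 * (i.e. an earlier VIDIOC_S_INPUT bound a different camera index.)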
* We need to unregister that subdev first. */ if (i != fimc->active_camera) { fimc_info1("\n\nfimc_s_input activating subdev\n"); if (ctrl->cam && (ctrl->cam->sd || ctrl->flite_sd)) fimc_release_subdev(ctrl); else if (ctrl->is.sd) fimc_is_release_subdev(ctrl); ctrl->cam = fimc->camera[i]; if ((ctrl->cam->id != CAMERA_WB) && (ctrl->cam->id != CAMERA_WB_B) && (!ctrl->cam->use_isp) && fimc_cam_use) { ret = fimc_configure_subdev(ctrl); if (ret < 0) { mutex_unlock(&ctrl->v4l2_lock); fimc_err("%s: Could not register camera" \ " sensor with V4L2.\n", __func__); return -ENODEV; } } fimc->active_camera = i; fimc_info2("fimc_s_input activated subdev = %d\n", i); } if (!fimc_cam_use) { if (i == fimc->active_camera) { ctrl->cam = fimc->camera[i]; fimc_info2("fimc_s_input activating subdev FIMC%d\n", ctrl->id); } else { mutex_unlock(&ctrl->v4l2_lock); return -EINVAL; } } if (ctrl->cam->use_isp) { /* fimc-lite attatch */ ret = fimc_subdev_attatch(ctrl); if (ret) { fimc_err("subdev_attatch failed\n"); mutex_unlock(&ctrl->v4l2_lock); return -ENODEV; } /* fimc-is attatch */ ctrl->is.sd = fimc_is_get_subdev(i); if (IS_ERR_OR_NULL(ctrl->is.sd)) { fimc_err("fimc-is subdev_attatch failed\n"); mutex_unlock(&ctrl->v4l2_lock); return -ENODEV; } ctrl->is.fmt.width = ctrl->cam->width; ctrl->is.fmt.height = ctrl->cam->height; ctrl->is.frame_count = 0; if (fimc_cam_use) { ret = fimc_is_init_cam(ctrl); if (ret < 0) { fimc_dbg("FIMC-IS init clock failed"); mutex_unlock(&ctrl->v4l2_lock); return -ENODEV; } ret = v4l2_subdev_call(ctrl->is.sd, core, s_power, 1); if (ret < 0) { fimc_dbg("FIMC-IS init failed"); mutex_unlock(&ctrl->v4l2_lock); return -ENODEV; } ret = v4l2_subdev_call(ctrl->is.sd, core, load_fw); if (ret < 0) { fimc_dbg("FIMC-IS init failed"); mutex_unlock(&ctrl->v4l2_lock); return -ENODEV; } ret = v4l2_subdev_call(ctrl->is.sd, core, init, ctrl->cam->sensor_index); if (ret < 0) { fimc_dbg("FIMC-IS init failed"); mutex_unlock(&ctrl->v4l2_lock); return -ENODEV; } } } mutex_unlock(&ctrl->v4l2_lock); return 0; } int fimc_enum_fmt_vid_capture(struct file *file, void *fh, struct v4l2_fmtdesc *f) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; int i = f->index; fimc_dbg("%s\n", __func__); mutex_lock(&ctrl->v4l2_lock); memset(f, 0, sizeof(*f)); memcpy(f, &capture_fmts[i], sizeof(*f)); mutex_unlock(&ctrl->v4l2_lock); return 0; } int fimc_g_fmt_vid_capture(struct file *file, void *fh, struct v4l2_format *f) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; fimc_dbg("%s\n", __func__); if (!ctrl->cap) { fimc_err("%s: no capture device info\n", __func__); return -EINVAL; } mutex_lock(&ctrl->v4l2_lock); memset(&f->fmt.pix, 0, sizeof(f->fmt.pix)); memcpy(&f->fmt.pix, &ctrl->cap->fmt, sizeof(f->fmt.pix)); mutex_unlock(&ctrl->v4l2_lock); return 0; } /* * Check for whether the requested format * can be streamed out from FIMC * depends on FIMC node */ static int fimc_fmt_avail(struct fimc_control *ctrl, struct v4l2_pix_format *f) { int i; /* * TODO: check for which FIMC is used. 
* Available fmt should be varied for each FIMC */ for (i = 0; i < ARRAY_SIZE(capture_fmts); i++) { if (capture_fmts[i].pixelformat == f->pixelformat) return 0; } fimc_info1("Not supported pixelformat requested\n"); return -1; } /* * figures out the depth of requested format */ static int fimc_fmt_depth(struct fimc_control *ctrl, struct v4l2_pix_format *f) { int err, depth = 0; /* First check for available format or not */ err = fimc_fmt_avail(ctrl, f); if (err < 0) return -1; /* handles only supported pixelformats */ switch (f->pixelformat) { case V4L2_PIX_FMT_RGB32: depth = 32; fimc_dbg("32bpp\n"); break; case V4L2_PIX_FMT_RGB565: case V4L2_PIX_FMT_YUYV: case V4L2_PIX_FMT_UYVY: case V4L2_PIX_FMT_VYUY: case V4L2_PIX_FMT_YVYU: case V4L2_PIX_FMT_YUV422P: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: depth = 16; fimc_dbg("16bpp\n"); break; case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV12T: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_YUV420: case V4L2_PIX_FMT_YVU420: depth = 12; fimc_dbg("12bpp\n"); break; case V4L2_PIX_FMT_JPEG: depth = -1; fimc_dbg("Compressed format.\n"); break; default: fimc_dbg("why am I here?\n"); break; } return depth; } int fimc_s_fmt_vid_private(struct file *file, void *fh, struct v4l2_format *f) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev); struct v4l2_mbus_framefmt *mbus_fmt; int ret = 0; fimc_dbg("%s\n", __func__); if (ctrl->cam->sd) { struct v4l2_pix_format *pix = &f->fmt.pix; int depth; fimc_info1("%s %d:\n", __func__, __LINE__); mbus_fmt = &ctrl->cap->mbus_fmt; mbus_fmt->width = pix->width; mbus_fmt->height = pix->height; depth = fimc_fmt_depth(ctrl, pix); if (depth == 0) { fimc_err("%s: Invalid pixel format\n", __func__); return -EINVAL; } else if (depth < 0) { /* JPEG */ mbus_fmt->code = V4L2_MBUS_FMT_JPEG_1X8; mbus_fmt->colorspace = V4L2_COLORSPACE_JPEG; } else { mbus_fmt->code = V4L2_MBUS_FMT_VYUY8_2X8; } if (fimc_cam_use) { ret = v4l2_subdev_call(ctrl->cam->sd, video, s_mbus_fmt, mbus_fmt); if (ret) { fimc_err("%s: fail to s_mbus_fmt\n", __func__); return ret; } } return 0; } else { mbus_fmt = kzalloc(sizeof(*mbus_fmt), GFP_KERNEL); if (!mbus_fmt) { fimc_err("%s: no memory for " "mbus_fmt\n", __func__); return -ENOMEM; } ctrl->is.fmt.width = f->fmt.pix.width; ctrl->is.fmt.height = f->fmt.pix.height; ctrl->is.fmt.pixelformat = f->fmt.pix.pixelformat; mbus_fmt->width = f->fmt.pix.width; mbus_fmt->height = f->fmt.pix.height; mbus_fmt->code = V4L2_MBUS_FMT_YUYV8_2X8; /*dummy*/ mbus_fmt->field = f->fmt.pix.field; mbus_fmt->colorspace = V4L2_COLORSPACE_SRGB; if (fimc_cam_use) ret = v4l2_subdev_call(ctrl->is.sd, video, s_mbus_fmt, mbus_fmt); kfree(mbus_fmt); return ret; } return -EINVAL; } int fimc_s_fmt_vid_capture(struct file *file, void *fh, struct v4l2_format *f) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; struct fimc_capinfo *cap = ctrl->cap; #if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME)) struct platform_device *pdev = to_platform_device(ctrl->dev); #endif struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev); int ret = 0; int depth; struct v4l2_control is_ctrl; is_ctrl.id = 0; is_ctrl.value = 0; fimc_dbg("%s\n", __func__); /* * The first time alloc for struct cap_info, and will be * released at the file close. * Anyone has better idea to do this? 
*/ if (!cap) { cap = kzalloc(sizeof(*cap), GFP_KERNEL); if (!cap) { fimc_err("%s: no memory for " "capture device info\n", __func__); return -ENOMEM; } /* assign to ctrl */ ctrl->cap = cap; #if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME)) if (ctrl->power_status == FIMC_POWER_OFF) pm_runtime_get_sync(&pdev->dev); #endif } else { memset(cap, 0, sizeof(*cap)); } mutex_lock(&ctrl->v4l2_lock); memset(&cap->fmt, 0, sizeof(cap->fmt)); memcpy(&cap->fmt, &f->fmt.pix, sizeof(cap->fmt)); /* * Note that expecting format only can be with * available output format from FIMC * Following items should be handled in driver * bytesperline = width * depth / 8 * sizeimage = bytesperline * height */ /* This function may return 0 or -1 in case of error, * hence need to check here. */ depth = fimc_fmt_depth(ctrl, &cap->fmt); if (depth == 0) { mutex_unlock(&ctrl->v4l2_lock); fimc_err("%s: Invalid pixel format\n", __func__); return -EINVAL; } else if (depth < 0) { /* * When the pixelformat is JPEG, * the application is requesting for data * in JPEG compressed format */ cap->fmt.colorspace = V4L2_COLORSPACE_JPEG; cap->fmt.priv = V4L2_PIX_FMT_MODE_CAPTURE; } else { cap->fmt.bytesperline = (cap->fmt.width * depth) >> 3; cap->fmt.sizeimage = (cap->fmt.bytesperline * cap->fmt.height); cap->fmt.priv = V4L2_PIX_FMT_MODE_PREVIEW; } if (cap->fmt.colorspace == V4L2_COLORSPACE_JPEG) { ctrl->sc.bypass = 1; cap->lastirq = 0; fimc_info1("fimc_s_fmt_vid_capture V4L2_COLORSPACE_JPEG\n"); } else { ctrl->sc.bypass = 0; cap->lastirq = 0; } fimc_info1("s_fmt width = %d, height = %d\n", \ cap->fmt.width, cap->fmt.height); /* WriteBack doesn't have subdev_call */ if (ctrl->cam->id == CAMERA_WB || ctrl->cam->id == CAMERA_WB_B) { mutex_unlock(&ctrl->v4l2_lock); return 0; } if (ctrl->cam->use_isp) { ctrl->is.mbus_fmt.code = V4L2_MBUS_FMT_SGRBG10_1X10; is_ctrl.id = V4L2_CID_IS_GET_SENSOR_WIDTH; is_ctrl.value = 0; v4l2_subdev_call(ctrl->is.sd, core, g_ctrl, &is_ctrl); ctrl->is.fmt.width = ctrl->is.mbus_fmt.width = is_ctrl.value; is_ctrl.id = V4L2_CID_IS_GET_SENSOR_HEIGHT; is_ctrl.value = 0; v4l2_subdev_call(ctrl->is.sd, core, g_ctrl, &is_ctrl); ctrl->is.fmt.height = ctrl->is.mbus_fmt.height = is_ctrl.value; /* default offset values */ ctrl->is.offset_x = 16; ctrl->is.offset_y = 12; } fimc_hwset_reset(ctrl); mutex_unlock(&ctrl->v4l2_lock); fimc_dbg("%s -- FIMC%d\n", __func__, ctrl->id); return ret; } int fimc_try_fmt_vid_capture(struct file *file, void *fh, struct v4l2_format *f) { /* Not implement */ return -ENOTTY; } static int fimc_alloc_buffers(struct fimc_control *ctrl, int plane, int size, int align, int bpp, int use_paddingbuf, int pad_size) { struct fimc_capinfo *cap = ctrl->cap; int i, j; int plane_length[4] = {0, }; switch (plane) { case 1: if (align) { plane_length[0] = PAGE_ALIGN((size*bpp) >> 3); plane_length[1] = 0; plane_length[2] = 0; } else { plane_length[0] = (size*bpp) >> 3; plane_length[1] = 0; plane_length[2] = 0; } break; /* In case of 2, only NV12 and NV12T is supported. 
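	 * The first plane carries the 8 luma bits per pixel and the second
	 * plane the remaining (bpp - 8) bits of interleaved chroma, which is
	 * what the plane_length[] arithmetic below encodes.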
*/ case 2: if (align) { plane_length[0] = PAGE_ALIGN((size*8) >> 3); plane_length[1] = PAGE_ALIGN((size*(bpp-8)) >> 3); plane_length[2] = 0; fimc_info2("plane_length[0] = %d, plane_length[1] = %d\n" \ , plane_length[0], plane_length[1]); } else { plane_length[0] = ((size*8) >> 3); plane_length[1] = ((size*(bpp-8)) >> 3); plane_length[2] = 0; fimc_info2("plane_length[0] = %d, plane_length[1] = %d\n" \ , plane_length[0], plane_length[1]); } break; /* In case of 3 * YUV422 : 8 / 4 / 4 (bits) * YUV420 : 8 / 2 / 2 (bits) * 3rd plane have to consider page align for mmap */ case 3: if (align) { plane_length[0] = (size*8) >> 3; plane_length[1] = (size*((bpp-8)/2)) >> 3; plane_length[2] = PAGE_ALIGN((size*bpp)>>3) - plane_length[0] - plane_length[1]; } else { plane_length[0] = (size*8) >> 3; plane_length[1] = (size*((bpp-8)/2)) >> 3; plane_length[2] = ((size*bpp)>>3) - plane_length[0] - plane_length[1]; } break; default: fimc_err("impossible!\n"); return -ENOMEM; } if (use_paddingbuf) { plane_length[plane] = pad_size; cap->pktdata_plane = plane; } else plane_length[plane] = 0; for (i = 0; i < cap->nr_bufs; i++) { for (j = 0; j < plane; j++) { cap->bufs[i].length[j] = plane_length[j]; fimc_dma_alloc(ctrl, &cap->bufs[i], j, align); if (!cap->bufs[i].base[j]) goto err_alloc; } if (use_paddingbuf) { cap->bufs[i].length[plane] = plane_length[plane]; fimc_dma_alloc(ctrl, &cap->bufs[i], plane, align); cap->bufs[i].vaddr_pktdata = phys_to_virt(cap->bufs[i].base[plane]); /* printk(KERN_INFO "pktdata address = 0x%x, 0x%x\n" ,cap->bufs[i].base[1], cap->bufs[i].vaddr_pktdata ); */ if (!cap->bufs[i].base[plane]) goto err_alloc; } cap->bufs[i].state = VIDEOBUF_PREPARED; } return 0; err_alloc: for (i = 0; i < cap->nr_bufs; i++) { for (j = 0; j < plane; j++) { if (cap->bufs[i].base[j]) fimc_dma_free(ctrl, &cap->bufs[i], j); } if (use_paddingbuf) { if (cap->bufs[i].base[plane]) fimc_dma_free(ctrl, &cap->bufs[i], plane); } memset(&cap->bufs[i], 0, sizeof(cap->bufs[i])); } return -ENOMEM; } static void fimc_free_buffers(struct fimc_control *ctrl) { struct fimc_capinfo *cap; int i; if (ctrl && ctrl->cap) cap = ctrl->cap; else return; for (i = 0; i < FIMC_PHYBUFS; i++) { memset(&cap->bufs[i], 0, sizeof(cap->bufs[i])); cap->bufs[i].state = VIDEOBUF_NEEDS_INIT; } ctrl->mem.curr = ctrl->mem.base; } int fimc_reqbufs_capture_mmap(void *fh, struct v4l2_requestbuffers *b) { struct fimc_control *ctrl = fh; struct fimc_capinfo *cap = ctrl->cap; struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev); #if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME)) struct platform_device *pdev = to_platform_device(ctrl->dev); #endif int ret = 0, i; int bpp = 0; int size = 0; if (!cap) { fimc_err("%s: no capture device info\n", __func__); return -ENODEV; } mutex_lock(&ctrl->v4l2_lock); /* A count value of zero frees all buffers */ if ((b->count == 0) || (b->count >= FIMC_CAPBUFS)) { /* aborting or finishing any DMA in progress */ if (ctrl->status == FIMC_STREAMON) fimc_streamoff_capture(fh); for (i = 0; i < FIMC_CAPBUFS; i++) { fimc_dma_free(ctrl, &ctrl->cap->bufs[i], 0); fimc_dma_free(ctrl, &ctrl->cap->bufs[i], 1); fimc_dma_free(ctrl, &ctrl->cap->bufs[i], 2); } mutex_unlock(&ctrl->v4l2_lock); return 0; } /* free previous buffers */ if ((cap->nr_bufs >= 0) && (cap->nr_bufs < FIMC_CAPBUFS)) { fimc_info1("%s : remained previous buffer count is %d\n", __func__, cap->nr_bufs); for (i = 0; i < cap->nr_bufs; i++) { fimc_dma_free(ctrl, &cap->bufs[i], 0); fimc_dma_free(ctrl, &cap->bufs[i], 1); fimc_dma_free(ctrl, &cap->bufs[i], 2); } 
} fimc_free_buffers(ctrl); cap->nr_bufs = b->count; if (pdata->hw_ver >= 0x51) { #if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME)) if (ctrl->power_status == FIMC_POWER_OFF) { pm_runtime_get_sync(&pdev->dev); } #endif fimc_hw_reset_output_buf_sequence(ctrl); for (i = 0; i < cap->nr_bufs; i++) { fimc_hwset_output_buf_sequence(ctrl, i, 1); cap->bufs[i].id = i; cap->bufs[i].state = VIDEOBUF_NEEDS_INIT; /* initialize list */ INIT_LIST_HEAD(&cap->bufs[i].list); } fimc_info1("%s: requested %d buffers\n", __func__, b->count); fimc_info1("%s: sequence[%d]\n", __func__, fimc_hwget_output_buf_sequence(ctrl)); INIT_LIST_HEAD(&cap->outgoing_q); } if (pdata->hw_ver < 0x51) { INIT_LIST_HEAD(&cap->inq); for (i = 0; i < cap->nr_bufs; i++) { cap->bufs[i].id = i; cap->bufs[i].state = VIDEOBUF_NEEDS_INIT; /* initialize list */ INIT_LIST_HEAD(&cap->bufs[i].list); } } if (cap->pktdata_enable) cap->pktdata_size = 0x1000; bpp = fimc_fmt_depth(ctrl, &cap->fmt); switch (cap->fmt.pixelformat) { case V4L2_PIX_FMT_RGB32: /* fall through */ case V4L2_PIX_FMT_RGB565: /* fall through */ case V4L2_PIX_FMT_YUYV: /* fall through */ case V4L2_PIX_FMT_UYVY: /* fall through */ case V4L2_PIX_FMT_VYUY: /* fall through */ case V4L2_PIX_FMT_YVYU: /* fall through */ fimc_info1("%s : 1plane\n", __func__); ret = fimc_alloc_buffers(ctrl, 1, cap->fmt.width * cap->fmt.height, SZ_4K, bpp, cap->pktdata_enable, cap->pktdata_size); break; case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: /* fall through */ case V4L2_PIX_FMT_NV61: /* fall through */ fimc_info1("%s : 2plane for NV21 w %d h %d\n", __func__, cap->fmt.width, cap->fmt.height); ret = fimc_alloc_buffers(ctrl, 2, cap->fmt.width * cap->fmt.height, 0, bpp, cap->pktdata_enable, cap->pktdata_size); break; case V4L2_PIX_FMT_NV12: fimc_info1("%s : 2plane for NV12\n", __func__); ret = fimc_alloc_buffers(ctrl, 2, cap->fmt.width * cap->fmt.height, SZ_64K, bpp, cap->pktdata_enable, cap->pktdata_size); break; case V4L2_PIX_FMT_NV12T: fimc_info1("%s : 2plane for NV12T\n", __func__); ret = fimc_alloc_buffers(ctrl, 2, ALIGN(cap->fmt.width, 128) * ALIGN(cap->fmt.height, 32), SZ_64K, bpp, cap->pktdata_enable, cap->pktdata_size); break; case V4L2_PIX_FMT_YUV422P: /* fall through */ case V4L2_PIX_FMT_YUV420: case V4L2_PIX_FMT_YVU420: fimc_info1("%s : 3plane\n", __func__); ret = fimc_alloc_buffers(ctrl, 3, cap->fmt.width * cap->fmt.height, 0, bpp, cap->pktdata_enable, cap->pktdata_size); break; case V4L2_PIX_FMT_JPEG: fimc_info1("%s : JPEG 1plane\n", __func__); size = fimc_camera_get_jpeg_memsize(ctrl); fimc_info2("%s : JPEG 1plane size = %x\n", __func__, size); ret = fimc_alloc_buffers(ctrl, 1, size, 0, 8, cap->pktdata_enable, cap->pktdata_size); break; default: break; } if (ret) { fimc_err("%s: no memory for capture buffer\n", __func__); mutex_unlock(&ctrl->v4l2_lock); return -ENOMEM; } mutex_unlock(&ctrl->v4l2_lock); return 0; } int fimc_reqbufs_capture_userptr(void *fh, struct v4l2_requestbuffers *b) { struct fimc_control *ctrl = fh; struct fimc_capinfo *cap = ctrl->cap; #if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME)) struct platform_device *pdev = to_platform_device(ctrl->dev); #endif int i; if (!cap) { fimc_err("%s: no capture device info\n", __func__); return -ENODEV; } mutex_lock(&ctrl->v4l2_lock); /* A count value of zero frees all buffers */ if ((b->count == 0) || (b->count >= FIMC_CAPBUFS)) { /* aborting or finishing any DMA in progress */ if (ctrl->status == FIMC_STREAMON) fimc_streamoff_capture(fh); fimc_free_buffers(ctrl); mutex_unlock(&ctrl->v4l2_lock); 
return 0; } /* free previous buffers */ if ((cap->nr_bufs >= 0) && (cap->nr_bufs < FIMC_CAPBUFS)) { fimc_info1("%s: prev buf cnt(%d)\n", __func__, cap->nr_bufs); fimc_free_buffers(ctrl); } cap->nr_bufs = b->count; #if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME)) if (ctrl->power_status == FIMC_POWER_OFF) { pm_runtime_get_sync(&pdev->dev); } #endif fimc_hw_reset_output_buf_sequence(ctrl); for (i = 0; i < cap->nr_bufs; i++) { fimc_hwset_output_buf_sequence(ctrl, i, 1); cap->bufs[i].id = i; cap->bufs[i].state = VIDEOBUF_IDLE; /* initialize list */ INIT_LIST_HEAD(&cap->bufs[i].list); } fimc_info1("%s: requested %d buffers\n", __func__, b->count); fimc_info1("%s: sequence[%d]\n", __func__, fimc_hwget_output_buf_sequence(ctrl)); INIT_LIST_HEAD(&cap->outgoing_q); mutex_unlock(&ctrl->v4l2_lock); return 0; } int fimc_reqbufs_capture(void *fh, struct v4l2_requestbuffers *b) { int ret = 0; if (b->memory == V4L2_MEMORY_MMAP) ret = fimc_reqbufs_capture_mmap(fh, b); else ret = fimc_reqbufs_capture_userptr(fh, b); return ret; } int fimc_querybuf_capture(void *fh, struct v4l2_buffer *b) { struct fimc_control *ctrl = fh; struct fimc_capinfo *cap = ctrl->cap; if (ctrl->status != FIMC_STREAMOFF) { fimc_err("fimc is running\n"); return -EBUSY; } mutex_lock(&ctrl->v4l2_lock); switch (cap->fmt.pixelformat) { case V4L2_PIX_FMT_JPEG: /* fall through */ case V4L2_PIX_FMT_RGB32: /* fall through */ case V4L2_PIX_FMT_RGB565: /* fall through */ case V4L2_PIX_FMT_YUYV: /* fall through */ case V4L2_PIX_FMT_UYVY: /* fall through */ case V4L2_PIX_FMT_VYUY: /* fall through */ case V4L2_PIX_FMT_YVYU: /* fall through */ b->length = cap->bufs[b->index].length[0]; break; case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: /* fall through */ case V4L2_PIX_FMT_NV61: b->length = ctrl->cap->bufs[b->index].length[0] + ctrl->cap->bufs[b->index].length[1]; break; case V4L2_PIX_FMT_NV12: /* fall through */ case V4L2_PIX_FMT_NV12T: b->length = ALIGN(ctrl->cap->bufs[b->index].length[0], SZ_64K) + ALIGN(ctrl->cap->bufs[b->index].length[1], SZ_64K); break; case V4L2_PIX_FMT_YUV422P: /* fall through */ case V4L2_PIX_FMT_YUV420: /* fall through */ case V4L2_PIX_FMT_YVU420: b->length = ctrl->cap->bufs[b->index].length[0] + ctrl->cap->bufs[b->index].length[1] + ctrl->cap->bufs[b->index].length[2]; break; default: b->length = cap->bufs[b->index].length[0]; break; } if (cap->pktdata_enable) b->length += ctrl->cap->bufs[b->index].length[cap->pktdata_plane]; b->m.offset = b->index * PAGE_SIZE; /* memory field should filled V4L2_MEMORY_MMAP */ b->memory = V4L2_MEMORY_MMAP; ctrl->cap->bufs[b->index].state = VIDEOBUF_IDLE; fimc_dbg("%s: %d bytes with offset: %d\n", __func__, b->length, b->m.offset); mutex_unlock(&ctrl->v4l2_lock); return 0; } int fimc_g_ctrl_capture(void *fh, struct v4l2_control *c) { struct fimc_control *ctrl = fh; int ret = 0; fimc_dbg("%s\n", __func__); switch (c->id) { case V4L2_CID_ROTATION: c->value = ctrl->cap->rotate; break; case V4L2_CID_HFLIP: c->value = (ctrl->cap->flip & FIMC_XFLIP) ? 1 : 0; break; case V4L2_CID_VFLIP: c->value = (ctrl->cap->flip & FIMC_YFLIP) ? 
1 : 0; break; case V4L2_CID_CACHEABLE: c->value = ctrl->cap->cacheable; break; default: /* get ctrl supported by subdev */ /* WriteBack doesn't have subdev_call */ if ((ctrl->cam->id == CAMERA_WB) || (ctrl->cam->id == CAMERA_WB_B)) break; if (ctrl->cam->sd) ret = v4l2_subdev_call(ctrl->cam->sd, core, g_ctrl, c); if (ctrl->is.sd) ret = v4l2_subdev_call(ctrl->is.sd, core, g_ctrl, c); break; } return ret; } int fimc_s_ctrl_capture(void *fh, struct v4l2_control *c) { struct fimc_control *ctrl = fh; struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev); int ret = 0; fimc_dbg("%s\n", __func__); if (!ctrl->cam || !ctrl->cap || ((!ctrl->cam->sd) && (!ctrl->is.sd))) { fimc_err("%s: No capture device.\n", __func__); return -ENODEV; } switch (c->id) { case V4L2_CID_ROTATION: ctrl->cap->rotate = c->value; break; case V4L2_CID_HFLIP: if (c->value) ctrl->cap->flip |= FIMC_XFLIP; else ctrl->cap->flip &= ~FIMC_XFLIP; break; case V4L2_CID_VFLIP: if (c->value) ctrl->cap->flip |= FIMC_YFLIP; else ctrl->cap->flip &= ~FIMC_YFLIP; break; case V4L2_CID_PADDR_Y: if (&ctrl->cap->bufs[c->value]) c->value = ctrl->cap->bufs[c->value].base[FIMC_ADDR_Y]; break; case V4L2_CID_PADDR_CB: /* fall through */ case V4L2_CID_PADDR_CBCR: if (&ctrl->cap->bufs[c->value]) c->value = ctrl->cap->bufs[c->value].base[FIMC_ADDR_CB]; break; case V4L2_CID_PADDR_CR: if (&ctrl->cap->bufs[c->value]) c->value = ctrl->cap->bufs[c->value].base[FIMC_ADDR_CR]; break; /* Implementation as per C100 FIMC driver */ case V4L2_CID_STREAM_PAUSE: fimc_hwset_stop_processing(ctrl); break; case V4L2_CID_IMAGE_EFFECT_APPLY: ctrl->fe.ie_on = c->value ? 1 : 0; ctrl->fe.ie_after_sc = 0; ret = fimc_hwset_image_effect(ctrl); break; case V4L2_CID_IMAGE_EFFECT_FN: if (c->value < 0 || c->value > FIMC_EFFECT_FIN_SILHOUETTE) return -EINVAL; ctrl->fe.fin = c->value; ret = 0; break; case V4L2_CID_IMAGE_EFFECT_CB: ctrl->fe.pat_cb = c->value & 0xFF; ret = 0; break; case V4L2_CID_IMAGE_EFFECT_CR: ctrl->fe.pat_cr = c->value & 0xFF; ret = 0; break; case V4L2_CID_IS_LOAD_FW: if (ctrl->cam->use_isp) ret = v4l2_subdev_call(ctrl->is.sd, core, s_power, c->value); break; case V4L2_CID_IS_RESET: if (ctrl->cam->use_isp) ret = v4l2_subdev_call(ctrl->is.sd, core, reset, c->value); break; case V4L2_CID_IS_S_POWER: if (ctrl->cam->use_isp) ret = v4l2_subdev_call(ctrl->is.sd, core, s_power, c->value); break; case V4L2_CID_IS_S_STREAM: if (ctrl->cam->use_isp) ret = v4l2_subdev_call(ctrl->is.sd, video, s_stream, c->value); break; case V4L2_CID_CACHEABLE: ctrl->cap->cacheable = c->value; ret = 0; break; case V4L2_CID_EMBEDDEDDATA_ENABLE: ctrl->cap->pktdata_enable = c->value; ret = 0; break; case V4L2_CID_IS_ZOOM: fimc_is_set_zoom(ctrl, c); break; #ifdef CONFIG_BUSFREQ_OPP case V4L2_CID_CAMERA_BUSFREQ_LOCK: /* lock bus frequency */ dev_lock(ctrl->bus_dev, ctrl->dev, (unsigned long)c->value); break; case V4L2_CID_CAMERA_BUSFREQ_UNLOCK: /* unlock bus frequency */ dev_unlock(ctrl->bus_dev, ctrl->dev); break; #endif default: /* try on subdev */ /* WriteBack doesn't have subdev_call */ if ((ctrl->cam->id == CAMERA_WB) || \ (ctrl->cam->id == CAMERA_WB_B)) break; if (fimc_cam_use) if (ctrl->cam->sd) ret = v4l2_subdev_call(ctrl->cam->sd, core, s_ctrl, c); if (ctrl->is.sd && ctrl->cam->use_isp) ret = v4l2_subdev_call(ctrl->is.sd, core, s_ctrl, c); else ret = 0; break; } return ret; } int fimc_g_ext_ctrls_capture(void *fh, struct v4l2_ext_controls *c) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; int ret = 0; mutex_lock(&ctrl->v4l2_lock); if (ctrl->is.sd) /* try on subdev */ 
ret = v4l2_subdev_call(ctrl->is.sd, core, g_ext_ctrls, c); mutex_unlock(&ctrl->v4l2_lock); return ret; } int fimc_s_ext_ctrls_capture(void *fh, struct v4l2_ext_controls *c) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; int ret = 0; mutex_lock(&ctrl->v4l2_lock); if (ctrl->cam->sd) /* try on subdev */ ret = v4l2_subdev_call(ctrl->cam->sd, core, s_ext_ctrls, c); else if (ctrl->is.sd) ret = v4l2_subdev_call(ctrl->is.sd, core, s_ext_ctrls, c); mutex_unlock(&ctrl->v4l2_lock); return ret; } int fimc_cropcap_capture(void *fh, struct v4l2_cropcap *a) { struct fimc_control *ctrl = fh; struct fimc_capinfo *cap = ctrl->cap; struct fimc_global *fimc = get_fimc_dev(); struct s3c_platform_fimc *pdata; fimc_dbg("%s\n", __func__); if (!ctrl->cam || !ctrl->cam->sd || !ctrl->cap) { fimc_err("%s: No capture device.\n", __func__); return -ENODEV; } mutex_lock(&ctrl->v4l2_lock); pdata = to_fimc_plat(ctrl->dev); if (!ctrl->cam) ctrl->cam = fimc->camera[pdata->default_cam]; if (!cap) { cap = kzalloc(sizeof(*cap), GFP_KERNEL); if (!cap) { fimc_err("%s: no memory for " "capture device info\n", __func__); return -ENOMEM; } /* assign to ctrl */ ctrl->cap = cap; } /* crop limitations */ cap->cropcap.bounds.left = 0; cap->cropcap.bounds.top = 0; cap->cropcap.bounds.width = ctrl->cam->width; cap->cropcap.bounds.height = ctrl->cam->height; /* crop default values */ cap->cropcap.defrect.left = 0; cap->cropcap.defrect.top = 0; cap->cropcap.defrect.width = ctrl->cam->width; cap->cropcap.defrect.height = ctrl->cam->height; a->bounds = cap->cropcap.bounds; a->defrect = cap->cropcap.defrect; mutex_unlock(&ctrl->v4l2_lock); return 0; } int fimc_g_crop_capture(void *fh, struct v4l2_crop *a) { struct fimc_control *ctrl = fh; fimc_dbg("%s\n", __func__); if (!ctrl->cap) { fimc_err("%s: No capture device.\n", __func__); return -ENODEV; } mutex_lock(&ctrl->v4l2_lock); a->c = ctrl->cap->crop; mutex_unlock(&ctrl->v4l2_lock); return 0; } int fimc_s_crop_capture(void *fh, struct v4l2_crop *a) { struct fimc_control *ctrl = fh; fimc_dbg("%s\n", __func__); mutex_lock(&ctrl->v4l2_lock); ctrl->cap->crop = a->c; mutex_unlock(&ctrl->v4l2_lock); return 0; } int fimc_start_capture(struct fimc_control *ctrl) { fimc_dbg("%s\n", __func__); fimc_reset_status_reg(ctrl); if (!ctrl->sc.bypass) fimc_hwset_start_scaler(ctrl); fimc_hwset_enable_capture(ctrl, ctrl->sc.bypass); fimc_hwset_disable_frame_end_irq(ctrl); return 0; } int fimc_stop_capture(struct fimc_control *ctrl) { fimc_dbg("%s\n", __func__); if (!ctrl->cam) { fimc_err("%s: No capture device.\n", __func__); return -ENODEV; } if (!ctrl->cap) { fimc_err("%s: No cappure format.\n", __func__); return -ENODEV; } if (ctrl->cap->lastirq) { fimc_hwset_enable_lastirq(ctrl); fimc_hwset_disable_capture(ctrl); fimc_hwset_disable_lastirq(ctrl); } else { fimc_hwset_disable_capture(ctrl); fimc_hwset_enable_frame_end_irq(ctrl); } fimc_hwset_stop_scaler(ctrl); return 0; } static int fimc_check_capture_source(struct fimc_control *ctrl) { if (!ctrl->cam) return -ENODEV; if (ctrl->cam->sd || ctrl->is.sd || !ctrl->flite_sd) return 0; if (ctrl->cam->id == CAMERA_WB || ctrl->cam->id == CAMERA_WB_B) return 0; return -ENODEV; } static int is_scale_up(struct fimc_control *ctrl) { struct v4l2_mbus_framefmt *mbus_fmt = &ctrl->cap->mbus_fmt; struct v4l2_pix_format *pix = &ctrl->cap->fmt; if (!mbus_fmt->width) { fimc_err("%s: sensor resolution isn't selected.\n", __func__); return -EINVAL; } if (ctrl->cap->rotate == 90 || ctrl->cap->rotate == 270) { if (pix->width > mbus_fmt->height || pix->height > 
mbus_fmt->width) { fimc_err("%s: ScaleUp isn't supported.\n", __func__); return -EINVAL; } } else { if (pix->width > mbus_fmt->width || pix->height > mbus_fmt->height) { fimc_err("%s: ScaleUp isn't supported.\n", __func__); return -EINVAL; } } return 0; } int fimc_streamon_capture(void *fh) { struct fimc_control *ctrl = fh; struct fimc_capinfo *cap = ctrl->cap; struct v4l2_frmsizeenum cam_frmsize; struct v4l2_control is_ctrl; int rot = 0, i; int ret = 0; struct s3c_platform_camera *cam = NULL; struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev); fimc_dbg("%s\n", __func__); cam_frmsize.discrete.width = 0; cam_frmsize.discrete.height = 0; is_ctrl.id = 0; is_ctrl.value = 0; if (!ctrl->cam) { fimc_err("%s: ctrl->cam is null\n", __func__); return -EINVAL; } else { cam = ctrl->cam; } if (fimc_check_capture_source(ctrl)) { fimc_err("%s: No capture device.\n", __func__); return -ENODEV; } if (cam->sd) { if (is_scale_up(ctrl)) 1; // return -EINVAL; /* rapheal 2012.06.27*/ } if (pdata->hw_ver < 0x51) fimc_hw_reset_camera(ctrl); #if (!defined(CONFIG_EXYNOS_DEV_PD) && !defined(CONFIG_PM_RUNTIME)) ctrl->status = FIMC_READY_ON; #endif cap->irq = 0; fimc_hwset_enable_irq(ctrl, 0, 1); if ((cam->id != CAMERA_WB) && (cam->id != CAMERA_WB_B)) { if (fimc_cam_use && cam->sd) { ret = v4l2_subdev_call(cam->sd, video, enum_framesizes, &cam_frmsize); if (ret < 0) { dev_err(ctrl->dev, "%s: enum_framesizes failed\n", __func__); if (ret != -ENOIOCTLCMD) return ret; } else { if (cam_frmsize.discrete.width > 0 && cam_frmsize.discrete.height > 0) { cam->window.left = 0; cam->window.top = 0; cam->width = cam->window.width = cam_frmsize.discrete.width; cam->height = cam->window.height = cam_frmsize.discrete.height; fimc_info2("enum_framesizes width = %d,\ height = %d\n", cam->width, cam->height); } } if (cap->fmt.priv == V4L2_PIX_FMT_MODE_CAPTURE) { ret = v4l2_subdev_call(cam->sd, video, s_stream, 1); if (ret < 0) { dev_err(ctrl->dev, "%s: s_stream failed\n", __func__); return ret; } } if (cam->type == CAM_TYPE_MIPI) { if(cam->id == CAMERA_CSI_C) { s3c_csis_enable_pktdata(CSI_CH_0, cap->pktdata_enable); s3c_csis_start(CSI_CH_0, cam->mipi_lanes, cam->mipi_settle, cam->mipi_align, cam->width, cam->height, cap->fmt.pixelformat); } else { s3c_csis_enable_pktdata(CSI_CH_1, cap->pktdata_enable); s3c_csis_start(CSI_CH_1, cam->mipi_lanes, cam->mipi_settle, cam->mipi_align, cam->width, cam->height, cap->fmt.pixelformat); } } if (cap->fmt.priv != V4L2_PIX_FMT_MODE_CAPTURE) { ret = v4l2_subdev_call(cam->sd, video, s_stream, 1); if (ret < 0) { dev_err(ctrl->dev, "%s: s_stream failed\n", __func__); if (cam->id == CAMERA_CSI_C) s3c_csis_stop(CSI_CH_0); else s3c_csis_stop(CSI_CH_1); return ret; } } } } /* Set FIMD to write back */ if ((cam->id == CAMERA_WB) || (cam->id == CAMERA_WB_B)) { if (cam->id == CAMERA_WB) fimc_hwset_sysreg_camblk_fimd0_wb(ctrl); else fimc_hwset_sysreg_camblk_fimd1_wb(ctrl); ret = s3cfb_direct_ioctl(0, S3CFB_SET_WRITEBACK, 1); if (ret) { fimc_err("failed set writeback\n"); return ret; } } if (ctrl->cam->use_isp) { struct platform_device *pdev = to_platform_device(ctrl->dev); struct clk *pxl_async = NULL; is_ctrl.id = V4L2_CID_IS_GET_SENSOR_OFFSET_X; is_ctrl.value = 0; v4l2_subdev_call(ctrl->is.sd, core, g_ctrl, &is_ctrl); ctrl->is.offset_x = is_ctrl.value; is_ctrl.id = V4L2_CID_IS_GET_SENSOR_OFFSET_Y; is_ctrl.value = 0; v4l2_subdev_call(ctrl->is.sd, core, g_ctrl, &is_ctrl); ctrl->is.offset_y = is_ctrl.value; fimc_dbg("CSI setting width = %d, height = %d\n", ctrl->is.fmt.width + ctrl->is.offset_x, 
ctrl->is.fmt.height + ctrl->is.offset_y); if (ctrl->flite_sd && fimc_cam_use) { ctrl->is.mbus_fmt.width += ctrl->is.offset_x; ctrl->is.mbus_fmt.height += ctrl->is.offset_y; ret = v4l2_subdev_call(ctrl->flite_sd, video, s_mbus_fmt, &ctrl->is.mbus_fmt); } if (cam->id == CAMERA_CSI_C) { s3c_csis_start(CSI_CH_0, cam->mipi_lanes, cam->mipi_settle, cam->mipi_align, ctrl->is.fmt.width + ctrl->is.offset_x, ctrl->is.fmt.height + ctrl->is.offset_y, V4L2_PIX_FMT_SGRBG10); } else if (cam->id == CAMERA_CSI_D) { s3c_csis_start(CSI_CH_1, cam->mipi_lanes, cam->mipi_settle, cam->mipi_align, ctrl->is.fmt.width + ctrl->is.offset_x, ctrl->is.fmt.height + ctrl->is.offset_y, V4L2_PIX_FMT_SGRBG10); } pxl_async = clk_get(&pdev->dev, "pxl_async1"); if (IS_ERR(pxl_async)) { dev_err(&pdev->dev, "failed to get pxl_async\n"); return -ENODEV; } clk_enable(pxl_async); clk_put(pxl_async); fimc_hwset_sysreg_camblk_isp_wb(ctrl); } if (ctrl->flite_sd && fimc_cam_use) v4l2_subdev_call(ctrl->flite_sd, video, s_stream, 1); fimc_hwset_camera_type(ctrl); fimc_hwset_camera_polarity(ctrl); fimc_hwset_enable_lastend(ctrl); if (cap->fmt.pixelformat != V4L2_PIX_FMT_JPEG) { fimc_hwset_camera_source(ctrl); fimc_hwset_camera_offset(ctrl); fimc_capture_scaler_info(ctrl); fimc_hwset_prescaler(ctrl, &ctrl->sc); fimc_hwset_scaler(ctrl, &ctrl->sc); fimc_hwset_output_colorspace(ctrl, cap->fmt.pixelformat); fimc_hwset_output_addr_style(ctrl, cap->fmt.pixelformat); if (cap->fmt.pixelformat == V4L2_PIX_FMT_RGB32 || cap->fmt.pixelformat == V4L2_PIX_FMT_RGB565) fimc_hwset_output_rgb(ctrl, cap->fmt.pixelformat); else fimc_hwset_output_yuv(ctrl, cap->fmt.pixelformat); fimc_hwset_output_area(ctrl, cap->fmt.width, cap->fmt.height); fimc_hwset_output_scan(ctrl, &cap->fmt); fimc_hwset_output_rot_flip(ctrl, cap->rotate, cap->flip); rot = fimc_mapping_rot_flip(cap->rotate, cap->flip); if (rot & FIMC_ROT) { fimc_hwset_org_output_size(ctrl, cap->fmt.width, cap->fmt.height); fimc_hwset_output_size(ctrl, cap->fmt.height, cap->fmt.width); } else { fimc_hwset_org_output_size(ctrl, cap->fmt.width, cap->fmt.height); fimc_hwset_output_size(ctrl, cap->fmt.width, cap->fmt.height); } fimc_hwset_jpeg_mode(ctrl, false); } else { fimc_hwset_output_size(ctrl, cap->fmt.width, cap->fmt.height); if (rot & FIMC_ROT) fimc_hwset_org_output_size(ctrl, cap->fmt.height, cap->fmt.width); else fimc_hwset_org_output_size(ctrl, cap->fmt.width, cap->fmt.height); fimc_hwset_output_area_size(ctrl, fimc_camera_get_jpeg_memsize(ctrl)); fimc_hwset_jpeg_mode(ctrl, true); } if (pdata->hw_ver >= 0x51) { for (i = 0; i < cap->nr_bufs; i++) fimc_hwset_output_address(ctrl, &cap->bufs[i], i); } else { for (i = 0; i < FIMC_PINGPONG; i++) fimc_add_outqueue(ctrl, i); } if (ctrl->cap->fmt.colorspace == V4L2_COLORSPACE_JPEG) fimc_hwset_scaler_bypass(ctrl); fimc_start_capture(ctrl); ctrl->status = FIMC_STREAMON; if (ctrl->cam->use_isp) ret = v4l2_subdev_call(ctrl->is.sd, video, s_stream, 1); fimc_info1("%s-- fimc%d\n", __func__, ctrl->id); /* if available buffer did not remained */ return 0; } int fimc_streamoff_capture(void *fh) { struct fimc_control *ctrl = fh; struct fimc_capinfo *cap = ctrl->cap; struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev); int ret = 0; if (fimc_check_capture_source(ctrl)) { fimc_err("%s: No capture device.\n", __func__); return -ENODEV; } ctrl->status = FIMC_READY_OFF; fimc_stop_capture(ctrl); /* wait for stop hardware */ fimc_wait_disable_capture(ctrl); fimc_hwset_disable_irq(ctrl); if (pdata->hw_ver < 0x51) INIT_LIST_HEAD(&cap->inq); ctrl->status = FIMC_STREAMOFF; 
if (fimc_cam_use) { if (ctrl->cam->use_isp) v4l2_subdev_call(ctrl->is.sd, video, s_stream, 0); if (ctrl->flite_sd) v4l2_subdev_call(ctrl->flite_sd, video, s_stream, 0); if (ctrl->cam->sd) v4l2_subdev_call(ctrl->cam->sd, video, s_stream, 0); if (ctrl->cam->type == CAM_TYPE_MIPI) { if (ctrl->cam->id == CAMERA_CSI_C) s3c_csis_stop(CSI_CH_0); else s3c_csis_stop(CSI_CH_1); } fimc_hwset_reset(ctrl); } else { fimc_hwset_reset(ctrl); } /* Set FIMD to write back */ if ((ctrl->cam->id == CAMERA_WB) || (ctrl->cam->id == CAMERA_WB_B)) { ret = s3cfb_direct_ioctl(0, S3CFB_SET_WRITEBACK, 0); if (ret) { fimc_err("failed set writeback\n"); return ret; } } /* disable camera power */ /* cam power off should call in the subdev release function */ if (fimc_cam_use) { if (ctrl->cam->reset_camera) { if (ctrl->cam->cam_power) ctrl->cam->cam_power(0); if (ctrl->power_status != FIMC_POWER_SUSPEND) ctrl->cam->initialized = 0; } } fimc_info1("%s -- fimc%d\n", __func__, ctrl->id); return 0; } int fimc_is_set_zoom(struct fimc_control *ctrl, struct v4l2_control *c) { struct v4l2_control is_ctrl; struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev); struct s3c_platform_camera *cam = NULL; int ret = 0; is_ctrl.id = 0; is_ctrl.value = 0; if (ctrl->cam) cam = ctrl->cam; else return -ENODEV; /* 0. Check zoom width and height */ // if (!c->value) { // ctrl->is.zoom_in_width = ctrl->is.fmt.width; // ctrl->is.zoom_in_height = ctrl->is.fmt.height; // } else { // ctrl->is.zoom_in_width = ctrl->is.fmt.width - (16 * c->value); // ctrl->is.zoom_in_height = // (ctrl->is.zoom_in_width * ctrl->is.fmt.height) // / ctrl->is.fmt.width; // /* bayer crop contraint */ // switch (ctrl->is.zoom_in_height%4) { // case 1: // ctrl->is.zoom_in_height--; // break; // case 2: // ctrl->is.zoom_in_height += 2; // break; // case 3: // ctrl->is.zoom_in_height++; // break; // } // if ((ctrl->is.zoom_in_width < (ctrl->is.fmt.width/4)) // || (ctrl->is.zoom_in_height < (ctrl->is.fmt.height/4))) { // ctrl->is.zoom_in_width = ctrl->is.fmt.width/4; // ctrl->is.zoom_in_height = ctrl->is.fmt.height/4; // } // } /* 1. fimc stop */ fimc_stop_zoom_capture(ctrl); /* 2. Set zoom and calculate new width and height */ if (ctrl->cam->use_isp) { ret = v4l2_subdev_call(ctrl->is.sd, core, s_ctrl, c); /* 2. Set zoom */ is_ctrl.id = V4L2_CID_IS_ZOOM_STATE; is_ctrl.value = 0; while (!is_ctrl.value) { v4l2_subdev_call(ctrl->is.sd, core, g_ctrl, &is_ctrl); fimc_dbg("V4L2_CID_IS_ZOOM_STATE - %d", is_ctrl.value); } } /* 2. Change soruce size of FIMC */ fimc_hwset_camera_change_source(ctrl); fimc_capture_change_scaler_info(ctrl); fimc_hwset_prescaler(ctrl, &ctrl->sc); fimc_hwset_scaler(ctrl, &ctrl->sc); /* 4. Start FIMC */ fimc_start_zoom_capture(ctrl); /* 5. 
FIMC-IS stream on */ if (ctrl->cam->use_isp) ret = v4l2_subdev_call(ctrl->is.sd, video, s_stream, 1); return 0; } static void fimc_buf2bs(struct fimc_buf_set *bs, struct fimc_buf *buf) { bs->base[FIMC_ADDR_Y] = buf->base[FIMC_ADDR_Y]; bs->length[FIMC_ADDR_Y] = buf->length[FIMC_ADDR_Y]; bs->base[FIMC_ADDR_CB] = buf->base[FIMC_ADDR_CB]; bs->length[FIMC_ADDR_CB] = buf->length[FIMC_ADDR_CB]; bs->base[FIMC_ADDR_CR] = buf->base[FIMC_ADDR_CR]; bs->length[FIMC_ADDR_CR] = buf->length[FIMC_ADDR_CR]; } int fimc_qbuf_capture(void *fh, struct v4l2_buffer *b) { struct fimc_control *ctrl = fh; struct fimc_buf *buf = (struct fimc_buf *)b->m.userptr; struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev); struct fimc_capinfo *cap = ctrl->cap; int idx = b->index; int framecnt_seq; int available_bufnum; size_t length = 0; int i; if (!cap || !ctrl->cam) { fimc_err("%s: No capture device.\n", __func__); return -ENODEV; } mutex_lock(&ctrl->v4l2_lock); if (pdata->hw_ver >= 0x51) { if (cap->bufs[idx].state != VIDEOBUF_IDLE) { fimc_err("%s: invalid state idx : %d\n", __func__, idx); mutex_unlock(&ctrl->v4l2_lock); return -EINVAL; } else { if (b->memory == V4L2_MEMORY_USERPTR) { fimc_buf2bs(&cap->bufs[idx], buf); fimc_hwset_output_address(ctrl, &cap->bufs[idx], idx); } fimc_hwset_output_buf_sequence(ctrl, idx, FIMC_FRAMECNT_SEQ_ENABLE); cap->bufs[idx].state = VIDEOBUF_QUEUED; if (ctrl->status == FIMC_BUFFER_STOP) { framecnt_seq = fimc_hwget_output_buf_sequence(ctrl); available_bufnum = fimc_hwget_number_of_bits(framecnt_seq); if (available_bufnum >= 2) { fimc_start_capture(ctrl); ctrl->status = FIMC_STREAMON; ctrl->restart = true; } } } } else { fimc_add_inqueue(ctrl, b->index); } mutex_unlock(&ctrl->v4l2_lock); if (!cap->cacheable) return 0; for (i = 0; i < 3; i++) { if (cap->bufs[b->index].base[i]) length += cap->bufs[b->index].length[i]; else break; } if (length > (unsigned long) L2_FLUSH_ALL) { flush_cache_all(); /* L1 */ smp_call_function((smp_call_func_t)__cpuc_flush_kern_all, NULL, 1); outer_flush_all(); /* L2 */ } else if (length > (unsigned long) L1_FLUSH_ALL) { flush_cache_all(); /* L1 */ smp_call_function((smp_call_func_t)__cpuc_flush_kern_all, NULL, 1); for (i = 0; i < 3; i++) { phys_addr_t start = cap->bufs[b->index].base[i]; phys_addr_t end = cap->bufs[b->index].base[i] + cap->bufs[b->index].length[i] - 1; if (!start) break; outer_flush_range(start, end); /* L2 */ } } else { for (i = 0; i < 3; i++) { phys_addr_t start = cap->bufs[b->index].base[i]; phys_addr_t end = cap->bufs[b->index].base[i] + cap->bufs[b->index].length[i] - 1; if (!start) break; dmac_flush_range(phys_to_virt(start), phys_to_virt(end)); outer_flush_range(start, end); /* L2 */ } } return 0; } static void fimc_bs2buf(struct fimc_buf *buf, struct fimc_buf_set *bs) { buf->base[FIMC_ADDR_Y] = bs->base[FIMC_ADDR_Y]; buf->length[FIMC_ADDR_Y] = bs->length[FIMC_ADDR_Y]; buf->base[FIMC_ADDR_CB] = bs->base[FIMC_ADDR_CB]; buf->length[FIMC_ADDR_CB] = bs->length[FIMC_ADDR_CB]; buf->base[FIMC_ADDR_CR] = bs->base[FIMC_ADDR_CR]; buf->length[FIMC_ADDR_CR] = bs->length[FIMC_ADDR_CR]; } int fimc_dqbuf_capture(void *fh, struct v4l2_buffer *b) { unsigned long spin_flags; struct fimc_control *ctrl = fh; struct fimc_capinfo *cap = ctrl->cap; struct fimc_buf_set *bs; struct fimc_buf *buf = (struct fimc_buf *)b->m.userptr; int pp, ret = 0; struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev); if (!cap || !ctrl->cam) { fimc_err("%s: No capture device.\n", __func__); return -ENODEV; } if (pdata->hw_ver >= 0x51) { spin_lock_irqsave(&ctrl->outq_lock, 
spin_flags); if (list_empty(&cap->outgoing_q)) { fimc_info2("%s: outgoing_q is empty\n", __func__); spin_unlock_irqrestore(&ctrl->outq_lock, spin_flags); return -EAGAIN; } else { bs = list_first_entry(&cap->outgoing_q, struct fimc_buf_set, list); fimc_info2("%s[%d]: bs->id : %d\n", __func__, ctrl->id, bs->id); b->index = bs->id; bs->state = VIDEOBUF_IDLE; if (b->memory == V4L2_MEMORY_USERPTR) fimc_bs2buf(buf, bs); list_del(&bs->list); } spin_unlock_irqrestore(&ctrl->outq_lock, spin_flags); } else { pp = ((fimc_hwget_frame_count(ctrl) + 2) % 4); if (cap->fmt.field == V4L2_FIELD_INTERLACED_TB) pp &= ~0x1; b->index = cap->outq[pp]; fimc_info2("%s: buffer(%d) outq[%d]\n", __func__, b->index, pp); ret = fimc_add_outqueue(ctrl, pp); if (ret) { b->index = -1; fimc_err("%s: no inqueue buffer\n", __func__); } } return ret; } int fimc_enum_framesizes(struct file *filp, void *fh, struct v4l2_frmsizeenum *fsize) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; int i; u32 index = 0; for (i = 0; i < ARRAY_SIZE(capture_fmts); i++) { if (fsize->pixel_format != capture_fmts[i].pixelformat) continue; if (fsize->index == index) { fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; /* this is camera sensor's width, height. * originally this should be filled each file format */ fsize->discrete.width = ctrl->cam->width; fsize->discrete.height = ctrl->cam->height; return 0; } index++; } return -EINVAL; } int fimc_enum_frameintervals(struct file *filp, void *fh, struct v4l2_frmivalenum *fival) { if (fival->index > 0) return -EINVAL; /* temporary only support 30fps */ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE; fival->discrete.numerator = 1000; fival->discrete.denominator = 30000; return 0; } /* * only used at mipi power func. */ struct device *fimc_get_active_device(void) { struct fimc_global *fimc = get_fimc_dev(); struct fimc_control *ctrl; if (!fimc || (fimc->active_camera < 0)) return NULL; ctrl = get_fimc_ctrl(fimc->active_camera); return ctrl->dev; }
gpl-2.0
ciwrl/android_kernel_huawei_msm8939
arch/arm/mach-msm/platsmp.c
273
11683
/* * Copyright (C) 2002 ARM Ltd. * All Rights Reserved * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/bitops.h> #include <linux/cpumask.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/regulator/krait-regulator.h> #include <soc/qcom/pm.h> #include <soc/qcom/scm-boot.h> #include <soc/qcom/cpu_pwr_ctl.h> #include <asm/cacheflush.h> #include <asm/cputype.h> #include <asm/mach-types.h> #include <asm/smp_plat.h> #include <soc/qcom/socinfo.h> #include <mach/hardware.h> #include <mach/msm_iomap.h> #include "platsmp.h" #define VDD_SC1_ARRAY_CLAMP_GFS_CTL 0x15A0 #define SCSS_CPU1CORE_RESET 0xD80 #define SCSS_DBG_STATUS_CORE_PWRDUP 0xE64 #define MSM8960_SAW2_BASE_ADDR 0x02089000 #define APCS_ALIAS0_BASE_ADDR 0xF9088000 /* * Write pen_release in a way that is guaranteed to be visible to all * observers, irrespective of whether they're taking part in coherency * or not. This is necessary for the hotplug code to work reliably. */ void __cpuinit write_pen_release(int val) { pen_release = val; smp_wmb(); __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release)); outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); } static DEFINE_SPINLOCK(boot_lock); void __cpuinit msm_secondary_init(unsigned int cpu) { WARN_ON(msm_platform_secondary_init(cpu)); /* * let the primary processor know we're out of the * pen, then head off into the C entry point */ write_pen_release(-1); /* * Synchronise with the boot thread. */ spin_lock(&boot_lock); spin_unlock(&boot_lock); } static int __cpuinit release_secondary_sim(unsigned long base, unsigned int cpu) { void *base_ptr = ioremap_nocache(base + (cpu * 0x10000), SZ_4K); if (!base_ptr) return -ENODEV; writel_relaxed(0x800, base_ptr+0x04); writel_relaxed(0x3FFF, base_ptr+0x14); mb(); iounmap(base_ptr); return 0; } static int __cpuinit scorpion_release_secondary(void) { void *base_ptr = ioremap_nocache(0x00902000, SZ_4K*2); if (!base_ptr) return -EINVAL; writel_relaxed(0, base_ptr + VDD_SC1_ARRAY_CLAMP_GFS_CTL); dmb(); writel_relaxed(0, base_ptr + SCSS_CPU1CORE_RESET); writel_relaxed(3, base_ptr + SCSS_DBG_STATUS_CORE_PWRDUP); mb(); iounmap(base_ptr); return 0; } static int __cpuinit msm8960_release_secondary(unsigned long base, unsigned int cpu) { void *base_ptr = ioremap_nocache(base + (cpu * 0x10000), SZ_4K); if (!base_ptr) return -ENODEV; writel_relaxed(0x109, base_ptr+0x04); writel_relaxed(0x101, base_ptr+0x04); mb(); ndelay(300); writel_relaxed(0x121, base_ptr+0x04); mb(); udelay(2); writel_relaxed(0x120, base_ptr+0x04); mb(); udelay(2); writel_relaxed(0x100, base_ptr+0x04); mb(); udelay(100); writel_relaxed(0x180, base_ptr+0x04); mb(); iounmap(base_ptr); return 0; } static int __cpuinit msm8974_release_secondary(unsigned long base, unsigned int cpu) { void *base_ptr = ioremap_nocache(base + (cpu * 0x10000), SZ_4K); if (!base_ptr) return -ENODEV; secondary_cpu_hs_init(base_ptr, cpu); writel_relaxed(0x021, base_ptr+0x04); mb(); udelay(2); writel_relaxed(0x020, base_ptr+0x04); mb(); udelay(2); writel_relaxed(0x000, base_ptr+0x04); mb(); writel_relaxed(0x080, base_ptr+0x04); mb(); iounmap(base_ptr); return 0; } static int __cpuinit arm_release_secondary(unsigned long base, unsigned int cpu) { void *base_ptr = ioremap_nocache(base + 
(cpu * 0x10000), SZ_4K); if (!base_ptr) return -ENODEV; writel_relaxed(0x00000033, base_ptr+0x04); mb(); writel_relaxed(0x10000001, base_ptr+0x14); mb(); udelay(2); writel_relaxed(0x00000031, base_ptr+0x04); mb(); writel_relaxed(0x00000039, base_ptr+0x04); mb(); udelay(2); writel_relaxed(0x00020038, base_ptr+0x04); mb(); udelay(2); writel_relaxed(0x00020008, base_ptr+0x04); mb(); writel_relaxed(0x00020088, base_ptr+0x04); mb(); iounmap(base_ptr); return 0; } static int __cpuinit release_from_pen(unsigned int cpu) { unsigned long timeout; /* Set preset_lpj to avoid subsequent lpj recalculations */ preset_lpj = loops_per_jiffy; /* * set synchronisation state between this boot processor * and the secondary one */ spin_lock(&boot_lock); /* * The secondary processor is waiting to be released from * the holding pen - release it, then wait for it to flag * that it has been released by resetting pen_release. * * Note that "pen_release" is the hardware CPU ID, whereas * "cpu" is Linux's internal ID. */ write_pen_release(cpu_logical_map(cpu)); /* * Send the secondary CPU a soft interrupt, thereby causing * the boot monitor to read the system wide flags register, * and branch to the address found there. */ arch_send_wakeup_ipi_mask(cpumask_of(cpu)); timeout = jiffies + (1 * HZ); while (time_before(jiffies, timeout)) { smp_rmb(); if (pen_release == -1) break; udelay(10); } /* * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ spin_unlock(&boot_lock); return pen_release != -1 ? -ENOSYS : 0; } DEFINE_PER_CPU(int, cold_boot_done); int __cpuinit scorpion_boot_secondary(unsigned int cpu, struct task_struct *idle) { pr_debug("Starting secondary CPU %d\n", cpu); if (per_cpu(cold_boot_done, cpu) == false) { scorpion_release_secondary(); per_cpu(cold_boot_done, cpu) = true; } return release_from_pen(cpu); } int __cpuinit msm8960_boot_secondary(unsigned int cpu, struct task_struct *idle) { pr_debug("Starting secondary CPU %d\n", cpu); if (per_cpu(cold_boot_done, cpu) == false) { msm8960_release_secondary(0x02088000, cpu); per_cpu(cold_boot_done, cpu) = true; } return release_from_pen(cpu); } int __cpuinit msm8974_boot_secondary(unsigned int cpu, struct task_struct *idle) { pr_debug("Starting secondary CPU %d\n", cpu); if (per_cpu(cold_boot_done, cpu) == false) { if (of_board_is_sim()) release_secondary_sim(APCS_ALIAS0_BASE_ADDR, cpu); else if (!of_board_is_rumi()) msm8974_release_secondary(APCS_ALIAS0_BASE_ADDR, cpu); per_cpu(cold_boot_done, cpu) = true; } return release_from_pen(cpu); } static int __cpuinit msm8916_boot_secondary(unsigned int cpu, struct task_struct *idle) { pr_debug("Starting secondary CPU %d\n", cpu); if (per_cpu(cold_boot_done, cpu) == false) { if (of_board_is_sim()) release_secondary_sim(0xb088000, cpu); else if (!of_board_is_rumi()) arm_release_secondary(0xb088000, cpu); per_cpu(cold_boot_done, cpu) = true; } return release_from_pen(cpu); } static int __cpuinit msm8936_boot_secondary(unsigned int cpu, struct task_struct *idle) { int ret = 0; pr_debug("Starting secondary CPU %d\n", cpu); if (per_cpu(cold_boot_done, cpu) == false) { if (of_board_is_sim()) { ret = msm_unclamp_secondary_arm_cpu_sim(cpu); if (ret) return ret; } else if (!of_board_is_rumi()) { ret = msm_unclamp_secondary_arm_cpu(cpu); if (ret) return ret; } per_cpu(cold_boot_done, cpu) = true; } return release_from_pen(cpu); } int __cpuinit arm_boot_secondary(unsigned int cpu, struct task_struct *idle) { pr_debug("Starting secondary CPU %d\n", cpu); if (per_cpu(cold_boot_done, 
cpu) == false) { if (of_board_is_sim()) release_secondary_sim(APCS_ALIAS0_BASE_ADDR, cpu); else if (!of_board_is_rumi()) arm_release_secondary(APCS_ALIAS0_BASE_ADDR, cpu); per_cpu(cold_boot_done, cpu) = true; } return release_from_pen(cpu); } /* * Initialise the CPU possible map early - this describes the CPUs * which may be present or become present in the system. */ static void __init msm_smp_init_cpus(void) { unsigned int i, ncores = get_core_count(); if (ncores > nr_cpu_ids) { pr_warn("SMP: %u cores greater than maximum (%u), clipping\n", ncores, nr_cpu_ids); ncores = nr_cpu_ids; } for (i = 0; i < ncores; i++) set_cpu_possible(i, true); } static void __init arm_smp_init_cpus(void) { unsigned int i, ncores; ncores = (__raw_readl(MSM_APCS_GCC_BASE + 0x30)) & 0xF; if (ncores > nr_cpu_ids) { pr_warn("SMP: %u cores greater than maximum (%u), clipping\n", ncores, nr_cpu_ids); ncores = nr_cpu_ids; } for (i = 0; i < ncores; i++) set_cpu_possible(i, true); } static int cold_boot_flags[] __initdata = { 0, SCM_FLAG_COLDBOOT_CPU1, SCM_FLAG_COLDBOOT_CPU2, SCM_FLAG_COLDBOOT_CPU3, }; static void __init msm_platform_smp_prepare_cpus_mc(unsigned int max_cpus) { int cpu, map; u32 aff0_mask = 0; u32 aff1_mask = 0; u32 aff2_mask = 0; for_each_present_cpu(cpu) { map = cpu_logical_map(cpu); aff0_mask |= BIT(MPIDR_AFFINITY_LEVEL(map, 0)); aff1_mask |= BIT(MPIDR_AFFINITY_LEVEL(map, 1)); aff2_mask |= BIT(MPIDR_AFFINITY_LEVEL(map, 2)); } if (scm_set_boot_addr_mc(virt_to_phys(msm_secondary_startup), aff0_mask, aff1_mask, aff2_mask, SCM_FLAG_COLDBOOT_MC)) pr_warn("Failed to set CPU boot address\n"); } static void __init msm_platform_smp_prepare_cpus(unsigned int max_cpus) { int cpu, map; unsigned int flags = 0; if (scm_is_mc_boot_available()) return msm_platform_smp_prepare_cpus_mc(max_cpus); for_each_present_cpu(cpu) { map = cpu_logical_map(cpu); if (map > ARRAY_SIZE(cold_boot_flags)) { set_cpu_present(cpu, false); __WARN(); continue; } flags |= cold_boot_flags[map]; } if (scm_set_boot_addr(virt_to_phys(msm_secondary_startup), flags)) pr_warn("Failed to set CPU boot address\n"); } int msm_cpu_disable(unsigned int cpu) { return 0; /* support hotplugging any cpu */ } struct smp_operations arm_smp_ops __initdata = { .smp_init_cpus = arm_smp_init_cpus, .smp_prepare_cpus = msm_platform_smp_prepare_cpus, .smp_secondary_init = msm_secondary_init, .smp_boot_secondary = arm_boot_secondary, #ifdef CONFIG_HOTPLUG .cpu_die = msm_cpu_die, .cpu_kill = msm_cpu_kill, #endif }; struct smp_operations msm8916_smp_ops __initdata = { .smp_init_cpus = arm_smp_init_cpus, .smp_prepare_cpus = msm_platform_smp_prepare_cpus, .smp_secondary_init = msm_secondary_init, .smp_boot_secondary = msm8916_boot_secondary, #ifdef CONFIG_HOTPLUG .cpu_die = msm_cpu_die, .cpu_kill = msm_cpu_kill, #endif }; struct smp_operations msm8936_smp_ops __initdata = { .smp_init_cpus = arm_smp_init_cpus, .smp_prepare_cpus = msm_platform_smp_prepare_cpus, .smp_secondary_init = msm_secondary_init, .smp_boot_secondary = msm8936_boot_secondary, #ifdef CONFIG_HOTPLUG .cpu_die = msm_cpu_die, .cpu_kill = msm_cpu_kill, .cpu_disable = msm_cpu_disable, #endif }; struct smp_operations msm8974_smp_ops __initdata = { .smp_init_cpus = msm_smp_init_cpus, .smp_prepare_cpus = msm_platform_smp_prepare_cpus, .smp_secondary_init = msm_secondary_init, .smp_boot_secondary = msm8974_boot_secondary, #ifdef CONFIG_HOTPLUG .cpu_die = msm_cpu_die, .cpu_kill = msm_cpu_kill, #endif }; struct smp_operations msm8960_smp_ops __initdata = { .smp_init_cpus = msm_smp_init_cpus, .smp_prepare_cpus = 
msm_platform_smp_prepare_cpus, .smp_secondary_init = msm_secondary_init, .smp_boot_secondary = msm8960_boot_secondary, #ifdef CONFIG_HOTPLUG .cpu_die = msm_cpu_die, .cpu_kill = msm_cpu_kill, #endif }; struct smp_operations scorpion_smp_ops __initdata = { .smp_init_cpus = msm_smp_init_cpus, .smp_prepare_cpus = msm_platform_smp_prepare_cpus, .smp_secondary_init = msm_secondary_init, .smp_boot_secondary = scorpion_boot_secondary, #ifdef CONFIG_HOTPLUG .cpu_die = msm_cpu_die, .cpu_kill = msm_cpu_kill, #endif };
gpl-2.0
TheWhisp/android_kernel_samsung_msm8916-caf
drivers/acpi/sleep.c
1553
20706
/* * sleep.c - ACPI sleep support. * * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com> * Copyright (c) 2004 David Shaohua Li <shaohua.li@intel.com> * Copyright (c) 2000-2003 Patrick Mochel * Copyright (c) 2003 Open Source Development Lab * * This file is released under the GPLv2. * */ #include <linux/delay.h> #include <linux/irq.h> #include <linux/dmi.h> #include <linux/device.h> #include <linux/suspend.h> #include <linux/reboot.h> #include <linux/acpi.h> #include <linux/module.h> #include <asm/io.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include "internal.h" #include "sleep.h" static u8 sleep_states[ACPI_S_STATE_COUNT]; static void acpi_sleep_tts_switch(u32 acpi_state) { union acpi_object in_arg = { ACPI_TYPE_INTEGER }; struct acpi_object_list arg_list = { 1, &in_arg }; acpi_status status = AE_OK; in_arg.integer.value = acpi_state; status = acpi_evaluate_object(NULL, "\\_TTS", &arg_list, NULL); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { /* * OS can't evaluate the _TTS object correctly. Some warning * message will be printed. But it won't break anything. */ printk(KERN_NOTICE "Failure in evaluating _TTS object\n"); } } static int tts_notify_reboot(struct notifier_block *this, unsigned long code, void *x) { acpi_sleep_tts_switch(ACPI_STATE_S5); return NOTIFY_DONE; } static struct notifier_block tts_notifier = { .notifier_call = tts_notify_reboot, .next = NULL, .priority = 0, }; static int acpi_sleep_prepare(u32 acpi_state) { #ifdef CONFIG_ACPI_SLEEP /* do we have a wakeup address for S2 and S3? */ if (acpi_state == ACPI_STATE_S3) { if (!acpi_wakeup_address) return -EFAULT; acpi_set_firmware_waking_vector(acpi_wakeup_address); } ACPI_FLUSH_CPU_CACHE(); #endif printk(KERN_INFO PREFIX "Preparing to enter system sleep state S%d\n", acpi_state); acpi_enable_wakeup_devices(acpi_state); acpi_enter_sleep_state_prep(acpi_state); return 0; } static bool acpi_sleep_state_supported(u8 sleep_state) { acpi_status status; u8 type_a, type_b; status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b); return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware || (acpi_gbl_FADT.sleep_control.address && acpi_gbl_FADT.sleep_status.address)); } #ifdef CONFIG_ACPI_SLEEP static u32 acpi_target_sleep_state = ACPI_STATE_S0; u32 acpi_target_system_state(void) { return acpi_target_sleep_state; } static bool pwr_btn_event_pending; /* * The ACPI specification wants us to save NVS memory regions during hibernation * and to restore them during the subsequent resume. Windows does that also for * suspend to RAM. However, it is known that this mechanism does not work on * all machines, so we allow the user to disable it with the help of the * 'acpi_sleep=nonvs' kernel command line option. */ static bool nvs_nosave; void __init acpi_nvs_nosave(void) { nvs_nosave = true; } /* * The ACPI specification wants us to save NVS memory regions during hibernation * but says nothing about saving NVS during S3. Not all versions of Windows * save NVS on S3 suspend either, and it is clear that not all systems need * NVS to be saved at S3 time. To improve suspend/resume time, allow the * user to disable saving NVS on S3 if their system does not require it, but * continue to save/restore NVS for S4 as specified. 
*/ static bool nvs_nosave_s3; void __init acpi_nvs_nosave_s3(void) { nvs_nosave_s3 = true; } /* * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the * user to request that behavior by using the 'acpi_old_suspend_ordering' * kernel command line option that causes the following variable to be set. */ static bool old_suspend_ordering; void __init acpi_old_suspend_ordering(void) { old_suspend_ordering = true; } static int __init init_old_suspend_ordering(const struct dmi_system_id *d) { acpi_old_suspend_ordering(); return 0; } static int __init init_nvs_nosave(const struct dmi_system_id *d) { acpi_nvs_nosave(); return 0; } static struct dmi_system_id __initdata acpisleep_dmi_table[] = { { .callback = init_old_suspend_ordering, .ident = "Abit KN9 (nForce4 variant)", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"), DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"), }, }, { .callback = init_old_suspend_ordering, .ident = "HP xw4600 Workstation", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"), }, }, { .callback = init_old_suspend_ordering, .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "M2N8L"), }, }, { .callback = init_old_suspend_ordering, .ident = "Panasonic CF51-2L", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Matsushita Electric Industrial Co.,Ltd."), DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VGN-FW41E_H", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VGN-FW21E", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VGN-FW21M", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VPCEB17FX", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VGN-SR11M", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"), }, }, { .callback = init_nvs_nosave, .ident = "Everex StepNote Series", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VPCEB1Z1E", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VGN-NW130D", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VPCCW29FX", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"), }, }, { .callback = init_nvs_nosave, .ident = "Averatec AV1020-ED2", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"), DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"), }, }, { .callback = init_old_suspend_ordering, .ident = "Asus A8N-SLI DELUXE", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"), }, }, { .callback = init_old_suspend_ordering, .ident = "Asus A8N-SLI Premium", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, 
"ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VGN-SR26GN_P", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VPCEB1S1E", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VGN-FW520F", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"), }, }, { .callback = init_nvs_nosave, .ident = "Asus K54C", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "K54C"), }, }, { .callback = init_nvs_nosave, .ident = "Asus K54HR", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"), }, }, {}, }; static void acpi_sleep_dmi_check(void) { dmi_check_system(acpisleep_dmi_table); } /** * acpi_pm_freeze - Disable the GPEs and suspend EC transactions. */ static int acpi_pm_freeze(void) { acpi_disable_all_gpes(); acpi_os_wait_events_complete(); acpi_ec_block_transactions(); return 0; } /** * acpi_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS. */ static int acpi_pm_pre_suspend(void) { acpi_pm_freeze(); return suspend_nvs_save(); } /** * __acpi_pm_prepare - Prepare the platform to enter the target state. * * If necessary, set the firmware waking vector and do arch-specific * nastiness to get the wakeup code to the waking vector. */ static int __acpi_pm_prepare(void) { int error = acpi_sleep_prepare(acpi_target_sleep_state); if (error) acpi_target_sleep_state = ACPI_STATE_S0; return error; } /** * acpi_pm_prepare - Prepare the platform to enter the target sleep * state and disable the GPEs. */ static int acpi_pm_prepare(void) { int error = __acpi_pm_prepare(); if (!error) error = acpi_pm_pre_suspend(); return error; } static int find_powerf_dev(struct device *dev, void *data) { struct acpi_device *device = to_acpi_device(dev); const char *hid = acpi_device_hid(device); return !strcmp(hid, ACPI_BUTTON_HID_POWERF); } /** * acpi_pm_finish - Instruct the platform to leave a sleep state. * * This is called after we wake back up (or if entering the sleep state * failed). */ static void acpi_pm_finish(void) { struct device *pwr_btn_dev; u32 acpi_state = acpi_target_sleep_state; acpi_ec_unblock_transactions(); suspend_nvs_free(); if (acpi_state == ACPI_STATE_S0) return; printk(KERN_INFO PREFIX "Waking up from system sleep state S%d\n", acpi_state); acpi_disable_wakeup_devices(acpi_state); acpi_leave_sleep_state(acpi_state); /* reset firmware waking vector */ acpi_set_firmware_waking_vector((acpi_physical_address) 0); acpi_target_sleep_state = ACPI_STATE_S0; acpi_resume_power_resources(); /* If we were woken with the fixed power button, provide a small * hint to userspace in the form of a wakeup event on the fixed power * button device (if it can be found). * * We delay the event generation til now, as the PM layer requires * timekeeping to be running before we generate events. */ if (!pwr_btn_event_pending) return; pwr_btn_event_pending = false; pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL, find_powerf_dev); if (pwr_btn_dev) { pm_wakeup_event(pwr_btn_dev, 0); put_device(pwr_btn_dev); } } /** * acpi_pm_end - Finish up suspend sequence. 
*/ static void acpi_pm_end(void) { /* * This is necessary in case acpi_pm_finish() is not called during a * failing transition to a sleep state. */ acpi_target_sleep_state = ACPI_STATE_S0; acpi_sleep_tts_switch(acpi_target_sleep_state); } #else /* !CONFIG_ACPI_SLEEP */ #define acpi_target_sleep_state ACPI_STATE_S0 static inline void acpi_sleep_dmi_check(void) {} #endif /* CONFIG_ACPI_SLEEP */ #ifdef CONFIG_SUSPEND static u32 acpi_suspend_states[] = { [PM_SUSPEND_ON] = ACPI_STATE_S0, [PM_SUSPEND_STANDBY] = ACPI_STATE_S1, [PM_SUSPEND_MEM] = ACPI_STATE_S3, [PM_SUSPEND_MAX] = ACPI_STATE_S5 }; /** * acpi_suspend_begin - Set the target system sleep state to the state * associated with given @pm_state, if supported. */ static int acpi_suspend_begin(suspend_state_t pm_state) { u32 acpi_state = acpi_suspend_states[pm_state]; int error = 0; error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc(); if (error) return error; if (sleep_states[acpi_state]) { acpi_target_sleep_state = acpi_state; acpi_sleep_tts_switch(acpi_target_sleep_state); } else { printk(KERN_ERR "ACPI does not support this state: %d\n", pm_state); error = -ENOSYS; } return error; } /** * acpi_suspend_enter - Actually enter a sleep state. * @pm_state: ignored * * Flush caches and go to sleep. For STR we have to call arch-specific * assembly, which in turn call acpi_enter_sleep_state(). * It's unfortunate, but it works. Please fix if you're feeling frisky. */ static int acpi_suspend_enter(suspend_state_t pm_state) { acpi_status status = AE_OK; u32 acpi_state = acpi_target_sleep_state; int error; ACPI_FLUSH_CPU_CACHE(); switch (acpi_state) { case ACPI_STATE_S1: barrier(); status = acpi_enter_sleep_state(acpi_state); break; case ACPI_STATE_S3: error = acpi_suspend_lowlevel(); if (error) return error; pr_info(PREFIX "Low-level resume complete\n"); break; } /* This violates the spec but is required for bug compatibility. */ acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); /* Reprogram control registers */ acpi_leave_sleep_state_prep(acpi_state); /* ACPI 3.0 specs (P62) says that it's the responsibility * of the OSPM to clear the status bit [ implying that the * POWER_BUTTON event should not reach userspace ] * * However, we do generate a small hint for userspace in the form of * a wakeup event. We flag this condition for now and generate the * event later, as we're currently too early in resume to be able to * generate wakeup events. */ if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) { acpi_event_status pwr_btn_status; acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status); if (pwr_btn_status & ACPI_EVENT_FLAG_SET) { acpi_clear_event(ACPI_EVENT_POWER_BUTTON); /* Flag for later */ pwr_btn_event_pending = true; } } /* * Disable and clear GPE status before interrupt is enabled. Some GPEs * (like wakeup GPE) haven't handler, this can avoid such GPE misfire. * acpi_leave_sleep_state will reenable specific GPEs later */ acpi_disable_all_gpes(); /* Allow EC transactions to happen. */ acpi_ec_unblock_transactions_early(); suspend_nvs_restore(); return ACPI_SUCCESS(status) ? 
0 : -EFAULT; } static int acpi_suspend_state_valid(suspend_state_t pm_state) { u32 acpi_state; switch (pm_state) { case PM_SUSPEND_ON: case PM_SUSPEND_STANDBY: case PM_SUSPEND_MEM: acpi_state = acpi_suspend_states[pm_state]; return sleep_states[acpi_state]; default: return 0; } } static const struct platform_suspend_ops acpi_suspend_ops = { .valid = acpi_suspend_state_valid, .begin = acpi_suspend_begin, .prepare_late = acpi_pm_prepare, .enter = acpi_suspend_enter, .wake = acpi_pm_finish, .end = acpi_pm_end, }; /** * acpi_suspend_begin_old - Set the target system sleep state to the * state associated with given @pm_state, if supported, and * execute the _PTS control method. This function is used if the * pre-ACPI 2.0 suspend ordering has been requested. */ static int acpi_suspend_begin_old(suspend_state_t pm_state) { int error = acpi_suspend_begin(pm_state); if (!error) error = __acpi_pm_prepare(); return error; } /* * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has * been requested. */ static const struct platform_suspend_ops acpi_suspend_ops_old = { .valid = acpi_suspend_state_valid, .begin = acpi_suspend_begin_old, .prepare_late = acpi_pm_pre_suspend, .enter = acpi_suspend_enter, .wake = acpi_pm_finish, .end = acpi_pm_end, .recover = acpi_pm_finish, }; static void acpi_sleep_suspend_setup(void) { int i; for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) if (acpi_sleep_state_supported(i)) sleep_states[i] = 1; suspend_set_ops(old_suspend_ordering ? &acpi_suspend_ops_old : &acpi_suspend_ops); } #else /* !CONFIG_SUSPEND */ static inline void acpi_sleep_suspend_setup(void) {} #endif /* !CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATION static unsigned long s4_hardware_signature; static struct acpi_table_facs *facs; static bool nosigcheck; void __init acpi_no_s4_hw_signature(void) { nosigcheck = true; } static int acpi_hibernation_begin(void) { int error; error = nvs_nosave ? 0 : suspend_nvs_alloc(); if (!error) { acpi_target_sleep_state = ACPI_STATE_S4; acpi_sleep_tts_switch(acpi_target_sleep_state); } return error; } static int acpi_hibernation_enter(void) { acpi_status status = AE_OK; ACPI_FLUSH_CPU_CACHE(); /* This shouldn't return. If it returns, we have a problem */ status = acpi_enter_sleep_state(ACPI_STATE_S4); /* Reprogram control registers */ acpi_leave_sleep_state_prep(ACPI_STATE_S4); return ACPI_SUCCESS(status) ? 0 : -EFAULT; } static void acpi_hibernation_leave(void) { /* * If ACPI is not enabled by the BIOS and the boot kernel, we need to * enable it here. */ acpi_enable(); /* Reprogram control registers */ acpi_leave_sleep_state_prep(ACPI_STATE_S4); /* Check the hardware signature */ if (facs && s4_hardware_signature != facs->hardware_signature) { printk(KERN_EMERG "ACPI: Hardware changed while hibernated, " "cannot resume!\n"); panic("ACPI S4 hardware signature mismatch"); } /* Restore the NVS memory area */ suspend_nvs_restore(); /* Allow EC transactions to happen. 
*/ acpi_ec_unblock_transactions_early(); } static void acpi_pm_thaw(void) { acpi_ec_unblock_transactions(); acpi_enable_all_runtime_gpes(); } static const struct platform_hibernation_ops acpi_hibernation_ops = { .begin = acpi_hibernation_begin, .end = acpi_pm_end, .pre_snapshot = acpi_pm_prepare, .finish = acpi_pm_finish, .prepare = acpi_pm_prepare, .enter = acpi_hibernation_enter, .leave = acpi_hibernation_leave, .pre_restore = acpi_pm_freeze, .restore_cleanup = acpi_pm_thaw, }; /** * acpi_hibernation_begin_old - Set the target system sleep state to * ACPI_STATE_S4 and execute the _PTS control method. This * function is used if the pre-ACPI 2.0 suspend ordering has been * requested. */ static int acpi_hibernation_begin_old(void) { int error; /* * The _TTS object should always be evaluated before the _PTS object. * When the old_suspended_ordering is true, the _PTS object is * evaluated in the acpi_sleep_prepare. */ acpi_sleep_tts_switch(ACPI_STATE_S4); error = acpi_sleep_prepare(ACPI_STATE_S4); if (!error) { if (!nvs_nosave) error = suspend_nvs_alloc(); if (!error) acpi_target_sleep_state = ACPI_STATE_S4; } return error; } /* * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has * been requested. */ static const struct platform_hibernation_ops acpi_hibernation_ops_old = { .begin = acpi_hibernation_begin_old, .end = acpi_pm_end, .pre_snapshot = acpi_pm_pre_suspend, .prepare = acpi_pm_freeze, .finish = acpi_pm_finish, .enter = acpi_hibernation_enter, .leave = acpi_hibernation_leave, .pre_restore = acpi_pm_freeze, .restore_cleanup = acpi_pm_thaw, .recover = acpi_pm_finish, }; static void acpi_sleep_hibernate_setup(void) { if (!acpi_sleep_state_supported(ACPI_STATE_S4)) return; hibernation_set_ops(old_suspend_ordering ? &acpi_hibernation_ops_old : &acpi_hibernation_ops); sleep_states[ACPI_STATE_S4] = 1; if (nosigcheck) return; acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs); if (facs) s4_hardware_signature = facs->hardware_signature; } #else /* !CONFIG_HIBERNATION */ static inline void acpi_sleep_hibernate_setup(void) {} #endif /* !CONFIG_HIBERNATION */ int acpi_suspend(u32 acpi_state) { suspend_state_t states[] = { [1] = PM_SUSPEND_STANDBY, [3] = PM_SUSPEND_MEM, [5] = PM_SUSPEND_MAX }; if (acpi_state < 6 && states[acpi_state]) return pm_suspend(states[acpi_state]); if (acpi_state == 4) return hibernate(); return -EINVAL; } static void acpi_power_off_prepare(void) { /* Prepare to power off the system */ acpi_sleep_prepare(ACPI_STATE_S5); acpi_disable_all_gpes(); } static void acpi_power_off(void) { /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ printk(KERN_DEBUG "%s called\n", __func__); local_irq_disable(); acpi_enter_sleep_state(ACPI_STATE_S5); } int __init acpi_sleep_init(void) { char supported[ACPI_S_STATE_COUNT * 3 + 1]; char *pos = supported; int i; if (acpi_disabled) return 0; acpi_sleep_dmi_check(); sleep_states[ACPI_STATE_S0] = 1; acpi_sleep_suspend_setup(); acpi_sleep_hibernate_setup(); if (acpi_sleep_state_supported(ACPI_STATE_S5)) { sleep_states[ACPI_STATE_S5] = 1; pm_power_off_prepare = acpi_power_off_prepare; pm_power_off = acpi_power_off; } supported[0] = 0; for (i = 0; i < ACPI_S_STATE_COUNT; i++) { if (sleep_states[i]) pos += sprintf(pos, " S%d", i); } pr_info(PREFIX "(supports%s)\n", supported); /* * Register the tts_notifier to reboot notifier list so that the _TTS * object can also be evaluated when the system enters S5. */ register_reboot_notifier(&tts_notifier); return 0; }
gpl-2.0
iTechnoguy/zwkernel
drivers/staging/imx-drm/ipuv3-crtc.c
2065
13790
/* * i.MX IPUv3 Graphics driver * * Copyright (C) 2011 Sascha Hauer, Pengutronix * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. */ #include <linux/module.h> #include <linux/export.h> #include <linux/device.h> #include <linux/platform_device.h> #include <drm/drmP.h> #include <drm/drm_fb_helper.h> #include <drm/drm_crtc_helper.h> #include <linux/fb.h> #include <linux/clk.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_fb_cma_helper.h> #include "ipu-v3/imx-ipu-v3.h" #include "imx-drm.h" #define DRIVER_DESC "i.MX IPUv3 Graphics" struct ipu_framebuffer { struct drm_framebuffer base; void *virt; dma_addr_t phys; size_t len; }; struct ipu_crtc { struct drm_fb_helper fb_helper; struct ipu_framebuffer ifb; int num_crtcs; struct device *dev; struct drm_crtc base; struct imx_drm_crtc *imx_crtc; struct ipuv3_channel *ipu_ch; struct ipu_dc *dc; struct ipu_dp *dp; struct dmfc_channel *dmfc; struct ipu_di *di; int enabled; struct ipu_priv *ipu_priv; struct drm_pending_vblank_event *page_flip_event; struct drm_framebuffer *newfb; int irq; u32 interface_pix_fmt; unsigned long di_clkflags; int di_hsync_pin; int di_vsync_pin; }; #define to_ipu_crtc(x) container_of(x, struct ipu_crtc, base) static int calc_vref(struct drm_display_mode *mode) { unsigned long htotal, vtotal; htotal = mode->htotal; vtotal = mode->vtotal; if (!htotal || !vtotal) return 60; return mode->clock * 1000 / vtotal / htotal; } static int calc_bandwidth(struct drm_display_mode *mode, unsigned int vref) { return mode->hdisplay * mode->vdisplay * vref; } static void ipu_fb_enable(struct ipu_crtc *ipu_crtc) { if (ipu_crtc->enabled) return; ipu_di_enable(ipu_crtc->di); ipu_dmfc_enable_channel(ipu_crtc->dmfc); ipu_idmac_enable_channel(ipu_crtc->ipu_ch); ipu_dc_enable_channel(ipu_crtc->dc); if (ipu_crtc->dp) ipu_dp_enable_channel(ipu_crtc->dp); ipu_crtc->enabled = 1; } static void ipu_fb_disable(struct ipu_crtc *ipu_crtc) { if (!ipu_crtc->enabled) return; if (ipu_crtc->dp) ipu_dp_disable_channel(ipu_crtc->dp); ipu_dc_disable_channel(ipu_crtc->dc); ipu_idmac_disable_channel(ipu_crtc->ipu_ch); ipu_dmfc_disable_channel(ipu_crtc->dmfc); ipu_di_disable(ipu_crtc->di); ipu_crtc->enabled = 0; } static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); dev_dbg(ipu_crtc->dev, "%s mode: %d\n", __func__, mode); switch (mode) { case DRM_MODE_DPMS_ON: ipu_fb_enable(ipu_crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: ipu_fb_disable(ipu_crtc); break; } } static int ipu_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); int ret; if (ipu_crtc->newfb) return -EBUSY; ret = imx_drm_crtc_vblank_get(ipu_crtc->imx_crtc); if (ret) { dev_dbg(ipu_crtc->dev, "failed to acquire vblank counter\n"); list_del(&event->base.link); 
return ret; } ipu_crtc->newfb = fb; ipu_crtc->page_flip_event = event; return 0; } static const struct drm_crtc_funcs ipu_crtc_funcs = { .set_config = drm_crtc_helper_set_config, .destroy = drm_crtc_cleanup, .page_flip = ipu_page_flip, }; static int ipu_drm_set_base(struct drm_crtc *crtc, int x, int y) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); struct drm_gem_cma_object *cma_obj; struct drm_framebuffer *fb = crtc->fb; unsigned long phys; cma_obj = drm_fb_cma_get_gem_obj(fb, 0); if (!cma_obj) { DRM_LOG_KMS("entry is null.\n"); return -EFAULT; } phys = cma_obj->paddr; phys += x * (fb->bits_per_pixel >> 3); phys += y * fb->pitches[0]; dev_dbg(ipu_crtc->dev, "%s: phys: 0x%lx\n", __func__, phys); dev_dbg(ipu_crtc->dev, "%s: xy: %dx%d\n", __func__, x, y); ipu_cpmem_set_stride(ipu_get_cpmem(ipu_crtc->ipu_ch), fb->pitches[0]); ipu_cpmem_set_buffer(ipu_get_cpmem(ipu_crtc->ipu_ch), 0, phys); return 0; } static int ipu_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *orig_mode, struct drm_display_mode *mode, int x, int y, struct drm_framebuffer *old_fb) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); struct drm_framebuffer *fb = ipu_crtc->base.fb; int ret; struct ipu_di_signal_cfg sig_cfg = {}; u32 out_pixel_fmt; struct ipu_ch_param __iomem *cpmem = ipu_get_cpmem(ipu_crtc->ipu_ch); int bpp; u32 v4l2_fmt; dev_dbg(ipu_crtc->dev, "%s: mode->hdisplay: %d\n", __func__, mode->hdisplay); dev_dbg(ipu_crtc->dev, "%s: mode->vdisplay: %d\n", __func__, mode->vdisplay); ipu_ch_param_zero(cpmem); switch (fb->pixel_format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_ARGB8888: v4l2_fmt = V4L2_PIX_FMT_RGB32; bpp = 32; break; case DRM_FORMAT_RGB565: v4l2_fmt = V4L2_PIX_FMT_RGB565; bpp = 16; break; case DRM_FORMAT_RGB888: v4l2_fmt = V4L2_PIX_FMT_RGB24; bpp = 24; break; default: dev_err(ipu_crtc->dev, "unsupported pixel format 0x%08x\n", fb->pixel_format); return -EINVAL; } out_pixel_fmt = ipu_crtc->interface_pix_fmt; if (mode->flags & DRM_MODE_FLAG_INTERLACE) sig_cfg.interlaced = 1; if (mode->flags & DRM_MODE_FLAG_PHSYNC) sig_cfg.Hsync_pol = 1; if (mode->flags & DRM_MODE_FLAG_PVSYNC) sig_cfg.Vsync_pol = 1; sig_cfg.enable_pol = 1; sig_cfg.clk_pol = 0; sig_cfg.width = mode->hdisplay; sig_cfg.height = mode->vdisplay; sig_cfg.pixel_fmt = out_pixel_fmt; sig_cfg.h_start_width = mode->htotal - mode->hsync_end; sig_cfg.h_sync_width = mode->hsync_end - mode->hsync_start; sig_cfg.h_end_width = mode->hsync_start - mode->hdisplay; sig_cfg.v_start_width = mode->vtotal - mode->vsync_end; sig_cfg.v_sync_width = mode->vsync_end - mode->vsync_start; sig_cfg.v_end_width = mode->vsync_start - mode->vdisplay; sig_cfg.pixelclock = mode->clock * 1000; sig_cfg.clkflags = ipu_crtc->di_clkflags; sig_cfg.v_to_h_sync = 0; sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin; sig_cfg.vsync_pin = ipu_crtc->di_vsync_pin; if (ipu_crtc->dp) { ret = ipu_dp_setup_channel(ipu_crtc->dp, IPUV3_COLORSPACE_RGB, IPUV3_COLORSPACE_RGB); if (ret) { dev_err(ipu_crtc->dev, "initializing display processor failed with %d\n", ret); return ret; } ipu_dp_set_global_alpha(ipu_crtc->dp, 1, 0, 1); } ret = ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di, sig_cfg.interlaced, out_pixel_fmt, mode->hdisplay); if (ret) { dev_err(ipu_crtc->dev, "initializing display controller failed with %d\n", ret); return ret; } ret = ipu_di_init_sync_panel(ipu_crtc->di, &sig_cfg); if (ret) { dev_err(ipu_crtc->dev, "initializing panel failed with %d\n", ret); return ret; } ipu_cpmem_set_resolution(cpmem, mode->hdisplay, mode->vdisplay); ipu_cpmem_set_fmt(cpmem, v4l2_fmt); 
ipu_cpmem_set_high_priority(ipu_crtc->ipu_ch); ret = ipu_dmfc_init_channel(ipu_crtc->dmfc, mode->hdisplay); if (ret) { dev_err(ipu_crtc->dev, "initializing dmfc channel failed with %d\n", ret); return ret; } ret = ipu_dmfc_alloc_bandwidth(ipu_crtc->dmfc, calc_bandwidth(mode, calc_vref(mode)), 64); if (ret) { dev_err(ipu_crtc->dev, "allocating dmfc bandwidth failed with %d\n", ret); return ret; } ipu_drm_set_base(crtc, x, y); return 0; } static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc) { unsigned long flags; struct drm_device *drm = ipu_crtc->base.dev; spin_lock_irqsave(&drm->event_lock, flags); if (ipu_crtc->page_flip_event) drm_send_vblank_event(drm, -1, ipu_crtc->page_flip_event); ipu_crtc->page_flip_event = NULL; imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); spin_unlock_irqrestore(&drm->event_lock, flags); } static irqreturn_t ipu_irq_handler(int irq, void *dev_id) { struct ipu_crtc *ipu_crtc = dev_id; imx_drm_handle_vblank(ipu_crtc->imx_crtc); if (ipu_crtc->newfb) { ipu_crtc->base.fb = ipu_crtc->newfb; ipu_crtc->newfb = NULL; ipu_drm_set_base(&ipu_crtc->base, 0, 0); ipu_crtc_handle_pageflip(ipu_crtc); } return IRQ_HANDLED; } static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; } static void ipu_crtc_prepare(struct drm_crtc *crtc) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); ipu_fb_disable(ipu_crtc); } static void ipu_crtc_commit(struct drm_crtc *crtc) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); ipu_fb_enable(ipu_crtc); } static void ipu_crtc_load_lut(struct drm_crtc *crtc) { } static struct drm_crtc_helper_funcs ipu_helper_funcs = { .dpms = ipu_crtc_dpms, .mode_fixup = ipu_crtc_mode_fixup, .mode_set = ipu_crtc_mode_set, .prepare = ipu_crtc_prepare, .commit = ipu_crtc_commit, .load_lut = ipu_crtc_load_lut, }; static int ipu_enable_vblank(struct drm_crtc *crtc) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); enable_irq(ipu_crtc->irq); return 0; } static void ipu_disable_vblank(struct drm_crtc *crtc) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); disable_irq(ipu_crtc->irq); } static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc, u32 encoder_type, u32 pixfmt, int hsync_pin, int vsync_pin) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); ipu_crtc->interface_pix_fmt = pixfmt; ipu_crtc->di_hsync_pin = hsync_pin; ipu_crtc->di_vsync_pin = vsync_pin; switch (encoder_type) { case DRM_MODE_ENCODER_DAC: case DRM_MODE_ENCODER_TVDAC: case DRM_MODE_ENCODER_LVDS: ipu_crtc->di_clkflags = IPU_DI_CLKMODE_SYNC | IPU_DI_CLKMODE_EXT; break; case DRM_MODE_ENCODER_NONE: ipu_crtc->di_clkflags = 0; break; } return 0; } static const struct imx_drm_crtc_helper_funcs ipu_crtc_helper_funcs = { .enable_vblank = ipu_enable_vblank, .disable_vblank = ipu_disable_vblank, .set_interface_pix_fmt = ipu_set_interface_pix_fmt, .crtc_funcs = &ipu_crtc_funcs, .crtc_helper_funcs = &ipu_helper_funcs, }; static void ipu_put_resources(struct ipu_crtc *ipu_crtc) { if (!IS_ERR_OR_NULL(ipu_crtc->ipu_ch)) ipu_idmac_put(ipu_crtc->ipu_ch); if (!IS_ERR_OR_NULL(ipu_crtc->dmfc)) ipu_dmfc_put(ipu_crtc->dmfc); if (!IS_ERR_OR_NULL(ipu_crtc->dp)) ipu_dp_put(ipu_crtc->dp); if (!IS_ERR_OR_NULL(ipu_crtc->di)) ipu_di_put(ipu_crtc->di); } static int ipu_get_resources(struct ipu_crtc *ipu_crtc, struct ipu_client_platformdata *pdata) { struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); int ret; ipu_crtc->ipu_ch = ipu_idmac_get(ipu, pdata->dma[0]); if (IS_ERR(ipu_crtc->ipu_ch)) { ret = PTR_ERR(ipu_crtc->ipu_ch); goto 
err_out; } ipu_crtc->dc = ipu_dc_get(ipu, pdata->dc); if (IS_ERR(ipu_crtc->dc)) { ret = PTR_ERR(ipu_crtc->dc); goto err_out; } ipu_crtc->dmfc = ipu_dmfc_get(ipu, pdata->dma[0]); if (IS_ERR(ipu_crtc->dmfc)) { ret = PTR_ERR(ipu_crtc->dmfc); goto err_out; } if (pdata->dp >= 0) { ipu_crtc->dp = ipu_dp_get(ipu, pdata->dp); if (IS_ERR(ipu_crtc->dp)) { ret = PTR_ERR(ipu_crtc->dp); goto err_out; } } ipu_crtc->di = ipu_di_get(ipu, pdata->di); if (IS_ERR(ipu_crtc->di)) { ret = PTR_ERR(ipu_crtc->di); goto err_out; } return 0; err_out: ipu_put_resources(ipu_crtc); return ret; } static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, struct ipu_client_platformdata *pdata) { struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); int ret; ret = ipu_get_resources(ipu_crtc, pdata); if (ret) { dev_err(ipu_crtc->dev, "getting resources failed with %d.\n", ret); return ret; } ret = imx_drm_add_crtc(&ipu_crtc->base, &ipu_crtc->imx_crtc, &ipu_crtc_helper_funcs, THIS_MODULE, ipu_crtc->dev->parent->of_node, pdata->di); if (ret) { dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret); goto err_put_resources; } ipu_crtc->irq = ipu_idmac_channel_irq(ipu, ipu_crtc->ipu_ch, IPU_IRQ_EOF); ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler, 0, "imx_drm", ipu_crtc); if (ret < 0) { dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret); goto err_put_resources; } disable_irq(ipu_crtc->irq); return 0; err_put_resources: ipu_put_resources(ipu_crtc); return ret; } static int ipu_drm_probe(struct platform_device *pdev) { struct ipu_client_platformdata *pdata = pdev->dev.platform_data; struct ipu_crtc *ipu_crtc; int ret; if (!pdata) return -EINVAL; pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); ipu_crtc = devm_kzalloc(&pdev->dev, sizeof(*ipu_crtc), GFP_KERNEL); if (!ipu_crtc) return -ENOMEM; ipu_crtc->dev = &pdev->dev; ret = ipu_crtc_init(ipu_crtc, pdata); if (ret) return ret; platform_set_drvdata(pdev, ipu_crtc); return 0; } static int ipu_drm_remove(struct platform_device *pdev) { struct ipu_crtc *ipu_crtc = platform_get_drvdata(pdev); imx_drm_remove_crtc(ipu_crtc->imx_crtc); ipu_put_resources(ipu_crtc); return 0; } static struct platform_driver ipu_drm_driver = { .driver = { .name = "imx-ipuv3-crtc", }, .probe = ipu_drm_probe, .remove = ipu_drm_remove, }; module_platform_driver(ipu_drm_driver); MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
temasek/Kernel-Nexus7
arch/powerpc/kernel/dma-iommu.c
2833
3233
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * busses using the iommu infrastructure
 */

#include <asm/iommu.h>

/*
 * Generic iommu implementation
 */

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag)
{
	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				    dma_handle, dev->coherent_dma_mask, flag,
				    dev_to_node(dev));
}

static void dma_iommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle)
{
	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
			      size, device_to_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size,
			 direction, attrs);
}

static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	return iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
			    device_to_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
			       int nelems, enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, direction,
		       attrs);
}

/* We support DMA to/from any memory page via the iommu */
static int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (!tbl) {
		dev_info(dev, "Warning: IOMMU dma not supported: mask 0x%08llx"
			", table unavailable\n", mask);
		return 0;
	}

	if ((tbl->it_offset + tbl->it_size) > (mask >> IOMMU_PAGE_SHIFT)) {
		dev_info(dev, "Warning: IOMMU window too big for device mask\n");
		dev_info(dev, "mask: 0x%08llx, table end: 0x%08lx\n",
				mask, (tbl->it_offset + tbl->it_size) <<
				IOMMU_PAGE_SHIFT);
		return 0;
	} else
		return 1;
}

struct dma_map_ops dma_iommu_ops = {
	.alloc_coherent	= dma_iommu_alloc_coherent,
	.free_coherent	= dma_iommu_free_coherent,
	.map_sg		= dma_iommu_map_sg,
	.unmap_sg	= dma_iommu_unmap_sg,
	.dma_supported	= dma_iommu_dma_supported,
	.map_page	= dma_iommu_map_page,
	.unmap_page	= dma_iommu_unmap_page,
};
EXPORT_SYMBOL(dma_iommu_ops);
gpl-2.0
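The dma_iommu_ops table above only takes effect once platform or bus code points a device at it. A minimal sketch of that wiring, assuming the powerpc helpers set_dma_ops() and set_iommu_table_base() of the same kernel generation; the helper name and the origin of the iommu_table pointer are illustrative.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/iommu.h>

/* Illustrative: attach a device to the generic IOMMU DMA ops. */
static void example_enable_iommu_dma(struct device *dev, struct iommu_table *tbl)
{
	/* get_iommu_table_base(dev) in the ops above reads this back */
	set_iommu_table_base(dev, tbl);
	set_dma_ops(dev, &dma_iommu_ops);

	/* dma_iommu_dma_supported() rejects masks smaller than the IOMMU window */
	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		dev_warn(dev, "32-bit DMA mask not usable behind this IOMMU\n");
}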
TheBootloader/android_kernel_shooter
arch/powerpc/kernel/dma-iommu.c
2833
3233
/* * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation * * Provide default implementations of the DMA mapping callbacks for * busses using the iommu infrastructure */ #include <asm/iommu.h> /* * Generic iommu implementation */ /* Allocates a contiguous real buffer and creates mappings over it. * Returns the virtual address of the buffer and sets dma_handle * to the dma address (mapping) of the first page. */ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size, dma_handle, dev->coherent_dma_mask, flag, dev_to_node(dev)); } static void dma_iommu_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) { iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle); } /* Creates TCEs for a user provided buffer. The user buffer must be * contiguous real kernel storage (not vmalloc). The address passed here * comprises a page address and offset into that page. The dma_addr_t * returned will point to the same byte within the page as was passed in. */ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) { return iommu_map_page(dev, get_iommu_table_base(dev), page, offset, size, device_to_mask(dev), direction, attrs); } static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) { iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction, attrs); } static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, struct dma_attrs *attrs) { return iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems, device_to_mask(dev), direction, attrs); } static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, struct dma_attrs *attrs) { iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, direction, attrs); } /* We support DMA to/from any memory page via the iommu */ static int dma_iommu_dma_supported(struct device *dev, u64 mask) { struct iommu_table *tbl = get_iommu_table_base(dev); if (!tbl) { dev_info(dev, "Warning: IOMMU dma not supported: mask 0x%08llx" ", table unavailable\n", mask); return 0; } if ((tbl->it_offset + tbl->it_size) > (mask >> IOMMU_PAGE_SHIFT)) { dev_info(dev, "Warning: IOMMU window too big for device mask\n"); dev_info(dev, "mask: 0x%08llx, table end: 0x%08lx\n", mask, (tbl->it_offset + tbl->it_size) << IOMMU_PAGE_SHIFT); return 0; } else return 1; } struct dma_map_ops dma_iommu_ops = { .alloc_coherent = dma_iommu_alloc_coherent, .free_coherent = dma_iommu_free_coherent, .map_sg = dma_iommu_map_sg, .unmap_sg = dma_iommu_unmap_sg, .dma_supported = dma_iommu_dma_supported, .map_page = dma_iommu_map_page, .unmap_page = dma_iommu_unmap_page, }; EXPORT_SYMBOL(dma_iommu_ops);
gpl-2.0
Hundsbuah/tf700t_10_6_1_14_4
sound/soc/jz4740/jz4740-i2s.c
3089
13138
/* * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> #include "jz4740-i2s.h" #include "jz4740-pcm.h" #define JZ_REG_AIC_CONF 0x00 #define JZ_REG_AIC_CTRL 0x04 #define JZ_REG_AIC_I2S_FMT 0x10 #define JZ_REG_AIC_FIFO_STATUS 0x14 #define JZ_REG_AIC_I2S_STATUS 0x1c #define JZ_REG_AIC_CLK_DIV 0x30 #define JZ_REG_AIC_FIFO 0x34 #define JZ_AIC_CONF_FIFO_RX_THRESHOLD_MASK (0xf << 12) #define JZ_AIC_CONF_FIFO_TX_THRESHOLD_MASK (0xf << 8) #define JZ_AIC_CONF_OVERFLOW_PLAY_LAST BIT(6) #define JZ_AIC_CONF_INTERNAL_CODEC BIT(5) #define JZ_AIC_CONF_I2S BIT(4) #define JZ_AIC_CONF_RESET BIT(3) #define JZ_AIC_CONF_BIT_CLK_MASTER BIT(2) #define JZ_AIC_CONF_SYNC_CLK_MASTER BIT(1) #define JZ_AIC_CONF_ENABLE BIT(0) #define JZ_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET 12 #define JZ_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET 8 #define JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_MASK (0x7 << 19) #define JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_MASK (0x7 << 16) #define JZ_AIC_CTRL_ENABLE_RX_DMA BIT(15) #define JZ_AIC_CTRL_ENABLE_TX_DMA BIT(14) #define JZ_AIC_CTRL_MONO_TO_STEREO BIT(11) #define JZ_AIC_CTRL_SWITCH_ENDIANNESS BIT(10) #define JZ_AIC_CTRL_SIGNED_TO_UNSIGNED BIT(9) #define JZ_AIC_CTRL_FLUSH BIT(8) #define JZ_AIC_CTRL_ENABLE_ROR_INT BIT(6) #define JZ_AIC_CTRL_ENABLE_TUR_INT BIT(5) #define JZ_AIC_CTRL_ENABLE_RFS_INT BIT(4) #define JZ_AIC_CTRL_ENABLE_TFS_INT BIT(3) #define JZ_AIC_CTRL_ENABLE_LOOPBACK BIT(2) #define JZ_AIC_CTRL_ENABLE_PLAYBACK BIT(1) #define JZ_AIC_CTRL_ENABLE_CAPTURE BIT(0) #define JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_OFFSET 19 #define JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_OFFSET 16 #define JZ_AIC_I2S_FMT_DISABLE_BIT_CLK BIT(12) #define JZ_AIC_I2S_FMT_ENABLE_SYS_CLK BIT(4) #define JZ_AIC_I2S_FMT_MSB BIT(0) #define JZ_AIC_I2S_STATUS_BUSY BIT(2) #define JZ_AIC_CLK_DIV_MASK 0xf struct jz4740_i2s { struct resource *mem; void __iomem *base; dma_addr_t phys_base; struct clk *clk_aic; struct clk *clk_i2s; struct jz4740_pcm_config pcm_config_playback; struct jz4740_pcm_config pcm_config_capture; }; static inline uint32_t jz4740_i2s_read(const struct jz4740_i2s *i2s, unsigned int reg) { return readl(i2s->base + reg); } static inline void jz4740_i2s_write(const struct jz4740_i2s *i2s, unsigned int reg, uint32_t value) { writel(value, i2s->base + reg); } static int jz4740_i2s_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t conf, ctrl; if (dai->active) return 0; ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL); ctrl |= JZ_AIC_CTRL_FLUSH; jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl); clk_enable(i2s->clk_i2s); conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); conf |= JZ_AIC_CONF_ENABLE; jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf); return 0; } static void 
jz4740_i2s_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t conf; if (dai->active) return; conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); conf &= ~JZ_AIC_CONF_ENABLE; jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf); clk_disable(i2s->clk_i2s); } static int jz4740_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t ctrl; uint32_t mask; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) mask = JZ_AIC_CTRL_ENABLE_PLAYBACK | JZ_AIC_CTRL_ENABLE_TX_DMA; else mask = JZ_AIC_CTRL_ENABLE_CAPTURE | JZ_AIC_CTRL_ENABLE_RX_DMA; ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: ctrl |= mask; break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: ctrl &= ~mask; break; default: return -EINVAL; } jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl); return 0; } static int jz4740_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t format = 0; uint32_t conf; conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); conf &= ~(JZ_AIC_CONF_BIT_CLK_MASTER | JZ_AIC_CONF_SYNC_CLK_MASTER); switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: conf |= JZ_AIC_CONF_BIT_CLK_MASTER | JZ_AIC_CONF_SYNC_CLK_MASTER; format |= JZ_AIC_I2S_FMT_ENABLE_SYS_CLK; break; case SND_SOC_DAIFMT_CBM_CFS: conf |= JZ_AIC_CONF_SYNC_CLK_MASTER; break; case SND_SOC_DAIFMT_CBS_CFM: conf |= JZ_AIC_CONF_BIT_CLK_MASTER; break; case SND_SOC_DAIFMT_CBM_CFM: break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_MSB: format |= JZ_AIC_I2S_FMT_MSB; break; case SND_SOC_DAIFMT_I2S: break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; default: return -EINVAL; } jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf); jz4740_i2s_write(i2s, JZ_REG_AIC_I2S_FMT, format); return 0; } static int jz4740_i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); enum jz4740_dma_width dma_width; struct jz4740_pcm_config *pcm_config; unsigned int sample_size; uint32_t ctrl; ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL); switch (params_format(params)) { case SNDRV_PCM_FORMAT_S8: sample_size = 0; dma_width = JZ4740_DMA_WIDTH_8BIT; break; case SNDRV_PCM_FORMAT_S16: sample_size = 1; dma_width = JZ4740_DMA_WIDTH_16BIT; break; default: return -EINVAL; } if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { ctrl &= ~JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_MASK; ctrl |= sample_size << JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_OFFSET; if (params_channels(params) == 1) ctrl |= JZ_AIC_CTRL_MONO_TO_STEREO; else ctrl &= ~JZ_AIC_CTRL_MONO_TO_STEREO; pcm_config = &i2s->pcm_config_playback; pcm_config->dma_config.dst_width = dma_width; } else { ctrl &= ~JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_MASK; ctrl |= sample_size << JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_OFFSET; pcm_config = &i2s->pcm_config_capture; pcm_config->dma_config.src_width = dma_width; } jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl); snd_soc_dai_set_dma_data(dai, substream, pcm_config); return 0; } static int jz4740_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); struct clk 
*parent; int ret = 0; switch (clk_id) { case JZ4740_I2S_CLKSRC_EXT: parent = clk_get(NULL, "ext"); clk_set_parent(i2s->clk_i2s, parent); break; case JZ4740_I2S_CLKSRC_PLL: parent = clk_get(NULL, "pll half"); clk_set_parent(i2s->clk_i2s, parent); ret = clk_set_rate(i2s->clk_i2s, freq); break; default: return -EINVAL; } clk_put(parent); return ret; } static int jz4740_i2s_suspend(struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t conf; if (dai->active) { conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); conf &= ~JZ_AIC_CONF_ENABLE; jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf); clk_disable(i2s->clk_i2s); } clk_disable(i2s->clk_aic); return 0; } static int jz4740_i2s_resume(struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t conf; clk_enable(i2s->clk_aic); if (dai->active) { clk_enable(i2s->clk_i2s); conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); conf |= JZ_AIC_CONF_ENABLE; jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf); } return 0; } static void jz4740_i2c_init_pcm_config(struct jz4740_i2s *i2s) { struct jz4740_dma_config *dma_config; /* Playback */ dma_config = &i2s->pcm_config_playback.dma_config; dma_config->src_width = JZ4740_DMA_WIDTH_32BIT, dma_config->transfer_size = JZ4740_DMA_TRANSFER_SIZE_16BYTE; dma_config->request_type = JZ4740_DMA_TYPE_AIC_TRANSMIT; dma_config->flags = JZ4740_DMA_SRC_AUTOINC; dma_config->mode = JZ4740_DMA_MODE_SINGLE; i2s->pcm_config_playback.fifo_addr = i2s->phys_base + JZ_REG_AIC_FIFO; /* Capture */ dma_config = &i2s->pcm_config_capture.dma_config; dma_config->dst_width = JZ4740_DMA_WIDTH_32BIT, dma_config->transfer_size = JZ4740_DMA_TRANSFER_SIZE_16BYTE; dma_config->request_type = JZ4740_DMA_TYPE_AIC_RECEIVE; dma_config->flags = JZ4740_DMA_DST_AUTOINC; dma_config->mode = JZ4740_DMA_MODE_SINGLE; i2s->pcm_config_capture.fifo_addr = i2s->phys_base + JZ_REG_AIC_FIFO; } static int jz4740_i2s_dai_probe(struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t conf; clk_enable(i2s->clk_aic); jz4740_i2c_init_pcm_config(i2s); conf = (7 << JZ_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET) | (8 << JZ_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET) | JZ_AIC_CONF_OVERFLOW_PLAY_LAST | JZ_AIC_CONF_I2S | JZ_AIC_CONF_INTERNAL_CODEC; jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, JZ_AIC_CONF_RESET); jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf); return 0; } static int jz4740_i2s_dai_remove(struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); clk_disable(i2s->clk_aic); return 0; } static struct snd_soc_dai_ops jz4740_i2s_dai_ops = { .startup = jz4740_i2s_startup, .shutdown = jz4740_i2s_shutdown, .trigger = jz4740_i2s_trigger, .hw_params = jz4740_i2s_hw_params, .set_fmt = jz4740_i2s_set_fmt, .set_sysclk = jz4740_i2s_set_sysclk, }; #define JZ4740_I2S_FMTS (SNDRV_PCM_FMTBIT_S8 | \ SNDRV_PCM_FMTBIT_S16_LE) static struct snd_soc_dai_driver jz4740_i2s_dai = { .probe = jz4740_i2s_dai_probe, .remove = jz4740_i2s_dai_remove, .playback = { .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = JZ4740_I2S_FMTS, }, .capture = { .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = JZ4740_I2S_FMTS, }, .symmetric_rates = 1, .ops = &jz4740_i2s_dai_ops, .suspend = jz4740_i2s_suspend, .resume = jz4740_i2s_resume, }; static int __devinit jz4740_i2s_dev_probe(struct platform_device *pdev) { struct jz4740_i2s *i2s; int ret; i2s = kzalloc(sizeof(*i2s), GFP_KERNEL); if (!i2s) return -ENOMEM; i2s->mem = platform_get_resource(pdev, 
IORESOURCE_MEM, 0); if (!i2s->mem) { ret = -ENOENT; goto err_free; } i2s->mem = request_mem_region(i2s->mem->start, resource_size(i2s->mem), pdev->name); if (!i2s->mem) { ret = -EBUSY; goto err_free; } i2s->base = ioremap_nocache(i2s->mem->start, resource_size(i2s->mem)); if (!i2s->base) { ret = -EBUSY; goto err_release_mem_region; } i2s->phys_base = i2s->mem->start; i2s->clk_aic = clk_get(&pdev->dev, "aic"); if (IS_ERR(i2s->clk_aic)) { ret = PTR_ERR(i2s->clk_aic); goto err_iounmap; } i2s->clk_i2s = clk_get(&pdev->dev, "i2s"); if (IS_ERR(i2s->clk_i2s)) { ret = PTR_ERR(i2s->clk_i2s); goto err_clk_put_aic; } platform_set_drvdata(pdev, i2s); ret = snd_soc_register_dai(&pdev->dev, &jz4740_i2s_dai); if (ret) { dev_err(&pdev->dev, "Failed to register DAI\n"); goto err_clk_put_i2s; } return 0; err_clk_put_i2s: clk_put(i2s->clk_i2s); err_clk_put_aic: clk_put(i2s->clk_aic); err_iounmap: iounmap(i2s->base); err_release_mem_region: release_mem_region(i2s->mem->start, resource_size(i2s->mem)); err_free: kfree(i2s); return ret; } static int __devexit jz4740_i2s_dev_remove(struct platform_device *pdev) { struct jz4740_i2s *i2s = platform_get_drvdata(pdev); snd_soc_unregister_dai(&pdev->dev); clk_put(i2s->clk_i2s); clk_put(i2s->clk_aic); iounmap(i2s->base); release_mem_region(i2s->mem->start, resource_size(i2s->mem)); platform_set_drvdata(pdev, NULL); kfree(i2s); return 0; } static struct platform_driver jz4740_i2s_driver = { .probe = jz4740_i2s_dev_probe, .remove = __devexit_p(jz4740_i2s_dev_remove), .driver = { .name = "jz4740-i2s", .owner = THIS_MODULE, }, }; static int __init jz4740_i2s_init(void) { return platform_driver_register(&jz4740_i2s_driver); } module_init(jz4740_i2s_init); static void __exit jz4740_i2s_exit(void) { platform_driver_unregister(&jz4740_i2s_driver); } module_exit(jz4740_i2s_exit); MODULE_AUTHOR("Lars-Peter Clausen, <lars@metafoo.de>"); MODULE_DESCRIPTION("Ingenic JZ4740 SoC I2S driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:jz4740-i2s");
gpl-2.0
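jz4740_i2s_set_sysclk() above leaves the clock-source choice to the machine driver. A sketch of such a call from a board-level hw_params hook, assuming the ASoC helpers of the same kernel generation; the callback name and the 256*fs multiplier are illustrative.

#include <sound/soc.h>
#include <sound/pcm_params.h>
#include "jz4740-i2s.h"

/* Illustrative machine-driver hw_params: source the bit clock from the PLL. */
static int example_board_hw_params(struct snd_pcm_substream *substream,
				   struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;

	/* 256*fs is a common sysclk choice; jz4740_i2s_set_sysclk() then
	 * reparents clk_i2s to "pll half" and sets it to this rate. */
	return snd_soc_dai_set_sysclk(cpu_dai, JZ4740_I2S_CLKSRC_PLL,
				      params_rate(params) * 256,
				      SND_SOC_CLOCK_IN);
}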
jakew02/android_kernel_lge_bullhead
drivers/isdn/gigaset/usb-gigaset.c
3089
25328
/* * USB driver for Gigaset 307x directly or using M105 Data. * * Copyright (c) 2001 by Stefan Eilers * and Hansjoerg Lipp <hjlipp@web.de>. * * This driver was derived from the USB skeleton driver by * Greg Kroah-Hartman <greg@kroah.com> * * ===================================================================== * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * ===================================================================== */ #include "gigaset.h" #include <linux/usb.h> #include <linux/module.h> #include <linux/moduleparam.h> /* Version Information */ #define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Stefan Eilers" #define DRIVER_DESC "USB Driver for Gigaset 307x using M105" /* Module parameters */ static int startmode = SM_ISDN; static int cidmode = 1; module_param(startmode, int, S_IRUGO); module_param(cidmode, int, S_IRUGO); MODULE_PARM_DESC(startmode, "start in isdn4linux mode"); MODULE_PARM_DESC(cidmode, "Call-ID mode"); #define GIGASET_MINORS 1 #define GIGASET_MINOR 8 #define GIGASET_MODULENAME "usb_gigaset" #define GIGASET_DEVNAME "ttyGU" /* length limit according to Siemens 3070usb-protokoll.doc ch. 2.1 */ #define IF_WRITEBUF 264 /* Values for the Gigaset M105 Data */ #define USB_M105_VENDOR_ID 0x0681 #define USB_M105_PRODUCT_ID 0x0009 /* table of devices that work with this driver */ static const struct usb_device_id gigaset_table[] = { { USB_DEVICE(USB_M105_VENDOR_ID, USB_M105_PRODUCT_ID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, gigaset_table); /* * Control requests (empty fields: 00) * * RT|RQ|VALUE|INDEX|LEN |DATA * In: * C1 08 01 * Get flags (1 byte). Bits: 0=dtr,1=rts,3-7:? * C1 0F ll ll * Get device information/status (llll: 0x200 and 0x40 seen). * Real size: I only saw MIN(llll,0x64). * Contents: seems to be always the same... * offset 0x00: Length of this structure (0x64) (len: 1,2,3 bytes) * offset 0x3c: String (16 bit chars): "MCCI USB Serial V2.0" * rest: ? * Out: * 41 11 * Initialize/reset device ? * 41 00 xx 00 * ? (xx=00 or 01; 01 on start, 00 on close) * 41 07 vv mm * Set/clear flags vv=value, mm=mask (see RQ 08) * 41 12 xx * Used before the following configuration requests are issued * (with xx=0x0f). I've seen other values<0xf, though. * 41 01 xx xx * Set baud rate. xxxx=ceil(0x384000/rate)=trunc(0x383fff/rate)+1. * 41 03 ps bb * Set byte size and parity. p: 0x20=even,0x10=odd,0x00=no parity * [ 0x30: m, 0x40: s ] * [s: 0: 1 stop bit; 1: 1.5; 2: 2] * bb: bits/byte (seen 7 and 8) * 41 13 -- -- -- -- 10 00 ww 00 00 00 xx 00 00 00 yy 00 00 00 zz 00 00 00 * ?? * Initialization: 01, 40, 00, 00 * Open device: 00 40, 00, 00 * yy and zz seem to be equal, either 0x00 or 0x0a * (ww,xx) pairs seen: (00,00), (00,40), (01,40), (09,80), (19,80) * 41 19 -- -- -- -- 06 00 00 00 00 xx 11 13 * Used after every "configuration sequence" (RQ 12, RQs 01/03/13). * xx is usually 0x00 but was 0x7e before starting data transfer * in unimodem mode. So, this might be an array of characters that * need special treatment ("commit all bufferd data"?), 11=^Q, 13=^S. * * Unimodem mode: use "modprobe ppp_async flag_time=0" as the device _needs_ two * flags per packet. 
*/ /* functions called if a device of this driver is connected/disconnected */ static int gigaset_probe(struct usb_interface *interface, const struct usb_device_id *id); static void gigaset_disconnect(struct usb_interface *interface); /* functions called before/after suspend */ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message); static int gigaset_resume(struct usb_interface *intf); static int gigaset_pre_reset(struct usb_interface *intf); static struct gigaset_driver *driver; /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver gigaset_usb_driver = { .name = GIGASET_MODULENAME, .probe = gigaset_probe, .disconnect = gigaset_disconnect, .id_table = gigaset_table, .suspend = gigaset_suspend, .resume = gigaset_resume, .reset_resume = gigaset_resume, .pre_reset = gigaset_pre_reset, .post_reset = gigaset_resume, .disable_hub_initiated_lpm = 1, }; struct usb_cardstate { struct usb_device *udev; /* usb device pointer */ struct usb_interface *interface; /* interface for this device */ int busy; /* bulk output in progress */ /* Output buffer */ unsigned char *bulk_out_buffer; int bulk_out_size; __u8 bulk_out_endpointAddr; struct urb *bulk_out_urb; /* Input buffer */ unsigned char *rcvbuf; int rcvbuf_size; struct urb *read_urb; __u8 int_in_endpointAddr; char bchars[6]; /* for request 0x19 */ }; static inline unsigned tiocm_to_gigaset(unsigned state) { return ((state & TIOCM_DTR) ? 1 : 0) | ((state & TIOCM_RTS) ? 2 : 0); } static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, unsigned new_state) { struct usb_device *udev = cs->hw.usb->udev; unsigned mask, val; int r; mask = tiocm_to_gigaset(old_state ^ new_state); val = tiocm_to_gigaset(new_state); gig_dbg(DEBUG_USBREQ, "set flags 0x%02x with mask 0x%02x", val, mask); r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 7, 0x41, (val & 0xff) | ((mask & 0xff) << 8), 0, NULL, 0, 2000 /* timeout? */); if (r < 0) return r; return 0; } /* * Set M105 configuration value * using undocumented device commands reverse engineered from USB traces * of the Siemens Windows driver */ static int set_value(struct cardstate *cs, u8 req, u16 val) { struct usb_device *udev = cs->hw.usb->udev; int r, r2; gig_dbg(DEBUG_USBREQ, "request %02x (%04x)", (unsigned)req, (unsigned)val); r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x12, 0x41, 0xf /*?*/, 0, NULL, 0, 2000 /*?*/); /* no idea what this does */ if (r < 0) { dev_err(&udev->dev, "error %d on request 0x12\n", -r); return r; } r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), req, 0x41, val, 0, NULL, 0, 2000 /*?*/); if (r < 0) dev_err(&udev->dev, "error %d on request 0x%02x\n", -r, (unsigned)req); r2 = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41, 0, 0, cs->hw.usb->bchars, 6, 2000 /*?*/); if (r2 < 0) dev_err(&udev->dev, "error %d on request 0x19\n", -r2); return r < 0 ? r : (r2 < 0 ? 
r2 : 0); } /* * set the baud rate on the internal serial adapter * using the undocumented parameter setting command */ static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag) { u16 val; u32 rate; cflag &= CBAUD; switch (cflag) { case B300: rate = 300; break; case B600: rate = 600; break; case B1200: rate = 1200; break; case B2400: rate = 2400; break; case B4800: rate = 4800; break; case B9600: rate = 9600; break; case B19200: rate = 19200; break; case B38400: rate = 38400; break; case B57600: rate = 57600; break; case B115200: rate = 115200; break; default: rate = 9600; dev_err(cs->dev, "unsupported baudrate request 0x%x," " using default of B9600\n", cflag); } val = 0x383fff / rate + 1; return set_value(cs, 1, val); } /* * set the line format on the internal serial adapter * using the undocumented parameter setting command */ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag) { u16 val = 0; /* set the parity */ if (cflag & PARENB) val |= (cflag & PARODD) ? 0x10 : 0x20; /* set the number of data bits */ switch (cflag & CSIZE) { case CS5: val |= 5 << 8; break; case CS6: val |= 6 << 8; break; case CS7: val |= 7 << 8; break; case CS8: val |= 8 << 8; break; default: dev_err(cs->dev, "CSIZE was not CS5-CS8, using default of 8\n"); val |= 8 << 8; break; } /* set the number of stop bits */ if (cflag & CSTOPB) { if ((cflag & CSIZE) == CS5) val |= 1; /* 1.5 stop bits */ else val |= 2; /* 2 stop bits */ } return set_value(cs, 3, val); } /*============================================================================*/ static int gigaset_init_bchannel(struct bc_state *bcs) { /* nothing to do for M10x */ gigaset_bchannel_up(bcs); return 0; } static int gigaset_close_bchannel(struct bc_state *bcs) { /* nothing to do for M10x */ gigaset_bchannel_down(bcs); return 0; } static int write_modem(struct cardstate *cs); static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb); /* Write tasklet handler: Continue sending current skb, or send command, or * start sending an skb from the send queue. */ static void gigaset_modem_fill(unsigned long data) { struct cardstate *cs = (struct cardstate *) data; struct bc_state *bcs = &cs->bcs[0]; /* only one channel */ struct cmdbuf_t *cb; int again; gig_dbg(DEBUG_OUTPUT, "modem_fill"); if (cs->hw.usb->busy) { gig_dbg(DEBUG_OUTPUT, "modem_fill: busy"); return; } do { again = 0; if (!bcs->tx_skb) { /* no skb is being sent */ cb = cs->cmdbuf; if (cb) { /* commands to send? */ gig_dbg(DEBUG_OUTPUT, "modem_fill: cb"); if (send_cb(cs, cb) < 0) { gig_dbg(DEBUG_OUTPUT, "modem_fill: send_cb failed"); again = 1; /* no callback will be called! */ } } else { /* skbs to send? */ bcs->tx_skb = skb_dequeue(&bcs->squeue); if (bcs->tx_skb) gig_dbg(DEBUG_INTR, "Dequeued skb (Adr: %lx)!", (unsigned long) bcs->tx_skb); } } if (bcs->tx_skb) { gig_dbg(DEBUG_OUTPUT, "modem_fill: tx_skb"); if (write_modem(cs) < 0) { gig_dbg(DEBUG_OUTPUT, "modem_fill: write_modem failed"); again = 1; /* no callback will be called! 
*/ } } } while (again); } /* * Interrupt Input URB completion routine */ static void gigaset_read_int_callback(struct urb *urb) { struct cardstate *cs = urb->context; struct inbuf_t *inbuf = cs->inbuf; int status = urb->status; int r; unsigned numbytes; unsigned char *src; unsigned long flags; if (!status) { numbytes = urb->actual_length; if (numbytes) { src = cs->hw.usb->rcvbuf; if (unlikely(*src)) dev_warn(cs->dev, "%s: There was no leading 0, but 0x%02x!\n", __func__, (unsigned) *src); ++src; /* skip leading 0x00 */ --numbytes; if (gigaset_fill_inbuf(inbuf, src, numbytes)) { gig_dbg(DEBUG_INTR, "%s-->BH", __func__); gigaset_schedule_event(inbuf->cs); } } else gig_dbg(DEBUG_INTR, "Received zero block length"); } else { /* The urb might have been killed. */ gig_dbg(DEBUG_ANY, "%s - nonzero status received: %d", __func__, status); if (status == -ENOENT || status == -ESHUTDOWN) /* killed or endpoint shutdown: don't resubmit */ return; } /* resubmit URB */ spin_lock_irqsave(&cs->lock, flags); if (!cs->connected) { spin_unlock_irqrestore(&cs->lock, flags); pr_err("%s: disconnected\n", __func__); return; } r = usb_submit_urb(urb, GFP_ATOMIC); spin_unlock_irqrestore(&cs->lock, flags); if (r) dev_err(cs->dev, "error %d resubmitting URB\n", -r); } /* This callback routine is called when data was transmitted to the device. */ static void gigaset_write_bulk_callback(struct urb *urb) { struct cardstate *cs = urb->context; int status = urb->status; unsigned long flags; switch (status) { case 0: /* normal completion */ break; case -ENOENT: /* killed */ gig_dbg(DEBUG_ANY, "%s: killed", __func__); cs->hw.usb->busy = 0; return; default: dev_err(cs->dev, "bulk transfer failed (status %d)\n", -status); /* That's all we can do. Communication problems are handled by timeouts or network protocols. */ } spin_lock_irqsave(&cs->lock, flags); if (!cs->connected) { pr_err("%s: disconnected\n", __func__); } else { cs->hw.usb->busy = 0; tasklet_schedule(&cs->write_tasklet); } spin_unlock_irqrestore(&cs->lock, flags); } static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb) { struct cmdbuf_t *tcb; unsigned long flags; int count; int status = -ENOENT; struct usb_cardstate *ucs = cs->hw.usb; do { if (!cb->len) { tcb = cb; spin_lock_irqsave(&cs->cmdlock, flags); cs->cmdbytes -= cs->curlen; gig_dbg(DEBUG_OUTPUT, "send_cb: sent %u bytes, %u left", cs->curlen, cs->cmdbytes); cs->cmdbuf = cb = cb->next; if (cb) { cb->prev = NULL; cs->curlen = cb->len; } else { cs->lastcmdbuf = NULL; cs->curlen = 0; } spin_unlock_irqrestore(&cs->cmdlock, flags); if (tcb->wake_tasklet) tasklet_schedule(tcb->wake_tasklet); kfree(tcb); } if (cb) { count = min(cb->len, ucs->bulk_out_size); gig_dbg(DEBUG_OUTPUT, "send_cb: send %d bytes", count); usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev, usb_sndbulkpipe(ucs->udev, ucs->bulk_out_endpointAddr & 0x0f), cb->buf + cb->offset, count, gigaset_write_bulk_callback, cs); cb->offset += count; cb->len -= count; ucs->busy = 1; spin_lock_irqsave(&cs->lock, flags); status = cs->connected ? usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) : -ENODEV; spin_unlock_irqrestore(&cs->lock, flags); if (status) { ucs->busy = 0; dev_err(cs->dev, "could not submit urb (error %d)\n", -status); cb->len = 0; /* skip urb => remove cb+wakeup in next loop cycle */ } } } while (cb && status); /* next command on error */ return status; } /* Send command to device. */ static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb) { unsigned long flags; gigaset_dbg_buffer(cs->mstate != MS_LOCKED ? 
DEBUG_TRANSCMD : DEBUG_LOCKCMD, "CMD Transmit", cb->len, cb->buf); spin_lock_irqsave(&cs->cmdlock, flags); cb->prev = cs->lastcmdbuf; if (cs->lastcmdbuf) cs->lastcmdbuf->next = cb; else { cs->cmdbuf = cb; cs->curlen = cb->len; } cs->cmdbytes += cb->len; cs->lastcmdbuf = cb; spin_unlock_irqrestore(&cs->cmdlock, flags); spin_lock_irqsave(&cs->lock, flags); if (cs->connected) tasklet_schedule(&cs->write_tasklet); spin_unlock_irqrestore(&cs->lock, flags); return cb->len; } static int gigaset_write_room(struct cardstate *cs) { unsigned bytes; bytes = cs->cmdbytes; return bytes < IF_WRITEBUF ? IF_WRITEBUF - bytes : 0; } static int gigaset_chars_in_buffer(struct cardstate *cs) { return cs->cmdbytes; } /* * set the break characters on the internal serial adapter * using undocumented device commands reverse engineered from USB traces * of the Siemens Windows driver */ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6]) { struct usb_device *udev = cs->hw.usb->udev; gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf); memcpy(cs->hw.usb->bchars, buf, 6); return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41, 0, 0, &buf, 6, 2000); } static void gigaset_freebcshw(struct bc_state *bcs) { /* unused */ } /* Initialize the b-channel structure */ static int gigaset_initbcshw(struct bc_state *bcs) { /* unused */ bcs->hw.usb = NULL; return 0; } static void gigaset_reinitbcshw(struct bc_state *bcs) { /* nothing to do for M10x */ } static void gigaset_freecshw(struct cardstate *cs) { tasklet_kill(&cs->write_tasklet); kfree(cs->hw.usb); } static int gigaset_initcshw(struct cardstate *cs) { struct usb_cardstate *ucs; cs->hw.usb = ucs = kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL); if (!ucs) { pr_err("out of memory\n"); return -ENOMEM; } ucs->bchars[0] = 0; ucs->bchars[1] = 0; ucs->bchars[2] = 0; ucs->bchars[3] = 0; ucs->bchars[4] = 0x11; ucs->bchars[5] = 0x13; ucs->bulk_out_buffer = NULL; ucs->bulk_out_urb = NULL; ucs->read_urb = NULL; tasklet_init(&cs->write_tasklet, gigaset_modem_fill, (unsigned long) cs); return 0; } /* Send data from current skb to the device. 
*/ static int write_modem(struct cardstate *cs) { int ret = 0; int count; struct bc_state *bcs = &cs->bcs[0]; /* only one channel */ struct usb_cardstate *ucs = cs->hw.usb; unsigned long flags; gig_dbg(DEBUG_OUTPUT, "len: %d...", bcs->tx_skb->len); if (!bcs->tx_skb->len) { dev_kfree_skb_any(bcs->tx_skb); bcs->tx_skb = NULL; return -EINVAL; } /* Copy data to bulk out buffer and transmit data */ count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size); skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count); skb_pull(bcs->tx_skb, count); ucs->busy = 1; gig_dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count); spin_lock_irqsave(&cs->lock, flags); if (cs->connected) { usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev, usb_sndbulkpipe(ucs->udev, ucs->bulk_out_endpointAddr & 0x0f), ucs->bulk_out_buffer, count, gigaset_write_bulk_callback, cs); ret = usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC); } else { ret = -ENODEV; } spin_unlock_irqrestore(&cs->lock, flags); if (ret) { dev_err(cs->dev, "could not submit urb (error %d)\n", -ret); ucs->busy = 0; } if (!bcs->tx_skb->len) { /* skb sent completely */ gigaset_skb_sent(bcs, bcs->tx_skb); gig_dbg(DEBUG_INTR, "kfree skb (Adr: %lx)!", (unsigned long) bcs->tx_skb); dev_kfree_skb_any(bcs->tx_skb); bcs->tx_skb = NULL; } return ret; } static int gigaset_probe(struct usb_interface *interface, const struct usb_device_id *id) { int retval; struct usb_device *udev = interface_to_usbdev(interface); struct usb_host_interface *hostif = interface->cur_altsetting; struct cardstate *cs = NULL; struct usb_cardstate *ucs = NULL; struct usb_endpoint_descriptor *endpoint; int buffer_size; gig_dbg(DEBUG_ANY, "%s: Check if device matches ...", __func__); /* See if the device offered us matches what we can accept */ if ((le16_to_cpu(udev->descriptor.idVendor) != USB_M105_VENDOR_ID) || (le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID)) { gig_dbg(DEBUG_ANY, "device ID (0x%x, 0x%x) not for me - skip", le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct)); return -ENODEV; } if (hostif->desc.bInterfaceNumber != 0) { gig_dbg(DEBUG_ANY, "interface %d not for me - skip", hostif->desc.bInterfaceNumber); return -ENODEV; } if (hostif->desc.bAlternateSetting != 0) { dev_notice(&udev->dev, "unsupported altsetting %d - skip", hostif->desc.bAlternateSetting); return -ENODEV; } if (hostif->desc.bInterfaceClass != 255) { dev_notice(&udev->dev, "unsupported interface class %d - skip", hostif->desc.bInterfaceClass); return -ENODEV; } dev_info(&udev->dev, "%s: Device matched ... 
!\n", __func__); /* allocate memory for our device state and initialize it */ cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME); if (!cs) return -ENODEV; ucs = cs->hw.usb; /* save off device structure ptrs for later use */ usb_get_dev(udev); ucs->udev = udev; ucs->interface = interface; cs->dev = &interface->dev; /* save address of controller structure */ usb_set_intfdata(interface, cs); endpoint = &hostif->endpoint[0].desc; buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); ucs->bulk_out_size = buffer_size; ucs->bulk_out_endpointAddr = endpoint->bEndpointAddress; ucs->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL); if (!ucs->bulk_out_buffer) { dev_err(cs->dev, "Couldn't allocate bulk_out_buffer\n"); retval = -ENOMEM; goto error; } ucs->bulk_out_urb = usb_alloc_urb(0, GFP_KERNEL); if (!ucs->bulk_out_urb) { dev_err(cs->dev, "Couldn't allocate bulk_out_urb\n"); retval = -ENOMEM; goto error; } endpoint = &hostif->endpoint[1].desc; ucs->busy = 0; ucs->read_urb = usb_alloc_urb(0, GFP_KERNEL); if (!ucs->read_urb) { dev_err(cs->dev, "No free urbs available\n"); retval = -ENOMEM; goto error; } buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); ucs->rcvbuf_size = buffer_size; ucs->int_in_endpointAddr = endpoint->bEndpointAddress; ucs->rcvbuf = kmalloc(buffer_size, GFP_KERNEL); if (!ucs->rcvbuf) { dev_err(cs->dev, "Couldn't allocate rcvbuf\n"); retval = -ENOMEM; goto error; } /* Fill the interrupt urb and send it to the core */ usb_fill_int_urb(ucs->read_urb, udev, usb_rcvintpipe(udev, endpoint->bEndpointAddress & 0x0f), ucs->rcvbuf, buffer_size, gigaset_read_int_callback, cs, endpoint->bInterval); retval = usb_submit_urb(ucs->read_urb, GFP_KERNEL); if (retval) { dev_err(cs->dev, "Could not submit URB (error %d)\n", -retval); goto error; } /* tell common part that the device is ready */ if (startmode == SM_LOCKED) cs->mstate = MS_LOCKED; retval = gigaset_start(cs); if (retval < 0) { tasklet_kill(&cs->write_tasklet); goto error; } return 0; error: usb_kill_urb(ucs->read_urb); kfree(ucs->bulk_out_buffer); usb_free_urb(ucs->bulk_out_urb); kfree(ucs->rcvbuf); usb_free_urb(ucs->read_urb); usb_set_intfdata(interface, NULL); ucs->read_urb = ucs->bulk_out_urb = NULL; ucs->rcvbuf = ucs->bulk_out_buffer = NULL; usb_put_dev(ucs->udev); ucs->udev = NULL; ucs->interface = NULL; gigaset_freecs(cs); return retval; } static void gigaset_disconnect(struct usb_interface *interface) { struct cardstate *cs; struct usb_cardstate *ucs; cs = usb_get_intfdata(interface); ucs = cs->hw.usb; dev_info(cs->dev, "disconnecting Gigaset USB adapter\n"); usb_kill_urb(ucs->read_urb); gigaset_stop(cs); usb_set_intfdata(interface, NULL); tasklet_kill(&cs->write_tasklet); usb_kill_urb(ucs->bulk_out_urb); kfree(ucs->bulk_out_buffer); usb_free_urb(ucs->bulk_out_urb); kfree(ucs->rcvbuf); usb_free_urb(ucs->read_urb); ucs->read_urb = ucs->bulk_out_urb = NULL; ucs->rcvbuf = ucs->bulk_out_buffer = NULL; usb_put_dev(ucs->udev); ucs->interface = NULL; ucs->udev = NULL; cs->dev = NULL; gigaset_freecs(cs); } /* gigaset_suspend * This function is called before the USB connection is suspended or reset. 
*/ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message) { struct cardstate *cs = usb_get_intfdata(intf); /* stop activity */ cs->connected = 0; /* prevent rescheduling */ usb_kill_urb(cs->hw.usb->read_urb); tasklet_kill(&cs->write_tasklet); usb_kill_urb(cs->hw.usb->bulk_out_urb); gig_dbg(DEBUG_SUSPEND, "suspend complete"); return 0; } /* gigaset_resume * This function is called after the USB connection has been resumed or reset. */ static int gigaset_resume(struct usb_interface *intf) { struct cardstate *cs = usb_get_intfdata(intf); int rc; /* resubmit interrupt URB */ cs->connected = 1; rc = usb_submit_urb(cs->hw.usb->read_urb, GFP_KERNEL); if (rc) { dev_err(cs->dev, "Could not submit read URB (error %d)\n", -rc); return rc; } gig_dbg(DEBUG_SUSPEND, "resume complete"); return 0; } /* gigaset_pre_reset * This function is called before the USB connection is reset. */ static int gigaset_pre_reset(struct usb_interface *intf) { /* same as suspend */ return gigaset_suspend(intf, PMSG_ON); } static const struct gigaset_ops ops = { gigaset_write_cmd, gigaset_write_room, gigaset_chars_in_buffer, gigaset_brkchars, gigaset_init_bchannel, gigaset_close_bchannel, gigaset_initbcshw, gigaset_freebcshw, gigaset_reinitbcshw, gigaset_initcshw, gigaset_freecshw, gigaset_set_modem_ctrl, gigaset_baud_rate, gigaset_set_line_ctrl, gigaset_m10x_send_skb, gigaset_m10x_input, }; /* * This function is called while kernel-module is loaded */ static int __init usb_gigaset_init(void) { int result; /* allocate memory for our driver state and initialize it */ driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, GIGASET_MODULENAME, GIGASET_DEVNAME, &ops, THIS_MODULE); if (driver == NULL) { result = -ENOMEM; goto error; } /* register this driver with the USB subsystem */ result = usb_register(&gigaset_usb_driver); if (result < 0) { pr_err("error %d registering USB driver\n", -result); goto error; } pr_info(DRIVER_DESC "\n"); return 0; error: if (driver) gigaset_freedriver(driver); driver = NULL; return result; } /* * This function is called while unloading the kernel-module */ static void __exit usb_gigaset_exit(void) { int i; gigaset_blockdriver(driver); /* => probe will fail * => no gigaset_start any more */ /* stop all connected devices */ for (i = 0; i < driver->minors; i++) gigaset_shutdown(driver->cs + i); /* from now on, no isdn callback should be possible */ /* deregister this driver with the USB subsystem */ usb_deregister(&gigaset_usb_driver); /* this will call the disconnect-callback */ /* from now on, no disconnect/probe callback should be running */ gigaset_freedriver(driver); driver = NULL; } module_init(usb_gigaset_init); module_exit(usb_gigaset_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
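set_value(cs, 1, val) in gigaset_baud_rate() above encodes the rate exactly as the control-request comment documents: xxxx = trunc(0x383fff/rate) + 1, which equals ceil(0x384000/rate) for the supported rates. A worked sketch of that encoding; the helper name is illustrative.

#include <linux/types.h>

/* Illustrative encoder for the M105 "set baud rate" control request. */
static u16 example_m105_baud_value(u32 rate)
{
	return 0x383fff / rate + 1;
}

/* e.g. 9600 baud -> 0x0180, 57600 -> 0x0040, 115200 -> 0x0020 */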
jiangdapeng/btrfs-next
arch/mips/bcm47xx/gpio.c
4881
2516
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net> */ #include <linux/export.h> #include <linux/ssb/ssb.h> #include <linux/ssb/ssb_driver_chipcommon.h> #include <linux/ssb/ssb_driver_extif.h> #include <asm/mach-bcm47xx/bcm47xx.h> #include <asm/mach-bcm47xx/gpio.h> #if (BCM47XX_CHIPCO_GPIO_LINES > BCM47XX_EXTIF_GPIO_LINES) static DECLARE_BITMAP(gpio_in_use, BCM47XX_CHIPCO_GPIO_LINES); #else static DECLARE_BITMAP(gpio_in_use, BCM47XX_EXTIF_GPIO_LINES); #endif int gpio_request(unsigned gpio, const char *tag) { switch (bcm47xx_bus_type) { #ifdef CONFIG_BCM47XX_SSB case BCM47XX_BUS_TYPE_SSB: if (ssb_chipco_available(&bcm47xx_bus.ssb.chipco) && ((unsigned)gpio >= BCM47XX_CHIPCO_GPIO_LINES)) return -EINVAL; if (ssb_extif_available(&bcm47xx_bus.ssb.extif) && ((unsigned)gpio >= BCM47XX_EXTIF_GPIO_LINES)) return -EINVAL; if (test_and_set_bit(gpio, gpio_in_use)) return -EBUSY; return 0; #endif #ifdef CONFIG_BCM47XX_BCMA case BCM47XX_BUS_TYPE_BCMA: if (gpio >= BCM47XX_CHIPCO_GPIO_LINES) return -EINVAL; if (test_and_set_bit(gpio, gpio_in_use)) return -EBUSY; return 0; #endif } return -EINVAL; } EXPORT_SYMBOL(gpio_request); void gpio_free(unsigned gpio) { switch (bcm47xx_bus_type) { #ifdef CONFIG_BCM47XX_SSB case BCM47XX_BUS_TYPE_SSB: if (ssb_chipco_available(&bcm47xx_bus.ssb.chipco) && ((unsigned)gpio >= BCM47XX_CHIPCO_GPIO_LINES)) return; if (ssb_extif_available(&bcm47xx_bus.ssb.extif) && ((unsigned)gpio >= BCM47XX_EXTIF_GPIO_LINES)) return; clear_bit(gpio, gpio_in_use); return; #endif #ifdef CONFIG_BCM47XX_BCMA case BCM47XX_BUS_TYPE_BCMA: if (gpio >= BCM47XX_CHIPCO_GPIO_LINES) return; clear_bit(gpio, gpio_in_use); return; #endif } } EXPORT_SYMBOL(gpio_free); int gpio_to_irq(unsigned gpio) { switch (bcm47xx_bus_type) { #ifdef CONFIG_BCM47XX_SSB case BCM47XX_BUS_TYPE_SSB: if (ssb_chipco_available(&bcm47xx_bus.ssb.chipco)) return ssb_mips_irq(bcm47xx_bus.ssb.chipco.dev) + 2; else if (ssb_extif_available(&bcm47xx_bus.ssb.extif)) return ssb_mips_irq(bcm47xx_bus.ssb.extif.dev) + 2; else return -EINVAL; #endif #ifdef CONFIG_BCM47XX_BCMA case BCM47XX_BUS_TYPE_BCMA: return bcma_core_mips_irq(bcm47xx_bus.bcma.bus.drv_cc.core) + 2; #endif } return -EINVAL; } EXPORT_SYMBOL_GPL(gpio_to_irq);
gpl-2.0
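gpio_request() and gpio_to_irq() above are what a platform driver pairs with request_irq(); note that on this SoC every GPIO line maps to the same ChipCommon/EXTIF interrupt, so the handler must be shareable. A minimal sketch, assuming the platform's gpio_direction_input() helper is available; the GPIO number and names are illustrative.

#include <linux/interrupt.h>
#include <linux/gpio.h>

#define EXAMPLE_BUTTON_GPIO	4	/* illustrative line number */

static irqreturn_t example_button_isr(int irq, void *dev_id)
{
	/* read/clear the GPIO state here */
	return IRQ_HANDLED;
}

static int example_button_setup(void)
{
	int irq, err;

	err = gpio_request(EXAMPLE_BUTTON_GPIO, "example-button");
	if (err)
		return err;
	gpio_direction_input(EXAMPLE_BUTTON_GPIO);

	irq = gpio_to_irq(EXAMPLE_BUTTON_GPIO);
	if (irq < 0) {
		gpio_free(EXAMPLE_BUTTON_GPIO);
		return irq;
	}

	err = request_irq(irq, example_button_isr, IRQF_SHARED,
			  "example-button", &example_button_isr);
	if (err)
		gpio_free(EXAMPLE_BUTTON_GPIO);
	return err;
}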
TeamEOS/kernel_oppo_msm8960
arch/arm/plat-omap/debug-devices.c
5137
2192
/* * linux/arch/arm/plat-omap/debug-devices.c * * Copyright (C) 2005 Nokia Corporation * Modified from mach-omap2/board-h4.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/smc91x.h> #include <mach/hardware.h> #include <plat/board.h> /* Many OMAP development platforms reuse the same "debug board"; these * platforms include H2, H3, H4, and Perseus2. */ static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, .leda = RPC_LED_100_10, .ledb = RPC_LED_TX_RX, }; static struct resource smc91x_resources[] = { [0] = { .flags = IORESOURCE_MEM, }, [1] = { .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE, }, }; static struct platform_device smc91x_device = { .name = "smc91x", .id = -1, .dev = { .platform_data = &smc91x_info, }, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, }; static struct resource led_resources[] = { [0] = { .flags = IORESOURCE_MEM, }, }; static struct platform_device led_device = { .name = "omap_dbg_led", .id = -1, .num_resources = ARRAY_SIZE(led_resources), .resource = led_resources, }; static struct platform_device *debug_devices[] __initdata = { &smc91x_device, &led_device, /* ps2 kbd + mouse ports */ /* 4 extra uarts */ /* 6 input dip switches */ /* 8 output pins */ }; int __init debug_card_init(u32 addr, unsigned gpio) { int status; smc91x_resources[0].start = addr + 0x300; smc91x_resources[0].end = addr + 0x30f; smc91x_resources[1].start = gpio_to_irq(gpio); smc91x_resources[1].end = gpio_to_irq(gpio); status = gpio_request(gpio, "SMC91x irq"); if (status < 0) { printk(KERN_ERR "GPIO%d unavailable for smc91x IRQ\n", gpio); return status; } gpio_direction_input(gpio); led_resources[0].start = addr; led_resources[0].end = addr + SZ_4K - 1; return platform_add_devices(debug_devices, ARRAY_SIZE(debug_devices)); }
gpl-2.0
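debug_card_init() above is meant to be called from a board's init code once the debug board's chip-select base and IRQ GPIO are known. A sketch with placeholder values; both constants and the prototype placement are illustrative, since the real numbers differ per H2/H3/H4/Perseus2 board.

#include <linux/init.h>
#include <linux/kernel.h>

/* Prototype normally comes from the platform's debug-devices header. */
extern int debug_card_init(u32 addr, unsigned gpio);

#define EXAMPLE_DEBUG_BASE	0x04000000	/* placeholder CS base */
#define EXAMPLE_DEBUG_GPIO	40		/* placeholder IRQ GPIO */

static void __init example_board_add_debug_devices(void)
{
	if (debug_card_init(EXAMPLE_DEBUG_BASE, EXAMPLE_DEBUG_GPIO) < 0)
		pr_err("debug board devices could not be registered\n");
}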
kostoulhs/android_kernel_samsung_expressltexx
arch/x86/pci/mmconfig-shared.c
5393
15939
/* * mmconfig-shared.c - Low-level direct PCI config space access via * MMCONFIG - common code between i386 and x86-64. * * This code does: * - known chipset handling * - ACPI decoding and validation * * Per-architecture code takes care of the mappings and accesses * themselves. */ #include <linux/pci.h> #include <linux/init.h> #include <linux/acpi.h> #include <linux/sfi_acpi.h> #include <linux/bitmap.h> #include <linux/dmi.h> #include <linux/slab.h> #include <asm/e820.h> #include <asm/pci_x86.h> #include <asm/acpi.h> #define PREFIX "PCI: " /* Indicate if the mmcfg resources have been placed into the resource table. */ static int __initdata pci_mmcfg_resources_inserted; LIST_HEAD(pci_mmcfg_list); static __init void pci_mmconfig_remove(struct pci_mmcfg_region *cfg) { if (cfg->res.parent) release_resource(&cfg->res); list_del(&cfg->list); kfree(cfg); } static __init void free_all_mmcfg(void) { struct pci_mmcfg_region *cfg, *tmp; pci_mmcfg_arch_free(); list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list) pci_mmconfig_remove(cfg); } static __init void list_add_sorted(struct pci_mmcfg_region *new) { struct pci_mmcfg_region *cfg; /* keep list sorted by segment and starting bus number */ list_for_each_entry(cfg, &pci_mmcfg_list, list) { if (cfg->segment > new->segment || (cfg->segment == new->segment && cfg->start_bus >= new->start_bus)) { list_add_tail(&new->list, &cfg->list); return; } } list_add_tail(&new->list, &pci_mmcfg_list); } static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start, int end, u64 addr) { struct pci_mmcfg_region *new; struct resource *res; if (addr == 0) return NULL; new = kzalloc(sizeof(*new), GFP_KERNEL); if (!new) return NULL; new->address = addr; new->segment = segment; new->start_bus = start; new->end_bus = end; list_add_sorted(new); res = &new->res; res->start = addr + PCI_MMCFG_BUS_OFFSET(start); res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1; res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN, "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end); res->name = new->name; printk(KERN_INFO PREFIX "MMCONFIG for domain %04x [bus %02x-%02x] at " "%pR (base %#lx)\n", segment, start, end, &new->res, (unsigned long) addr); return new; } struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus) { struct pci_mmcfg_region *cfg; list_for_each_entry(cfg, &pci_mmcfg_list, list) if (cfg->segment == segment && cfg->start_bus <= bus && bus <= cfg->end_bus) return cfg; return NULL; } static const char __init *pci_mmcfg_e7520(void) { u32 win; raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0xce, 2, &win); win = win & 0xf000; if (win == 0x0000 || win == 0xf000) return NULL; if (pci_mmconfig_add(0, 0, 255, win << 16) == NULL) return NULL; return "Intel Corporation E7520 Memory Controller Hub"; } static const char __init *pci_mmcfg_intel_945(void) { u32 pciexbar, mask = 0, len = 0; raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0x48, 4, &pciexbar); /* Enable bit */ if (!(pciexbar & 1)) return NULL; /* Size bits */ switch ((pciexbar >> 1) & 3) { case 0: mask = 0xf0000000U; len = 0x10000000U; break; case 1: mask = 0xf8000000U; len = 0x08000000U; break; case 2: mask = 0xfc000000U; len = 0x04000000U; break; default: return NULL; } /* Errata #2, things break when not aligned on a 256Mb boundary */ /* Can only happen in 64M/128M mode */ if ((pciexbar & mask) & 0x0fffffffU) return NULL; /* Don't hit the APIC registers and their friends */ if ((pciexbar & mask) >= 0xf0000000U) return NULL; if (pci_mmconfig_add(0, 0, (len 
>> 20) - 1, pciexbar & mask) == NULL) return NULL; return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub"; } static const char __init *pci_mmcfg_amd_fam10h(void) { u32 low, high, address; u64 base, msr; int i; unsigned segnbits = 0, busnbits, end_bus; if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF)) return NULL; address = MSR_FAM10H_MMIO_CONF_BASE; if (rdmsr_safe(address, &low, &high)) return NULL; msr = high; msr <<= 32; msr |= low; /* mmconfig is not enable */ if (!(msr & FAM10H_MMIO_CONF_ENABLE)) return NULL; base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT); busnbits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) & FAM10H_MMIO_CONF_BUSRANGE_MASK; /* * only handle bus 0 ? * need to skip it */ if (!busnbits) return NULL; if (busnbits > 8) { segnbits = busnbits - 8; busnbits = 8; } end_bus = (1 << busnbits) - 1; for (i = 0; i < (1 << segnbits); i++) if (pci_mmconfig_add(i, 0, end_bus, base + (1<<28) * i) == NULL) { free_all_mmcfg(); return NULL; } return "AMD Family 10h NB"; } static bool __initdata mcp55_checked; static const char __init *pci_mmcfg_nvidia_mcp55(void) { int bus; int mcp55_mmconf_found = 0; static const u32 extcfg_regnum = 0x90; static const u32 extcfg_regsize = 4; static const u32 extcfg_enable_mask = 1<<31; static const u32 extcfg_start_mask = 0xff<<16; static const int extcfg_start_shift = 16; static const u32 extcfg_size_mask = 0x3<<28; static const int extcfg_size_shift = 28; static const int extcfg_sizebus[] = {0x100, 0x80, 0x40, 0x20}; static const u32 extcfg_base_mask[] = {0x7ff8, 0x7ffc, 0x7ffe, 0x7fff}; static const int extcfg_base_lshift = 25; /* * do check if amd fam10h already took over */ if (!acpi_disabled || !list_empty(&pci_mmcfg_list) || mcp55_checked) return NULL; mcp55_checked = true; for (bus = 0; bus < 256; bus++) { u64 base; u32 l, extcfg; u16 vendor, device; int start, size_index, end; raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), 0, 4, &l); vendor = l & 0xffff; device = (l >> 16) & 0xffff; if (PCI_VENDOR_ID_NVIDIA != vendor || 0x0369 != device) continue; raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), extcfg_regnum, extcfg_regsize, &extcfg); if (!(extcfg & extcfg_enable_mask)) continue; size_index = (extcfg & extcfg_size_mask) >> extcfg_size_shift; base = extcfg & extcfg_base_mask[size_index]; /* base could > 4G */ base <<= extcfg_base_lshift; start = (extcfg & extcfg_start_mask) >> extcfg_start_shift; end = start + extcfg_sizebus[size_index] - 1; if (pci_mmconfig_add(0, start, end, base) == NULL) continue; mcp55_mmconf_found++; } if (!mcp55_mmconf_found) return NULL; return "nVidia MCP55"; } struct pci_mmcfg_hostbridge_probe { u32 bus; u32 devfn; u32 vendor; u32 device; const char *(*probe)(void); }; static struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initdata = { { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, pci_mmcfg_e7520 }, { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82945G_HB, pci_mmcfg_intel_945 }, { 0, PCI_DEVFN(0x18, 0), PCI_VENDOR_ID_AMD, 0x1200, pci_mmcfg_amd_fam10h }, { 0xff, PCI_DEVFN(0, 0), PCI_VENDOR_ID_AMD, 0x1200, pci_mmcfg_amd_fam10h }, { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_NVIDIA, 0x0369, pci_mmcfg_nvidia_mcp55 }, }; static void __init pci_mmcfg_check_end_bus_number(void) { struct pci_mmcfg_region *cfg, *cfgx; /* Fixup overlaps */ list_for_each_entry(cfg, &pci_mmcfg_list, list) { if (cfg->end_bus < cfg->start_bus) cfg->end_bus = 255; /* Don't access the list head ! 
*/ if (cfg->list.next == &pci_mmcfg_list) break; cfgx = list_entry(cfg->list.next, typeof(*cfg), list); if (cfg->end_bus >= cfgx->start_bus) cfg->end_bus = cfgx->start_bus - 1; } } static int __init pci_mmcfg_check_hostbridge(void) { u32 l; u32 bus, devfn; u16 vendor, device; int i; const char *name; if (!raw_pci_ops) return 0; free_all_mmcfg(); for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) { bus = pci_mmcfg_probes[i].bus; devfn = pci_mmcfg_probes[i].devfn; raw_pci_ops->read(0, bus, devfn, 0, 4, &l); vendor = l & 0xffff; device = (l >> 16) & 0xffff; name = NULL; if (pci_mmcfg_probes[i].vendor == vendor && pci_mmcfg_probes[i].device == device) name = pci_mmcfg_probes[i].probe(); if (name) printk(KERN_INFO PREFIX "%s with MMCONFIG support\n", name); } /* some end_bus_number is crazy, fix it */ pci_mmcfg_check_end_bus_number(); return !list_empty(&pci_mmcfg_list); } static void __init pci_mmcfg_insert_resources(void) { struct pci_mmcfg_region *cfg; list_for_each_entry(cfg, &pci_mmcfg_list, list) insert_resource(&iomem_resource, &cfg->res); /* Mark that the resources have been inserted. */ pci_mmcfg_resources_inserted = 1; } static acpi_status __init check_mcfg_resource(struct acpi_resource *res, void *data) { struct resource *mcfg_res = data; struct acpi_resource_address64 address; acpi_status status; if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) { struct acpi_resource_fixed_memory32 *fixmem32 = &res->data.fixed_memory32; if (!fixmem32) return AE_OK; if ((mcfg_res->start >= fixmem32->address) && (mcfg_res->end < (fixmem32->address + fixmem32->address_length))) { mcfg_res->flags = 1; return AE_CTRL_TERMINATE; } } if ((res->type != ACPI_RESOURCE_TYPE_ADDRESS32) && (res->type != ACPI_RESOURCE_TYPE_ADDRESS64)) return AE_OK; status = acpi_resource_to_address64(res, &address); if (ACPI_FAILURE(status) || (address.address_length <= 0) || (address.resource_type != ACPI_MEMORY_RANGE)) return AE_OK; if ((mcfg_res->start >= address.minimum) && (mcfg_res->end < (address.minimum + address.address_length))) { mcfg_res->flags = 1; return AE_CTRL_TERMINATE; } return AE_OK; } static acpi_status __init find_mboard_resource(acpi_handle handle, u32 lvl, void *context, void **rv) { struct resource *mcfg_res = context; acpi_walk_resources(handle, METHOD_NAME__CRS, check_mcfg_resource, context); if (mcfg_res->flags) return AE_CTRL_TERMINATE; return AE_OK; } static int __init is_acpi_reserved(u64 start, u64 end, unsigned not_used) { struct resource mcfg_res; mcfg_res.start = start; mcfg_res.end = end - 1; mcfg_res.flags = 0; acpi_get_devices("PNP0C01", find_mboard_resource, &mcfg_res, NULL); if (!mcfg_res.flags) acpi_get_devices("PNP0C02", find_mboard_resource, &mcfg_res, NULL); return mcfg_res.flags; } typedef int (*check_reserved_t)(u64 start, u64 end, unsigned type); static int __init is_mmconf_reserved(check_reserved_t is_reserved, struct pci_mmcfg_region *cfg, int with_e820) { u64 addr = cfg->res.start; u64 size = resource_size(&cfg->res); u64 old_size = size; int valid = 0, num_buses; while (!is_reserved(addr, addr + size, E820_RESERVED)) { size >>= 1; if (size < (16UL<<20)) break; } if (size >= (16UL<<20) || size == old_size) { printk(KERN_INFO PREFIX "MMCONFIG at %pR reserved in %s\n", &cfg->res, with_e820 ? 
"E820" : "ACPI motherboard resources"); valid = 1; if (old_size != size) { /* update end_bus */ cfg->end_bus = cfg->start_bus + ((size>>20) - 1); num_buses = cfg->end_bus - cfg->start_bus + 1; cfg->res.end = cfg->res.start + PCI_MMCFG_BUS_OFFSET(num_buses) - 1; snprintf(cfg->name, PCI_MMCFG_RESOURCE_NAME_LEN, "PCI MMCONFIG %04x [bus %02x-%02x]", cfg->segment, cfg->start_bus, cfg->end_bus); printk(KERN_INFO PREFIX "MMCONFIG for %04x [bus%02x-%02x] " "at %pR (base %#lx) (size reduced!)\n", cfg->segment, cfg->start_bus, cfg->end_bus, &cfg->res, (unsigned long) cfg->address); } } return valid; } static void __init pci_mmcfg_reject_broken(int early) { struct pci_mmcfg_region *cfg; list_for_each_entry(cfg, &pci_mmcfg_list, list) { int valid = 0; if (!early && !acpi_disabled) { valid = is_mmconf_reserved(is_acpi_reserved, cfg, 0); if (valid) continue; else printk(KERN_ERR FW_BUG PREFIX "MMCONFIG at %pR not reserved in " "ACPI motherboard resources\n", &cfg->res); } /* Don't try to do this check unless configuration type 1 is available. how about type 2 ?*/ if (raw_pci_ops) valid = is_mmconf_reserved(e820_all_mapped, cfg, 1); if (!valid) goto reject; } return; reject: printk(KERN_INFO PREFIX "not using MMCONFIG\n"); free_all_mmcfg(); } static int __initdata known_bridge; static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg, struct acpi_mcfg_allocation *cfg) { int year; if (cfg->address < 0xFFFFFFFF) return 0; if (!strcmp(mcfg->header.oem_id, "SGI") || !strcmp(mcfg->header.oem_id, "SGI2")) return 0; if (mcfg->header.revision >= 1) { if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year >= 2010) return 0; } printk(KERN_ERR PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx " "is above 4GB, ignored\n", cfg->pci_segment, cfg->start_bus_number, cfg->end_bus_number, cfg->address); return -EINVAL; } static int __init pci_parse_mcfg(struct acpi_table_header *header) { struct acpi_table_mcfg *mcfg; struct acpi_mcfg_allocation *cfg_table, *cfg; unsigned long i; int entries; if (!header) return -EINVAL; mcfg = (struct acpi_table_mcfg *)header; /* how many config structures do we have */ free_all_mmcfg(); entries = 0; i = header->length - sizeof(struct acpi_table_mcfg); while (i >= sizeof(struct acpi_mcfg_allocation)) { entries++; i -= sizeof(struct acpi_mcfg_allocation); }; if (entries == 0) { printk(KERN_ERR PREFIX "MMCONFIG has no entries\n"); return -ENODEV; } cfg_table = (struct acpi_mcfg_allocation *) &mcfg[1]; for (i = 0; i < entries; i++) { cfg = &cfg_table[i]; if (acpi_mcfg_check_entry(mcfg, cfg)) { free_all_mmcfg(); return -ENODEV; } if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number, cfg->end_bus_number, cfg->address) == NULL) { printk(KERN_WARNING PREFIX "no memory for MCFG entries\n"); free_all_mmcfg(); return -ENOMEM; } } return 0; } static void __init __pci_mmcfg_init(int early) { /* MMCONFIG disabled */ if ((pci_probe & PCI_PROBE_MMCONF) == 0) return; /* MMCONFIG already enabled */ if (!early && !(pci_probe & PCI_PROBE_MASK & ~PCI_PROBE_MMCONF)) return; /* for late to exit */ if (known_bridge) return; if (early) { if (pci_mmcfg_check_hostbridge()) known_bridge = 1; } if (!known_bridge) acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg); pci_mmcfg_reject_broken(early); if (list_empty(&pci_mmcfg_list)) return; if (pcibios_last_bus < 0) { const struct pci_mmcfg_region *cfg; list_for_each_entry(cfg, &pci_mmcfg_list, list) { if (cfg->segment) break; pcibios_last_bus = cfg->end_bus; } } if (pci_mmcfg_arch_init()) pci_probe = (pci_probe & ~PCI_PROBE_MASK) | 
PCI_PROBE_MMCONF; else { /* * Signal not to attempt to insert mmcfg resources because * the architecture mmcfg setup could not initialize. */ pci_mmcfg_resources_inserted = 1; } } void __init pci_mmcfg_early_init(void) { __pci_mmcfg_init(1); } void __init pci_mmcfg_late_init(void) { __pci_mmcfg_init(0); } static int __init pci_mmcfg_late_insert_resources(void) { /* * If resources are already inserted or we are not using MMCONFIG, * don't insert the resources. */ if ((pci_mmcfg_resources_inserted == 1) || (pci_probe & PCI_PROBE_MMCONF) == 0 || list_empty(&pci_mmcfg_list)) return 1; /* * Attempt to insert the mmcfg resources but not with the busy flag * marked so it won't cause request errors when __request_region is * called. */ pci_mmcfg_insert_resources(); return 0; } /* * Perform MMCONFIG resource insertion after PCI initialization to allow for * misprogrammed MCFG tables that state larger sizes but actually conflict * with other system resources. */ late_initcall(pci_mmcfg_late_insert_resources);
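A minimal stand-alone sketch of the halving strategy used by is_mmconf_reserved() above, with a hypothetical stub standing in for e820_all_mapped()/is_acpi_reserved(): the MMCONFIG window is repeatedly cut in half until a fully reserved prefix is found or less than 16 MiB (16 buses at 1 MiB each) remains. This is an illustration under those stub assumptions, not the kernel code path itself.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for e820_all_mapped()/is_acpi_reserved():
 * pretend the firmware reserved only the first 64 MiB of the window. */
static int stub_is_reserved(uint64_t start, uint64_t end)
{
	return end <= 0x80000000ULL + (64ULL << 20);
}

/* Mirror of the halving loop in is_mmconf_reserved(): shrink the region
 * until its prefix is reserved, or give up below 16 MiB (16 buses). */
static uint64_t reserved_prefix(uint64_t addr, uint64_t size)
{
	while (!stub_is_reserved(addr, addr + size)) {
		size >>= 1;
		if (size < (16ULL << 20))
			return 0;	/* nothing usable: reject MMCONFIG */
	}
	return size;
}

int main(void)
{
	/* A 256 MiB window shrinks to the 64 MiB the stub reserves,
	 * i.e. end_bus is trimmed to start_bus + 63. */
	uint64_t got = reserved_prefix(0x80000000ULL, 256ULL << 20);
	printf("usable MMCONFIG size: %llu MiB (%llu buses)\n",
	       (unsigned long long)(got >> 20), (unsigned long long)(got >> 20));
	return 0;
}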
gpl-2.0
hallovveen31/ICED_KERNEL
arch/mips/cavium-octeon/octeon-irq.c
7185
29727
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2004-2008, 2009, 2010, 2011 Cavium Networks */ #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/percpu.h> #include <linux/irq.h> #include <linux/smp.h> #include <asm/octeon/octeon.h> static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock); static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock); static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror); static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror); static __read_mostly u8 octeon_irq_ciu_to_irq[8][64]; union octeon_ciu_chip_data { void *p; unsigned long l; struct { unsigned int line:6; unsigned int bit:6; } s; }; struct octeon_core_chip_data { struct mutex core_irq_mutex; bool current_en; bool desired_en; u8 bit; }; #define MIPS_CORE_IRQ_LINES 8 static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES]; static void __init octeon_irq_set_ciu_mapping(int irq, int line, int bit, struct irq_chip *chip, irq_flow_handler_t handler) { union octeon_ciu_chip_data cd; irq_set_chip_and_handler(irq, chip, handler); cd.l = 0; cd.s.line = line; cd.s.bit = bit; irq_set_chip_data(irq, cd.p); octeon_irq_ciu_to_irq[line][bit] = irq; } static int octeon_coreid_for_cpu(int cpu) { #ifdef CONFIG_SMP return cpu_logical_map(cpu); #else return cvmx_get_core_num(); #endif } static int octeon_cpu_for_coreid(int coreid) { #ifdef CONFIG_SMP return cpu_number_map(coreid); #else return smp_processor_id(); #endif } static void octeon_irq_core_ack(struct irq_data *data) { struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); unsigned int bit = cd->bit; /* * We don't need to disable IRQs to make these atomic since * they are already disabled earlier in the low level * interrupt code. */ clear_c0_status(0x100 << bit); /* The two user interrupts must be cleared manually. */ if (bit < 2) clear_c0_cause(0x100 << bit); } static void octeon_irq_core_eoi(struct irq_data *data) { struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); /* * We don't need to disable IRQs to make these atomic since * they are already disabled earlier in the low level * interrupt code. */ set_c0_status(0x100 << cd->bit); } static void octeon_irq_core_set_enable_local(void *arg) { struct irq_data *data = arg; struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); unsigned int mask = 0x100 << cd->bit; /* * Interrupts are already disabled, so these are atomic. 
*/ if (cd->desired_en) set_c0_status(mask); else clear_c0_status(mask); } static void octeon_irq_core_disable(struct irq_data *data) { struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); cd->desired_en = false; } static void octeon_irq_core_enable(struct irq_data *data) { struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); cd->desired_en = true; } static void octeon_irq_core_bus_lock(struct irq_data *data) { struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); mutex_lock(&cd->core_irq_mutex); } static void octeon_irq_core_bus_sync_unlock(struct irq_data *data) { struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); if (cd->desired_en != cd->current_en) { on_each_cpu(octeon_irq_core_set_enable_local, data, 1); cd->current_en = cd->desired_en; } mutex_unlock(&cd->core_irq_mutex); } static struct irq_chip octeon_irq_chip_core = { .name = "Core", .irq_enable = octeon_irq_core_enable, .irq_disable = octeon_irq_core_disable, .irq_ack = octeon_irq_core_ack, .irq_eoi = octeon_irq_core_eoi, .irq_bus_lock = octeon_irq_core_bus_lock, .irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock, .irq_cpu_online = octeon_irq_core_eoi, .irq_cpu_offline = octeon_irq_core_ack, .flags = IRQCHIP_ONOFFLINE_ENABLED, }; static void __init octeon_irq_init_core(void) { int i; int irq; struct octeon_core_chip_data *cd; for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) { cd = &octeon_irq_core_chip_data[i]; cd->current_en = false; cd->desired_en = false; cd->bit = i; mutex_init(&cd->core_irq_mutex); irq = OCTEON_IRQ_SW0 + i; switch (irq) { case OCTEON_IRQ_TIMER: case OCTEON_IRQ_SW0: case OCTEON_IRQ_SW1: case OCTEON_IRQ_5: case OCTEON_IRQ_PERF: irq_set_chip_data(irq, cd); irq_set_chip_and_handler(irq, &octeon_irq_chip_core, handle_percpu_irq); break; default: break; } } } static int next_cpu_for_irq(struct irq_data *data) { #ifdef CONFIG_SMP int cpu; int weight = cpumask_weight(data->affinity); if (weight > 1) { cpu = smp_processor_id(); for (;;) { cpu = cpumask_next(cpu, data->affinity); if (cpu >= nr_cpu_ids) { cpu = -1; continue; } else if (cpumask_test_cpu(cpu, cpu_online_mask)) { break; } } } else if (weight == 1) { cpu = cpumask_first(data->affinity); } else { cpu = smp_processor_id(); } return cpu; #else return smp_processor_id(); #endif } static void octeon_irq_ciu_enable(struct irq_data *data) { int cpu = next_cpu_for_irq(data); int coreid = octeon_coreid_for_cpu(cpu); unsigned long *pen; unsigned long flags; union octeon_ciu_chip_data cd; cd.p = irq_data_get_irq_chip_data(data); if (cd.s.line == 0) { raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); set_bit(cd.s.bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); } else { raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); set_bit(cd.s.bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); } } static void octeon_irq_ciu_enable_local(struct irq_data *data) { unsigned long *pen; unsigned long flags; union octeon_ciu_chip_data cd; cd.p = irq_data_get_irq_chip_data(data); if (cd.s.line == 0) { raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror); set_bit(cd.s.bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); } else { 
raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror); set_bit(cd.s.bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen); raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); } } static void octeon_irq_ciu_disable_local(struct irq_data *data) { unsigned long *pen; unsigned long flags; union octeon_ciu_chip_data cd; cd.p = irq_data_get_irq_chip_data(data); if (cd.s.line == 0) { raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror); clear_bit(cd.s.bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); } else { raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror); clear_bit(cd.s.bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen); raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); } } static void octeon_irq_ciu_disable_all(struct irq_data *data) { unsigned long flags; unsigned long *pen; int cpu; union octeon_ciu_chip_data cd; wmb(); /* Make sure flag changes arrive before register updates. */ cd.p = irq_data_get_irq_chip_data(data); if (cd.s.line == 0) { raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); for_each_online_cpu(cpu) { int coreid = octeon_coreid_for_cpu(cpu); pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); clear_bit(cd.s.bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); } raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); } else { raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); for_each_online_cpu(cpu) { int coreid = octeon_coreid_for_cpu(cpu); pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); clear_bit(cd.s.bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); } raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); } } static void octeon_irq_ciu_enable_all(struct irq_data *data) { unsigned long flags; unsigned long *pen; int cpu; union octeon_ciu_chip_data cd; cd.p = irq_data_get_irq_chip_data(data); if (cd.s.line == 0) { raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); for_each_online_cpu(cpu) { int coreid = octeon_coreid_for_cpu(cpu); pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); set_bit(cd.s.bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); } raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); } else { raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); for_each_online_cpu(cpu) { int coreid = octeon_coreid_for_cpu(cpu); pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); set_bit(cd.s.bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); } raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); } } /* * Enable the irq on the next core in the affinity set for chips that * have the EN*_W1{S,C} registers. */ static void octeon_irq_ciu_enable_v2(struct irq_data *data) { u64 mask; int cpu = next_cpu_for_irq(data); union octeon_ciu_chip_data cd; cd.p = irq_data_get_irq_chip_data(data); mask = 1ull << (cd.s.bit); /* * Called under the desc lock, so these should never get out * of sync. 
*/ if (cd.s.line == 0) { int index = octeon_coreid_for_cpu(cpu) * 2; set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); } else { int index = octeon_coreid_for_cpu(cpu) * 2 + 1; set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); } } /* * Enable the irq on the current CPU for chips that * have the EN*_W1{S,C} registers. */ static void octeon_irq_ciu_enable_local_v2(struct irq_data *data) { u64 mask; union octeon_ciu_chip_data cd; cd.p = irq_data_get_irq_chip_data(data); mask = 1ull << (cd.s.bit); if (cd.s.line == 0) { int index = cvmx_get_core_num() * 2; set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror)); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); } else { int index = cvmx_get_core_num() * 2 + 1; set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror)); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); } } static void octeon_irq_ciu_disable_local_v2(struct irq_data *data) { u64 mask; union octeon_ciu_chip_data cd; cd.p = irq_data_get_irq_chip_data(data); mask = 1ull << (cd.s.bit); if (cd.s.line == 0) { int index = cvmx_get_core_num() * 2; clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror)); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); } else { int index = cvmx_get_core_num() * 2 + 1; clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror)); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); } } /* * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq. */ static void octeon_irq_ciu_ack(struct irq_data *data) { u64 mask; union octeon_ciu_chip_data cd; cd.p = data->chip_data; mask = 1ull << (cd.s.bit); if (cd.s.line == 0) { int index = cvmx_get_core_num() * 2; cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); } else { cvmx_write_csr(CVMX_CIU_INT_SUM1, mask); } } /* * Disable the irq on the all cores for chips that have the EN*_W1{S,C} * registers. */ static void octeon_irq_ciu_disable_all_v2(struct irq_data *data) { int cpu; u64 mask; union octeon_ciu_chip_data cd; wmb(); /* Make sure flag changes arrive before register updates. */ cd.p = data->chip_data; mask = 1ull << (cd.s.bit); if (cd.s.line == 0) { for_each_online_cpu(cpu) { int index = octeon_coreid_for_cpu(cpu) * 2; clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); } } else { for_each_online_cpu(cpu) { int index = octeon_coreid_for_cpu(cpu) * 2 + 1; clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); } } } /* * Enable the irq on the all cores for chips that have the EN*_W1{S,C} * registers. 
*/ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data) { int cpu; u64 mask; union octeon_ciu_chip_data cd; cd.p = data->chip_data; mask = 1ull << (cd.s.bit); if (cd.s.line == 0) { for_each_online_cpu(cpu) { int index = octeon_coreid_for_cpu(cpu) * 2; set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); } } else { for_each_online_cpu(cpu) { int index = octeon_coreid_for_cpu(cpu) * 2 + 1; set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); } } } #ifdef CONFIG_SMP static void octeon_irq_cpu_offline_ciu(struct irq_data *data) { int cpu = smp_processor_id(); cpumask_t new_affinity; if (!cpumask_test_cpu(cpu, data->affinity)) return; if (cpumask_weight(data->affinity) > 1) { /* * It has multi CPU affinity, just remove this CPU * from the affinity set. */ cpumask_copy(&new_affinity, data->affinity); cpumask_clear_cpu(cpu, &new_affinity); } else { /* Otherwise, put it on lowest numbered online CPU. */ cpumask_clear(&new_affinity); cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity); } __irq_set_affinity_locked(data, &new_affinity); } static int octeon_irq_ciu_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force) { int cpu; bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); unsigned long flags; union octeon_ciu_chip_data cd; cd.p = data->chip_data; /* * For non-v2 CIU, we will allow only single CPU affinity. * This removes the need to do locking in the .ack/.eoi * functions. */ if (cpumask_weight(dest) != 1) return -EINVAL; if (!enable_one) return 0; if (cd.s.line == 0) { raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); for_each_online_cpu(cpu) { int coreid = octeon_coreid_for_cpu(cpu); unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); if (cpumask_test_cpu(cpu, dest) && enable_one) { enable_one = false; set_bit(cd.s.bit, pen); } else { clear_bit(cd.s.bit, pen); } cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); } raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); } else { raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); for_each_online_cpu(cpu) { int coreid = octeon_coreid_for_cpu(cpu); unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); if (cpumask_test_cpu(cpu, dest) && enable_one) { enable_one = false; set_bit(cd.s.bit, pen); } else { clear_bit(cd.s.bit, pen); } cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); } raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); } return 0; } /* * Set affinity for the irq for chips that have the EN*_W1{S,C} * registers. 
*/ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data, const struct cpumask *dest, bool force) { int cpu; bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); u64 mask; union octeon_ciu_chip_data cd; if (!enable_one) return 0; cd.p = data->chip_data; mask = 1ull << cd.s.bit; if (cd.s.line == 0) { for_each_online_cpu(cpu) { unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); int index = octeon_coreid_for_cpu(cpu) * 2; if (cpumask_test_cpu(cpu, dest) && enable_one) { enable_one = false; set_bit(cd.s.bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); } else { clear_bit(cd.s.bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); } } } else { for_each_online_cpu(cpu) { unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); int index = octeon_coreid_for_cpu(cpu) * 2 + 1; if (cpumask_test_cpu(cpu, dest) && enable_one) { enable_one = false; set_bit(cd.s.bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); } else { clear_bit(cd.s.bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); } } } return 0; } #endif /* * The v1 CIU code already masks things, so supply a dummy version to * the core chip code. */ static void octeon_irq_dummy_mask(struct irq_data *data) { } /* * Newer octeon chips have support for lockless CIU operation. */ static struct irq_chip octeon_irq_chip_ciu_v2 = { .name = "CIU", .irq_enable = octeon_irq_ciu_enable_v2, .irq_disable = octeon_irq_ciu_disable_all_v2, .irq_mask = octeon_irq_ciu_disable_local_v2, .irq_unmask = octeon_irq_ciu_enable_v2, #ifdef CONFIG_SMP .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, .irq_cpu_offline = octeon_irq_cpu_offline_ciu, #endif }; static struct irq_chip octeon_irq_chip_ciu_edge_v2 = { .name = "CIU-E", .irq_enable = octeon_irq_ciu_enable_v2, .irq_disable = octeon_irq_ciu_disable_all_v2, .irq_ack = octeon_irq_ciu_ack, .irq_mask = octeon_irq_ciu_disable_local_v2, .irq_unmask = octeon_irq_ciu_enable_v2, #ifdef CONFIG_SMP .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, .irq_cpu_offline = octeon_irq_cpu_offline_ciu, #endif }; static struct irq_chip octeon_irq_chip_ciu = { .name = "CIU", .irq_enable = octeon_irq_ciu_enable, .irq_disable = octeon_irq_ciu_disable_all, .irq_mask = octeon_irq_dummy_mask, #ifdef CONFIG_SMP .irq_set_affinity = octeon_irq_ciu_set_affinity, .irq_cpu_offline = octeon_irq_cpu_offline_ciu, #endif }; static struct irq_chip octeon_irq_chip_ciu_edge = { .name = "CIU-E", .irq_enable = octeon_irq_ciu_enable, .irq_disable = octeon_irq_ciu_disable_all, .irq_mask = octeon_irq_dummy_mask, .irq_ack = octeon_irq_ciu_ack, #ifdef CONFIG_SMP .irq_set_affinity = octeon_irq_ciu_set_affinity, .irq_cpu_offline = octeon_irq_cpu_offline_ciu, #endif }; /* The mbox versions don't do any affinity or round-robin. */ static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = { .name = "CIU-M", .irq_enable = octeon_irq_ciu_enable_all_v2, .irq_disable = octeon_irq_ciu_disable_all_v2, .irq_ack = octeon_irq_ciu_disable_local_v2, .irq_eoi = octeon_irq_ciu_enable_local_v2, .irq_cpu_online = octeon_irq_ciu_enable_local_v2, .irq_cpu_offline = octeon_irq_ciu_disable_local_v2, .flags = IRQCHIP_ONOFFLINE_ENABLED, }; static struct irq_chip octeon_irq_chip_ciu_mbox = { .name = "CIU-M", .irq_enable = octeon_irq_ciu_enable_all, .irq_disable = octeon_irq_ciu_disable_all, .irq_cpu_online = octeon_irq_ciu_enable_local, .irq_cpu_offline = octeon_irq_ciu_disable_local, .flags = IRQCHIP_ONOFFLINE_ENABLED, }; /* * Watchdog interrupts are special. 
They are associated with a single * core, so we hardwire the affinity to that core. */ static void octeon_irq_ciu_wd_enable(struct irq_data *data) { unsigned long flags; unsigned long *pen; int coreid = data->irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ int cpu = octeon_cpu_for_coreid(coreid); raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); set_bit(coreid, pen); cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); } /* * Watchdog interrupts are special. They are associated with a single * core, so we hardwire the affinity to that core. */ static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data) { int coreid = data->irq - OCTEON_IRQ_WDOG0; int cpu = octeon_cpu_for_coreid(coreid); set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid); } static struct irq_chip octeon_irq_chip_ciu_wd_v2 = { .name = "CIU-W", .irq_enable = octeon_irq_ciu1_wd_enable_v2, .irq_disable = octeon_irq_ciu_disable_all_v2, .irq_mask = octeon_irq_ciu_disable_local_v2, .irq_unmask = octeon_irq_ciu_enable_local_v2, }; static struct irq_chip octeon_irq_chip_ciu_wd = { .name = "CIU-W", .irq_enable = octeon_irq_ciu_wd_enable, .irq_disable = octeon_irq_ciu_disable_all, .irq_mask = octeon_irq_dummy_mask, }; static void octeon_irq_ip2_v1(void) { const unsigned long core_id = cvmx_get_core_num(); u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2)); ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror); clear_c0_status(STATUSF_IP2); if (likely(ciu_sum)) { int bit = fls64(ciu_sum) - 1; int irq = octeon_irq_ciu_to_irq[0][bit]; if (likely(irq)) do_IRQ(irq); else spurious_interrupt(); } else { spurious_interrupt(); } set_c0_status(STATUSF_IP2); } static void octeon_irq_ip2_v2(void) { const unsigned long core_id = cvmx_get_core_num(); u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2)); ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror); if (likely(ciu_sum)) { int bit = fls64(ciu_sum) - 1; int irq = octeon_irq_ciu_to_irq[0][bit]; if (likely(irq)) do_IRQ(irq); else spurious_interrupt(); } else { spurious_interrupt(); } } static void octeon_irq_ip3_v1(void) { u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1); ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror); clear_c0_status(STATUSF_IP3); if (likely(ciu_sum)) { int bit = fls64(ciu_sum) - 1; int irq = octeon_irq_ciu_to_irq[1][bit]; if (likely(irq)) do_IRQ(irq); else spurious_interrupt(); } else { spurious_interrupt(); } set_c0_status(STATUSF_IP3); } static void octeon_irq_ip3_v2(void) { u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1); ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror); if (likely(ciu_sum)) { int bit = fls64(ciu_sum) - 1; int irq = octeon_irq_ciu_to_irq[1][bit]; if (likely(irq)) do_IRQ(irq); else spurious_interrupt(); } else { spurious_interrupt(); } } static void octeon_irq_ip4_mask(void) { clear_c0_status(STATUSF_IP4); spurious_interrupt(); } static void (*octeon_irq_ip2)(void); static void (*octeon_irq_ip3)(void); static void (*octeon_irq_ip4)(void); void __cpuinitdata (*octeon_irq_setup_secondary)(void); static void __cpuinit octeon_irq_percpu_enable(void) { irq_cpu_online(); } static void __cpuinit octeon_irq_init_ciu_percpu(void) { int coreid = cvmx_get_core_num(); /* * Disable All CIU Interrupts. The ones we need will be * enabled later. Read the SUM register so we know the write * completed. 
*/ cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0); cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0); cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0); cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0); cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2))); } static void __cpuinit octeon_irq_setup_secondary_ciu(void) { __get_cpu_var(octeon_irq_ciu0_en_mirror) = 0; __get_cpu_var(octeon_irq_ciu1_en_mirror) = 0; octeon_irq_init_ciu_percpu(); octeon_irq_percpu_enable(); /* Enable the CIU lines */ set_c0_status(STATUSF_IP3 | STATUSF_IP2); clear_c0_status(STATUSF_IP4); } static void __init octeon_irq_init_ciu(void) { unsigned int i; struct irq_chip *chip; struct irq_chip *chip_edge; struct irq_chip *chip_mbox; struct irq_chip *chip_wd; octeon_irq_init_ciu_percpu(); octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN6XXX)) { octeon_irq_ip2 = octeon_irq_ip2_v2; octeon_irq_ip3 = octeon_irq_ip3_v2; chip = &octeon_irq_chip_ciu_v2; chip_edge = &octeon_irq_chip_ciu_edge_v2; chip_mbox = &octeon_irq_chip_ciu_mbox_v2; chip_wd = &octeon_irq_chip_ciu_wd_v2; } else { octeon_irq_ip2 = octeon_irq_ip2_v1; octeon_irq_ip3 = octeon_irq_ip3_v1; chip = &octeon_irq_chip_ciu; chip_edge = &octeon_irq_chip_ciu_edge; chip_mbox = &octeon_irq_chip_ciu_mbox; chip_wd = &octeon_irq_chip_ciu_wd; } octeon_irq_ip4 = octeon_irq_ip4_mask; /* Mips internal */ octeon_irq_init_core(); /* CIU_0 */ for (i = 0; i < 16; i++) octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WORKQ0, 0, i + 0, chip, handle_level_irq); for (i = 0; i < 16; i++) octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_GPIO0, 0, i + 16, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART0, 0, 34, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART1, 0, 35, chip, handle_level_irq); for (i = 0; i < 4; i++) octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_INT0, 0, i + 36, chip, handle_level_irq); for (i = 0; i < 4; i++) octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_MSI0, 0, i + 40, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_TWSI, 0, 45, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_RML, 0, 46, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_TRACE0, 0, 47, chip, handle_level_irq); for (i = 0; i < 2; i++) octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_GMX_DRP0, 0, i + 48, chip_edge, handle_edge_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPD_DRP, 0, 50, chip_edge, handle_edge_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_KEY_ZERO, 0, 51, chip_edge, handle_edge_irq); for (i = 0; i < 4; i++) octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_TIMER0, 0, i + 52, chip_edge, handle_edge_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB0, 0, 56, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_PCM, 0, 57, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_MPI, 0, 58, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_TWSI2, 0, 59, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_POWIQ, 0, 60, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPDPPTHR, 0, 61, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_MII0, 0, 62, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_BOOTDMA, 0, 63, chip, handle_level_irq); 
/* CIU_1 */ for (i = 0; i < 16; i++) octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART2, 1, 16, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB1, 1, 17, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_MII1, 1, 18, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_NAND, 1, 19, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_MIO, 1, 20, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_IOB, 1, 21, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_FPA, 1, 22, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_POW, 1, 23, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_L2C, 1, 24, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPD, 1, 25, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_PIP, 1, 26, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_PKO, 1, 27, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_ZIP, 1, 28, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_TIM, 1, 29, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_RAD, 1, 30, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_KEY, 1, 31, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_DFA, 1, 32, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_USBCTL, 1, 33, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_SLI, 1, 34, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_DPI, 1, 35, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_AGX0, 1, 36, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_AGL, 1, 46, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_PTP, 1, 47, chip_edge, handle_edge_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_PEM0, 1, 48, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_PEM1, 1, 49, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_SRIO0, 1, 50, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_SRIO1, 1, 51, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_LMC0, 1, 52, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_DFM, 1, 56, chip, handle_level_irq); octeon_irq_set_ciu_mapping(OCTEON_IRQ_RST, 1, 63, chip, handle_level_irq); /* Enable the CIU lines */ set_c0_status(STATUSF_IP3 | STATUSF_IP2); clear_c0_status(STATUSF_IP4); } void __init arch_init_irq(void) { #ifdef CONFIG_SMP /* Set the default affinity to the boot cpu. */ cpumask_clear(irq_default_affinity); cpumask_set_cpu(smp_processor_id(), irq_default_affinity); #endif octeon_irq_init_ciu(); } asmlinkage void plat_irq_dispatch(void) { unsigned long cop0_cause; unsigned long cop0_status; while (1) { cop0_cause = read_c0_cause(); cop0_status = read_c0_status(); cop0_cause &= cop0_status; cop0_cause &= ST0_IM; if (unlikely(cop0_cause & STATUSF_IP2)) octeon_irq_ip2(); else if (unlikely(cop0_cause & STATUSF_IP3)) octeon_irq_ip3(); else if (unlikely(cop0_cause & STATUSF_IP4)) octeon_irq_ip4(); else if (likely(cop0_cause)) do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); else break; } } #ifdef CONFIG_HOTPLUG_CPU void fixup_irqs(void) { irq_cpu_offline(); } #endif /* CONFIG_HOTPLUG_CPU */
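A stand-alone sketch of the chip-data packing idiom used throughout the CIU driver above: the union layout is copied from octeon_ciu_chip_data, so the small (line, bit) coordinates travel inside the pointer-sized value that irq_set_chip_data()/irq_data_get_irq_chip_data() store and return, avoiding any per-irq allocation. The example values are illustrative only.

#include <stdio.h>

/* Same layout as the driver's union octeon_ciu_chip_data: the CIU line
 * and bit fit in 12 bits, so they are packed into the void * itself. */
union ciu_chip_data {
	void *p;
	unsigned long l;
	struct {
		unsigned int line:6;	/* CIU sum/en register: 0 or 1 */
		unsigned int bit:6;	/* bit position within that line */
	} s;
};

int main(void)
{
	union ciu_chip_data cd, back;

	cd.l = 0;		/* clear the whole word first */
	cd.s.line = 1;
	cd.s.bit = 47;		/* e.g. OCTEON_IRQ_PTP maps to line 1, bit 47 */

	/* cd.p is what octeon_irq_set_ciu_mapping() hands to
	 * irq_set_chip_data(); the handlers round-trip it back. */
	back.p = cd.p;
	printf("line=%u bit=%u\n", back.s.line, back.s.bit);
	return 0;
}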
gpl-2.0
poondog/venom-vivo
sound/drivers/pcsp/pcsp_input.c
9233
2288
/*
 * PC Speaker beeper driver for Linux
 *
 * Copyright (c) 2002 Vojtech Pavlik
 * Copyright (c) 1992 Orest Zborowski
 *
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation
 */

#include <linux/init.h>
#include <linux/input.h>
#include <asm/io.h>
#include "pcsp.h"

static void pcspkr_do_sound(unsigned int count)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&i8253_lock, flags);

	if (count) {
		/* set command for counter 2, 2 byte write */
		outb_p(0xB6, 0x43);
		/* select desired HZ */
		outb_p(count & 0xff, 0x42);
		outb((count >> 8) & 0xff, 0x42);
		/* enable counter 2 */
		outb_p(inb_p(0x61) | 3, 0x61);
	} else {
		/* disable counter 2 */
		outb(inb_p(0x61) & 0xFC, 0x61);
	}

	raw_spin_unlock_irqrestore(&i8253_lock, flags);
}

void pcspkr_stop_sound(void)
{
	pcspkr_do_sound(0);
}

static int pcspkr_input_event(struct input_dev *dev, unsigned int type,
			      unsigned int code, int value)
{
	unsigned int count = 0;

	if (atomic_read(&pcsp_chip.timer_active) || !pcsp_chip.pcspkr)
		return 0;

	switch (type) {
	case EV_SND:
		switch (code) {
		case SND_BELL:
			if (value)
				value = 1000;
			/* fall through: a bell is played as a 1 kHz tone */
		case SND_TONE:
			break;
		default:
			return -1;
		}
		break;
	default:
		return -1;
	}

	if (value > 20 && value < 32767)
		count = PIT_TICK_RATE / value;

	pcspkr_do_sound(count);

	return 0;
}

int __devinit pcspkr_input_init(struct input_dev **rdev, struct device *dev)
{
	int err;
	struct input_dev *input_dev = input_allocate_device();
	if (!input_dev)
		return -ENOMEM;

	input_dev->name = "PC Speaker";
	input_dev->phys = "isa0061/input0";
	input_dev->id.bustype = BUS_ISA;
	input_dev->id.vendor = 0x001f;
	input_dev->id.product = 0x0001;
	input_dev->id.version = 0x0100;
	input_dev->dev.parent = dev;

	input_dev->evbit[0] = BIT(EV_SND);
	input_dev->sndbit[0] = BIT(SND_BELL) | BIT(SND_TONE);
	input_dev->event = pcspkr_input_event;

	err = input_register_device(input_dev);
	if (err) {
		input_free_device(input_dev);
		return err;
	}

	*rdev = input_dev;

	return 0;
}

int pcspkr_input_remove(struct input_dev *dev)
{
	pcspkr_stop_sound();
	input_unregister_device(dev);	/* this also does kfree() */

	return 0;
}
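A small stand-alone sketch of the frequency-to-divisor conversion pcspkr_input_event() performs above: the i8253's counter 2 is programmed with the PIT input clock divided by the requested tone frequency, and only the driver's accepted range (21..32766 Hz) produces sound. PIT_TICK_RATE is the real kernel constant; the helper name here is illustrative.

#include <stdio.h>

#define PIT_TICK_RATE 1193182	/* i8253 input clock in Hz */

/* Same arithmetic as pcspkr_input_event(): the 16-bit divisor loaded
 * into counter 2 is the PIT clock divided by the tone frequency. */
static unsigned int pit_count_for_hz(int hz)
{
	if (hz > 20 && hz < 32767)
		return PIT_TICK_RATE / hz;
	return 0;	/* out of range: a zero count means silence */
}

int main(void)
{
	printf("1000 Hz bell -> divisor %u\n", pit_count_for_hz(1000));	/* 1193 */
	printf("440 Hz tone  -> divisor %u\n", pit_count_for_hz(440));	/* 2711 */
	return 0;
}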
gpl-2.0
ollie27/android_kernel_samsung_aries
arch/sh/kernel/cpu/sh4a/pinmux-sh7785.c
9489
43141
/* * SH7785 Pinmux * * Copyright (C) 2008 Magnus Damm * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/gpio.h> #include <cpu/sh7785.h> enum { PINMUX_RESERVED = 0, PINMUX_DATA_BEGIN, PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA, PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA, PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA, PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA, PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA, PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA, PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA, PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA, PE5_DATA, PE4_DATA, PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA, PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA, PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA, PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA, PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA, PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA, PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA, PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA, PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA, PK7_DATA, PK6_DATA, PK5_DATA, PK4_DATA, PK3_DATA, PK2_DATA, PK1_DATA, PK0_DATA, PL7_DATA, PL6_DATA, PL5_DATA, PL4_DATA, PL3_DATA, PL2_DATA, PL1_DATA, PL0_DATA, PM1_DATA, PM0_DATA, PN7_DATA, PN6_DATA, PN5_DATA, PN4_DATA, PN3_DATA, PN2_DATA, PN1_DATA, PN0_DATA, PP5_DATA, PP4_DATA, PP3_DATA, PP2_DATA, PP1_DATA, PP0_DATA, PQ4_DATA, PQ3_DATA, PQ2_DATA, PQ1_DATA, PQ0_DATA, PR3_DATA, PR2_DATA, PR1_DATA, PR0_DATA, PINMUX_DATA_END, PINMUX_INPUT_BEGIN, PA7_IN, PA6_IN, PA5_IN, PA4_IN, PA3_IN, PA2_IN, PA1_IN, PA0_IN, PB7_IN, PB6_IN, PB5_IN, PB4_IN, PB3_IN, PB2_IN, PB1_IN, PB0_IN, PC7_IN, PC6_IN, PC5_IN, PC4_IN, PC3_IN, PC2_IN, PC1_IN, PC0_IN, PD7_IN, PD6_IN, PD5_IN, PD4_IN, PD3_IN, PD2_IN, PD1_IN, PD0_IN, PE5_IN, PE4_IN, PE3_IN, PE2_IN, PE1_IN, PE0_IN, PF7_IN, PF6_IN, PF5_IN, PF4_IN, PF3_IN, PF2_IN, PF1_IN, PF0_IN, PG7_IN, PG6_IN, PG5_IN, PG4_IN, PG3_IN, PG2_IN, PG1_IN, PG0_IN, PH7_IN, PH6_IN, PH5_IN, PH4_IN, PH3_IN, PH2_IN, PH1_IN, PH0_IN, PJ7_IN, PJ6_IN, PJ5_IN, PJ4_IN, PJ3_IN, PJ2_IN, PJ1_IN, PJ0_IN, PK7_IN, PK6_IN, PK5_IN, PK4_IN, PK3_IN, PK2_IN, PK1_IN, PK0_IN, PL7_IN, PL6_IN, PL5_IN, PL4_IN, PL3_IN, PL2_IN, PL1_IN, PL0_IN, PM1_IN, PM0_IN, PN7_IN, PN6_IN, PN5_IN, PN4_IN, PN3_IN, PN2_IN, PN1_IN, PN0_IN, PP5_IN, PP4_IN, PP3_IN, PP2_IN, PP1_IN, PP0_IN, PQ4_IN, PQ3_IN, PQ2_IN, PQ1_IN, PQ0_IN, PR3_IN, PR2_IN, PR1_IN, PR0_IN, PINMUX_INPUT_END, PINMUX_INPUT_PULLUP_BEGIN, PA7_IN_PU, PA6_IN_PU, PA5_IN_PU, PA4_IN_PU, PA3_IN_PU, PA2_IN_PU, PA1_IN_PU, PA0_IN_PU, PB7_IN_PU, PB6_IN_PU, PB5_IN_PU, PB4_IN_PU, PB3_IN_PU, PB2_IN_PU, PB1_IN_PU, PB0_IN_PU, PC7_IN_PU, PC6_IN_PU, PC5_IN_PU, PC4_IN_PU, PC3_IN_PU, PC2_IN_PU, PC1_IN_PU, PC0_IN_PU, PD7_IN_PU, PD6_IN_PU, PD5_IN_PU, PD4_IN_PU, PD3_IN_PU, PD2_IN_PU, PD1_IN_PU, PD0_IN_PU, PE5_IN_PU, PE4_IN_PU, PE3_IN_PU, PE2_IN_PU, PE1_IN_PU, PE0_IN_PU, PF7_IN_PU, PF6_IN_PU, PF5_IN_PU, PF4_IN_PU, PF3_IN_PU, PF2_IN_PU, PF1_IN_PU, PF0_IN_PU, PG7_IN_PU, PG6_IN_PU, PG5_IN_PU, PG4_IN_PU, PG3_IN_PU, PG2_IN_PU, PG1_IN_PU, PG0_IN_PU, PH7_IN_PU, PH6_IN_PU, PH5_IN_PU, PH4_IN_PU, PH3_IN_PU, PH2_IN_PU, PH1_IN_PU, PH0_IN_PU, PJ7_IN_PU, PJ6_IN_PU, PJ5_IN_PU, PJ4_IN_PU, PJ3_IN_PU, PJ2_IN_PU, PJ1_IN_PU, PJ0_IN_PU, PK7_IN_PU, PK6_IN_PU, PK5_IN_PU, PK4_IN_PU, PK3_IN_PU, PK2_IN_PU, PK1_IN_PU, PK0_IN_PU, PL7_IN_PU, PL6_IN_PU, PL5_IN_PU, PL4_IN_PU, PL3_IN_PU, PL2_IN_PU, PL1_IN_PU, PL0_IN_PU, PM1_IN_PU, PM0_IN_PU, PN7_IN_PU, PN6_IN_PU, PN5_IN_PU, PN4_IN_PU, PN3_IN_PU, PN2_IN_PU, PN1_IN_PU, PN0_IN_PU, PP5_IN_PU, PP4_IN_PU, PP3_IN_PU, PP2_IN_PU, 
PP1_IN_PU, PP0_IN_PU, PQ4_IN_PU, PQ3_IN_PU, PQ2_IN_PU, PQ1_IN_PU, PQ0_IN_PU, PR3_IN_PU, PR2_IN_PU, PR1_IN_PU, PR0_IN_PU, PINMUX_INPUT_PULLUP_END, PINMUX_OUTPUT_BEGIN, PA7_OUT, PA6_OUT, PA5_OUT, PA4_OUT, PA3_OUT, PA2_OUT, PA1_OUT, PA0_OUT, PB7_OUT, PB6_OUT, PB5_OUT, PB4_OUT, PB3_OUT, PB2_OUT, PB1_OUT, PB0_OUT, PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT, PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT, PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT, PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT, PE5_OUT, PE4_OUT, PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT, PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT, PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT, PG7_OUT, PG6_OUT, PG5_OUT, PG4_OUT, PG3_OUT, PG2_OUT, PG1_OUT, PG0_OUT, PH7_OUT, PH6_OUT, PH5_OUT, PH4_OUT, PH3_OUT, PH2_OUT, PH1_OUT, PH0_OUT, PJ7_OUT, PJ6_OUT, PJ5_OUT, PJ4_OUT, PJ3_OUT, PJ2_OUT, PJ1_OUT, PJ0_OUT, PK7_OUT, PK6_OUT, PK5_OUT, PK4_OUT, PK3_OUT, PK2_OUT, PK1_OUT, PK0_OUT, PL7_OUT, PL6_OUT, PL5_OUT, PL4_OUT, PL3_OUT, PL2_OUT, PL1_OUT, PL0_OUT, PM1_OUT, PM0_OUT, PN7_OUT, PN6_OUT, PN5_OUT, PN4_OUT, PN3_OUT, PN2_OUT, PN1_OUT, PN0_OUT, PP5_OUT, PP4_OUT, PP3_OUT, PP2_OUT, PP1_OUT, PP0_OUT, PQ4_OUT, PQ3_OUT, PQ2_OUT, PQ1_OUT, PQ0_OUT, PR3_OUT, PR2_OUT, PR1_OUT, PR0_OUT, PINMUX_OUTPUT_END, PINMUX_FUNCTION_BEGIN, PA7_FN, PA6_FN, PA5_FN, PA4_FN, PA3_FN, PA2_FN, PA1_FN, PA0_FN, PB7_FN, PB6_FN, PB5_FN, PB4_FN, PB3_FN, PB2_FN, PB1_FN, PB0_FN, PC7_FN, PC6_FN, PC5_FN, PC4_FN, PC3_FN, PC2_FN, PC1_FN, PC0_FN, PD7_FN, PD6_FN, PD5_FN, PD4_FN, PD3_FN, PD2_FN, PD1_FN, PD0_FN, PE5_FN, PE4_FN, PE3_FN, PE2_FN, PE1_FN, PE0_FN, PF7_FN, PF6_FN, PF5_FN, PF4_FN, PF3_FN, PF2_FN, PF1_FN, PF0_FN, PG7_FN, PG6_FN, PG5_FN, PG4_FN, PG3_FN, PG2_FN, PG1_FN, PG0_FN, PH7_FN, PH6_FN, PH5_FN, PH4_FN, PH3_FN, PH2_FN, PH1_FN, PH0_FN, PJ7_FN, PJ6_FN, PJ5_FN, PJ4_FN, PJ3_FN, PJ2_FN, PJ1_FN, PJ0_FN, PK7_FN, PK6_FN, PK5_FN, PK4_FN, PK3_FN, PK2_FN, PK1_FN, PK0_FN, PL7_FN, PL6_FN, PL5_FN, PL4_FN, PL3_FN, PL2_FN, PL1_FN, PL0_FN, PM1_FN, PM0_FN, PN7_FN, PN6_FN, PN5_FN, PN4_FN, PN3_FN, PN2_FN, PN1_FN, PN0_FN, PP5_FN, PP4_FN, PP3_FN, PP2_FN, PP1_FN, PP0_FN, PQ4_FN, PQ3_FN, PQ2_FN, PQ1_FN, PQ0_FN, PR3_FN, PR2_FN, PR1_FN, PR0_FN, P1MSEL15_0, P1MSEL15_1, P1MSEL14_0, P1MSEL14_1, P1MSEL13_0, P1MSEL13_1, P1MSEL12_0, P1MSEL12_1, P1MSEL11_0, P1MSEL11_1, P1MSEL10_0, P1MSEL10_1, P1MSEL9_0, P1MSEL9_1, P1MSEL8_0, P1MSEL8_1, P1MSEL7_0, P1MSEL7_1, P1MSEL6_0, P1MSEL6_1, P1MSEL5_0, P1MSEL4_0, P1MSEL4_1, P1MSEL3_0, P1MSEL3_1, P1MSEL2_0, P1MSEL2_1, P1MSEL1_0, P1MSEL1_1, P1MSEL0_0, P1MSEL0_1, P2MSEL2_0, P2MSEL2_1, P2MSEL1_0, P2MSEL1_1, P2MSEL0_0, P2MSEL0_1, PINMUX_FUNCTION_END, PINMUX_MARK_BEGIN, D63_AD31_MARK, D62_AD30_MARK, D61_AD29_MARK, D60_AD28_MARK, D59_AD27_MARK, D58_AD26_MARK, D57_AD25_MARK, D56_AD24_MARK, D55_AD23_MARK, D54_AD22_MARK, D53_AD21_MARK, D52_AD20_MARK, D51_AD19_MARK, D50_AD18_MARK, D49_AD17_DB5_MARK, D48_AD16_DB4_MARK, D47_AD15_DB3_MARK, D46_AD14_DB2_MARK, D45_AD13_DB1_MARK, D44_AD12_DB0_MARK, D43_AD11_DG5_MARK, D42_AD10_DG4_MARK, D41_AD9_DG3_MARK, D40_AD8_DG2_MARK, D39_AD7_DG1_MARK, D38_AD6_DG0_MARK, D37_AD5_DR5_MARK, D36_AD4_DR4_MARK, D35_AD3_DR3_MARK, D34_AD2_DR2_MARK, D33_AD1_DR1_MARK, D32_AD0_DR0_MARK, REQ1_MARK, REQ2_MARK, REQ3_MARK, GNT1_MARK, GNT2_MARK, GNT3_MARK, MMCCLK_MARK, D31_MARK, D30_MARK, D29_MARK, D28_MARK, D27_MARK, D26_MARK, D25_MARK, D24_MARK, D23_MARK, D22_MARK, D21_MARK, D20_MARK, D19_MARK, D18_MARK, D17_MARK, D16_MARK, SCIF1_SCK_MARK, SCIF1_RXD_MARK, SCIF1_TXD_MARK, SCIF0_CTS_MARK, INTD_MARK, FCE_MARK, SCIF0_RTS_MARK, HSPI_CS_MARK, FSE_MARK, SCIF0_SCK_MARK, HSPI_CLK_MARK, FRE_MARK, SCIF0_RXD_MARK, HSPI_RX_MARK, FRB_MARK, 
SCIF0_TXD_MARK, HSPI_TX_MARK, FWE_MARK, SCIF5_TXD_MARK, HAC1_SYNC_MARK, SSI1_WS_MARK, SIOF_TXD_PJ_MARK, HAC0_SDOUT_MARK, SSI0_SDATA_MARK, SIOF_RXD_PJ_MARK, HAC0_SDIN_MARK, SSI0_SCK_MARK, SIOF_SYNC_PJ_MARK, HAC0_SYNC_MARK, SSI0_WS_MARK, SIOF_MCLK_PJ_MARK, HAC_RES_MARK, SIOF_SCK_PJ_MARK, HAC0_BITCLK_MARK, SSI0_CLK_MARK, HAC1_BITCLK_MARK, SSI1_CLK_MARK, TCLK_MARK, IOIS16_MARK, STATUS0_MARK, DRAK0_PK3_MARK, STATUS1_MARK, DRAK1_PK2_MARK, DACK2_MARK, SCIF2_TXD_MARK, MMCCMD_MARK, SIOF_TXD_PK_MARK, DACK3_MARK, SCIF2_SCK_MARK, MMCDAT_MARK, SIOF_SCK_PK_MARK, DREQ0_MARK, DREQ1_MARK, DRAK0_PK1_MARK, DRAK1_PK0_MARK, DREQ2_MARK, INTB_MARK, DREQ3_MARK, INTC_MARK, DRAK2_MARK, CE2A_MARK, IRL4_MARK, FD4_MARK, IRL5_MARK, FD5_MARK, IRL6_MARK, FD6_MARK, IRL7_MARK, FD7_MARK, DRAK3_MARK, CE2B_MARK, BREQ_BSACK_MARK, BACK_BSREQ_MARK, SCIF5_RXD_MARK, HAC1_SDIN_MARK, SSI1_SCK_MARK, SCIF5_SCK_MARK, HAC1_SDOUT_MARK, SSI1_SDATA_MARK, SCIF3_TXD_MARK, FCLE_MARK, SCIF3_RXD_MARK, FALE_MARK, SCIF3_SCK_MARK, FD0_MARK, SCIF4_TXD_MARK, FD1_MARK, SCIF4_RXD_MARK, FD2_MARK, SCIF4_SCK_MARK, FD3_MARK, DEVSEL_DCLKOUT_MARK, STOP_CDE_MARK, LOCK_ODDF_MARK, TRDY_DISPL_MARK, IRDY_HSYNC_MARK, PCIFRAME_VSYNC_MARK, INTA_MARK, GNT0_GNTIN_MARK, REQ0_REQOUT_MARK, PERR_MARK, SERR_MARK, WE7_CBE3_MARK, WE6_CBE2_MARK, WE5_CBE1_MARK, WE4_CBE0_MARK, SCIF2_RXD_MARK, SIOF_RXD_MARK, MRESETOUT_MARK, IRQOUT_MARK, PINMUX_MARK_END, }; static pinmux_enum_t pinmux_data[] = { /* PA GPIO */ PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU), PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT, PA6_IN_PU), PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT, PA5_IN_PU), PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT, PA4_IN_PU), PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT, PA3_IN_PU), PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT, PA2_IN_PU), PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT, PA1_IN_PU), PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT, PA0_IN_PU), /* PB GPIO */ PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT, PB7_IN_PU), PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT, PB6_IN_PU), PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT, PB5_IN_PU), PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT, PB4_IN_PU), PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT, PB3_IN_PU), PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT, PB2_IN_PU), PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT, PB1_IN_PU), PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT, PB0_IN_PU), /* PC GPIO */ PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT, PC7_IN_PU), PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT, PC6_IN_PU), PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT, PC5_IN_PU), PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT, PC4_IN_PU), PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT, PC3_IN_PU), PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT, PC2_IN_PU), PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT, PC1_IN_PU), PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT, PC0_IN_PU), /* PD GPIO */ PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT, PD7_IN_PU), PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT, PD6_IN_PU), PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT, PD5_IN_PU), PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT, PD4_IN_PU), PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT, PD3_IN_PU), PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT, PD2_IN_PU), PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT, PD1_IN_PU), PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT, PD0_IN_PU), /* PE GPIO */ PINMUX_DATA(PE5_DATA, PE5_IN, PE5_OUT, PE5_IN_PU), PINMUX_DATA(PE4_DATA, PE4_IN, PE4_OUT, PE4_IN_PU), PINMUX_DATA(PE3_DATA, PE3_IN, PE3_OUT, PE3_IN_PU), PINMUX_DATA(PE2_DATA, PE2_IN, PE2_OUT, PE2_IN_PU), PINMUX_DATA(PE1_DATA, PE1_IN, PE1_OUT, PE1_IN_PU), PINMUX_DATA(PE0_DATA, PE0_IN, PE0_OUT, PE0_IN_PU), /* PF GPIO */ PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT, PF7_IN_PU), PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT, PF6_IN_PU), 
PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT, PF5_IN_PU), PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT, PF4_IN_PU), PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT, PF3_IN_PU), PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT, PF2_IN_PU), PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT, PF1_IN_PU), PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT, PF0_IN_PU), /* PG GPIO */ PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT, PG7_IN_PU), PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT, PG6_IN_PU), PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT, PG5_IN_PU), PINMUX_DATA(PG4_DATA, PG4_IN, PG4_OUT, PG4_IN_PU), PINMUX_DATA(PG3_DATA, PG3_IN, PG3_OUT, PG3_IN_PU), PINMUX_DATA(PG2_DATA, PG2_IN, PG2_OUT, PG2_IN_PU), PINMUX_DATA(PG1_DATA, PG1_IN, PG1_OUT, PG1_IN_PU), PINMUX_DATA(PG0_DATA, PG0_IN, PG0_OUT, PG0_IN_PU), /* PH GPIO */ PINMUX_DATA(PH7_DATA, PH7_IN, PH7_OUT, PH7_IN_PU), PINMUX_DATA(PH6_DATA, PH6_IN, PH6_OUT, PH6_IN_PU), PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT, PH5_IN_PU), PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT, PH4_IN_PU), PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT, PH3_IN_PU), PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT, PH2_IN_PU), PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT, PH1_IN_PU), PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT, PH0_IN_PU), /* PJ GPIO */ PINMUX_DATA(PJ7_DATA, PJ7_IN, PJ7_OUT, PJ7_IN_PU), PINMUX_DATA(PJ6_DATA, PJ6_IN, PJ6_OUT, PJ6_IN_PU), PINMUX_DATA(PJ5_DATA, PJ5_IN, PJ5_OUT, PJ5_IN_PU), PINMUX_DATA(PJ4_DATA, PJ4_IN, PJ4_OUT, PJ4_IN_PU), PINMUX_DATA(PJ3_DATA, PJ3_IN, PJ3_OUT, PJ3_IN_PU), PINMUX_DATA(PJ2_DATA, PJ2_IN, PJ2_OUT, PJ2_IN_PU), PINMUX_DATA(PJ1_DATA, PJ1_IN, PJ1_OUT, PJ1_IN_PU), PINMUX_DATA(PJ0_DATA, PJ0_IN, PJ0_OUT, PJ0_IN_PU), /* PK GPIO */ PINMUX_DATA(PK7_DATA, PK7_IN, PK7_OUT, PK7_IN_PU), PINMUX_DATA(PK6_DATA, PK6_IN, PK6_OUT, PK6_IN_PU), PINMUX_DATA(PK5_DATA, PK5_IN, PK5_OUT, PK5_IN_PU), PINMUX_DATA(PK4_DATA, PK4_IN, PK4_OUT, PK4_IN_PU), PINMUX_DATA(PK3_DATA, PK3_IN, PK3_OUT, PK3_IN_PU), PINMUX_DATA(PK2_DATA, PK2_IN, PK2_OUT, PK2_IN_PU), PINMUX_DATA(PK1_DATA, PK1_IN, PK1_OUT, PK1_IN_PU), PINMUX_DATA(PK0_DATA, PK0_IN, PK0_OUT, PK0_IN_PU), /* PL GPIO */ PINMUX_DATA(PL7_DATA, PL7_IN, PL7_OUT, PL7_IN_PU), PINMUX_DATA(PL6_DATA, PL6_IN, PL6_OUT, PL6_IN_PU), PINMUX_DATA(PL5_DATA, PL5_IN, PL5_OUT, PL5_IN_PU), PINMUX_DATA(PL4_DATA, PL4_IN, PL4_OUT, PL4_IN_PU), PINMUX_DATA(PL3_DATA, PL3_IN, PL3_OUT, PL3_IN_PU), PINMUX_DATA(PL2_DATA, PL2_IN, PL2_OUT, PL2_IN_PU), PINMUX_DATA(PL1_DATA, PL1_IN, PL1_OUT, PL1_IN_PU), PINMUX_DATA(PL0_DATA, PL0_IN, PL0_OUT, PL0_IN_PU), /* PM GPIO */ PINMUX_DATA(PM1_DATA, PM1_IN, PM1_OUT, PM1_IN_PU), PINMUX_DATA(PM0_DATA, PM0_IN, PM0_OUT, PM0_IN_PU), /* PN GPIO */ PINMUX_DATA(PN7_DATA, PN7_IN, PN7_OUT, PN7_IN_PU), PINMUX_DATA(PN6_DATA, PN6_IN, PN6_OUT, PN6_IN_PU), PINMUX_DATA(PN5_DATA, PN5_IN, PN5_OUT, PN5_IN_PU), PINMUX_DATA(PN4_DATA, PN4_IN, PN4_OUT, PN4_IN_PU), PINMUX_DATA(PN3_DATA, PN3_IN, PN3_OUT, PN3_IN_PU), PINMUX_DATA(PN2_DATA, PN2_IN, PN2_OUT, PN2_IN_PU), PINMUX_DATA(PN1_DATA, PN1_IN, PN1_OUT, PN1_IN_PU), PINMUX_DATA(PN0_DATA, PN0_IN, PN0_OUT, PN0_IN_PU), /* PP GPIO */ PINMUX_DATA(PP5_DATA, PP5_IN, PP5_OUT, PP5_IN_PU), PINMUX_DATA(PP4_DATA, PP4_IN, PP4_OUT, PP4_IN_PU), PINMUX_DATA(PP3_DATA, PP3_IN, PP3_OUT, PP3_IN_PU), PINMUX_DATA(PP2_DATA, PP2_IN, PP2_OUT, PP2_IN_PU), PINMUX_DATA(PP1_DATA, PP1_IN, PP1_OUT, PP1_IN_PU), PINMUX_DATA(PP0_DATA, PP0_IN, PP0_OUT, PP0_IN_PU), /* PQ GPIO */ PINMUX_DATA(PQ4_DATA, PQ4_IN, PQ4_OUT, PQ4_IN_PU), PINMUX_DATA(PQ3_DATA, PQ3_IN, PQ3_OUT, PQ3_IN_PU), PINMUX_DATA(PQ2_DATA, PQ2_IN, PQ2_OUT, PQ2_IN_PU), PINMUX_DATA(PQ1_DATA, PQ1_IN, PQ1_OUT, PQ1_IN_PU), PINMUX_DATA(PQ0_DATA, PQ0_IN, PQ0_OUT, PQ0_IN_PU), /* PR GPIO 
*/ PINMUX_DATA(PR3_DATA, PR3_IN, PR3_OUT, PR3_IN_PU), PINMUX_DATA(PR2_DATA, PR2_IN, PR2_OUT, PR2_IN_PU), PINMUX_DATA(PR1_DATA, PR1_IN, PR1_OUT, PR1_IN_PU), PINMUX_DATA(PR0_DATA, PR0_IN, PR0_OUT, PR0_IN_PU), /* PA FN */ PINMUX_DATA(D63_AD31_MARK, PA7_FN), PINMUX_DATA(D62_AD30_MARK, PA6_FN), PINMUX_DATA(D61_AD29_MARK, PA5_FN), PINMUX_DATA(D60_AD28_MARK, PA4_FN), PINMUX_DATA(D59_AD27_MARK, PA3_FN), PINMUX_DATA(D58_AD26_MARK, PA2_FN), PINMUX_DATA(D57_AD25_MARK, PA1_FN), PINMUX_DATA(D56_AD24_MARK, PA0_FN), /* PB FN */ PINMUX_DATA(D55_AD23_MARK, PB7_FN), PINMUX_DATA(D54_AD22_MARK, PB6_FN), PINMUX_DATA(D53_AD21_MARK, PB5_FN), PINMUX_DATA(D52_AD20_MARK, PB4_FN), PINMUX_DATA(D51_AD19_MARK, PB3_FN), PINMUX_DATA(D50_AD18_MARK, PB2_FN), PINMUX_DATA(D49_AD17_DB5_MARK, PB1_FN), PINMUX_DATA(D48_AD16_DB4_MARK, PB0_FN), /* PC FN */ PINMUX_DATA(D47_AD15_DB3_MARK, PC7_FN), PINMUX_DATA(D46_AD14_DB2_MARK, PC6_FN), PINMUX_DATA(D45_AD13_DB1_MARK, PC5_FN), PINMUX_DATA(D44_AD12_DB0_MARK, PC4_FN), PINMUX_DATA(D43_AD11_DG5_MARK, PC3_FN), PINMUX_DATA(D42_AD10_DG4_MARK, PC2_FN), PINMUX_DATA(D41_AD9_DG3_MARK, PC1_FN), PINMUX_DATA(D40_AD8_DG2_MARK, PC0_FN), /* PD FN */ PINMUX_DATA(D39_AD7_DG1_MARK, PD7_FN), PINMUX_DATA(D38_AD6_DG0_MARK, PD6_FN), PINMUX_DATA(D37_AD5_DR5_MARK, PD5_FN), PINMUX_DATA(D36_AD4_DR4_MARK, PD4_FN), PINMUX_DATA(D35_AD3_DR3_MARK, PD3_FN), PINMUX_DATA(D34_AD2_DR2_MARK, PD2_FN), PINMUX_DATA(D33_AD1_DR1_MARK, PD1_FN), PINMUX_DATA(D32_AD0_DR0_MARK, PD0_FN), /* PE FN */ PINMUX_DATA(REQ1_MARK, PE5_FN), PINMUX_DATA(REQ2_MARK, PE4_FN), PINMUX_DATA(REQ3_MARK, P2MSEL0_0, PE3_FN), PINMUX_DATA(GNT1_MARK, PE2_FN), PINMUX_DATA(GNT2_MARK, PE1_FN), PINMUX_DATA(GNT3_MARK, P2MSEL0_0, PE0_FN), PINMUX_DATA(MMCCLK_MARK, P2MSEL0_1, PE0_FN), /* PF FN */ PINMUX_DATA(D31_MARK, PF7_FN), PINMUX_DATA(D30_MARK, PF6_FN), PINMUX_DATA(D29_MARK, PF5_FN), PINMUX_DATA(D28_MARK, PF4_FN), PINMUX_DATA(D27_MARK, PF3_FN), PINMUX_DATA(D26_MARK, PF2_FN), PINMUX_DATA(D25_MARK, PF1_FN), PINMUX_DATA(D24_MARK, PF0_FN), /* PF FN */ PINMUX_DATA(D23_MARK, PG7_FN), PINMUX_DATA(D22_MARK, PG6_FN), PINMUX_DATA(D21_MARK, PG5_FN), PINMUX_DATA(D20_MARK, PG4_FN), PINMUX_DATA(D19_MARK, PG3_FN), PINMUX_DATA(D18_MARK, PG2_FN), PINMUX_DATA(D17_MARK, PG1_FN), PINMUX_DATA(D16_MARK, PG0_FN), /* PH FN */ PINMUX_DATA(SCIF1_SCK_MARK, PH7_FN), PINMUX_DATA(SCIF1_RXD_MARK, PH6_FN), PINMUX_DATA(SCIF1_TXD_MARK, PH5_FN), PINMUX_DATA(SCIF0_CTS_MARK, PH4_FN), PINMUX_DATA(INTD_MARK, P1MSEL7_1, PH4_FN), PINMUX_DATA(FCE_MARK, P1MSEL8_1, P1MSEL7_0, PH4_FN), PINMUX_DATA(SCIF0_RTS_MARK, P1MSEL8_0, P1MSEL7_0, PH3_FN), PINMUX_DATA(HSPI_CS_MARK, P1MSEL8_0, P1MSEL7_1, PH3_FN), PINMUX_DATA(FSE_MARK, P1MSEL8_1, P1MSEL7_0, PH3_FN), PINMUX_DATA(SCIF0_SCK_MARK, P1MSEL8_0, P1MSEL7_0, PH2_FN), PINMUX_DATA(HSPI_CLK_MARK, P1MSEL8_0, P1MSEL7_1, PH2_FN), PINMUX_DATA(FRE_MARK, P1MSEL8_1, P1MSEL7_0, PH2_FN), PINMUX_DATA(SCIF0_RXD_MARK, P1MSEL8_0, P1MSEL7_0, PH1_FN), PINMUX_DATA(HSPI_RX_MARK, P1MSEL8_0, P1MSEL7_1, PH1_FN), PINMUX_DATA(FRB_MARK, P1MSEL8_1, P1MSEL7_0, PH1_FN), PINMUX_DATA(SCIF0_TXD_MARK, P1MSEL8_0, P1MSEL7_0, PH0_FN), PINMUX_DATA(HSPI_TX_MARK, P1MSEL8_0, P1MSEL7_1, PH0_FN), PINMUX_DATA(FWE_MARK, P1MSEL8_1, P1MSEL7_0, PH0_FN), /* PJ FN */ PINMUX_DATA(SCIF5_TXD_MARK, P1MSEL2_0, P1MSEL1_0, PJ7_FN), PINMUX_DATA(HAC1_SYNC_MARK, P1MSEL2_0, P1MSEL1_1, PJ7_FN), PINMUX_DATA(SSI1_WS_MARK, P1MSEL2_1, P1MSEL1_0, PJ7_FN), PINMUX_DATA(SIOF_TXD_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ6_FN), PINMUX_DATA(HAC0_SDOUT_MARK, P1MSEL4_0, P1MSEL3_1, PJ6_FN), PINMUX_DATA(SSI0_SDATA_MARK, P1MSEL4_1, 
P1MSEL3_0, PJ6_FN), PINMUX_DATA(SIOF_RXD_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ5_FN), PINMUX_DATA(HAC0_SDIN_MARK, P1MSEL4_0, P1MSEL3_1, PJ5_FN), PINMUX_DATA(SSI0_SCK_MARK, P1MSEL4_1, P1MSEL3_0, PJ5_FN), PINMUX_DATA(SIOF_SYNC_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ4_FN), PINMUX_DATA(HAC0_SYNC_MARK, P1MSEL4_0, P1MSEL3_1, PJ4_FN), PINMUX_DATA(SSI0_WS_MARK, P1MSEL4_1, P1MSEL3_0, PJ4_FN), PINMUX_DATA(SIOF_MCLK_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ3_FN), PINMUX_DATA(HAC_RES_MARK, P1MSEL4_0, P1MSEL3_1, PJ3_FN), PINMUX_DATA(SIOF_SCK_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ2_FN), PINMUX_DATA(HAC0_BITCLK_MARK, P1MSEL4_0, P1MSEL3_1, PJ2_FN), PINMUX_DATA(SSI0_CLK_MARK, P1MSEL4_1, P1MSEL3_0, PJ2_FN), PINMUX_DATA(HAC1_BITCLK_MARK, P1MSEL2_0, PJ1_FN), PINMUX_DATA(SSI1_CLK_MARK, P1MSEL2_1, P1MSEL1_0, PJ1_FN), PINMUX_DATA(TCLK_MARK, P1MSEL9_0, PJ0_FN), PINMUX_DATA(IOIS16_MARK, P1MSEL9_1, PJ0_FN), /* PK FN */ PINMUX_DATA(STATUS0_MARK, P1MSEL15_0, PK7_FN), PINMUX_DATA(DRAK0_PK3_MARK, P1MSEL15_1, PK7_FN), PINMUX_DATA(STATUS1_MARK, P1MSEL15_0, PK6_FN), PINMUX_DATA(DRAK1_PK2_MARK, P1MSEL15_1, PK6_FN), PINMUX_DATA(DACK2_MARK, P1MSEL12_0, P1MSEL11_0, PK5_FN), PINMUX_DATA(SCIF2_TXD_MARK, P1MSEL12_1, P1MSEL11_0, PK5_FN), PINMUX_DATA(MMCCMD_MARK, P1MSEL12_1, P1MSEL11_1, PK5_FN), PINMUX_DATA(SIOF_TXD_PK_MARK, P2MSEL1_1, P1MSEL12_0, P1MSEL11_1, PK5_FN), PINMUX_DATA(DACK3_MARK, P1MSEL12_0, P1MSEL11_0, PK4_FN), PINMUX_DATA(SCIF2_SCK_MARK, P1MSEL12_1, P1MSEL11_0, PK4_FN), PINMUX_DATA(MMCDAT_MARK, P1MSEL12_1, P1MSEL11_1, PK4_FN), PINMUX_DATA(SIOF_SCK_PK_MARK, P2MSEL1_1, P1MSEL12_0, P1MSEL11_1, PK4_FN), PINMUX_DATA(DREQ0_MARK, PK3_FN), PINMUX_DATA(DREQ1_MARK, PK2_FN), PINMUX_DATA(DRAK0_PK1_MARK, PK1_FN), PINMUX_DATA(DRAK1_PK0_MARK, PK0_FN), /* PL FN */ PINMUX_DATA(DREQ2_MARK, P1MSEL13_0, PL7_FN), PINMUX_DATA(INTB_MARK, P1MSEL13_1, PL7_FN), PINMUX_DATA(DREQ3_MARK, P1MSEL13_0, PL6_FN), PINMUX_DATA(INTC_MARK, P1MSEL13_1, PL6_FN), PINMUX_DATA(DRAK2_MARK, P1MSEL10_0, PL5_FN), PINMUX_DATA(CE2A_MARK, P1MSEL10_1, PL5_FN), PINMUX_DATA(IRL4_MARK, P1MSEL14_0, PL4_FN), PINMUX_DATA(FD4_MARK, P1MSEL14_1, PL4_FN), PINMUX_DATA(IRL5_MARK, P1MSEL14_0, PL3_FN), PINMUX_DATA(FD5_MARK, P1MSEL14_1, PL3_FN), PINMUX_DATA(IRL6_MARK, P1MSEL14_0, PL2_FN), PINMUX_DATA(FD6_MARK, P1MSEL14_1, PL2_FN), PINMUX_DATA(IRL7_MARK, P1MSEL14_0, PL1_FN), PINMUX_DATA(FD7_MARK, P1MSEL14_1, PL1_FN), PINMUX_DATA(DRAK3_MARK, P1MSEL10_0, PL0_FN), PINMUX_DATA(CE2B_MARK, P1MSEL10_1, PL0_FN), /* PM FN */ PINMUX_DATA(BREQ_BSACK_MARK, PM1_FN), PINMUX_DATA(BACK_BSREQ_MARK, PM0_FN), /* PN FN */ PINMUX_DATA(SCIF5_RXD_MARK, P1MSEL2_0, P1MSEL1_0, PN7_FN), PINMUX_DATA(HAC1_SDIN_MARK, P1MSEL2_0, P1MSEL1_1, PN7_FN), PINMUX_DATA(SSI1_SCK_MARK, P1MSEL2_1, P1MSEL1_0, PN7_FN), PINMUX_DATA(SCIF5_SCK_MARK, P1MSEL2_0, P1MSEL1_0, PN6_FN), PINMUX_DATA(HAC1_SDOUT_MARK, P1MSEL2_0, P1MSEL1_1, PN6_FN), PINMUX_DATA(SSI1_SDATA_MARK, P1MSEL2_1, P1MSEL1_0, PN6_FN), PINMUX_DATA(SCIF3_TXD_MARK, P1MSEL0_0, PN5_FN), PINMUX_DATA(FCLE_MARK, P1MSEL0_1, PN5_FN), PINMUX_DATA(SCIF3_RXD_MARK, P1MSEL0_0, PN4_FN), PINMUX_DATA(FALE_MARK, P1MSEL0_1, PN4_FN), PINMUX_DATA(SCIF3_SCK_MARK, P1MSEL0_0, PN3_FN), PINMUX_DATA(FD0_MARK, P1MSEL0_1, PN3_FN), PINMUX_DATA(SCIF4_TXD_MARK, P1MSEL0_0, PN2_FN), PINMUX_DATA(FD1_MARK, P1MSEL0_1, PN2_FN), PINMUX_DATA(SCIF4_RXD_MARK, P1MSEL0_0, PN1_FN), PINMUX_DATA(FD2_MARK, P1MSEL0_1, PN1_FN), PINMUX_DATA(SCIF4_SCK_MARK, P1MSEL0_0, PN0_FN), PINMUX_DATA(FD3_MARK, P1MSEL0_1, PN0_FN), /* PP FN */ PINMUX_DATA(DEVSEL_DCLKOUT_MARK, PP5_FN), PINMUX_DATA(STOP_CDE_MARK, 
PP4_FN), PINMUX_DATA(LOCK_ODDF_MARK, PP3_FN), PINMUX_DATA(TRDY_DISPL_MARK, PP2_FN), PINMUX_DATA(IRDY_HSYNC_MARK, PP1_FN), PINMUX_DATA(PCIFRAME_VSYNC_MARK, PP0_FN), /* PQ FN */ PINMUX_DATA(INTA_MARK, PQ4_FN), PINMUX_DATA(GNT0_GNTIN_MARK, PQ3_FN), PINMUX_DATA(REQ0_REQOUT_MARK, PQ2_FN), PINMUX_DATA(PERR_MARK, PQ1_FN), PINMUX_DATA(SERR_MARK, PQ0_FN), /* PR FN */ PINMUX_DATA(WE7_CBE3_MARK, PR3_FN), PINMUX_DATA(WE6_CBE2_MARK, PR2_FN), PINMUX_DATA(WE5_CBE1_MARK, PR1_FN), PINMUX_DATA(WE4_CBE0_MARK, PR0_FN), /* MISC FN */ PINMUX_DATA(SCIF2_RXD_MARK, P1MSEL6_0, P1MSEL5_0), PINMUX_DATA(SIOF_RXD_MARK, P2MSEL1_1, P1MSEL6_1, P1MSEL5_0), PINMUX_DATA(MRESETOUT_MARK, P2MSEL2_0), PINMUX_DATA(IRQOUT_MARK, P2MSEL2_1), }; static struct pinmux_gpio pinmux_gpios[] = { /* PA */ PINMUX_GPIO(GPIO_PA7, PA7_DATA), PINMUX_GPIO(GPIO_PA6, PA6_DATA), PINMUX_GPIO(GPIO_PA5, PA5_DATA), PINMUX_GPIO(GPIO_PA4, PA4_DATA), PINMUX_GPIO(GPIO_PA3, PA3_DATA), PINMUX_GPIO(GPIO_PA2, PA2_DATA), PINMUX_GPIO(GPIO_PA1, PA1_DATA), PINMUX_GPIO(GPIO_PA0, PA0_DATA), /* PB */ PINMUX_GPIO(GPIO_PB7, PB7_DATA), PINMUX_GPIO(GPIO_PB6, PB6_DATA), PINMUX_GPIO(GPIO_PB5, PB5_DATA), PINMUX_GPIO(GPIO_PB4, PB4_DATA), PINMUX_GPIO(GPIO_PB3, PB3_DATA), PINMUX_GPIO(GPIO_PB2, PB2_DATA), PINMUX_GPIO(GPIO_PB1, PB1_DATA), PINMUX_GPIO(GPIO_PB0, PB0_DATA), /* PC */ PINMUX_GPIO(GPIO_PC7, PC7_DATA), PINMUX_GPIO(GPIO_PC6, PC6_DATA), PINMUX_GPIO(GPIO_PC5, PC5_DATA), PINMUX_GPIO(GPIO_PC4, PC4_DATA), PINMUX_GPIO(GPIO_PC3, PC3_DATA), PINMUX_GPIO(GPIO_PC2, PC2_DATA), PINMUX_GPIO(GPIO_PC1, PC1_DATA), PINMUX_GPIO(GPIO_PC0, PC0_DATA), /* PD */ PINMUX_GPIO(GPIO_PD7, PD7_DATA), PINMUX_GPIO(GPIO_PD6, PD6_DATA), PINMUX_GPIO(GPIO_PD5, PD5_DATA), PINMUX_GPIO(GPIO_PD4, PD4_DATA), PINMUX_GPIO(GPIO_PD3, PD3_DATA), PINMUX_GPIO(GPIO_PD2, PD2_DATA), PINMUX_GPIO(GPIO_PD1, PD1_DATA), PINMUX_GPIO(GPIO_PD0, PD0_DATA), /* PE */ PINMUX_GPIO(GPIO_PE5, PE5_DATA), PINMUX_GPIO(GPIO_PE4, PE4_DATA), PINMUX_GPIO(GPIO_PE3, PE3_DATA), PINMUX_GPIO(GPIO_PE2, PE2_DATA), PINMUX_GPIO(GPIO_PE1, PE1_DATA), PINMUX_GPIO(GPIO_PE0, PE0_DATA), /* PF */ PINMUX_GPIO(GPIO_PF7, PF7_DATA), PINMUX_GPIO(GPIO_PF6, PF6_DATA), PINMUX_GPIO(GPIO_PF5, PF5_DATA), PINMUX_GPIO(GPIO_PF4, PF4_DATA), PINMUX_GPIO(GPIO_PF3, PF3_DATA), PINMUX_GPIO(GPIO_PF2, PF2_DATA), PINMUX_GPIO(GPIO_PF1, PF1_DATA), PINMUX_GPIO(GPIO_PF0, PF0_DATA), /* PG */ PINMUX_GPIO(GPIO_PG7, PG7_DATA), PINMUX_GPIO(GPIO_PG6, PG6_DATA), PINMUX_GPIO(GPIO_PG5, PG5_DATA), PINMUX_GPIO(GPIO_PG4, PG4_DATA), PINMUX_GPIO(GPIO_PG3, PG3_DATA), PINMUX_GPIO(GPIO_PG2, PG2_DATA), PINMUX_GPIO(GPIO_PG1, PG1_DATA), PINMUX_GPIO(GPIO_PG0, PG0_DATA), /* PH */ PINMUX_GPIO(GPIO_PH7, PH7_DATA), PINMUX_GPIO(GPIO_PH6, PH6_DATA), PINMUX_GPIO(GPIO_PH5, PH5_DATA), PINMUX_GPIO(GPIO_PH4, PH4_DATA), PINMUX_GPIO(GPIO_PH3, PH3_DATA), PINMUX_GPIO(GPIO_PH2, PH2_DATA), PINMUX_GPIO(GPIO_PH1, PH1_DATA), PINMUX_GPIO(GPIO_PH0, PH0_DATA), /* PJ */ PINMUX_GPIO(GPIO_PJ7, PJ7_DATA), PINMUX_GPIO(GPIO_PJ6, PJ6_DATA), PINMUX_GPIO(GPIO_PJ5, PJ5_DATA), PINMUX_GPIO(GPIO_PJ4, PJ4_DATA), PINMUX_GPIO(GPIO_PJ3, PJ3_DATA), PINMUX_GPIO(GPIO_PJ2, PJ2_DATA), PINMUX_GPIO(GPIO_PJ1, PJ1_DATA), PINMUX_GPIO(GPIO_PJ0, PJ0_DATA), /* PK */ PINMUX_GPIO(GPIO_PK7, PK7_DATA), PINMUX_GPIO(GPIO_PK6, PK6_DATA), PINMUX_GPIO(GPIO_PK5, PK5_DATA), PINMUX_GPIO(GPIO_PK4, PK4_DATA), PINMUX_GPIO(GPIO_PK3, PK3_DATA), PINMUX_GPIO(GPIO_PK2, PK2_DATA), PINMUX_GPIO(GPIO_PK1, PK1_DATA), PINMUX_GPIO(GPIO_PK0, PK0_DATA), /* PL */ PINMUX_GPIO(GPIO_PL7, PL7_DATA), PINMUX_GPIO(GPIO_PL6, PL6_DATA), PINMUX_GPIO(GPIO_PL5, PL5_DATA), PINMUX_GPIO(GPIO_PL4, 
PL4_DATA), PINMUX_GPIO(GPIO_PL3, PL3_DATA), PINMUX_GPIO(GPIO_PL2, PL2_DATA), PINMUX_GPIO(GPIO_PL1, PL1_DATA), PINMUX_GPIO(GPIO_PL0, PL0_DATA), /* PM */ PINMUX_GPIO(GPIO_PM1, PM1_DATA), PINMUX_GPIO(GPIO_PM0, PM0_DATA), /* PN */ PINMUX_GPIO(GPIO_PN7, PN7_DATA), PINMUX_GPIO(GPIO_PN6, PN6_DATA), PINMUX_GPIO(GPIO_PN5, PN5_DATA), PINMUX_GPIO(GPIO_PN4, PN4_DATA), PINMUX_GPIO(GPIO_PN3, PN3_DATA), PINMUX_GPIO(GPIO_PN2, PN2_DATA), PINMUX_GPIO(GPIO_PN1, PN1_DATA), PINMUX_GPIO(GPIO_PN0, PN0_DATA), /* PP */ PINMUX_GPIO(GPIO_PP5, PP5_DATA), PINMUX_GPIO(GPIO_PP4, PP4_DATA), PINMUX_GPIO(GPIO_PP3, PP3_DATA), PINMUX_GPIO(GPIO_PP2, PP2_DATA), PINMUX_GPIO(GPIO_PP1, PP1_DATA), PINMUX_GPIO(GPIO_PP0, PP0_DATA), /* PQ */ PINMUX_GPIO(GPIO_PQ4, PQ4_DATA), PINMUX_GPIO(GPIO_PQ3, PQ3_DATA), PINMUX_GPIO(GPIO_PQ2, PQ2_DATA), PINMUX_GPIO(GPIO_PQ1, PQ1_DATA), PINMUX_GPIO(GPIO_PQ0, PQ0_DATA), /* PR */ PINMUX_GPIO(GPIO_PR3, PR3_DATA), PINMUX_GPIO(GPIO_PR2, PR2_DATA), PINMUX_GPIO(GPIO_PR1, PR1_DATA), PINMUX_GPIO(GPIO_PR0, PR0_DATA), /* FN */ PINMUX_GPIO(GPIO_FN_D63_AD31, D63_AD31_MARK), PINMUX_GPIO(GPIO_FN_D62_AD30, D62_AD30_MARK), PINMUX_GPIO(GPIO_FN_D61_AD29, D61_AD29_MARK), PINMUX_GPIO(GPIO_FN_D60_AD28, D60_AD28_MARK), PINMUX_GPIO(GPIO_FN_D59_AD27, D59_AD27_MARK), PINMUX_GPIO(GPIO_FN_D58_AD26, D58_AD26_MARK), PINMUX_GPIO(GPIO_FN_D57_AD25, D57_AD25_MARK), PINMUX_GPIO(GPIO_FN_D56_AD24, D56_AD24_MARK), PINMUX_GPIO(GPIO_FN_D55_AD23, D55_AD23_MARK), PINMUX_GPIO(GPIO_FN_D54_AD22, D54_AD22_MARK), PINMUX_GPIO(GPIO_FN_D53_AD21, D53_AD21_MARK), PINMUX_GPIO(GPIO_FN_D52_AD20, D52_AD20_MARK), PINMUX_GPIO(GPIO_FN_D51_AD19, D51_AD19_MARK), PINMUX_GPIO(GPIO_FN_D50_AD18, D50_AD18_MARK), PINMUX_GPIO(GPIO_FN_D49_AD17_DB5, D49_AD17_DB5_MARK), PINMUX_GPIO(GPIO_FN_D48_AD16_DB4, D48_AD16_DB4_MARK), PINMUX_GPIO(GPIO_FN_D47_AD15_DB3, D47_AD15_DB3_MARK), PINMUX_GPIO(GPIO_FN_D46_AD14_DB2, D46_AD14_DB2_MARK), PINMUX_GPIO(GPIO_FN_D45_AD13_DB1, D45_AD13_DB1_MARK), PINMUX_GPIO(GPIO_FN_D44_AD12_DB0, D44_AD12_DB0_MARK), PINMUX_GPIO(GPIO_FN_D43_AD11_DG5, D43_AD11_DG5_MARK), PINMUX_GPIO(GPIO_FN_D42_AD10_DG4, D42_AD10_DG4_MARK), PINMUX_GPIO(GPIO_FN_D41_AD9_DG3, D41_AD9_DG3_MARK), PINMUX_GPIO(GPIO_FN_D40_AD8_DG2, D40_AD8_DG2_MARK), PINMUX_GPIO(GPIO_FN_D39_AD7_DG1, D39_AD7_DG1_MARK), PINMUX_GPIO(GPIO_FN_D38_AD6_DG0, D38_AD6_DG0_MARK), PINMUX_GPIO(GPIO_FN_D37_AD5_DR5, D37_AD5_DR5_MARK), PINMUX_GPIO(GPIO_FN_D36_AD4_DR4, D36_AD4_DR4_MARK), PINMUX_GPIO(GPIO_FN_D35_AD3_DR3, D35_AD3_DR3_MARK), PINMUX_GPIO(GPIO_FN_D34_AD2_DR2, D34_AD2_DR2_MARK), PINMUX_GPIO(GPIO_FN_D33_AD1_DR1, D33_AD1_DR1_MARK), PINMUX_GPIO(GPIO_FN_D32_AD0_DR0, D32_AD0_DR0_MARK), PINMUX_GPIO(GPIO_FN_REQ1, REQ1_MARK), PINMUX_GPIO(GPIO_FN_REQ2, REQ2_MARK), PINMUX_GPIO(GPIO_FN_REQ3, REQ3_MARK), PINMUX_GPIO(GPIO_FN_GNT1, GNT1_MARK), PINMUX_GPIO(GPIO_FN_GNT2, GNT2_MARK), PINMUX_GPIO(GPIO_FN_GNT3, GNT3_MARK), PINMUX_GPIO(GPIO_FN_MMCCLK, MMCCLK_MARK), PINMUX_GPIO(GPIO_FN_D31, D31_MARK), PINMUX_GPIO(GPIO_FN_D30, D30_MARK), PINMUX_GPIO(GPIO_FN_D29, D29_MARK), PINMUX_GPIO(GPIO_FN_D28, D28_MARK), PINMUX_GPIO(GPIO_FN_D27, D27_MARK), PINMUX_GPIO(GPIO_FN_D26, D26_MARK), PINMUX_GPIO(GPIO_FN_D25, D25_MARK), PINMUX_GPIO(GPIO_FN_D24, D24_MARK), PINMUX_GPIO(GPIO_FN_D23, D23_MARK), PINMUX_GPIO(GPIO_FN_D22, D22_MARK), PINMUX_GPIO(GPIO_FN_D21, D21_MARK), PINMUX_GPIO(GPIO_FN_D20, D20_MARK), PINMUX_GPIO(GPIO_FN_D19, D19_MARK), PINMUX_GPIO(GPIO_FN_D18, D18_MARK), PINMUX_GPIO(GPIO_FN_D17, D17_MARK), PINMUX_GPIO(GPIO_FN_D16, D16_MARK), PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK), PINMUX_GPIO(GPIO_FN_SCIF1_RXD, 
SCIF1_RXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK), PINMUX_GPIO(GPIO_FN_INTD, INTD_MARK), PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK), PINMUX_GPIO(GPIO_FN_HSPI_CS, HSPI_CS_MARK), PINMUX_GPIO(GPIO_FN_FSE, FSE_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK), PINMUX_GPIO(GPIO_FN_HSPI_CLK, HSPI_CLK_MARK), PINMUX_GPIO(GPIO_FN_FRE, FRE_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK), PINMUX_GPIO(GPIO_FN_HSPI_RX, HSPI_RX_MARK), PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK), PINMUX_GPIO(GPIO_FN_HSPI_TX, HSPI_TX_MARK), PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK), PINMUX_GPIO(GPIO_FN_SCIF5_TXD, SCIF5_TXD_MARK), PINMUX_GPIO(GPIO_FN_HAC1_SYNC, HAC1_SYNC_MARK), PINMUX_GPIO(GPIO_FN_SSI1_WS, SSI1_WS_MARK), PINMUX_GPIO(GPIO_FN_SIOF_TXD_PJ, SIOF_TXD_PJ_MARK), PINMUX_GPIO(GPIO_FN_HAC0_SDOUT, HAC0_SDOUT_MARK), PINMUX_GPIO(GPIO_FN_SSI0_SDATA, SSI0_SDATA_MARK), PINMUX_GPIO(GPIO_FN_SIOF_RXD_PJ, SIOF_RXD_PJ_MARK), PINMUX_GPIO(GPIO_FN_HAC0_SDIN, HAC0_SDIN_MARK), PINMUX_GPIO(GPIO_FN_SSI0_SCK, SSI0_SCK_MARK), PINMUX_GPIO(GPIO_FN_SIOF_SYNC_PJ, SIOF_SYNC_PJ_MARK), PINMUX_GPIO(GPIO_FN_HAC0_SYNC, HAC0_SYNC_MARK), PINMUX_GPIO(GPIO_FN_SSI0_WS, SSI0_WS_MARK), PINMUX_GPIO(GPIO_FN_SIOF_MCLK_PJ, SIOF_MCLK_PJ_MARK), PINMUX_GPIO(GPIO_FN_HAC_RES, HAC_RES_MARK), PINMUX_GPIO(GPIO_FN_SIOF_SCK_PJ, SIOF_SCK_PJ_MARK), PINMUX_GPIO(GPIO_FN_HAC0_BITCLK, HAC0_BITCLK_MARK), PINMUX_GPIO(GPIO_FN_SSI0_CLK, SSI0_CLK_MARK), PINMUX_GPIO(GPIO_FN_HAC1_BITCLK, HAC1_BITCLK_MARK), PINMUX_GPIO(GPIO_FN_SSI1_CLK, SSI1_CLK_MARK), PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK), PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK), PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK), PINMUX_GPIO(GPIO_FN_DRAK0_PK3, DRAK0_PK3_MARK), PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK), PINMUX_GPIO(GPIO_FN_DRAK1_PK2, DRAK1_PK2_MARK), PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK), PINMUX_GPIO(GPIO_FN_SCIF2_TXD, SCIF2_TXD_MARK), PINMUX_GPIO(GPIO_FN_MMCCMD, MMCCMD_MARK), PINMUX_GPIO(GPIO_FN_SIOF_TXD_PK, SIOF_TXD_PK_MARK), PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK), PINMUX_GPIO(GPIO_FN_SCIF2_SCK, SCIF2_SCK_MARK), PINMUX_GPIO(GPIO_FN_MMCDAT, MMCDAT_MARK), PINMUX_GPIO(GPIO_FN_SIOF_SCK_PK, SIOF_SCK_PK_MARK), PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK), PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK), PINMUX_GPIO(GPIO_FN_DRAK0_PK1, DRAK0_PK1_MARK), PINMUX_GPIO(GPIO_FN_DRAK1_PK0, DRAK1_PK0_MARK), PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK), PINMUX_GPIO(GPIO_FN_INTB, INTB_MARK), PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK), PINMUX_GPIO(GPIO_FN_INTC, INTC_MARK), PINMUX_GPIO(GPIO_FN_DRAK2, DRAK2_MARK), PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK), PINMUX_GPIO(GPIO_FN_IRL4, IRL4_MARK), PINMUX_GPIO(GPIO_FN_FD4, FD4_MARK), PINMUX_GPIO(GPIO_FN_IRL5, IRL5_MARK), PINMUX_GPIO(GPIO_FN_FD5, FD5_MARK), PINMUX_GPIO(GPIO_FN_IRL6, IRL6_MARK), PINMUX_GPIO(GPIO_FN_FD6, FD6_MARK), PINMUX_GPIO(GPIO_FN_IRL7, IRL7_MARK), PINMUX_GPIO(GPIO_FN_FD7, FD7_MARK), PINMUX_GPIO(GPIO_FN_DRAK3, DRAK3_MARK), PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK), PINMUX_GPIO(GPIO_FN_BREQ_BSACK, BREQ_BSACK_MARK), PINMUX_GPIO(GPIO_FN_BACK_BSREQ, BACK_BSREQ_MARK), PINMUX_GPIO(GPIO_FN_SCIF5_RXD, SCIF5_RXD_MARK), PINMUX_GPIO(GPIO_FN_HAC1_SDIN, HAC1_SDIN_MARK), PINMUX_GPIO(GPIO_FN_SSI1_SCK, SSI1_SCK_MARK), PINMUX_GPIO(GPIO_FN_SCIF5_SCK, SCIF5_SCK_MARK), PINMUX_GPIO(GPIO_FN_HAC1_SDOUT, HAC1_SDOUT_MARK), PINMUX_GPIO(GPIO_FN_SSI1_SDATA, SSI1_SDATA_MARK), PINMUX_GPIO(GPIO_FN_SCIF3_TXD, SCIF3_TXD_MARK), PINMUX_GPIO(GPIO_FN_FCLE, FCLE_MARK), PINMUX_GPIO(GPIO_FN_SCIF3_RXD, 
SCIF3_RXD_MARK), PINMUX_GPIO(GPIO_FN_FALE, FALE_MARK), PINMUX_GPIO(GPIO_FN_SCIF3_SCK, SCIF3_SCK_MARK), PINMUX_GPIO(GPIO_FN_FD0, FD0_MARK), PINMUX_GPIO(GPIO_FN_SCIF4_TXD, SCIF4_TXD_MARK), PINMUX_GPIO(GPIO_FN_FD1, FD1_MARK), PINMUX_GPIO(GPIO_FN_SCIF4_RXD, SCIF4_RXD_MARK), PINMUX_GPIO(GPIO_FN_FD2, FD2_MARK), PINMUX_GPIO(GPIO_FN_SCIF4_SCK, SCIF4_SCK_MARK), PINMUX_GPIO(GPIO_FN_FD3, FD3_MARK), PINMUX_GPIO(GPIO_FN_DEVSEL_DCLKOUT, DEVSEL_DCLKOUT_MARK), PINMUX_GPIO(GPIO_FN_STOP_CDE, STOP_CDE_MARK), PINMUX_GPIO(GPIO_FN_LOCK_ODDF, LOCK_ODDF_MARK), PINMUX_GPIO(GPIO_FN_TRDY_DISPL, TRDY_DISPL_MARK), PINMUX_GPIO(GPIO_FN_IRDY_HSYNC, IRDY_HSYNC_MARK), PINMUX_GPIO(GPIO_FN_PCIFRAME_VSYNC, PCIFRAME_VSYNC_MARK), PINMUX_GPIO(GPIO_FN_INTA, INTA_MARK), PINMUX_GPIO(GPIO_FN_GNT0_GNTIN, GNT0_GNTIN_MARK), PINMUX_GPIO(GPIO_FN_REQ0_REQOUT, REQ0_REQOUT_MARK), PINMUX_GPIO(GPIO_FN_PERR, PERR_MARK), PINMUX_GPIO(GPIO_FN_SERR, SERR_MARK), PINMUX_GPIO(GPIO_FN_WE7_CBE3, WE7_CBE3_MARK), PINMUX_GPIO(GPIO_FN_WE6_CBE2, WE6_CBE2_MARK), PINMUX_GPIO(GPIO_FN_WE5_CBE1, WE5_CBE1_MARK), PINMUX_GPIO(GPIO_FN_WE4_CBE0, WE4_CBE0_MARK), PINMUX_GPIO(GPIO_FN_SCIF2_RXD, SCIF2_RXD_MARK), PINMUX_GPIO(GPIO_FN_SIOF_RXD, SIOF_RXD_MARK), PINMUX_GPIO(GPIO_FN_MRESETOUT, MRESETOUT_MARK), PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK), }; static struct pinmux_cfg_reg pinmux_config_regs[] = { { PINMUX_CFG_REG("PACR", 0xffe70000, 16, 2) { PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU, PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU, PA5_FN, PA5_OUT, PA5_IN, PA5_IN_PU, PA4_FN, PA4_OUT, PA4_IN, PA4_IN_PU, PA3_FN, PA3_OUT, PA3_IN, PA3_IN_PU, PA2_FN, PA2_OUT, PA2_IN, PA2_IN_PU, PA1_FN, PA1_OUT, PA1_IN, PA1_IN_PU, PA0_FN, PA0_OUT, PA0_IN, PA0_IN_PU } }, { PINMUX_CFG_REG("PBCR", 0xffe70002, 16, 2) { PB7_FN, PB7_OUT, PB7_IN, PB7_IN_PU, PB6_FN, PB6_OUT, PB6_IN, PB6_IN_PU, PB5_FN, PB5_OUT, PB5_IN, PB5_IN_PU, PB4_FN, PB4_OUT, PB4_IN, PB4_IN_PU, PB3_FN, PB3_OUT, PB3_IN, PB3_IN_PU, PB2_FN, PB2_OUT, PB2_IN, PB2_IN_PU, PB1_FN, PB1_OUT, PB1_IN, PB1_IN_PU, PB0_FN, PB0_OUT, PB0_IN, PB0_IN_PU } }, { PINMUX_CFG_REG("PCCR", 0xffe70004, 16, 2) { PC7_FN, PC7_OUT, PC7_IN, PC7_IN_PU, PC6_FN, PC6_OUT, PC6_IN, PC6_IN_PU, PC5_FN, PC5_OUT, PC5_IN, PC5_IN_PU, PC4_FN, PC4_OUT, PC4_IN, PC4_IN_PU, PC3_FN, PC3_OUT, PC3_IN, PC3_IN_PU, PC2_FN, PC2_OUT, PC2_IN, PC2_IN_PU, PC1_FN, PC1_OUT, PC1_IN, PC1_IN_PU, PC0_FN, PC0_OUT, PC0_IN, PC0_IN_PU } }, { PINMUX_CFG_REG("PDCR", 0xffe70006, 16, 2) { PD7_FN, PD7_OUT, PD7_IN, PD7_IN_PU, PD6_FN, PD6_OUT, PD6_IN, PD6_IN_PU, PD5_FN, PD5_OUT, PD5_IN, PD5_IN_PU, PD4_FN, PD4_OUT, PD4_IN, PD4_IN_PU, PD3_FN, PD3_OUT, PD3_IN, PD3_IN_PU, PD2_FN, PD2_OUT, PD2_IN, PD2_IN_PU, PD1_FN, PD1_OUT, PD1_IN, PD1_IN_PU, PD0_FN, PD0_OUT, PD0_IN, PD0_IN_PU } }, { PINMUX_CFG_REG("PECR", 0xffe70008, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, PE5_FN, PE5_OUT, PE5_IN, PE5_IN_PU, PE4_FN, PE4_OUT, PE4_IN, PE4_IN_PU, PE3_FN, PE3_OUT, PE3_IN, PE3_IN_PU, PE2_FN, PE2_OUT, PE2_IN, PE2_IN_PU, PE1_FN, PE1_OUT, PE1_IN, PE1_IN_PU, PE0_FN, PE0_OUT, PE0_IN, PE0_IN_PU } }, { PINMUX_CFG_REG("PFCR", 0xffe7000a, 16, 2) { PF7_FN, PF7_OUT, PF7_IN, PF7_IN_PU, PF6_FN, PF6_OUT, PF6_IN, PF6_IN_PU, PF5_FN, PF5_OUT, PF5_IN, PF5_IN_PU, PF4_FN, PF4_OUT, PF4_IN, PF4_IN_PU, PF3_FN, PF3_OUT, PF3_IN, PF3_IN_PU, PF2_FN, PF2_OUT, PF2_IN, PF2_IN_PU, PF1_FN, PF1_OUT, PF1_IN, PF1_IN_PU, PF0_FN, PF0_OUT, PF0_IN, PF0_IN_PU } }, { PINMUX_CFG_REG("PGCR", 0xffe7000c, 16, 2) { PG7_FN, PG7_OUT, PG7_IN, PG7_IN_PU, PG6_FN, PG6_OUT, PG6_IN, PG6_IN_PU, PG5_FN, PG5_OUT, PG5_IN, PG5_IN_PU, PG4_FN, PG4_OUT, PG4_IN, PG4_IN_PU, PG3_FN, PG3_OUT, PG3_IN, PG3_IN_PU, 
PG2_FN, PG2_OUT, PG2_IN, PG2_IN_PU, PG1_FN, PG1_OUT, PG1_IN, PG1_IN_PU, PG0_FN, PG0_OUT, PG0_IN, PG0_IN_PU } }, { PINMUX_CFG_REG("PHCR", 0xffe7000e, 16, 2) { PH7_FN, PH7_OUT, PH7_IN, PH7_IN_PU, PH6_FN, PH6_OUT, PH6_IN, PH6_IN_PU, PH5_FN, PH5_OUT, PH5_IN, PH5_IN_PU, PH4_FN, PH4_OUT, PH4_IN, PH4_IN_PU, PH3_FN, PH3_OUT, PH3_IN, PH3_IN_PU, PH2_FN, PH2_OUT, PH2_IN, PH2_IN_PU, PH1_FN, PH1_OUT, PH1_IN, PH1_IN_PU, PH0_FN, PH0_OUT, PH0_IN, PH0_IN_PU } }, { PINMUX_CFG_REG("PJCR", 0xffe70010, 16, 2) { PJ7_FN, PJ7_OUT, PJ7_IN, PJ7_IN_PU, PJ6_FN, PJ6_OUT, PJ6_IN, PJ6_IN_PU, PJ5_FN, PJ5_OUT, PJ5_IN, PJ5_IN_PU, PJ4_FN, PJ4_OUT, PJ4_IN, PJ4_IN_PU, PJ3_FN, PJ3_OUT, PJ3_IN, PJ3_IN_PU, PJ2_FN, PJ2_OUT, PJ2_IN, PJ2_IN_PU, PJ1_FN, PJ1_OUT, PJ1_IN, PJ1_IN_PU, PJ0_FN, PJ0_OUT, PJ0_IN, PJ0_IN_PU } }, { PINMUX_CFG_REG("PKCR", 0xffe70012, 16, 2) { PK7_FN, PK7_OUT, PK7_IN, PK7_IN_PU, PK6_FN, PK6_OUT, PK6_IN, PK6_IN_PU, PK5_FN, PK5_OUT, PK5_IN, PK5_IN_PU, PK4_FN, PK4_OUT, PK4_IN, PK4_IN_PU, PK3_FN, PK3_OUT, PK3_IN, PK3_IN_PU, PK2_FN, PK2_OUT, PK2_IN, PK2_IN_PU, PK1_FN, PK1_OUT, PK1_IN, PK1_IN_PU, PK0_FN, PK0_OUT, PK0_IN, PK0_IN_PU } }, { PINMUX_CFG_REG("PLCR", 0xffe70014, 16, 2) { PL7_FN, PL7_OUT, PL7_IN, PL7_IN_PU, PL6_FN, PL6_OUT, PL6_IN, PL6_IN_PU, PL5_FN, PL5_OUT, PL5_IN, PL5_IN_PU, PL4_FN, PL4_OUT, PL4_IN, PL4_IN_PU, PL3_FN, PL3_OUT, PL3_IN, PL3_IN_PU, PL2_FN, PL2_OUT, PL2_IN, PL2_IN_PU, PL1_FN, PL1_OUT, PL1_IN, PL1_IN_PU, PL0_FN, PL0_OUT, PL0_IN, PL0_IN_PU } }, { PINMUX_CFG_REG("PMCR", 0xffe70016, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PM1_FN, PM1_OUT, PM1_IN, PM1_IN_PU, PM0_FN, PM0_OUT, PM0_IN, PM0_IN_PU } }, { PINMUX_CFG_REG("PNCR", 0xffe70018, 16, 2) { PN7_FN, PN7_OUT, PN7_IN, PN7_IN_PU, PN6_FN, PN6_OUT, PN6_IN, PN6_IN_PU, PN5_FN, PN5_OUT, PN5_IN, PN5_IN_PU, PN4_FN, PN4_OUT, PN4_IN, PN4_IN_PU, PN3_FN, PN3_OUT, PN3_IN, PN3_IN_PU, PN2_FN, PN2_OUT, PN2_IN, PN2_IN_PU, PN1_FN, PN1_OUT, PN1_IN, PN1_IN_PU, PN0_FN, PN0_OUT, PN0_IN, PN0_IN_PU } }, { PINMUX_CFG_REG("PPCR", 0xffe7001a, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, PP5_FN, PP5_OUT, PP5_IN, PP5_IN_PU, PP4_FN, PP4_OUT, PP4_IN, PP4_IN_PU, PP3_FN, PP3_OUT, PP3_IN, PP3_IN_PU, PP2_FN, PP2_OUT, PP2_IN, PP2_IN_PU, PP1_FN, PP1_OUT, PP1_IN, PP1_IN_PU, PP0_FN, PP0_OUT, PP0_IN, PP0_IN_PU } }, { PINMUX_CFG_REG("PQCR", 0xffe7001c, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PQ4_FN, PQ4_OUT, PQ4_IN, PQ4_IN_PU, PQ3_FN, PQ3_OUT, PQ3_IN, PQ3_IN_PU, PQ2_FN, PQ2_OUT, PQ2_IN, PQ2_IN_PU, PQ1_FN, PQ1_OUT, PQ1_IN, PQ1_IN_PU, PQ0_FN, PQ0_OUT, PQ0_IN, PQ0_IN_PU } }, { PINMUX_CFG_REG("PRCR", 0xffe7001e, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PR3_FN, PR3_OUT, PR3_IN, PR3_IN_PU, PR2_FN, PR2_OUT, PR2_IN, PR2_IN_PU, PR1_FN, PR1_OUT, PR1_IN, PR1_IN_PU, PR0_FN, PR0_OUT, PR0_IN, PR0_IN_PU } }, { PINMUX_CFG_REG("P1MSELR", 0xffe70080, 16, 1) { P1MSEL15_0, P1MSEL15_1, P1MSEL14_0, P1MSEL14_1, P1MSEL13_0, P1MSEL13_1, P1MSEL12_0, P1MSEL12_1, P1MSEL11_0, P1MSEL11_1, P1MSEL10_0, P1MSEL10_1, P1MSEL9_0, P1MSEL9_1, P1MSEL8_0, P1MSEL8_1, P1MSEL7_0, P1MSEL7_1, P1MSEL6_0, P1MSEL6_1, P1MSEL5_0, 0, P1MSEL4_0, P1MSEL4_1, P1MSEL3_0, P1MSEL3_1, P1MSEL2_0, P1MSEL2_1, P1MSEL1_0, P1MSEL1_1, P1MSEL0_0, P1MSEL0_1 } }, { PINMUX_CFG_REG("P2MSELR", 0xffe70082, 16, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, P2MSEL2_0, P2MSEL2_1, P2MSEL1_0, P2MSEL1_1, P2MSEL0_0, P2MSEL0_1 } }, {} }; static struct pinmux_data_reg pinmux_data_regs[] = { { PINMUX_DATA_REG("PADR", 0xffe70020, 8) { PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA, PA3_DATA, 
PA2_DATA, PA1_DATA, PA0_DATA } }, { PINMUX_DATA_REG("PBDR", 0xffe70022, 8) { PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA, PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA } }, { PINMUX_DATA_REG("PCDR", 0xffe70024, 8) { PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA, PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA } }, { PINMUX_DATA_REG("PDDR", 0xffe70026, 8) { PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA, PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA } }, { PINMUX_DATA_REG("PEDR", 0xffe70028, 8) { 0, 0, PE5_DATA, PE4_DATA, PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA } }, { PINMUX_DATA_REG("PFDR", 0xffe7002a, 8) { PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA, PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA } }, { PINMUX_DATA_REG("PGDR", 0xffe7002c, 8) { PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA, PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA } }, { PINMUX_DATA_REG("PHDR", 0xffe7002e, 8) { PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA, PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA } }, { PINMUX_DATA_REG("PJDR", 0xffe70030, 8) { PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA, PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA } }, { PINMUX_DATA_REG("PKDR", 0xffe70032, 8) { PK7_DATA, PK6_DATA, PK5_DATA, PK4_DATA, PK3_DATA, PK2_DATA, PK1_DATA, PK0_DATA } }, { PINMUX_DATA_REG("PLDR", 0xffe70034, 8) { PL7_DATA, PL6_DATA, PL5_DATA, PL4_DATA, PL3_DATA, PL2_DATA, PL1_DATA, PL0_DATA } }, { PINMUX_DATA_REG("PMDR", 0xffe70036, 8) { 0, 0, 0, 0, 0, 0, PM1_DATA, PM0_DATA } }, { PINMUX_DATA_REG("PNDR", 0xffe70038, 8) { PN7_DATA, PN6_DATA, PN5_DATA, PN4_DATA, PN3_DATA, PN2_DATA, PN1_DATA, PN0_DATA } }, { PINMUX_DATA_REG("PPDR", 0xffe7003a, 8) { 0, 0, PP5_DATA, PP4_DATA, PP3_DATA, PP2_DATA, PP1_DATA, PP0_DATA } }, { PINMUX_DATA_REG("PQDR", 0xffe7003c, 8) { 0, 0, 0, PQ4_DATA, PQ3_DATA, PQ2_DATA, PQ1_DATA, PQ0_DATA } }, { PINMUX_DATA_REG("PRDR", 0xffe7003e, 8) { 0, 0, 0, 0, PR3_DATA, PR2_DATA, PR1_DATA, PR0_DATA } }, { }, }; static struct pinmux_info sh7785_pinmux_info = { .name = "sh7785_pfc", .reserved_id = PINMUX_RESERVED, .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END }, .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END }, .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END }, .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END }, .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END }, .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, .first_gpio = GPIO_PA7, .last_gpio = GPIO_FN_IRQOUT, .gpios = pinmux_gpios, .cfg_regs = pinmux_config_regs, .data_regs = pinmux_data_regs, .gpio_data = pinmux_data, .gpio_data_size = ARRAY_SIZE(pinmux_data), }; static int __init plat_pinmux_setup(void) { return register_pinmux(&sh7785_pinmux_info); } arch_initcall(plat_pinmux_setup);
gpl-2.0
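For orientation on the tables above: an entry such as PINMUX_CFG_REG("PACR", 0xffe70000, 16, 2) describes a 16-bit port control register holding one 2-bit mode field per pin, listed MSB-first, with the four listed states (function, output, input, input with pull-up) taken here to map to field values 0 through 3. A minimal standalone sketch of that packing under those assumptions; the pin_mode enum and both helpers are illustrative, not part of the driver:

#include <stdint.h>

enum pin_mode { PIN_FN = 0, PIN_OUT = 1, PIN_IN = 2, PIN_IN_PU = 3 };

/* Pin 0 occupies bits 1:0 and pin 7 bits 15:14, matching the MSB-first
   ordering of the PACR field list above. */
static enum pin_mode pacr_get_mode(uint16_t pacr, unsigned int pin)
{
	return (enum pin_mode)((pacr >> (pin * 2)) & 0x3);
}

static uint16_t pacr_set_mode(uint16_t pacr, unsigned int pin, enum pin_mode m)
{
	pacr &= (uint16_t)~(0x3u << (pin * 2));	/* clear the old 2-bit field */
	return (uint16_t)(pacr | ((unsigned int)m << (pin * 2)));
}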
zhaojinxin409/shooter-player
src/filters/transform/mpcvideodec/ffmpeg/libavcodec/wmadec.c
18
30872
/* * WMA compatible decoder * Copyright (c) 2002 The FFmpeg Project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file libavcodec/wmadec.c * WMA compatible decoder. * This decoder handles Microsoft Windows Media Audio data, versions 1 & 2. * WMA v1 is identified by audio format 0x160 in Microsoft media files * (ASF/AVI/WAV). WMA v2 is identified by audio format 0x161. * * To use this decoder, a calling application must supply the extra data * bytes provided with the WMA data. These are the extra, codec-specific * bytes at the end of a WAVEFORMATEX data structure. Transmit these bytes * to the decoder using the extradata[_size] fields in AVCodecContext. There * should be 4 extra bytes for v1 data and 6 extra bytes for v2 data. */ #include "avcodec.h" #include "wma.h" #undef NDEBUG #include <assert.h> #define EXPVLCBITS 8 #define EXPMAX ((19+EXPVLCBITS-1)/EXPVLCBITS) #define HGAINVLCBITS 9 #define HGAINMAX ((13+HGAINVLCBITS-1)/HGAINVLCBITS) static void wma_lsp_to_curve_init(WMACodecContext *s, int frame_len); #ifdef TRACE static void dump_shorts(WMACodecContext *s, const char *name, const short *tab, int n) { int i; tprintf(s->avctx, "%s[%d]:\n", name, n); for(i=0;i<n;i++) { if ((i & 7) == 0) tprintf(s->avctx, "%4d: ", i); tprintf(s->avctx, " %5d.0", tab[i]); if ((i & 7) == 7) tprintf(s->avctx, "\n"); } } static void dump_floats(WMACodecContext *s, const char *name, int prec, const float *tab, int n) { int i; tprintf(s->avctx, "%s[%d]:\n", name, n); for(i=0;i<n;i++) { if ((i & 7) == 0) tprintf(s->avctx, "%4d: ", i); tprintf(s->avctx, " %8.*f", prec, tab[i]); if ((i & 7) == 7) tprintf(s->avctx, "\n"); } if ((i & 7) != 0) tprintf(s->avctx, "\n"); } #endif static int wma_decode_init(AVCodecContext * avctx) { WMACodecContext *s = avctx->priv_data; int i, flags2; uint8_t *extradata; s->avctx = avctx; /* extract flag infos */ flags2 = 0; extradata = avctx->extradata; if (avctx->codec->id == CODEC_ID_WMAV1 && avctx->extradata_size >= 4) { flags2 = AV_RL16(extradata+2); } else if (avctx->codec->id == CODEC_ID_WMAV2 && avctx->extradata_size >= 6) { flags2 = AV_RL16(extradata+4); } // for(i=0; i<avctx->extradata_size; i++) // av_log(NULL, AV_LOG_ERROR, "%02X ", extradata[i]); s->use_exp_vlc = flags2 & 0x0001; s->use_bit_reservoir = flags2 & 0x0002; s->use_variable_block_len = flags2 & 0x0004; if(ff_wma_init(avctx, flags2)<0) return -1; /* init MDCT */ for(i = 0; i < s->nb_block_sizes; i++) ff_mdct_init(&s->mdct_ctx[i], s->frame_len_bits - i + 1, 1, 1.0); if (s->use_noise_coding) { init_vlc(&s->hgain_vlc, HGAINVLCBITS, sizeof(ff_wma_hgain_huffbits), ff_wma_hgain_huffbits, 1, 1, ff_wma_hgain_huffcodes, 2, 2, 0); } if (s->use_exp_vlc) { init_vlc(&s->exp_vlc, EXPVLCBITS, sizeof(ff_wma_scale_huffbits), //FIXME move out of context ff_wma_scale_huffbits, 1, 1, ff_wma_scale_huffcodes, 4, 4, 0); } else { 
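/* No exponent VLC was signalled in flags2 (use_exp_vlc is 0), so
 * exponents will arrive coded as LSP coefficients instead; precompute
 * the cosine and x^-0.25 tables that wma_lsp_to_curve() relies on. */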
wma_lsp_to_curve_init(s, s->frame_len); } avctx->sample_fmt = SAMPLE_FMT_S16; return 0; } /** * compute x^-0.25 with an exponent and mantissa table. We use linear * interpolation to reduce the mantissa table size at a small speed * expense (linear interpolation approximately doubles the number of * bits of precision). */ static inline float pow_m1_4(WMACodecContext *s, float x) { union { float f; unsigned int v; } u, t; unsigned int e, m; float a, b; u.f = x; e = u.v >> 23; m = (u.v >> (23 - LSP_POW_BITS)) & ((1 << LSP_POW_BITS) - 1); /* build interpolation scale: 1 <= t < 2. */ t.v = ((u.v << LSP_POW_BITS) & ((1 << 23) - 1)) | (127 << 23); a = s->lsp_pow_m_table1[m]; b = s->lsp_pow_m_table2[m]; return s->lsp_pow_e_table[e] * (a + b * t.f); } static void wma_lsp_to_curve_init(WMACodecContext *s, int frame_len) { float wdel, a, b; int i, e, m; wdel = M_PI / frame_len; for(i=0;i<frame_len;i++) s->lsp_cos_table[i] = 2.0f * cos(wdel * i); /* tables for x^-0.25 computation */ for(i=0;i<256;i++) { e = i - 126; s->lsp_pow_e_table[i] = pow(2.0, e * -0.25); } /* NOTE: these two tables are needed to avoid two operations in pow_m1_4 */ b = 1.0; for(i=(1 << LSP_POW_BITS) - 1;i>=0;i--) { m = (1 << LSP_POW_BITS) + i; a = (float)m * (0.5 / (1 << LSP_POW_BITS)); a = pow(a, -0.25); s->lsp_pow_m_table1[i] = 2 * a - b; s->lsp_pow_m_table2[i] = b - a; b = a; } #if 0 for(i=1;i<20;i++) { float v, r1, r2; v = 5.0 / i; r1 = pow_m1_4(s, v); r2 = pow(v,-0.25); printf("%f^-0.25=%f e=%f\n", v, r1, r2 - r1); } #endif } /** * NOTE: We use the same code as Vorbis here * @todo optimize it further with SSE/3Dnow */ static void wma_lsp_to_curve(WMACodecContext *s, float *out, float *val_max_ptr, int n, float *lsp) { int i, j; float p, q, w, v, val_max; val_max = 0; for(i=0;i<n;i++) { p = 0.5f; q = 0.5f; w = s->lsp_cos_table[i]; for(j=1;j<NB_LSP_COEFS;j+=2){ q *= w - lsp[j - 1]; p *= w - lsp[j]; } p *= p * (2.0f - w); q *= q * (2.0f + w); v = p + q; v = pow_m1_4(s, v); if (v > val_max) val_max = v; out[i] = v; } *val_max_ptr = val_max; } /** * decode exponents coded with LSP coefficients (same idea as Vorbis) */ static void decode_exp_lsp(WMACodecContext *s, int ch) { float lsp_coefs[NB_LSP_COEFS]; int val, i; for(i = 0; i < NB_LSP_COEFS; i++) { if (i == 0 || i >= 8) val = get_bits(&s->gb, 3); else val = get_bits(&s->gb, 4); lsp_coefs[i] = ff_wma_lsp_codebook[i][val]; } wma_lsp_to_curve(s, s->exponents[ch], &s->max_exponent[ch], s->block_len, lsp_coefs); } /** pow(10, i / 16.0) for i in -60..67 */ static const float pow_tab[128] = { 1.7782794100389e-04, 2.0535250264571e-04, 2.3713737056617e-04, 2.7384196342644e-04, 3.1622776601684e-04, 3.6517412725484e-04, 4.2169650342858e-04, 4.8696752516586e-04, 5.6234132519035e-04, 6.4938163157621e-04, 7.4989420933246e-04, 8.6596432336006e-04, 1.0000000000000e-03, 1.1547819846895e-03, 1.3335214321633e-03, 1.5399265260595e-03, 1.7782794100389e-03, 2.0535250264571e-03, 2.3713737056617e-03, 2.7384196342644e-03, 3.1622776601684e-03, 3.6517412725484e-03, 4.2169650342858e-03, 4.8696752516586e-03, 5.6234132519035e-03, 6.4938163157621e-03, 7.4989420933246e-03, 8.6596432336006e-03, 1.0000000000000e-02, 1.1547819846895e-02, 1.3335214321633e-02, 1.5399265260595e-02, 1.7782794100389e-02, 2.0535250264571e-02, 2.3713737056617e-02, 2.7384196342644e-02, 3.1622776601684e-02, 3.6517412725484e-02, 4.2169650342858e-02, 4.8696752516586e-02, 5.6234132519035e-02, 6.4938163157621e-02, 7.4989420933246e-02, 8.6596432336007e-02, 1.0000000000000e-01, 1.1547819846895e-01, 1.3335214321633e-01, 1.5399265260595e-01, 
1.7782794100389e-01, 2.0535250264571e-01, 2.3713737056617e-01, 2.7384196342644e-01, 3.1622776601684e-01, 3.6517412725484e-01, 4.2169650342858e-01, 4.8696752516586e-01, 5.6234132519035e-01, 6.4938163157621e-01, 7.4989420933246e-01, 8.6596432336007e-01, 1.0000000000000e+00, 1.1547819846895e+00, 1.3335214321633e+00, 1.5399265260595e+00, 1.7782794100389e+00, 2.0535250264571e+00, 2.3713737056617e+00, 2.7384196342644e+00, 3.1622776601684e+00, 3.6517412725484e+00, 4.2169650342858e+00, 4.8696752516586e+00, 5.6234132519035e+00, 6.4938163157621e+00, 7.4989420933246e+00, 8.6596432336007e+00, 1.0000000000000e+01, 1.1547819846895e+01, 1.3335214321633e+01, 1.5399265260595e+01, 1.7782794100389e+01, 2.0535250264571e+01, 2.3713737056617e+01, 2.7384196342644e+01, 3.1622776601684e+01, 3.6517412725484e+01, 4.2169650342858e+01, 4.8696752516586e+01, 5.6234132519035e+01, 6.4938163157621e+01, 7.4989420933246e+01, 8.6596432336007e+01, 1.0000000000000e+02, 1.1547819846895e+02, 1.3335214321633e+02, 1.5399265260595e+02, 1.7782794100389e+02, 2.0535250264571e+02, 2.3713737056617e+02, 2.7384196342644e+02, 3.1622776601684e+02, 3.6517412725484e+02, 4.2169650342858e+02, 4.8696752516586e+02, 5.6234132519035e+02, 6.4938163157621e+02, 7.4989420933246e+02, 8.6596432336007e+02, 1.0000000000000e+03, 1.1547819846895e+03, 1.3335214321633e+03, 1.5399265260595e+03, 1.7782794100389e+03, 2.0535250264571e+03, 2.3713737056617e+03, 2.7384196342644e+03, 3.1622776601684e+03, 3.6517412725484e+03, 4.2169650342858e+03, 4.8696752516586e+03, 5.6234132519035e+03, 6.4938163157621e+03, 7.4989420933246e+03, 8.6596432336007e+03, 1.0000000000000e+04, 1.1547819846895e+04, 1.3335214321633e+04, 1.5399265260595e+04, }; /** * decode exponents coded with VLC codes */ static int decode_exp_vlc(WMACodecContext *s, int ch) { int last_exp, n, code; const uint16_t *ptr; float v, max_scale; uint32_t *q, *q_end, iv; const float *ptab = pow_tab + 60; const uint32_t *iptab = (const uint32_t*)ptab; ptr = s->exponent_bands[s->frame_len_bits - s->block_len_bits]; q = (uint32_t *)s->exponents[ch]; q_end = q + s->block_len; max_scale = 0; if (s->version == 1) { last_exp = get_bits(&s->gb, 5) + 10; v = ptab[last_exp]; iv = iptab[last_exp]; max_scale = v; n = *ptr++; switch (n & 3) do { case 0: *q++ = iv; case 3: *q++ = iv; case 2: *q++ = iv; case 1: *q++ = iv; } while ((n -= 4) > 0); }else last_exp = 36; while (q < q_end) { code = get_vlc2(&s->gb, s->exp_vlc.table, EXPVLCBITS, EXPMAX); if (code < 0) return -1; /* NOTE: this offset is the same as MPEG4 AAC ! */ last_exp += code - 60; if ((unsigned)last_exp + 60 > FF_ARRAY_ELEMS(pow_tab)) return -1; v = ptab[last_exp]; iv = iptab[last_exp]; if (v > max_scale) max_scale = v; n = *ptr++; switch (n & 3) do { case 0: *q++ = iv; case 3: *q++ = iv; case 2: *q++ = iv; case 1: *q++ = iv; } while ((n -= 4) > 0); } s->max_exponent[ch] = max_scale; return 0; } /** * Apply MDCT window and add into output. * * We ensure that when the windows overlap their squared sum * is always 1 (MDCT reconstruction rule). 
*/ static void wma_window(WMACodecContext *s, float *out) { float *in = s->output; int block_len, bsize, n; /* left part */ if (s->block_len_bits <= s->prev_block_len_bits) { block_len = s->block_len; bsize = s->frame_len_bits - s->block_len_bits; s->dsp.vector_fmul_add(out, in, s->windows[bsize], out, block_len); } else { block_len = 1 << s->prev_block_len_bits; n = (s->block_len - block_len) / 2; bsize = s->frame_len_bits - s->prev_block_len_bits; s->dsp.vector_fmul_add(out+n, in+n, s->windows[bsize], out+n, block_len); memcpy(out+n+block_len, in+n+block_len, n*sizeof(float)); } out += s->block_len; in += s->block_len; /* right part */ if (s->block_len_bits <= s->next_block_len_bits) { block_len = s->block_len; bsize = s->frame_len_bits - s->block_len_bits; s->dsp.vector_fmul_reverse(out, in, s->windows[bsize], block_len); } else { block_len = 1 << s->next_block_len_bits; n = (s->block_len - block_len) / 2; bsize = s->frame_len_bits - s->next_block_len_bits; memcpy(out, in, n*sizeof(float)); s->dsp.vector_fmul_reverse(out+n, in+n, s->windows[bsize], block_len); memset(out+n+block_len, 0, n*sizeof(float)); } } /** * @return 0 if OK. 1 if last block of frame. return -1 if * unrecoverable error. */ static int wma_decode_block(WMACodecContext *s) { int n, v, a, ch, bsize; int coef_nb_bits, total_gain; int nb_coefs[MAX_CHANNELS]; float mdct_norm; #ifdef TRACE tprintf(s->avctx, "***decode_block: %d:%d\n", s->frame_count - 1, s->block_num); #endif /* compute current block length */ if (s->use_variable_block_len) { n = av_log2(s->nb_block_sizes - 1) + 1; if (s->reset_block_lengths) { s->reset_block_lengths = 0; v = get_bits(&s->gb, n); if (v >= s->nb_block_sizes) return -1; s->prev_block_len_bits = s->frame_len_bits - v; v = get_bits(&s->gb, n); if (v >= s->nb_block_sizes) return -1; s->block_len_bits = s->frame_len_bits - v; } else { /* update block lengths */ s->prev_block_len_bits = s->block_len_bits; s->block_len_bits = s->next_block_len_bits; } v = get_bits(&s->gb, n); if (v >= s->nb_block_sizes) return -1; s->next_block_len_bits = s->frame_len_bits - v; } else { /* fixed block len */ s->next_block_len_bits = s->frame_len_bits; s->prev_block_len_bits = s->frame_len_bits; s->block_len_bits = s->frame_len_bits; } /* now check if the block length is coherent with the frame length */ s->block_len = 1 << s->block_len_bits; if ((s->block_pos + s->block_len) > s->frame_len) return -1; if (s->nb_channels == 2) { s->ms_stereo = get_bits1(&s->gb); } v = 0; for(ch = 0; ch < s->nb_channels; ch++) { a = get_bits1(&s->gb); s->channel_coded[ch] = a; v |= a; } bsize = s->frame_len_bits - s->block_len_bits; /* if no channel coded, no need to go further */ /* XXX: fix potential framing problems */ if (!v) goto next; /* read total gain and extract corresponding number of bits for coef escape coding */ total_gain = 1; for(;;) { a = get_bits(&s->gb, 7); total_gain += a; if (a != 127) break; } coef_nb_bits= ff_wma_total_gain_to_bits(total_gain); /* compute number of coefficients */ n = s->coefs_end[bsize] - s->coefs_start; for(ch = 0; ch < s->nb_channels; ch++) nb_coefs[ch] = n; /* complex coding */ if (s->use_noise_coding) { for(ch = 0; ch < s->nb_channels; ch++) { if (s->channel_coded[ch]) { int i, n, a; n = s->exponent_high_sizes[bsize]; for(i=0;i<n;i++) { a = get_bits1(&s->gb); s->high_band_coded[ch][i] = a; /* if noise coding, the coefficients are not transmitted */ if (a) nb_coefs[ch] -= s->exponent_high_bands[bsize][i]; } } } for(ch = 0; ch < s->nb_channels; ch++) { if (s->channel_coded[ch]) { int i, n,
val, code; n = s->exponent_high_sizes[bsize]; val = (int)0x80000000; for(i=0;i<n;i++) { if (s->high_band_coded[ch][i]) { if (val == (int)0x80000000) { val = get_bits(&s->gb, 7) - 19; } else { code = get_vlc2(&s->gb, s->hgain_vlc.table, HGAINVLCBITS, HGAINMAX); if (code < 0) return -1; val += code - 18; } s->high_band_values[ch][i] = val; } } } } } /* exponents can be reused in short blocks. */ if ((s->block_len_bits == s->frame_len_bits) || get_bits1(&s->gb)) { for(ch = 0; ch < s->nb_channels; ch++) { if (s->channel_coded[ch]) { if (s->use_exp_vlc) { if (decode_exp_vlc(s, ch) < 0) return -1; } else { decode_exp_lsp(s, ch); } s->exponents_bsize[ch] = bsize; } } } /* parse spectral coefficients : just RLE encoding */ for(ch = 0; ch < s->nb_channels; ch++) { if (s->channel_coded[ch]) { int tindex; WMACoef* ptr = &s->coefs1[ch][0]; /* special VLC tables are used for ms stereo because there is potentially less energy there */ tindex = (ch == 1 && s->ms_stereo); memset(ptr, 0, s->block_len * sizeof(WMACoef)); ff_wma_run_level_decode(s->avctx, &s->gb, &s->coef_vlc[tindex], s->level_table[tindex], s->run_table[tindex], 0, ptr, 0, nb_coefs[ch], s->block_len, s->frame_len_bits, coef_nb_bits); } if (s->version == 1 && s->nb_channels >= 2) { align_get_bits(&s->gb); } } /* normalize */ { int n4 = s->block_len / 2; mdct_norm = 1.0 / (float)n4; if (s->version == 1) { mdct_norm *= sqrt(n4); } } /* finally compute the MDCT coefficients */ for(ch = 0; ch < s->nb_channels; ch++) { if (s->channel_coded[ch]) { WMACoef *coefs1; float *coefs, *exponents, mult, mult1, noise; int i, j, n, n1, last_high_band, esize; float exp_power[HIGH_BAND_MAX_SIZE]; coefs1 = s->coefs1[ch]; exponents = s->exponents[ch]; esize = s->exponents_bsize[ch]; mult = pow(10, total_gain * 0.05) / s->max_exponent[ch]; mult *= mdct_norm; coefs = s->coefs[ch]; if (s->use_noise_coding) { mult1 = mult; /* very low freqs : noise */ for(i = 0;i < s->coefs_start; i++) { *coefs++ = s->noise_table[s->noise_index] * exponents[i<<bsize>>esize] * mult1; s->noise_index = (s->noise_index + 1) & (NOISE_TAB_SIZE - 1); } n1 = s->exponent_high_sizes[bsize]; /* compute power of high bands */ exponents = s->exponents[ch] + (s->high_band_start[bsize]<<bsize); last_high_band = 0; /* avoid warning */ for(j=0;j<n1;j++) { n = s->exponent_high_bands[s->frame_len_bits - s->block_len_bits][j]; if (s->high_band_coded[ch][j]) { float e2, v; e2 = 0; for(i = 0;i < n; i++) { v = exponents[i<<bsize>>esize]; e2 += v * v; } exp_power[j] = e2 / n; last_high_band = j; tprintf(s->avctx, "%d: power=%f (%d)\n", j, exp_power[j], n); } exponents += n<<bsize; } /* main freqs and high freqs */ exponents = s->exponents[ch] + (s->coefs_start<<bsize); for(j=-1;j<n1;j++) { if (j < 0) { n = s->high_band_start[bsize] - s->coefs_start; } else { n = s->exponent_high_bands[s->frame_len_bits - s->block_len_bits][j]; } if (j >= 0 && s->high_band_coded[ch][j]) { /* use noise with specified power */ mult1 = sqrt(exp_power[j] / exp_power[last_high_band]); /* XXX: use a table */ mult1 = mult1 * pow(10, s->high_band_values[ch][j] * 0.05); mult1 = mult1 / (s->max_exponent[ch] * s->noise_mult); mult1 *= mdct_norm; for(i = 0;i < n; i++) { noise = s->noise_table[s->noise_index]; s->noise_index = (s->noise_index + 1) & (NOISE_TAB_SIZE - 1); *coefs++ = noise * exponents[i<<bsize>>esize] * mult1; } exponents += n<<bsize; } else { /* coded values + small noise */ for(i = 0;i < n; i++) { noise = s->noise_table[s->noise_index]; s->noise_index = (s->noise_index + 1) & (NOISE_TAB_SIZE - 1); *coefs++ = 
((*coefs1++) + noise) * exponents[i<<bsize>>esize] * mult; } exponents += n<<bsize; } } /* very high freqs : noise */ n = s->block_len - s->coefs_end[bsize]; mult1 = mult * exponents[((-1<<bsize))>>esize]; for(i = 0; i < n; i++) { *coefs++ = s->noise_table[s->noise_index] * mult1; s->noise_index = (s->noise_index + 1) & (NOISE_TAB_SIZE - 1); } } else { /* XXX: optimize more */ for(i = 0;i < s->coefs_start; i++) *coefs++ = 0.0; n = nb_coefs[ch]; for(i = 0;i < n; i++) { *coefs++ = coefs1[i] * exponents[i<<bsize>>esize] * mult; } n = s->block_len - s->coefs_end[bsize]; for(i = 0;i < n; i++) *coefs++ = 0.0; } } } #ifdef TRACE for(ch = 0; ch < s->nb_channels; ch++) { if (s->channel_coded[ch]) { dump_floats(s, "exponents", 3, s->exponents[ch], s->block_len); dump_floats(s, "coefs", 1, s->coefs[ch], s->block_len); } } #endif if (s->ms_stereo && s->channel_coded[1]) { /* nominal case for ms stereo: we do it before mdct */ /* no need to optimize this case because it should almost never happen */ if (!s->channel_coded[0]) { tprintf(s->avctx, "rare ms-stereo case happened\n"); memset(s->coefs[0], 0, sizeof(float) * s->block_len); s->channel_coded[0] = 1; } s->dsp.butterflies_float(s->coefs[0], s->coefs[1], s->block_len); } next: for(ch = 0; ch < s->nb_channels; ch++) { int n4, index; n4 = s->block_len / 2; if(s->channel_coded[ch]){ ff_imdct_calc(&s->mdct_ctx[bsize], s->output, s->coefs[ch]); }else if(!(s->ms_stereo && ch==1)) memset(s->output, 0, sizeof(s->output)); /* multiply by the window and add in the frame */ index = (s->frame_len / 2) + s->block_pos - n4; wma_window(s, &s->frame_out[ch][index]); } /* update block number */ s->block_num++; s->block_pos += s->block_len; if (s->block_pos >= s->frame_len) return 1; else return 0; } /* decode a frame of frame_len samples */ static int wma_decode_frame(WMACodecContext *s, int16_t *samples) { int ret, i, n, ch, incr; int16_t *ptr; float *iptr; #ifdef TRACE tprintf(s->avctx, "***decode_frame: %d size=%d\n", s->frame_count++, s->frame_len); #endif /* read each block */ s->block_num = 0; s->block_pos = 0; for(;;) { ret = wma_decode_block(s); if (ret < 0) return -1; if (ret) break; } /* convert frame to integer */ n = s->frame_len; incr = s->nb_channels; for(ch = 0; ch < s->nb_channels; ch++) { ptr = samples + ch; iptr = s->frame_out[ch]; for(i=0;i<n;i++) { *ptr = av_clip_int16(lrintf(*iptr++)); ptr += incr; } /* prepare for next block */ memmove(&s->frame_out[ch][0], &s->frame_out[ch][s->frame_len], s->frame_len * sizeof(float)); } #ifdef TRACE dump_shorts(s, "samples", samples, n * s->nb_channels); #endif return 0; } static int wma_decode_superframe(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf,int buf_size) { // const uint8_t *buf = avpkt->data; //int buf_size = avpkt->size; WMACodecContext *s = avctx->priv_data; int nb_frames, bit_offset, i, pos, len; uint8_t *q; int16_t *samples; tprintf(avctx, "***decode_superframe:\n"); if(buf_size==0){ s->last_superframe_len = 0; return 0; } if (buf_size < s->block_align) return 0; buf_size = s->block_align; samples = data; init_get_bits(&s->gb, buf, buf_size*8); if (s->use_bit_reservoir) { /* read super frame header */ skip_bits(&s->gb, 4); /* super frame index */ nb_frames = get_bits(&s->gb, 4) - 1; if((nb_frames+1) * s->nb_channels * s->frame_len * sizeof(int16_t) > *data_size){ av_log(s->avctx, AV_LOG_ERROR, "Insufficient output space\n"); goto fail; } bit_offset = get_bits(&s->gb, s->byte_offset_bits + 3); if (s->last_superframe_len > 0) { // printf("skip=%d\n", s->last_bitoffset); /* 
add bit_offset bits to last frame */ if ((s->last_superframe_len + ((bit_offset + 7) >> 3)) > MAX_CODED_SUPERFRAME_SIZE) goto fail; q = s->last_superframe + s->last_superframe_len; len = bit_offset; while (len > 7) { *q++ = (get_bits)(&s->gb, 8); len -= 8; } if (len > 0) { *q++ = (get_bits)(&s->gb, len) << (8 - len); } /* XXX: bit_offset bits into last frame */ init_get_bits(&s->gb, s->last_superframe, MAX_CODED_SUPERFRAME_SIZE*8); /* skip unused bits */ if (s->last_bitoffset > 0) skip_bits(&s->gb, s->last_bitoffset); /* this frame is stored in the last superframe and in the current one */ if (wma_decode_frame(s, samples) < 0) goto fail; samples += s->nb_channels * s->frame_len; } /* read each frame starting from bit_offset */ pos = bit_offset + 4 + 4 + s->byte_offset_bits + 3; init_get_bits(&s->gb, buf + (pos >> 3), (MAX_CODED_SUPERFRAME_SIZE - (pos >> 3))*8); len = pos & 7; if (len > 0) skip_bits(&s->gb, len); s->reset_block_lengths = 1; for(i=0;i<nb_frames;i++) { if (wma_decode_frame(s, samples) < 0) goto fail; samples += s->nb_channels * s->frame_len; } /* we copy the end of the frame in the last frame buffer */ pos = get_bits_count(&s->gb) + ((bit_offset + 4 + 4 + s->byte_offset_bits + 3) & ~7); s->last_bitoffset = pos & 7; pos >>= 3; len = buf_size - pos; if (len > MAX_CODED_SUPERFRAME_SIZE || len < 0) { goto fail; } s->last_superframe_len = len; memcpy(s->last_superframe, buf + pos, len); } else { if(s->nb_channels * s->frame_len * sizeof(int16_t) > *data_size){ av_log(s->avctx, AV_LOG_ERROR, "Insufficient output space\n"); goto fail; } /* single frame decode */ if (wma_decode_frame(s, samples) < 0) goto fail; samples += s->nb_channels * s->frame_len; } //av_log(NULL, AV_LOG_ERROR, "%d %d %d %d outbytes:%d eaten:%d\n", s->frame_len_bits, s->block_len_bits, s->frame_len, s->block_len, (int8_t *)samples - (int8_t *)data, s->block_align); *data_size = (int8_t *)samples - (int8_t *)data; return s->block_align; fail: /* when error, we reset the bit reservoir */ s->last_superframe_len = 0; return -1; } AVCodec wmav1_decoder = { "wmav1", CODEC_TYPE_AUDIO, CODEC_ID_WMAV1, sizeof(WMACodecContext), wma_decode_init, NULL, ff_wma_end, wma_decode_superframe, .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 1"), }; AVCodec wmav2_decoder = { "wmav2", CODEC_TYPE_AUDIO, CODEC_ID_WMAV2, sizeof(WMACodecContext), wma_decode_init, NULL, ff_wma_end, wma_decode_superframe, .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 2"), };
gpl-2.0
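As the header comment of wmadec.c above requires, a caller has to hand the decoder the codec-private bytes that follow the WAVEFORMATEX structure: 4 bytes for WMA v1 (format 0x160), 6 bytes for WMA v2 (format 0x161). A minimal caller-side sketch against the legacy FFmpeg API this fork targets; wfx_extra and wfx_extra_size are hypothetical stand-ins for values parsed out of the container:

AVCodecContext *avctx = avcodec_alloc_context();

avctx->extradata      = wfx_extra;       /* e.g. the 6 extra bytes for WMA v2 */
avctx->extradata_size = wfx_extra_size;

/* wma_decode_init() reads flags2 (exp VLC / bit reservoir / variable
   block length) out of these bytes; without them the stream cannot be
   decoded correctly. */
if (avcodec_open(avctx, &wmav2_decoder) < 0)
    return -1;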
williamcotton/charybdis
modules/m_lusers.c
18
3036
/* * ircd-ratbox: A slightly useful ircd. * m_lusers.c: Sends user statistics. * * Copyright (C) 1990 Jarkko Oikarinen and University of Oulu, Co Center * Copyright (C) 1996-2002 Hybrid Development Team * Copyright (C) 2002-2005 ircd-ratbox development team * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA * * $Id: m_lusers.c 254 2005-09-21 23:35:12Z nenolod $ */ #include "stdinc.h" #include "client.h" #include "ircd.h" #include "numeric.h" #include "s_serv.h" /* hunt_server */ #include "s_user.h" /* show_lusers */ #include "send.h" #include "s_conf.h" #include "msg.h" #include "parse.h" #include "modules.h" static int m_lusers(struct Client *, struct Client *, int, const char **); static int ms_lusers(struct Client *, struct Client *, int, const char **); struct Message lusers_msgtab = { "LUSERS", 0, 0, 0, MFLG_SLOW, {mg_unreg, {m_lusers, 0}, {ms_lusers, 0}, mg_ignore, mg_ignore, {ms_lusers, 0}} }; mapi_clist_av1 lusers_clist[] = { &lusers_msgtab, NULL }; DECLARE_MODULE_AV1(lusers, NULL, NULL, lusers_clist, NULL, NULL, "$Revision: 254 $"); /* * m_lusers - LUSERS message handler * parv[1] = host/server mask. * parv[2] = server to query * * 19970918 JRL hacked to ignore parv[1] completely and require parc > 3 * to cause a force */ static int m_lusers(struct Client *client_p, struct Client *source_p, int parc, const char *parv[]) { static time_t last_used = 0; if (parc > 2) { if((last_used + ConfigFileEntry.pace_wait) > rb_current_time()) { /* safe enough to give this on a local connect only */ sendto_one(source_p, form_str(RPL_LOAD2HI), me.name, source_p->name, "LUSERS"); return 0; } else last_used = rb_current_time(); if(hunt_server(client_p, source_p, ":%s LUSERS %s :%s", 2, parc, parv) != HUNTED_ISME) return 0; } show_lusers(source_p); return 0; } /* * ms_lusers - LUSERS message handler for servers and opers * parv[1] = host/server mask. * parv[2] = server to query * * 19970918 JRL hacked to ignore parv[1] completely and require parc > 3 * to cause a force */ static int ms_lusers(struct Client *client_p, struct Client *source_p, int parc, const char *parv[]) { if(parc > 2) { if(hunt_server(client_p, source_p, ":%s LUSERS %s :%s", 2, parc, parv) != HUNTED_ISME) return 0; } show_lusers(source_p); return 0; }
gpl-2.0
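The rate-limiting idiom inside m_lusers() above is worth isolating: a function-local static timestamp gates remote LUSERS queries to at most one per ConfigFileEntry.pace_wait seconds, and paced-out clients get RPL_LOAD2HI. A minimal sketch of the same pattern; paced_ok() and its parameters are illustrative stand-ins for the handler's inline check against rb_current_time():

static time_t last_used = 0;

static int paced_ok(time_t now, time_t pace_wait)
{
	if (last_used + pace_wait > now)
		return 0;	/* too soon: caller answers with RPL_LOAD2HI */
	last_used = now;	/* allowed: restart the pacing window */
	return 1;
}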
yangxi/omap4m3
c/src/lib/libbsp/m68k/ods68302/startup/m68302scc.c
18
3405
/*****************************************************************************/ /* M68302 SCC Polled Driver */ /*****************************************************************************/ #include <bsp.h> #include <rtems/m68k/m68302.h> #include <m68302scc.h> #define M68302_SCC_COUNT (3) static volatile m302_SCC_t *scc[M68302_SCC_COUNT] = { 0, 0, 0 }; static volatile m302_SCC_Registers_t *scc_reg[M68302_SCC_COUNT] = { 0, 0, 0 }; static int scc_translate[M68302_SCC_COUNT] = { 0, 0, 0 }; static const uint16_t baud_clocks[] = { (SYSTEM_CLOCK / ( 4800 * 16)), (SYSTEM_CLOCK / ( 9600 * 16)), (SYSTEM_CLOCK / ( 19200 * 16)), (SYSTEM_CLOCK / ( 38400 * 16)), (SYSTEM_CLOCK / ( 57600 * 16)), (SYSTEM_CLOCK / (115700 * 16)) }; void scc_initialise(int channel, int baud, int translate) { uint16_t scon; if (channel < M68302_SCC_COUNT) { scc[channel] = &m302.scc1 + channel; scc_reg[channel] = &m302.reg.scc[channel]; scc_translate[channel] = translate; scon = (baud_clocks[baud] & 0xF800) == 0 ? 0 : 1; scon |= (((baud_clocks[baud] / (1 + scon * 3)) - 1) << 1) & 0x0FFE; scc_reg[channel]->scon = scon; scc_reg[channel]->scm = 0x0171; scc[channel]->bd.tx[0].status = 0x2000; scc[channel]->bd.tx[0].length = 0; scc[channel]->bd.tx[0].buffer = (uint8_t*) &(scc[channel]->bd.tx[1].buffer); scc[channel]->bd.rx[0].status = 0x2000; scc[channel]->bd.rx[0].length = 0; scc[channel]->bd.rx[0].buffer = (uint8_t*) &(scc[channel]->bd.rx[1].buffer); scc[channel]->parm.rfcr = 0x50; scc[channel]->parm.tfcr = 0x50; scc[channel]->parm.mrblr = 0x0001; scc[channel]->prot.uart.max_idl = 0x0004; scc[channel]->prot.uart.brkcr = 1; scc[channel]->prot.uart.parec = 0; scc[channel]->prot.uart.frmec = 0; scc[channel]->prot.uart.nosec = 0; scc[channel]->prot.uart.brkec = 0; scc[channel]->prot.uart.uaddr1 = 0; scc[channel]->prot.uart.uaddr2 = 0; scc[channel]->prot.uart.character[0] = 0x0003; scc[channel]->prot.uart.character[1] = 0x8000; scc_reg[channel]->scce = 0xFF; scc_reg[channel]->sccm = 0x15; scc_reg[channel]->scm = 0x17d; } } unsigned char scc_status(int channel, unsigned char status) { uint16_t rx_status; m302.reg.wcn = 0; if ((channel < M68302_SCC_COUNT) && scc[channel]) { rx_status = scc[channel]->bd.rx[0].status; if ((rx_status & 0x8000) == 0) { if (rx_status & 0x003B) { return 2; } if (status == 0) { return 1; } } } return 0; } unsigned char scc_in(int channel) { m302.reg.wcn = 0; if ((channel < M68302_SCC_COUNT) && scc[channel]) { if ((scc[channel]->bd.rx[0].status & 0x8000) == 0) { unsigned char c; c = *(scc[channel]->bd.rx[0].buffer); scc[channel]->bd.rx[0].status = 0xa000; return c; } } return 0; } void scc_out(int channel, unsigned char character) { if ((channel < M68302_SCC_COUNT) && scc[channel]) { do { m302.reg.wcn = 0; } while (scc[channel]->bd.tx[0].status & 0x8000); *(scc[channel]->bd.tx[0].buffer) = character; scc[channel]->bd.tx[0].length = 1; scc[channel]->bd.tx[0].status = 0xa000; if (scc_translate[channel]) { if (character == '\n') { scc_out(channel, '\r'); } } } }
gpl-2.0
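The densest expression in scc_initialise() above is the SCON computation, unrolled here as a standalone sketch. The 16x-oversampling divisor SYSTEM_CLOCK / (baud * 16) is stored in bits 11..1 as (divisor - 1); when it does not fit in 11 bits (the 0xF800 test), bit 0 selects the divide-by-4 prescaler and the stored count shrinks by the same factor. scc_make_scon() is an illustrative helper, not part of the driver, and the table's 115700 entry presumably approximates the conventional 115200 rate:

static unsigned short scc_make_scon(unsigned long system_clock,
                                    unsigned long baud)
{
  unsigned long divisor = system_clock / (baud * 16);
  /* raw divisor too big for the 11-bit count field? then set DIV4 (bit 0) */
  unsigned short scon = (divisor & 0xF800) == 0 ? 0 : 1;

  /* with DIV4 set the counter ticks at 1/4 rate, so divide the count by 4 */
  scon |= (((divisor / (1 + scon * 3)) - 1) << 1) & 0x0FFE;
  return scon;
}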
scruiser/kernel
arch/arm/mach-mvebu/mvebu-soc-id.c
18
3046
/* * ID and revision information for mvebu SoCs * * Copyright (C) 2014 Marvell * * Gregory CLEMENT <gregory.clement@free-electrons.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. * * All the mvebu SoCs have information related to their variant and * revision that can be read from the PCI control register. This is * done before the PCI initialization to avoid any conflict. Once the * ID and revision are retrieved, the mapping is freed. */ #define pr_fmt(fmt) "mvebu-soc-id: " fmt #include <linux/clk.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_address.h> #include "mvebu-soc-id.h" #define PCIE_DEV_ID_OFF 0x0 #define PCIE_DEV_REV_OFF 0x8 #define SOC_ID_MASK 0xFFFF0000 #define SOC_REV_MASK 0xFF static u32 soc_dev_id; static u32 soc_rev; static bool is_id_valid; static const struct of_device_id mvebu_pcie_of_match_table[] = { { .compatible = "marvell,armada-xp-pcie", }, { .compatible = "marvell,armada-370-pcie", }, { .compatible = "marvell,kirkwood-pcie" }, {}, }; int mvebu_get_soc_id(u32 *dev, u32 *rev) { if (is_id_valid) { *dev = soc_dev_id; *rev = soc_rev; return 0; } else return -1; } static int __init mvebu_soc_id_init(void) { struct device_node *np; int ret = 0; void __iomem *pci_base; struct clk *clk; struct device_node *child; np = of_find_matching_node(NULL, mvebu_pcie_of_match_table); if (!np) return ret; /* * ID and revision are available from any port, so we * just pick the first one */ child = of_get_next_child(np, NULL); if (child == NULL) { pr_err("cannot get pci node\n"); ret = -ENOMEM; goto clk_err; } clk = of_clk_get_by_name(child, NULL); if (IS_ERR(clk)) { pr_err("cannot get clock\n"); ret = -ENOMEM; goto clk_err; } ret = clk_prepare_enable(clk); if (ret) { pr_err("cannot enable clock\n"); goto clk_err; } pci_base = of_iomap(child, 0); if (pci_base == NULL) { pr_err("cannot map registers\n"); ret = -ENOMEM; goto res_ioremap; } /* SoC ID */ soc_dev_id = readl(pci_base + PCIE_DEV_ID_OFF) >> 16; /* SoC revision */ soc_rev = readl(pci_base + PCIE_DEV_REV_OFF) & SOC_REV_MASK; is_id_valid = true; pr_info("MVEBU SoC ID=0x%X, Rev=0x%X\n", soc_dev_id, soc_rev); iounmap(pci_base); res_ioremap: /* * If the PCIe unit is actually enabled and we have PCI * support in the kernel, we intentionally do not release the * reference to the clock. We want to keep it running since * the bootloader does some PCIe link configuration that the * kernel is for now unable to do, and gating the clock would * make us lose this precious configuration. */ if (!of_device_is_available(child) || !IS_ENABLED(CONFIG_PCI_MVEBU)) { clk_disable_unprepare(clk); clk_put(clk); } clk_err: of_node_put(child); of_node_put(np); return ret; } core_initcall(mvebu_soc_id_init);
gpl-2.0
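Consumers of the helper exported above must tolerate the ID being unavailable, since mvebu_get_soc_id() keeps returning -1 until mvebu_soc_id_init() has run and found a matching PCIe node. A minimal consumer-side sketch:

u32 dev, rev;

if (mvebu_get_soc_id(&dev, &rev) == 0)
	pr_info("running on SoC ID=0x%X, Rev=0x%X\n", dev, rev);
else
	pr_warn("SoC ID not available yet\n");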
gianlucaborello/linux
drivers/gpu/drm/amd/amdgpu/si_dpm.c
18
261159
/* * Copyright 2013 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * */ #include "drmP.h" #include "amdgpu.h" #include "amdgpu_pm.h" #include "amdgpu_dpm.h" #include "amdgpu_atombios.h" #include "si/sid.h" #include "r600_dpm.h" #include "si_dpm.h" #include "atom.h" #include "../include/pptable.h" #include <linux/math64.h> #include <linux/seq_file.h> #include <linux/firmware.h> #define MC_CG_ARB_FREQ_F0 0x0a #define MC_CG_ARB_FREQ_F1 0x0b #define MC_CG_ARB_FREQ_F2 0x0c #define MC_CG_ARB_FREQ_F3 0x0d #define SMC_RAM_END 0x20000 #define SCLK_MIN_DEEPSLEEP_FREQ 1350 /* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22 #define BIOS_SCRATCH_4 0x5cd MODULE_FIRMWARE("radeon/tahiti_smc.bin"); MODULE_FIRMWARE("radeon/tahiti_k_smc.bin"); MODULE_FIRMWARE("radeon/pitcairn_smc.bin"); MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin"); MODULE_FIRMWARE("radeon/verde_smc.bin"); MODULE_FIRMWARE("radeon/verde_k_smc.bin"); MODULE_FIRMWARE("radeon/oland_smc.bin"); MODULE_FIRMWARE("radeon/oland_k_smc.bin"); MODULE_FIRMWARE("radeon/hainan_smc.bin"); MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); union power_info { struct _ATOM_POWERPLAY_INFO info; struct _ATOM_POWERPLAY_INFO_V2 info_2; struct _ATOM_POWERPLAY_INFO_V3 info_3; struct _ATOM_PPLIB_POWERPLAYTABLE pplib; struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4; struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5; }; union fan_info { struct _ATOM_PPLIB_FANTABLE fan; struct _ATOM_PPLIB_FANTABLE2 fan2; struct _ATOM_PPLIB_FANTABLE3 fan3; }; union pplib_clock_info { struct _ATOM_PPLIB_R600_CLOCK_INFO r600; struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; struct _ATOM_PPLIB_SI_CLOCK_INFO si; }; static const u32 r600_utc[R600_PM_NUMBER_OF_TC] = { R600_UTC_DFLT_00, R600_UTC_DFLT_01, R600_UTC_DFLT_02, R600_UTC_DFLT_03, R600_UTC_DFLT_04, R600_UTC_DFLT_05, R600_UTC_DFLT_06, R600_UTC_DFLT_07, R600_UTC_DFLT_08, R600_UTC_DFLT_09, R600_UTC_DFLT_10, R600_UTC_DFLT_11, R600_UTC_DFLT_12, R600_UTC_DFLT_13, R600_UTC_DFLT_14, }; static const u32 r600_dtc[R600_PM_NUMBER_OF_TC] = { R600_DTC_DFLT_00, R600_DTC_DFLT_01, R600_DTC_DFLT_02, 
R600_DTC_DFLT_03, R600_DTC_DFLT_04, R600_DTC_DFLT_05, R600_DTC_DFLT_06, R600_DTC_DFLT_07, R600_DTC_DFLT_08, R600_DTC_DFLT_09, R600_DTC_DFLT_10, R600_DTC_DFLT_11, R600_DTC_DFLT_12, R600_DTC_DFLT_13, R600_DTC_DFLT_14, }; static const struct si_cac_config_reg cac_weights_tahiti[] = { { 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND }, { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND }, { 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND }, { 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND }, { 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, { 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 
0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND }, { 0xFFFFFFFF } }; static const struct si_cac_config_reg lcac_tahiti[] = { { 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x12b, 
0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0xFFFFFFFF } }; static const struct si_cac_config_reg cac_override_tahiti[] = { { 0xFFFFFFFF } }; static const struct si_powertune_data powertune_data_tahiti = { ((1 << 16) | 27027), 6, 0, 4, 95, { 0UL, 0UL, 4521550UL, 309631529UL, -1270850L, 4513710L, 40 }, 595000000UL, 12, { 0, 0, 0, 0, 0, 0, 0, 0 }, true }; static const struct si_dte_data dte_data_tahiti = { { 1159409, 0, 0, 0, 0 }, { 777, 0, 0, 0, 0 }, 2, 54000, 127000, 25, 2, 10, 13, { 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 }, { 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 }, { 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 }, 85, false }; #if 0 static const struct si_dte_data dte_data_tahiti_le = { { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 }, { 0x7D, 0x7D, 0x4E4, 0xB00, 0 }, 0x5, 0xAFC8, 0x64, 0x32, 1, 0, 0x10, { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 }, { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 }, { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 }, 85, true }; #endif static const struct si_dte_data dte_data_tahiti_pro = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, { 0x0, 0x0, 0x0, 0x0, 0x0 }, 5, 45000, 100, 0xA, 1, 0, 0x10, { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, { 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 90, true }; static const struct si_dte_data dte_data_new_zealand = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 }, { 0x29B, 
0x3E9, 0x537, 0x7D2, 0 }, 0x5, 0xAFC8, 0x69, 0x32, 1, 0, 0x10, { 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE }, { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, { 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 }, 85, true }; static const struct si_dte_data dte_data_aruba_pro = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, { 0x0, 0x0, 0x0, 0x0, 0x0 }, 5, 45000, 100, 0xA, 1, 0, 0x10, { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, { 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 90, true }; static const struct si_dte_data dte_data_malta = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, { 0x0, 0x0, 0x0, 0x0, 0x0 }, 5, 45000, 100, 0xA, 1, 0, 0x10, { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 90, true }; static const struct si_cac_config_reg cac_weights_pitcairn[] = { { 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND }, { 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND }, { 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND }, { 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND }, { 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND }, { 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND }, 
{ 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND }, { 0xFFFFFFFF } }; static const struct si_cac_config_reg lcac_pitcairn[] = { { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x101, 0x0001fffe, 1, 
0x2, SISLANDS_CACCONFIG_CGIND }, { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0xFFFFFFFF } }; static const struct si_cac_config_reg cac_override_pitcairn[] = { { 0xFFFFFFFF } }; static const struct si_powertune_data powertune_data_pitcairn = { ((1 << 16) | 27027), 5, 0, 6, 100, { 51600000UL, 1800000UL, 7194395UL, 309631529UL, -1270850L, 4513710L, 100 }, 117830498UL, 12, { 0, 0, 0, 0, 0, 0, 0, 0 }, true }; static const struct si_dte_data dte_data_pitcairn = { { 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, 0, 0, 0, 0, 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 0, false }; static const struct si_dte_data dte_data_curacao_xt = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, { 0x0, 
0x0, 0x0, 0x0, 0x0 }, 5, 45000, 100, 0xA, 1, 0, 0x10, { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 90, true }; static const struct si_dte_data dte_data_curacao_pro = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, { 0x0, 0x0, 0x0, 0x0, 0x0 }, 5, 45000, 100, 0xA, 1, 0, 0x10, { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 90, true }; static const struct si_dte_data dte_data_neptune_xt = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, { 0x0, 0x0, 0x0, 0x0, 0x0 }, 5, 45000, 100, 0xA, 1, 0, 0x10, { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, { 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 90, true }; static const struct si_cac_config_reg cac_weights_chelsea_pro[] = { { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x14, 0x0000ffff, 0, 
0x2BD, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, { 0xFFFFFFFF } }; static const struct si_cac_config_reg cac_weights_chelsea_xt[] = { { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 
0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, { 0xFFFFFFFF } }; static const struct si_cac_config_reg cac_weights_heathrow[] = { { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, { 0x4e, 0x0000ffff, 0, 0x0, 
SISLANDS_CACCONFIG_CGIND }, { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, { 0xFFFFFFFF } }; static const struct si_cac_config_reg cac_weights_cape_verde_pro[] = { { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, { 0x16, 
0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, { 0xFFFFFFFF } }; static const struct si_cac_config_reg cac_weights_cape_verde[] = { { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, { 0x17, 0x0000ffff, 0, 0x0, 
SISLANDS_CACCONFIG_CGIND }, { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, { 0xFFFFFFFF } }; static const struct si_cac_config_reg lcac_cape_verde[] = { { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x170, 
0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0xFFFFFFFF } }; static const struct si_cac_config_reg cac_override_cape_verde[] = { { 0xFFFFFFFF } }; static const struct si_powertune_data powertune_data_cape_verde = { ((1 << 16) | 0x6993), 5, 0, 7, 105, { 0UL, 0UL, 7194395UL, 309631529UL, -1270850L, 4513710L, 100 }, 117830498UL, 12, { 0, 0, 0, 0, 0, 0, 0, 0 }, true }; static const struct si_dte_data dte_data_cape_verde = { { 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, 0, 0, 0, 0, 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 0, false }; static const struct si_dte_data dte_data_venus_xtx = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, { 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 }, 5, 55000, 0x69, 0xA, 1, 0, 0x3, { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, { 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 90, true }; static const struct si_dte_data dte_data_venus_xt = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, { 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 }, 5, 55000, 0x69, 0xA, 1, 0, 0x3, { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, { 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 90, true }; static const struct si_dte_data dte_data_venus_pro = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, { 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 }, 5, 55000, 0x69, 0xA, 1, 0, 0x3, { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, { 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, 90, true }; static const struct si_cac_config_reg cac_weights_oland[] = { { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, { 0x7, 
0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, { 0xFFFFFFFF } }; static const struct si_cac_config_reg cac_weights_mars_pro[] = { { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND }, { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND }, { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0x0000ffff, 0, 0x71, 
SISLANDS_CACCONFIG_CGIND }, { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND }, { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND }, { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND }, { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND }, { 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND }, { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND }, { 0xFFFFFFFF } }; static const struct si_cac_config_reg cac_weights_mars_xt[] = { { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND }, { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND }, { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND 
}, { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND }, { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND }, { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND }, { 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND }, { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND }, { 0xFFFFFFFF } }; static const struct si_cac_config_reg cac_weights_oland_pro[] = { { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND }, { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND }, { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND }, { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND }, { 0xa, 0x0000ffff, 0, 
0x81, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND }, { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND }, { 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND }, { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND }, { 0xFFFFFFFF } }; static const struct si_cac_config_reg cac_weights_oland_xt[] = { { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND }, { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND }, { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND }, { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND }, { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND }, { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND }, { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND }, { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND }, { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND }, { 0xb, 0x0000ffff, 0, 0x3D2, 
SISLANDS_CACCONFIG_CGIND }, { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND }, { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND }, { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND }, { 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND }, { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND }, { 0xFFFFFFFF } }; static const struct si_cac_config_reg lcac_oland[] = { { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 
0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0xFFFFFFFF } }; static const struct si_cac_config_reg lcac_mars_pro[] = { { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, { 
0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};

static const struct si_cac_config_reg cac_override_oland[] =
{
	{ 0xFFFFFFFF }
};

static const struct si_powertune_data powertune_data_oland =
{
	((1 << 16) | 0x6993),
	5,
	0,
	7,
	105,
	{ 0UL, 0UL, 7194395UL, 309631529UL, -1270850L, 4513710L, 100 },
	117830498UL,
	12,
	{ 0, 0, 0, 0, 0, 0, 0, 0 },
	true
};

static const struct si_powertune_data powertune_data_mars_pro =
{
	((1 << 16) | 0x6993),
	5,
	0,
	7,
	105,
	{ 0UL, 0UL, 7194395UL, 309631529UL, -1270850L, 4513710L, 100 },
	117830498UL,
	12,
	{ 0, 0, 0, 0, 0, 0, 0, 0 },
	true
};

static const struct si_dte_data dte_data_oland =
{
	{ 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0 },
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	0,
	false
};

static const struct si_dte_data dte_data_mars_pro =
{
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	5,
	55000,
	105,
	0xA,
	1,
	0,
	0x10,
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	90,
	true
};

static const struct si_dte_data dte_data_sun_xt =
{
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	5,
	55000,
	105,
	0xA,
	1,
	0,
	0x10,
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	90,
	true
};

static const struct si_cac_config_reg cac_weights_hainan[] =
{
	{ 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
SISLANDS_CACCONFIG_CGIND }, { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND }, { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND }, { 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND }, { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND }, { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, { 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND }, { 0xFFFFFFFF } }; static const struct si_powertune_data powertune_data_hainan = { ((1 << 16) | 0x6993), 5, 0, 9, 105, { 0UL, 0UL, 7194395UL, 309631529UL, -1270850L, 4513710L, 100 }, 117830498UL, 12, { 0, 0, 0, 0, 0, 0, 0, 0 }, true }; static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev); static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev); static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev); static struct si_ps *si_get_ps(struct amdgpu_ps *rps); static int si_populate_voltage_value(struct amdgpu_device *adev, const struct atom_voltage_table *table, u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage); static int si_get_std_voltage_value(struct amdgpu_device *adev, SISLANDS_SMC_VOLTAGE_VALUE *voltage, u16 *std_voltage); static int si_write_smc_soft_register(struct amdgpu_device *adev, u16 reg_offset, u32 value); static int si_convert_power_level_to_smc(struct amdgpu_device *adev, struct rv7xx_pl *pl, SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level); static int si_calculate_sclk_params(struct amdgpu_device *adev, u32 engine_clock, SISLANDS_SMC_SCLK_VALUE *sclk); static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev); static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev); static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev); static void si_dpm_set_irq_funcs(struct amdgpu_device *adev); static struct si_power_info *si_get_pi(struct amdgpu_device *adev) { struct si_power_info *pi = adev->pm.dpm.priv; return pi; } static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff, u16 v, s32 t, u32 ileakage, u32 *leakage) { s64 kt, kv, leakage_w, i_leakage, vddc; s64 temperature, t_slope, 
t_intercept, av, bv, t_ref; s64 tmp; i_leakage = div64_s64(drm_int2fixp(ileakage), 100); vddc = div64_s64(drm_int2fixp(v), 1000); temperature = div64_s64(drm_int2fixp(t), 1000); t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000); t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000); av = div64_s64(drm_int2fixp(coeff->av), 100000000); bv = div64_s64(drm_int2fixp(coeff->bv), 100000000); t_ref = drm_int2fixp(coeff->t_ref); tmp = drm_fixp_mul(t_slope, vddc) + t_intercept; kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature)); kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref))); kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc))); leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc); *leakage = drm_fixp2int(leakage_w * 1000); } static void si_calculate_leakage_for_v_and_t(struct amdgpu_device *adev, const struct ni_leakage_coeffients *coeff, u16 v, s32 t, u32 i_leakage, u32 *leakage) { si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage); } static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff, const u32 fixed_kt, u16 v, u32 ileakage, u32 *leakage) { s64 kt, kv, leakage_w, i_leakage, vddc; i_leakage = div64_s64(drm_int2fixp(ileakage), 100); vddc = div64_s64(drm_int2fixp(v), 1000); kt = div64_s64(drm_int2fixp(fixed_kt), 100000000); kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000), drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc))); leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc); *leakage = drm_fixp2int(leakage_w * 1000); } static void si_calculate_leakage_for_v(struct amdgpu_device *adev, const struct ni_leakage_coeffients *coeff, const u32 fixed_kt, u16 v, u32 i_leakage, u32 *leakage) { si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage); } static void si_update_dte_from_pl2(struct amdgpu_device *adev, struct si_dte_data *dte_data) { u32 p_limit1 = adev->pm.dpm.tdp_limit; u32 p_limit2 = adev->pm.dpm.near_tdp_limit; u32 k = dte_data->k; u32 t_max = dte_data->max_t; u32 t_split[5] = { 10, 15, 20, 25, 30 }; u32 t_0 = dte_data->t0; u32 i; if (p_limit2 != 0 && p_limit2 <= p_limit1) { dte_data->tdep_count = 3; for (i = 0; i < k; i++) { dte_data->r[i] = (t_split[i] * (t_max - t_0/(u32)1000) * (1 << 14)) / (p_limit2 * (u32)100); } dte_data->tdep_r[1] = dte_data->r[4] * 2; for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) { dte_data->tdep_r[i] = dte_data->r[4]; } } else { DRM_ERROR("Invalid PL2! 
DTE will not be updated.\n"); } } static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev) { struct rv7xx_power_info *pi = adev->pm.dpm.priv; return pi; } static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev) { struct ni_power_info *pi = adev->pm.dpm.priv; return pi; } static struct si_ps *si_get_ps(struct amdgpu_ps *aps) { struct si_ps *ps = aps->ps_priv; return ps; } static void si_initialize_powertune_defaults(struct amdgpu_device *adev) { struct ni_power_info *ni_pi = ni_get_pi(adev); struct si_power_info *si_pi = si_get_pi(adev); bool update_dte_from_pl2 = false; if (adev->asic_type == CHIP_TAHITI) { si_pi->cac_weights = cac_weights_tahiti; si_pi->lcac_config = lcac_tahiti; si_pi->cac_override = cac_override_tahiti; si_pi->powertune_data = &powertune_data_tahiti; si_pi->dte_data = dte_data_tahiti; switch (adev->pdev->device) { case 0x6798: si_pi->dte_data.enable_dte_by_default = true; break; case 0x6799: si_pi->dte_data = dte_data_new_zealand; break; case 0x6790: case 0x6791: case 0x6792: case 0x679E: si_pi->dte_data = dte_data_aruba_pro; update_dte_from_pl2 = true; break; case 0x679B: si_pi->dte_data = dte_data_malta; update_dte_from_pl2 = true; break; case 0x679A: si_pi->dte_data = dte_data_tahiti_pro; update_dte_from_pl2 = true; break; default: if (si_pi->dte_data.enable_dte_by_default == true) DRM_ERROR("DTE is not enabled!\n"); break; } } else if (adev->asic_type == CHIP_PITCAIRN) { si_pi->cac_weights = cac_weights_pitcairn; si_pi->lcac_config = lcac_pitcairn; si_pi->cac_override = cac_override_pitcairn; si_pi->powertune_data = &powertune_data_pitcairn; switch (adev->pdev->device) { case 0x6810: case 0x6818: si_pi->dte_data = dte_data_curacao_xt; update_dte_from_pl2 = true; break; case 0x6819: case 0x6811: si_pi->dte_data = dte_data_curacao_pro; update_dte_from_pl2 = true; break; case 0x6800: case 0x6806: si_pi->dte_data = dte_data_neptune_xt; update_dte_from_pl2 = true; break; default: si_pi->dte_data = dte_data_pitcairn; break; } } else if (adev->asic_type == CHIP_VERDE) { si_pi->lcac_config = lcac_cape_verde; si_pi->cac_override = cac_override_cape_verde; si_pi->powertune_data = &powertune_data_cape_verde; switch (adev->pdev->device) { case 0x683B: case 0x683F: case 0x6829: case 0x6835: si_pi->cac_weights = cac_weights_cape_verde_pro; si_pi->dte_data = dte_data_cape_verde; break; case 0x682C: si_pi->cac_weights = cac_weights_cape_verde_pro; si_pi->dte_data = dte_data_sun_xt; break; case 0x6825: case 0x6827: si_pi->cac_weights = cac_weights_heathrow; si_pi->dte_data = dte_data_cape_verde; break; case 0x6824: case 0x682D: si_pi->cac_weights = cac_weights_chelsea_xt; si_pi->dte_data = dte_data_cape_verde; break; case 0x682F: si_pi->cac_weights = cac_weights_chelsea_pro; si_pi->dte_data = dte_data_cape_verde; break; case 0x6820: si_pi->cac_weights = cac_weights_heathrow; si_pi->dte_data = dte_data_venus_xtx; break; case 0x6821: si_pi->cac_weights = cac_weights_heathrow; si_pi->dte_data = dte_data_venus_xt; break; case 0x6823: case 0x682B: case 0x6822: case 0x682A: si_pi->cac_weights = cac_weights_chelsea_pro; si_pi->dte_data = dte_data_venus_pro; break; default: si_pi->cac_weights = cac_weights_cape_verde; si_pi->dte_data = dte_data_cape_verde; break; } } else if (adev->asic_type == CHIP_OLAND) { si_pi->lcac_config = lcac_mars_pro; si_pi->cac_override = cac_override_oland; si_pi->powertune_data = &powertune_data_mars_pro; si_pi->dte_data = dte_data_mars_pro; switch (adev->pdev->device) { case 0x6601: case 0x6621: case 0x6603: case 0x6605: 
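/* these device IDs pick the Mars Pro CAC weights and a PL2-based DTE update */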
si_pi->cac_weights = cac_weights_mars_pro; update_dte_from_pl2 = true; break; case 0x6600: case 0x6606: case 0x6620: case 0x6604: si_pi->cac_weights = cac_weights_mars_xt; update_dte_from_pl2 = true; break; case 0x6611: case 0x6613: case 0x6608: si_pi->cac_weights = cac_weights_oland_pro; update_dte_from_pl2 = true; break; case 0x6610: si_pi->cac_weights = cac_weights_oland_xt; update_dte_from_pl2 = true; break; default: si_pi->cac_weights = cac_weights_oland; si_pi->lcac_config = lcac_oland; si_pi->cac_override = cac_override_oland; si_pi->powertune_data = &powertune_data_oland; si_pi->dte_data = dte_data_oland; break; } } else if (adev->asic_type == CHIP_HAINAN) { si_pi->cac_weights = cac_weights_hainan; si_pi->lcac_config = lcac_oland; si_pi->cac_override = cac_override_oland; si_pi->powertune_data = &powertune_data_hainan; si_pi->dte_data = dte_data_sun_xt; update_dte_from_pl2 = true; } else { DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n"); return; } ni_pi->enable_power_containment = false; ni_pi->enable_cac = false; ni_pi->enable_sq_ramping = false; si_pi->enable_dte = false; if (si_pi->powertune_data->enable_powertune_by_default) { ni_pi->enable_power_containment = true; ni_pi->enable_cac = true; if (si_pi->dte_data.enable_dte_by_default) { si_pi->enable_dte = true; if (update_dte_from_pl2) si_update_dte_from_pl2(adev, &si_pi->dte_data); } ni_pi->enable_sq_ramping = true; } ni_pi->driver_calculate_cac_leakage = true; ni_pi->cac_configuration_required = true; if (ni_pi->cac_configuration_required) { ni_pi->support_cac_long_term_average = true; si_pi->dyn_powertune_data.l2_lta_window_size = si_pi->powertune_data->l2_lta_window_size_default; si_pi->dyn_powertune_data.lts_truncate = si_pi->powertune_data->lts_truncate_default; } else { ni_pi->support_cac_long_term_average = false; si_pi->dyn_powertune_data.l2_lta_window_size = 0; si_pi->dyn_powertune_data.lts_truncate = 0; } si_pi->dyn_powertune_data.disable_uvd_powertune = false; } static u32 si_get_smc_power_scaling_factor(struct amdgpu_device *adev) { return 1; } static u32 si_calculate_cac_wintime(struct amdgpu_device *adev) { u32 xclk; u32 wintime; u32 cac_window; u32 cac_window_size; xclk = amdgpu_asic_get_xclk(adev); if (xclk == 0) return 0; cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK; cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF); wintime = (cac_window_size * 100) / xclk; return wintime; } static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor) { return power_in_watts; } static int si_calculate_adjusted_tdp_limits(struct amdgpu_device *adev, bool adjust_polarity, u32 tdp_adjustment, u32 *tdp_limit, u32 *near_tdp_limit) { u32 adjustment_delta, max_tdp_limit; if (tdp_adjustment > (u32)adev->pm.dpm.tdp_od_limit) return -EINVAL; max_tdp_limit = ((100 + 100) * adev->pm.dpm.tdp_limit) / 100; if (adjust_polarity) { *tdp_limit = ((100 + tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100; *near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - adev->pm.dpm.tdp_limit); } else { *tdp_limit = ((100 - tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100; adjustment_delta = adev->pm.dpm.tdp_limit - *tdp_limit; if (adjustment_delta < adev->pm.dpm.near_tdp_limit_adjusted) *near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta; else *near_tdp_limit = 0; } if ((*tdp_limit <= 0) || (*tdp_limit > max_tdp_limit)) return -EINVAL; if ((*near_tdp_limit <= 0) || (*near_tdp_limit > *tdp_limit)) return -EINVAL; return 0; } static int 
si_populate_smc_tdp_limits(struct amdgpu_device *adev,
			   struct amdgpu_ps *amdgpu_state)
{
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);

	if (ni_pi->enable_power_containment) {
		SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
		PP_SIslands_PAPMParameters *papm_parm;
		struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
		u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
		u32 tdp_limit;
		u32 near_tdp_limit;
		int ret;

		if (scaling_factor == 0)
			return -EINVAL;

		memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));

		ret = si_calculate_adjusted_tdp_limits(adev,
						       false, /* ??? */
						       adev->pm.dpm.tdp_adjustment,
						       &tdp_limit,
						       &near_tdp_limit);
		if (ret)
			return ret;

		smc_table->dpm2Params.TDPLimit =
			cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000);
		smc_table->dpm2Params.NearTDPLimit =
			cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000);
		smc_table->dpm2Params.SafePowerLimit =
			cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100,
							   scaling_factor) * 1000);

		ret = amdgpu_si_copy_bytes_to_smc(adev,
						  (si_pi->state_table_start +
						   offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
						   offsetof(PP_SIslands_DPM2Parameters, TDPLimit)),
						  (u8 *)(&(smc_table->dpm2Params.TDPLimit)),
						  sizeof(u32) * 3,
						  si_pi->sram_end);
		if (ret)
			return ret;

		if (si_pi->enable_ppm) {
			papm_parm = &si_pi->papm_parm;
			memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters));
			papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp);
			papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max);
			papm_parm->dGPU_T_Warning = cpu_to_be32(95);
			papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5);
			papm_parm->PlatformPowerLimit = 0xffffffff;
			papm_parm->NearTDPLimitPAPM = 0xffffffff;

			ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->papm_cfg_table_start,
							  (u8 *)papm_parm,
							  sizeof(PP_SIslands_PAPMParameters),
							  si_pi->sram_end);
			if (ret)
				return ret;
		}
	}
	return 0;
}

static int si_populate_smc_tdp_limits_2(struct amdgpu_device *adev,
					struct amdgpu_ps *amdgpu_state)
{
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);

	if (ni_pi->enable_power_containment) {
		SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
		u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
		int ret;

		memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));

		smc_table->dpm2Params.NearTDPLimit =
			cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000);
		smc_table->dpm2Params.SafePowerLimit =
			cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100,
							   scaling_factor) * 1000);

		ret = amdgpu_si_copy_bytes_to_smc(adev,
						  (si_pi->state_table_start +
						   offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
						   offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit)),
						  (u8 *)(&(smc_table->dpm2Params.NearTDPLimit)),
						  sizeof(u32) * 2,
						  si_pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

static u16 si_calculate_power_efficiency_ratio(struct amdgpu_device *adev,
					       const u16 prev_std_vddc,
					       const u16 curr_std_vddc)
{
	u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN;
	u64 prev_vddc = (u64)prev_std_vddc;
	u64 curr_vddc = (u64)curr_std_vddc;
	u64 pwr_efficiency_ratio, n, d;

	if ((prev_vddc == 0) || (curr_vddc == 0))
		return 0;

	/* ratio = 1024 * Vcurr^2 * (1000 + margin) / (1000 * Vprev^2), 0 if it overflows u16 */
	n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000);
	d = prev_vddc * prev_vddc;
	pwr_efficiency_ratio = div64_u64(n, d);

	if (pwr_efficiency_ratio > (u64)0xFFFF)
		return 0;

	return (u16)pwr_efficiency_ratio;
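/*
 * As implemented below, UVD-specific power tuning is suppressed when the
 * platform sets disable_uvd_powertune and the state carries nonzero UVD
 * clocks (vclk/dclk).
 */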
} static bool si_should_disable_uvd_powertune(struct amdgpu_device *adev, struct amdgpu_ps *amdgpu_state) { struct si_power_info *si_pi = si_get_pi(adev); if (si_pi->dyn_powertune_data.disable_uvd_powertune && amdgpu_state->vclk && amdgpu_state->dclk) return true; return false; } struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev) { struct evergreen_power_info *pi = adev->pm.dpm.priv; return pi; } static int si_populate_power_containment_values(struct amdgpu_device *adev, struct amdgpu_ps *amdgpu_state, SISLANDS_SMC_SWSTATE *smc_state) { struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); struct ni_power_info *ni_pi = ni_get_pi(adev); struct si_ps *state = si_get_ps(amdgpu_state); SISLANDS_SMC_VOLTAGE_VALUE vddc; u32 prev_sclk; u32 max_sclk; u32 min_sclk; u16 prev_std_vddc; u16 curr_std_vddc; int i; u16 pwr_efficiency_ratio; u8 max_ps_percent; bool disable_uvd_power_tune; int ret; if (ni_pi->enable_power_containment == false) return 0; if (state->performance_level_count == 0) return -EINVAL; if (smc_state->levelCount != state->performance_level_count) return -EINVAL; disable_uvd_power_tune = si_should_disable_uvd_powertune(adev, amdgpu_state); smc_state->levels[0].dpm2.MaxPS = 0; smc_state->levels[0].dpm2.NearTDPDec = 0; smc_state->levels[0].dpm2.AboveSafeInc = 0; smc_state->levels[0].dpm2.BelowSafeInc = 0; smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0; for (i = 1; i < state->performance_level_count; i++) { prev_sclk = state->performance_levels[i-1].sclk; max_sclk = state->performance_levels[i].sclk; if (i == 1) max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M; else max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H; if (prev_sclk > max_sclk) return -EINVAL; if ((max_ps_percent == 0) || (prev_sclk == max_sclk) || disable_uvd_power_tune) min_sclk = max_sclk; else if (i == 1) min_sclk = prev_sclk; else min_sclk = (prev_sclk * (u32)max_ps_percent) / 100; if (min_sclk < state->performance_levels[0].sclk) min_sclk = state->performance_levels[0].sclk; if (min_sclk == 0) return -EINVAL; ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, state->performance_levels[i-1].vddc, &vddc); if (ret) return ret; ret = si_get_std_voltage_value(adev, &vddc, &prev_std_vddc); if (ret) return ret; ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, state->performance_levels[i].vddc, &vddc); if (ret) return ret; ret = si_get_std_voltage_value(adev, &vddc, &curr_std_vddc); if (ret) return ret; pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(adev, prev_std_vddc, curr_std_vddc); smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk); smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC; smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC; smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC; smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio); } return 0; } static int si_populate_sq_ramping_values(struct amdgpu_device *adev, struct amdgpu_ps *amdgpu_state, SISLANDS_SMC_SWSTATE *smc_state) { struct ni_power_info *ni_pi = ni_get_pi(adev); struct si_ps *state = si_get_ps(amdgpu_state); u32 sq_power_throttle, sq_power_throttle2; bool enable_sq_ramping = ni_pi->enable_sq_ramping; int i; if (state->performance_level_count == 0) return -EINVAL; if (smc_state->levelCount != state->performance_level_count) return -EINVAL; if (adev->pm.dpm.sq_ramping_threshold == 0) return -EINVAL; if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > 
(MAX_POWER_MASK >> MAX_POWER_SHIFT)) enable_sq_ramping = false; if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT)) enable_sq_ramping = false; if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT)) enable_sq_ramping = false; if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) enable_sq_ramping = false; if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) enable_sq_ramping = false; for (i = 0; i < state->performance_level_count; i++) { sq_power_throttle = 0; sq_power_throttle2 = 0; if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) && enable_sq_ramping) { sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER); sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER); sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA); sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE); sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO); } else { sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK; sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; } smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle); smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2); } return 0; } static int si_enable_power_containment(struct amdgpu_device *adev, struct amdgpu_ps *amdgpu_new_state, bool enable) { struct ni_power_info *ni_pi = ni_get_pi(adev); PPSMC_Result smc_result; int ret = 0; if (ni_pi->enable_power_containment) { if (enable) { if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) { smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive); if (smc_result != PPSMC_Result_OK) { ret = -EINVAL; ni_pi->pc_enabled = false; } else { ni_pi->pc_enabled = true; } } } else { smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive); if (smc_result != PPSMC_Result_OK) ret = -EINVAL; ni_pi->pc_enabled = false; } } return ret; } static int si_initialize_smc_dte_tables(struct amdgpu_device *adev) { struct si_power_info *si_pi = si_get_pi(adev); int ret = 0; struct si_dte_data *dte_data = &si_pi->dte_data; Smc_SIslands_DTE_Configuration *dte_tables = NULL; u32 table_size; u8 tdep_count; u32 i; if (dte_data == NULL) si_pi->enable_dte = false; if (si_pi->enable_dte == false) return 0; if (dte_data->k <= 0) return -EINVAL; dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL); if (dte_tables == NULL) { si_pi->enable_dte = false; return -ENOMEM; } table_size = dte_data->k; if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES) table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES; tdep_count = dte_data->tdep_count; if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE) tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; dte_tables->K = cpu_to_be32(table_size); dte_tables->T0 = cpu_to_be32(dte_data->t0); dte_tables->MaxT = cpu_to_be32(dte_data->max_t); dte_tables->WindowSize = dte_data->window_size; dte_tables->temp_select = dte_data->temp_select; dte_tables->DTE_mode = dte_data->dte_mode; dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold); if (tdep_count > 0) table_size--; for (i = 0; i < table_size; i++) { dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]); dte_tables->R[i] = cpu_to_be32(dte_data->r[i]); } dte_tables->Tdep_count = tdep_count; for (i = 0; i < (u32)tdep_count; i++) { dte_tables->T_limits[i] = dte_data->t_limits[i]; dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]); 
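/* each Tdep entry pairs a temperature limit with tau/R filter coefficients */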
dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]); } ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->dte_table_start, (u8 *)dte_tables, sizeof(Smc_SIslands_DTE_Configuration), si_pi->sram_end); kfree(dte_tables); return ret; } static int si_get_cac_std_voltage_max_min(struct amdgpu_device *adev, u16 *max, u16 *min) { struct si_power_info *si_pi = si_get_pi(adev); struct amdgpu_cac_leakage_table *table = &adev->pm.dpm.dyn_state.cac_leakage_table; u32 i; u32 v0_loadline; if (table == NULL) return -EINVAL; *max = 0; *min = 0xFFFF; for (i = 0; i < table->count; i++) { if (table->entries[i].vddc > *max) *max = table->entries[i].vddc; if (table->entries[i].vddc < *min) *min = table->entries[i].vddc; } if (si_pi->powertune_data->lkge_lut_v0_percent > 100) return -EINVAL; v0_loadline = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100; if (v0_loadline > 0xFFFFUL) return -EINVAL; *min = (u16)v0_loadline; if ((*min > *max) || (*max == 0) || (*min == 0)) return -EINVAL; return 0; } static u16 si_get_cac_std_voltage_step(u16 max, u16 min) { return ((max - min) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) / SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; } static int si_init_dte_leakage_table(struct amdgpu_device *adev, PP_SIslands_CacConfig *cac_tables, u16 vddc_max, u16 vddc_min, u16 vddc_step, u16 t0, u16 t_step) { struct si_power_info *si_pi = si_get_pi(adev); u32 leakage; unsigned int i, j; s32 t; u32 smc_leakage; u32 scaling_factor; u16 voltage; scaling_factor = si_get_smc_power_scaling_factor(adev); for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++) { t = (1000 * (i * t_step + t0)); for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) { voltage = vddc_max - (vddc_step * j); si_calculate_leakage_for_v_and_t(adev, &si_pi->powertune_data->leakage_coefficients, voltage, t, si_pi->dyn_powertune_data.cac_leakage, &leakage); smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4; if (smc_leakage > 0xFFFF) smc_leakage = 0xFFFF; cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] = cpu_to_be16((u16)smc_leakage); } } return 0; } static int si_init_simplified_leakage_table(struct amdgpu_device *adev, PP_SIslands_CacConfig *cac_tables, u16 vddc_max, u16 vddc_min, u16 vddc_step) { struct si_power_info *si_pi = si_get_pi(adev); u32 leakage; unsigned int i, j; u32 smc_leakage; u32 scaling_factor; u16 voltage; scaling_factor = si_get_smc_power_scaling_factor(adev); for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) { voltage = vddc_max - (vddc_step * j); si_calculate_leakage_for_v(adev, &si_pi->powertune_data->leakage_coefficients, si_pi->powertune_data->fixed_kt, voltage, si_pi->dyn_powertune_data.cac_leakage, &leakage); smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4; if (smc_leakage > 0xFFFF) smc_leakage = 0xFFFF; for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++) cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] = cpu_to_be16((u16)smc_leakage); } return 0; } static int si_initialize_smc_cac_tables(struct amdgpu_device *adev) { struct ni_power_info *ni_pi = ni_get_pi(adev); struct si_power_info *si_pi = si_get_pi(adev); PP_SIslands_CacConfig *cac_tables = NULL; u16 vddc_max, vddc_min, vddc_step; u16 t0, t_step; u32 load_line_slope, reg; int ret = 0; u32 ticks_per_us = amdgpu_asic_get_xclk(adev) / 100; if (ni_pi->enable_cac == false) return 0; cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL); if (!cac_tables) return -ENOMEM; reg = 
RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK; reg |= CAC_WINDOW(si_pi->powertune_data->cac_window); WREG32(CG_CAC_CTRL, reg); si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage; si_pi->dyn_powertune_data.dc_pwr_value = si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0]; si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(adev); si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default; si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000; ret = si_get_cac_std_voltage_max_min(adev, &vddc_max, &vddc_min); if (ret) goto done_free; vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min); vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)); t_step = 4; t0 = 60; if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage) ret = si_init_dte_leakage_table(adev, cac_tables, vddc_max, vddc_min, vddc_step, t0, t_step); else ret = si_init_simplified_leakage_table(adev, cac_tables, vddc_max, vddc_min, vddc_step); if (ret) goto done_free; load_line_slope = ((u32)adev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100; cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size); cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate; cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n; cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min); cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step); cac_tables->R_LL = cpu_to_be32(load_line_slope); cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime); cac_tables->calculation_repeats = cpu_to_be32(2); cac_tables->dc_cac = cpu_to_be32(0); cac_tables->log2_PG_LKG_SCALE = 12; cac_tables->cac_temp = si_pi->powertune_data->operating_temp; cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0); cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step); ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->cac_table_start, (u8 *)cac_tables, sizeof(PP_SIslands_CacConfig), si_pi->sram_end); if (ret) goto done_free; ret = si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us); done_free: if (ret) { ni_pi->enable_cac = false; ni_pi->enable_power_containment = false; } kfree(cac_tables); return ret; } static int si_program_cac_config_registers(struct amdgpu_device *adev, const struct si_cac_config_reg *cac_config_regs) { const struct si_cac_config_reg *config_regs = cac_config_regs; u32 data = 0, offset; if (!config_regs) return -EINVAL; while (config_regs->offset != 0xFFFFFFFF) { switch (config_regs->type) { case SISLANDS_CACCONFIG_CGIND: offset = SMC_CG_IND_START + config_regs->offset; if (offset < SMC_CG_IND_END) data = RREG32_SMC(offset); break; default: data = RREG32(config_regs->offset); break; } data &= ~config_regs->mask; data |= ((config_regs->value << config_regs->shift) & config_regs->mask); switch (config_regs->type) { case SISLANDS_CACCONFIG_CGIND: offset = SMC_CG_IND_START + config_regs->offset; if (offset < SMC_CG_IND_END) WREG32_SMC(offset, data); break; default: WREG32(config_regs->offset, data); break; } config_regs++; } return 0; } static int si_initialize_hardware_cac_manager(struct amdgpu_device *adev) { struct ni_power_info *ni_pi = ni_get_pi(adev); struct si_power_info *si_pi = si_get_pi(adev); int ret; if ((ni_pi->enable_cac == false) || (ni_pi->cac_configuration_required == false)) return 0; ret = si_program_cac_config_registers(adev, si_pi->lcac_config); if (ret) return ret; ret = si_program_cac_config_registers(adev, si_pi->cac_override); if (ret) return ret; ret = 
si_program_cac_config_registers(adev, si_pi->cac_weights); if (ret) return ret; return 0; } static int si_enable_smc_cac(struct amdgpu_device *adev, struct amdgpu_ps *amdgpu_new_state, bool enable) { struct ni_power_info *ni_pi = ni_get_pi(adev); struct si_power_info *si_pi = si_get_pi(adev); PPSMC_Result smc_result; int ret = 0; if (ni_pi->enable_cac) { if (enable) { if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) { if (ni_pi->support_cac_long_term_average) { smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgEnable); if (smc_result != PPSMC_Result_OK) ni_pi->support_cac_long_term_average = false; } smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableCac); if (smc_result != PPSMC_Result_OK) { ret = -EINVAL; ni_pi->cac_enabled = false; } else { ni_pi->cac_enabled = true; } if (si_pi->enable_dte) { smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE); if (smc_result != PPSMC_Result_OK) ret = -EINVAL; } } } else if (ni_pi->cac_enabled) { if (si_pi->enable_dte) smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE); smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableCac); ni_pi->cac_enabled = false; if (ni_pi->support_cac_long_term_average) smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgDisable); } } return ret; } static int si_init_smc_spll_table(struct amdgpu_device *adev) { struct ni_power_info *ni_pi = ni_get_pi(adev); struct si_power_info *si_pi = si_get_pi(adev); SMC_SISLANDS_SPLL_DIV_TABLE *spll_table; SISLANDS_SMC_SCLK_VALUE sclk_params; u32 fb_div, p_div; u32 clk_s, clk_v; u32 sclk = 0; int ret = 0; u32 tmp; int i; if (si_pi->spll_table_start == 0) return -EINVAL; spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL); if (spll_table == NULL) return -ENOMEM; for (i = 0; i < 256; i++) { ret = si_calculate_sclk_params(adev, sclk, &sclk_params); if (ret) break; p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT; fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT; clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT; clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT; fb_div &= ~0x00001FFF; fb_div >>= 1; clk_v >>= 6; if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT)) ret = -EINVAL; if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT)) ret = -EINVAL; if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT)) ret = -EINVAL; if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT)) ret = -EINVAL; if (ret) break; tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) | ((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK); spll_table->freq[i] = cpu_to_be32(tmp); tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) | ((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK); spll_table->ss[i] = cpu_to_be32(tmp); sclk += 512; } if (!ret) ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->spll_table_start, (u8 *)spll_table, sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), si_pi->sram_end); if (ret) ni_pi->enable_power_containment = false; kfree(spll_table); return ret; } struct si_dpm_quirk { u32 chip_vendor; u32 chip_device; u32 subsys_vendor; u32 subsys_device; u32 
max_sclk;
	u32 max_mclk;
};

/* cards with dpm stability problems */
static struct si_dpm_quirk si_dpm_quirk_list[] = {
	/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
	{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
	{ 0, 0, 0, 0 },
};

static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev,
						   u16 vce_voltage)
{
	u16 highest_leakage = 0;
	struct si_power_info *si_pi = si_get_pi(adev);
	int i;

	for (i = 0; i < si_pi->leakage_voltage.count; i++) {
		if (highest_leakage < si_pi->leakage_voltage.entries[i].voltage)
			highest_leakage = si_pi->leakage_voltage.entries[i].voltage;
	}

	if (si_pi->leakage_voltage.count && (highest_leakage < vce_voltage))
		return highest_leakage;

	return vce_voltage;
}

static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
				    u32 evclk, u32 ecclk, u16 *voltage)
{
	u32 i;
	int ret = -EINVAL;
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	if (((evclk == 0) && (ecclk == 0)) ||
	    (table && (table->count == 0))) {
		*voltage = 0;
		return 0;
	}

	for (i = 0; i < table->count; i++) {
		if ((evclk <= table->entries[i].evclk) &&
		    (ecclk <= table->entries[i].ecclk)) {
			*voltage = table->entries[i].v;
			ret = 0;
			break;
		}
	}

	/* if no match return the highest voltage */
	if (ret)
		*voltage = table->entries[table->count - 1].v;

	*voltage = si_get_lower_of_leakage_and_vce_voltage(adev, *voltage);

	return ret;
}

static bool si_dpm_vblank_too_short(struct amdgpu_device *adev)
{
	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
	/* we never hit the non-gddr5 limit so disable it */
	u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ?
450 : 0; if (vblank_time < switch_limit) return true; else return false; } static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev, u32 arb_freq_src, u32 arb_freq_dest) { u32 mc_arb_dram_timing; u32 mc_arb_dram_timing2; u32 burst_time; u32 mc_cg_config; switch (arb_freq_src) { case MC_CG_ARB_FREQ_F0: mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING); mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT; break; case MC_CG_ARB_FREQ_F1: mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1); mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1); burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT; break; case MC_CG_ARB_FREQ_F2: mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2); mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2); burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT; break; case MC_CG_ARB_FREQ_F3: mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3); mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3); burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT; break; default: return -EINVAL; } switch (arb_freq_dest) { case MC_CG_ARB_FREQ_F0: WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing); WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK); break; case MC_CG_ARB_FREQ_F1: WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK); break; case MC_CG_ARB_FREQ_F2: WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing); WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2); WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK); break; case MC_CG_ARB_FREQ_F3: WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing); WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2); WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK); break; default: return -EINVAL; } mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F; WREG32(MC_CG_CONFIG, mc_cg_config); WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK); return 0; } static void ni_update_current_ps(struct amdgpu_device *adev, struct amdgpu_ps *rps) { struct si_ps *new_ps = si_get_ps(rps); struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); struct ni_power_info *ni_pi = ni_get_pi(adev); eg_pi->current_rps = *rps; ni_pi->current_ps = *new_ps; eg_pi->current_rps.ps_priv = &ni_pi->current_ps; adev->pm.dpm.current_ps = &eg_pi->current_rps; } static void ni_update_requested_ps(struct amdgpu_device *adev, struct amdgpu_ps *rps) { struct si_ps *new_ps = si_get_ps(rps); struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); struct ni_power_info *ni_pi = ni_get_pi(adev); eg_pi->requested_rps = *rps; ni_pi->requested_ps = *new_ps; eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps; adev->pm.dpm.requested_ps = &eg_pi->requested_rps; } static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev, struct amdgpu_ps *new_ps, struct amdgpu_ps *old_ps) { struct si_ps *new_state = si_get_ps(new_ps); struct si_ps *current_state = si_get_ps(old_ps); if ((new_ps->vclk == old_ps->vclk) && (new_ps->dclk == old_ps->dclk)) return; if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >= current_state->performance_levels[current_state->performance_level_count - 1].sclk) return; amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk); } static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev, struct 
amdgpu_ps *new_ps, struct amdgpu_ps *old_ps) { struct si_ps *new_state = si_get_ps(new_ps); struct si_ps *current_state = si_get_ps(old_ps); if ((new_ps->vclk == old_ps->vclk) && (new_ps->dclk == old_ps->dclk)) return; if (new_state->performance_levels[new_state->performance_level_count - 1].sclk < current_state->performance_levels[current_state->performance_level_count - 1].sclk) return; amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk); } static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage) { unsigned int i; for (i = 0; i < table->count; i++) if (voltage <= table->entries[i].value) return table->entries[i].value; return table->entries[table->count - 1].value; } static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks, u32 max_clock, u32 requested_clock) { unsigned int i; if ((clocks == NULL) || (clocks->count == 0)) return (requested_clock < max_clock) ? requested_clock : max_clock; for (i = 0; i < clocks->count; i++) { if (clocks->values[i] >= requested_clock) return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock; } return (clocks->values[clocks->count - 1] < max_clock) ? clocks->values[clocks->count - 1] : max_clock; } static u32 btc_get_valid_mclk(struct amdgpu_device *adev, u32 max_mclk, u32 requested_mclk) { return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values, max_mclk, requested_mclk); } static u32 btc_get_valid_sclk(struct amdgpu_device *adev, u32 max_sclk, u32 requested_sclk) { return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values, max_sclk, requested_sclk); } static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table, u32 *max_clock) { u32 i, clock = 0; if ((table == NULL) || (table->count == 0)) { *max_clock = clock; return; } for (i = 0; i < table->count; i++) { if (clock < table->entries[i].clk) clock = table->entries[i].clk; } *max_clock = clock; } static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table, u32 clock, u16 max_voltage, u16 *voltage) { u32 i; if ((table == NULL) || (table->count == 0)) return; for (i= 0; i < table->count; i++) { if (clock <= table->entries[i].clk) { if (*voltage < table->entries[i].v) *voltage = (u16)((table->entries[i].v < max_voltage) ? table->entries[i].v : max_voltage); return; } } *voltage = (*voltage > max_voltage) ? 
*voltage : max_voltage; } static void btc_adjust_clock_combinations(struct amdgpu_device *adev, const struct amdgpu_clock_and_voltage_limits *max_limits, struct rv7xx_pl *pl) { if ((pl->mclk == 0) || (pl->sclk == 0)) return; if (pl->mclk == pl->sclk) return; if (pl->mclk > pl->sclk) { if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio) pl->sclk = btc_get_valid_sclk(adev, max_limits->sclk, (pl->mclk + (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) / adev->pm.dpm.dyn_state.mclk_sclk_ratio); } else { if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta) pl->mclk = btc_get_valid_mclk(adev, max_limits->mclk, pl->sclk - adev->pm.dpm.dyn_state.sclk_mclk_delta); } } static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev, u16 max_vddc, u16 max_vddci, u16 *vddc, u16 *vddci) { struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); u16 new_voltage; if ((0 == *vddc) || (0 == *vddci)) return; if (*vddc > *vddci) { if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) { new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table, (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta)); *vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci; } } else { if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) { new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table, (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta)); *vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc; } } } static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev, u32 sys_mask, enum amdgpu_pcie_gen asic_gen, enum amdgpu_pcie_gen default_gen) { switch (asic_gen) { case AMDGPU_PCIE_GEN1: return AMDGPU_PCIE_GEN1; case AMDGPU_PCIE_GEN2: return AMDGPU_PCIE_GEN2; case AMDGPU_PCIE_GEN3: return AMDGPU_PCIE_GEN3; default: if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3)) return AMDGPU_PCIE_GEN3; else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2)) return AMDGPU_PCIE_GEN2; else return AMDGPU_PCIE_GEN1; } return AMDGPU_PCIE_GEN1; } static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, u32 *p, u32 *u) { u32 b_c = 0; u32 i_c; u32 tmp; i_c = (i * r_c) / 100; tmp = i_c >> p_b; while (tmp) { b_c++; tmp >>= 1; } *u = (b_c + 1) / 2; *p = i_c / (1 << (2 * (*u))); } static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th) { u32 k, a, ah, al; u32 t1; if ((fl == 0) || (fh == 0) || (fl > fh)) return -EINVAL; k = (100 * fh) / fl; t1 = (t * (k - 100)); a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100)); a = (a + 5) / 10; ah = ((a * t) + 5000) / 10000; al = a - ah; *th = t - ah; *tl = t + al; return 0; } static bool r600_is_uvd_state(u32 class, u32 class2) { if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) return true; if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) return true; if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) return true; if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) return true; if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) return true; return false; } static u8 rv770_get_memory_module_index(struct amdgpu_device *adev) { return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff); } static void rv770_get_max_vddc(struct amdgpu_device *adev) { struct rv7xx_power_info *pi = rv770_get_pi(adev); u16 vddc; if (amdgpu_atombios_get_max_vddc(adev, 0, 0, &vddc)) pi->max_vddc = 0; else pi->max_vddc = vddc; } static void rv770_get_engine_memory_ss(struct amdgpu_device *adev) { struct rv7xx_power_info *pi = rv770_get_pi(adev); struct amdgpu_atom_ss ss; pi->sclk_ss = 
amdgpu_atombios_get_asic_ss_info(adev, &ss, ASIC_INTERNAL_ENGINE_SS, 0); pi->mclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss, ASIC_INTERNAL_MEMORY_SS, 0); if (pi->sclk_ss || pi->mclk_ss) pi->dynamic_ss = true; else pi->dynamic_ss = false; } static void si_apply_state_adjust_rules(struct amdgpu_device *adev, struct amdgpu_ps *rps) { struct si_ps *ps = si_get_ps(rps); struct amdgpu_clock_and_voltage_limits *max_limits; bool disable_mclk_switching = false; bool disable_sclk_switching = false; u32 mclk, sclk; u16 vddc, vddci, min_vce_voltage = 0; u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; u32 max_sclk = 0, max_mclk = 0; int i; struct si_dpm_quirk *p = si_dpm_quirk_list; /* limit all SI kickers */ if (adev->asic_type == CHIP_PITCAIRN) { if ((adev->pdev->revision == 0x81) || (adev->pdev->device == 0x6810) || (adev->pdev->device == 0x6811) || (adev->pdev->device == 0x6816) || (adev->pdev->device == 0x6817) || (adev->pdev->device == 0x6806)) max_mclk = 120000; } else if (adev->asic_type == CHIP_VERDE) { if ((adev->pdev->revision == 0x81) || (adev->pdev->revision == 0x83) || (adev->pdev->revision == 0x87) || (adev->pdev->device == 0x6820) || (adev->pdev->device == 0x6821) || (adev->pdev->device == 0x6822) || (adev->pdev->device == 0x6823) || (adev->pdev->device == 0x682A) || (adev->pdev->device == 0x682B)) { max_sclk = 75000; max_mclk = 80000; } } else if (adev->asic_type == CHIP_OLAND) { if ((adev->pdev->revision == 0xC7) || (adev->pdev->revision == 0x80) || (adev->pdev->revision == 0x81) || (adev->pdev->revision == 0x83) || (adev->pdev->revision == 0x87) || (adev->pdev->device == 0x6604) || (adev->pdev->device == 0x6605)) { max_sclk = 75000; max_mclk = 80000; } } else if (adev->asic_type == CHIP_HAINAN) { if ((adev->pdev->revision == 0x81) || (adev->pdev->revision == 0x83) || (adev->pdev->revision == 0xC3) || (adev->pdev->device == 0x6664) || (adev->pdev->device == 0x6665) || (adev->pdev->device == 0x6667)) { max_sclk = 75000; max_mclk = 80000; } } /* Apply dpm quirks */ while (p && p->chip_device != 0) { if (adev->pdev->vendor == p->chip_vendor && adev->pdev->device == p->chip_device && adev->pdev->subsystem_vendor == p->subsys_vendor && adev->pdev->subsystem_device == p->subsys_device) { max_sclk = p->max_sclk; max_mclk = p->max_mclk; break; } ++p; } if (rps->vce_active) { rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; si_get_vce_clock_voltage(adev, rps->evclk, rps->ecclk, &min_vce_voltage); } else { rps->evclk = 0; rps->ecclk = 0; } if ((adev->pm.dpm.new_active_crtc_count > 1) || si_dpm_vblank_too_short(adev)) disable_mclk_switching = true; if (rps->vclk || rps->dclk) { disable_mclk_switching = true; disable_sclk_switching = true; } if (adev->pm.dpm.ac_power) max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; else max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc; for (i = ps->performance_level_count - 2; i >= 0; i--) { if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc) ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc; } if (adev->pm.dpm.ac_power == false) { for (i = 0; i < ps->performance_level_count; i++) { if (ps->performance_levels[i].mclk > max_limits->mclk) ps->performance_levels[i].mclk = max_limits->mclk; if (ps->performance_levels[i].sclk > max_limits->sclk) ps->performance_levels[i].sclk = max_limits->sclk; if (ps->performance_levels[i].vddc > max_limits->vddc) ps->performance_levels[i].vddc = max_limits->vddc; if 
(ps->performance_levels[i].vddci > max_limits->vddci) ps->performance_levels[i].vddci = max_limits->vddci; } } /* limit clocks to max supported clocks based on voltage dependency tables */ btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, &max_sclk_vddc); btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, &max_mclk_vddci); btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, &max_mclk_vddc); for (i = 0; i < ps->performance_level_count; i++) { if (max_sclk_vddc) { if (ps->performance_levels[i].sclk > max_sclk_vddc) ps->performance_levels[i].sclk = max_sclk_vddc; } if (max_mclk_vddci) { if (ps->performance_levels[i].mclk > max_mclk_vddci) ps->performance_levels[i].mclk = max_mclk_vddci; } if (max_mclk_vddc) { if (ps->performance_levels[i].mclk > max_mclk_vddc) ps->performance_levels[i].mclk = max_mclk_vddc; } if (max_mclk) { if (ps->performance_levels[i].mclk > max_mclk) ps->performance_levels[i].mclk = max_mclk; } if (max_sclk) { if (ps->performance_levels[i].sclk > max_sclk) ps->performance_levels[i].sclk = max_sclk; } } /* XXX validate the min clocks required for display */ if (disable_mclk_switching) { mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; } else { mclk = ps->performance_levels[0].mclk; vddci = ps->performance_levels[0].vddci; } if (disable_sclk_switching) { sclk = ps->performance_levels[ps->performance_level_count - 1].sclk; vddc = ps->performance_levels[ps->performance_level_count - 1].vddc; } else { sclk = ps->performance_levels[0].sclk; vddc = ps->performance_levels[0].vddc; } if (rps->vce_active) { if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk) mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk; } /* adjusted low state */ ps->performance_levels[0].sclk = sclk; ps->performance_levels[0].mclk = mclk; ps->performance_levels[0].vddc = vddc; ps->performance_levels[0].vddci = vddci; if (disable_sclk_switching) { sclk = ps->performance_levels[0].sclk; for (i = 1; i < ps->performance_level_count; i++) { if (sclk < ps->performance_levels[i].sclk) sclk = ps->performance_levels[i].sclk; } for (i = 0; i < ps->performance_level_count; i++) { ps->performance_levels[i].sclk = sclk; ps->performance_levels[i].vddc = vddc; } } else { for (i = 1; i < ps->performance_level_count; i++) { if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk) ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk; if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc) ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; } } if (disable_mclk_switching) { mclk = ps->performance_levels[0].mclk; for (i = 1; i < ps->performance_level_count; i++) { if (mclk < ps->performance_levels[i].mclk) mclk = ps->performance_levels[i].mclk; } for (i = 0; i < ps->performance_level_count; i++) { ps->performance_levels[i].mclk = mclk; ps->performance_levels[i].vddci = vddci; } } else { for (i = 1; i < ps->performance_level_count; i++) { if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk) ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk; if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci) ps->performance_levels[i].vddci = 
ps->performance_levels[i - 1].vddci; } } for (i = 0; i < ps->performance_level_count; i++) btc_adjust_clock_combinations(adev, max_limits, &ps->performance_levels[i]); for (i = 0; i < ps->performance_level_count; i++) { if (ps->performance_levels[i].vddc < min_vce_voltage) ps->performance_levels[i].vddc = min_vce_voltage; btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, ps->performance_levels[i].sclk, max_limits->vddc, &ps->performance_levels[i].vddc); btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, ps->performance_levels[i].mclk, max_limits->vddci, &ps->performance_levels[i].vddci); btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, ps->performance_levels[i].mclk, max_limits->vddc, &ps->performance_levels[i].vddc); btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk, adev->clock.current_dispclk, max_limits->vddc, &ps->performance_levels[i].vddc); } for (i = 0; i < ps->performance_level_count; i++) { btc_apply_voltage_delta_rules(adev, max_limits->vddc, max_limits->vddci, &ps->performance_levels[i].vddc, &ps->performance_levels[i].vddci); } ps->dc_compatible = true; for (i = 0; i < ps->performance_level_count; i++) { if (ps->performance_levels[i].vddc > adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc) ps->dc_compatible = false; } } #if 0 static int si_read_smc_soft_register(struct amdgpu_device *adev, u16 reg_offset, u32 *value) { struct si_power_info *si_pi = si_get_pi(adev); return amdgpu_si_read_smc_sram_dword(adev, si_pi->soft_regs_start + reg_offset, value, si_pi->sram_end); } #endif static int si_write_smc_soft_register(struct amdgpu_device *adev, u16 reg_offset, u32 value) { struct si_power_info *si_pi = si_get_pi(adev); return amdgpu_si_write_smc_sram_dword(adev, si_pi->soft_regs_start + reg_offset, value, si_pi->sram_end); } static bool si_is_special_1gb_platform(struct amdgpu_device *adev) { bool ret = false; u32 tmp, width, row, column, bank, density; bool is_memory_gddr5, is_special; tmp = RREG32(MC_SEQ_MISC0); is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT)); is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT)) & (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT)); WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb); width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 
16 : 32; tmp = RREG32(MC_ARB_RAMCFG); row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10; column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8; bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2; density = (1 << (row + column - 20 + bank)) * width; if ((adev->pdev->device == 0x6819) && is_memory_gddr5 && is_special && (density == 0x400)) ret = true; return ret; } static void si_get_leakage_vddc(struct amdgpu_device *adev) { struct si_power_info *si_pi = si_get_pi(adev); u16 vddc, count = 0; int i, ret; for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) { ret = amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i); if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) { si_pi->leakage_voltage.entries[count].voltage = vddc; si_pi->leakage_voltage.entries[count].leakage_index = SISLANDS_LEAKAGE_INDEX0 + i; count++; } } si_pi->leakage_voltage.count = count; } static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device *adev, u32 index, u16 *leakage_voltage) { struct si_power_info *si_pi = si_get_pi(adev); int i; if (leakage_voltage == NULL) return -EINVAL; if ((index & 0xff00) != 0xff00) return -EINVAL; if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1) return -EINVAL; if (index < SISLANDS_LEAKAGE_INDEX0) return -EINVAL; for (i = 0; i < si_pi->leakage_voltage.count; i++) { if (si_pi->leakage_voltage.entries[i].leakage_index == index) { *leakage_voltage = si_pi->leakage_voltage.entries[i].voltage; return 0; } } return -EAGAIN; } static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources) { struct rv7xx_power_info *pi = rv770_get_pi(adev); bool want_thermal_protection; enum amdgpu_dpm_event_src dpm_event_src; switch (sources) { case 0: default: want_thermal_protection = false; break; case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL): want_thermal_protection = true; dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL; break; case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL): want_thermal_protection = true; dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL; break; case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) | (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)): want_thermal_protection = true; dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL; break; } if (want_thermal_protection) { WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK); if (pi->thermal_protection) WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); } else { WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); } } static void si_enable_auto_throttle_source(struct amdgpu_device *adev, enum amdgpu_dpm_auto_throttle_src source, bool enable) { struct rv7xx_power_info *pi = rv770_get_pi(adev); if (enable) { if (!(pi->active_auto_throttle_sources & (1 << source))) { pi->active_auto_throttle_sources |= 1 << source; si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources); } } else { if (pi->active_auto_throttle_sources & (1 << source)) { pi->active_auto_throttle_sources &= ~(1 << source); si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources); } } } static void si_start_dpm(struct amdgpu_device *adev) { WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN); } static void si_stop_dpm(struct amdgpu_device *adev) { WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); } static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable) { if (enable) WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF); else WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); } #if 0 static int 
#if 0
static int si_notify_hardware_of_thermal_state(struct amdgpu_device *adev,
					       u32 thermal_level)
{
	PPSMC_Result ret;

	if (thermal_level == 0) {
		ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
		if (ret == PPSMC_Result_OK)
			return 0;
		else
			return -EINVAL;
	}
	return 0;
}

static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device *adev)
{
	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true);
}
#endif

#if 0
static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power)
{
	if (ac_power)
		return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
			0 : -EINVAL;

	return 0;
}
#endif

static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_SCRATCH0, parameter);
	return amdgpu_si_send_msg_to_smc(adev, msg);
}

static int si_restrict_performance_levels_before_switch(struct amdgpu_device *adev)
{
	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
		return -EINVAL;

	return (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int si_dpm_force_performance_level(struct amdgpu_device *adev,
					  enum amdgpu_dpm_forced_level level)
{
	struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
	struct si_ps *ps = si_get_ps(rps);
	u32 levels = ps->performance_level_count;

	if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
			return -EINVAL;

		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
			return -EINVAL;
	} else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
			return -EINVAL;

		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
			return -EINVAL;
	} else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
			return -EINVAL;

		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
			return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	return 0;
}

#if 0
static int si_set_boot_state(struct amdgpu_device *adev)
{
	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
#endif

static int si_set_sw_state(struct amdgpu_device *adev)
{
	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int si_halt_smc(struct amdgpu_device *adev)
{
	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
		return -EINVAL;

	return (amdgpu_si_wait_for_smc_inactive(adev) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int si_resume_smc(struct amdgpu_device *adev)
{
	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_FlushDataCache) != PPSMC_Result_OK)
		return -EINVAL;

	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
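/* Starting the SMC means pointing its reset vector at the loaded firmware
 * and ungating its clock; stopping is the reverse (hold in reset, gate the
 * clock).
 */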
static void si_dpm_start_smc(struct amdgpu_device *adev)
{
	amdgpu_si_program_jump_on_start(adev);
	amdgpu_si_start_smc(adev);
	amdgpu_si_smc_clock(adev, true);
}

static void si_dpm_stop_smc(struct amdgpu_device *adev)
{
	amdgpu_si_reset_smc(adev);
	amdgpu_si_smc_clock(adev, false);
}

static int si_process_firmware_header(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 tmp;
	int ret;

	ret = amdgpu_si_read_smc_sram_dword(adev,
					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					    SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
					    &tmp, si_pi->sram_end);
	if (ret)
		return ret;

	si_pi->state_table_start = tmp;

	ret = amdgpu_si_read_smc_sram_dword(adev,
					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					    SISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
					    &tmp, si_pi->sram_end);
	if (ret)
		return ret;

	si_pi->soft_regs_start = tmp;

	ret = amdgpu_si_read_smc_sram_dword(adev,
					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					    SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
					    &tmp, si_pi->sram_end);
	if (ret)
		return ret;

	si_pi->mc_reg_table_start = tmp;

	ret = amdgpu_si_read_smc_sram_dword(adev,
					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					    SISLANDS_SMC_FIRMWARE_HEADER_fanTable,
					    &tmp, si_pi->sram_end);
	if (ret)
		return ret;

	si_pi->fan_table_start = tmp;

	ret = amdgpu_si_read_smc_sram_dword(adev,
					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					    SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
					    &tmp, si_pi->sram_end);
	if (ret)
		return ret;

	si_pi->arb_table_start = tmp;

	ret = amdgpu_si_read_smc_sram_dword(adev,
					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					    SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable,
					    &tmp, si_pi->sram_end);
	if (ret)
		return ret;

	si_pi->cac_table_start = tmp;

	ret = amdgpu_si_read_smc_sram_dword(adev,
					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					    SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration,
					    &tmp, si_pi->sram_end);
	if (ret)
		return ret;

	si_pi->dte_table_start = tmp;

	ret = amdgpu_si_read_smc_sram_dword(adev,
					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					    SISLANDS_SMC_FIRMWARE_HEADER_spllTable,
					    &tmp, si_pi->sram_end);
	if (ret)
		return ret;

	si_pi->spll_table_start = tmp;

	ret = amdgpu_si_read_smc_sram_dword(adev,
					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					    SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters,
					    &tmp, si_pi->sram_end);
	if (ret)
		return ret;

	si_pi->papm_cfg_table_start = tmp;

	return ret;
}

static void si_read_clock_registers(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);

	si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
	si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
	si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
	si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
	si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
	si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
	si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
	si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
	si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
	si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}
static void si_enable_thermal_protection(struct amdgpu_device *adev,
					 bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
	else
		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
}

static void si_enable_acpi_power_management(struct amdgpu_device *adev)
{
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}

#if 0
static int si_enter_ulp_state(struct amdgpu_device *adev)
{
	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

	udelay(25000);

	return 0;
}

static int si_exit_ulp_state(struct amdgpu_device *adev)
{
	int i;

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

	udelay(7000);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(SMC_RESP_0) == 1)
			break;
		udelay(1000);
	}

	return 0;
}
#endif

static int si_notify_smc_display_change(struct amdgpu_device *adev,
					bool has_display)
{
	PPSMC_Msg msg = has_display ?
		PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;

	return (amdgpu_si_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static void si_program_response_times(struct amdgpu_device *adev)
{
	u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
	u32 vddc_dly, acpi_dly, vbi_dly;
	u32 reference_clock;

	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);

	voltage_response_time = (u32)adev->pm.dpm.voltage_response_time;
	backbias_response_time = (u32)adev->pm.dpm.backbias_response_time;

	if (voltage_response_time == 0)
		voltage_response_time = 1000;

	acpi_delay_time = 15000;
	vbi_time_out = 100000;

	reference_clock = amdgpu_asic_get_xclk(adev);

	vddc_dly = (voltage_response_time * reference_clock) / 100;
	acpi_dly = (acpi_delay_time * reference_clock) / 100;
	vbi_dly = (vbi_time_out * reference_clock) / 100;

	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly);
	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly);
	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
}

static void si_program_ds_registers(struct amdgpu_device *adev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	u32 tmp;

	/* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */
	if (adev->asic_type == CHIP_TAHITI && adev->rev_id == 0x0)
		tmp = 0x10;
	else
		tmp = 0x1;

	if (eg_pi->sclk_deep_sleep) {
		WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
		WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
			 ~AUTOSCALE_ON_SS_CLEAR);
	}
}

static void si_program_display_gap(struct amdgpu_device *adev)
{
	u32 tmp, pipe;
	int i;

	tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
	if (adev->pm.dpm.new_active_crtc_count > 0)
		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
	else
		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);

	if (adev->pm.dpm.new_active_crtc_count > 1)
		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
	else
		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);

	WREG32(CG_DISPLAY_GAP_CNTL, tmp);

	tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
	pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;

	if ((adev->pm.dpm.new_active_crtc_count > 0) &&
	    (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
		/* find the first active crtc */
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (adev->pm.dpm.new_active_crtcs & (1 << i))
				break;
		}
		if (i == adev->mode_info.num_crtc)
			pipe = 0;
		else
			pipe = i;

		tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
		tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
		WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
	}
	/* Setting this to false forces the performance state to low if the crtcs are disabled.
	 * This can be a problem on PowerXpress systems or if you want to use the card
	 * for offscreen rendering or compute if there are no crtcs enabled.
	 */
	si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0);
}

static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);

	if (enable) {
		if (pi->sclk_ss)
			WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
	} else {
		WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
		WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
	}
}

static void si_setup_bsp(struct amdgpu_device *adev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	u32 xclk = amdgpu_asic_get_xclk(adev);

	r600_calculate_u_and_p(pi->asi, xclk, 16, &pi->bsp, &pi->bsu);

	r600_calculate_u_and_p(pi->pasi, xclk, 16, &pi->pbsp, &pi->pbsu);

	pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
	pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);

	WREG32(CG_BSP, pi->dsp);
}

static void si_program_git(struct amdgpu_device *adev)
{
	WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
}

static void si_program_tp(struct amdgpu_device *adev)
{
	int i;
	enum r600_td td = R600_TD_DFLT;

	for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
		WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));

	if (td == R600_TD_AUTO)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);

	if (td == R600_TD_UP)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);

	if (td == R600_TD_DOWN)
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
}

static void si_program_tpp(struct amdgpu_device *adev)
{
	WREG32(CG_TPC, R600_TPC_DFLT);
}

static void si_program_sstp(struct amdgpu_device *adev)
{
	WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
}

static void si_enable_display_gap(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);

	tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
	tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
		DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));

	tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
	tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
		DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
}

static void si_program_vc(struct amdgpu_device *adev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);

	WREG32(CG_FTV, pi->vrc);
}

static void si_clear_vc(struct amdgpu_device *adev)
{
	WREG32(CG_FTV, 0);
}

static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
	u8 mc_para_index;

	if (memory_clock < 10000)
		mc_para_index = 0;
	else if (memory_clock >= 80000)
		mc_para_index = 0x0f;
	else
		mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
	return mc_para_index;
}

static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
	u8 mc_para_index;

	if (strobe_mode) {
		if (memory_clock < 12500)
			mc_para_index = 0x00;
		else if (memory_clock > 47500)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 10000) / 2500);
	} else {
		if (memory_clock < 65000)
			mc_para_index = 0x00;
		else if (memory_clock > 135000)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 60000) / 5000);
	}
	return mc_para_index;
}
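/* Pick strobe mode for low memory clocks and encode the matching 4-bit
 * frequency-ratio index (the GDDR5 and DDR3 helpers above use different
 * breakpoints).
 */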
static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	bool strobe_mode = false;
	u8 result = 0;

	if (mclk <= pi->mclk_strobe_mode_threshold)
		strobe_mode = true;

	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
		result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
	else
		result = si_get_ddr3_mclk_frequency_ratio(mclk);

	if (strobe_mode)
		result |= SISLANDS_SMC_STROBE_ENABLE;

	return result;
}

static int si_upload_firmware(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);

	amdgpu_si_reset_smc(adev);
	amdgpu_si_smc_clock(adev, false);

	return amdgpu_si_load_smc_ucode(adev, si_pi->sram_end);
}

static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev,
					      const struct atom_voltage_table *table,
					      const struct amdgpu_phase_shedding_limits_table *limits)
{
	u32 data, num_bits, num_levels;

	if ((table == NULL) || (limits == NULL))
		return false;

	data = table->mask_low;

	num_bits = hweight32(data);

	if (num_bits == 0)
		return false;

	num_levels = (1 << num_bits);

	if (table->count != num_levels)
		return false;

	if (limits->count != (num_levels - 1))
		return false;

	return true;
}

static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
						     u32 max_voltage_steps,
						     struct atom_voltage_table *voltage_table)
{
	unsigned int i, diff;

	if (voltage_table->count <= max_voltage_steps)
		return;

	diff = voltage_table->count - max_voltage_steps;

	for (i = 0; i < max_voltage_steps; i++)
		voltage_table->entries[i] = voltage_table->entries[i + diff];

	voltage_table->count = max_voltage_steps;
}

static int si_get_svi2_voltage_table(struct amdgpu_device *adev,
				     struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
				     struct atom_voltage_table *voltage_table)
{
	u32 i;

	if (voltage_dependency_table == NULL)
		return -EINVAL;

	voltage_table->mask_low = 0;
	voltage_table->phase_delay = 0;

	voltage_table->count = voltage_dependency_table->count;
	for (i = 0; i < voltage_table->count; i++) {
		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
		voltage_table->entries[i].smio_low = 0;
	}

	return 0;
}

static int si_construct_voltage_tables(struct amdgpu_device *adev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	int ret;

	if (pi->voltage_control) {
		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
							VOLTAGE_OBJ_GPIO_LUT,
							&eg_pi->vddc_voltage_table);
		if (ret)
			return ret;

		if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
			si_trim_voltage_table_to_fit_state_table(adev,
								 SISLANDS_MAX_NO_VREG_STEPS,
								 &eg_pi->vddc_voltage_table);
	} else if (si_pi->voltage_control_svi2) {
		ret = si_get_svi2_voltage_table(adev,
						&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						&eg_pi->vddc_voltage_table);
		if (ret)
			return ret;
	} else {
		return -EINVAL;
	}

	if (eg_pi->vddci_control) {
		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
							VOLTAGE_OBJ_GPIO_LUT,
							&eg_pi->vddci_voltage_table);
		if (ret)
			return ret;

		if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
			si_trim_voltage_table_to_fit_state_table(adev,
								 SISLANDS_MAX_NO_VREG_STEPS,
								 &eg_pi->vddci_voltage_table);
	}

	if (si_pi->vddci_control_svi2) {
		ret = si_get_svi2_voltage_table(adev,
						&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						&eg_pi->vddci_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->mvdd_control) {
		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
							VOLTAGE_OBJ_GPIO_LUT,
							&si_pi->mvdd_voltage_table);
		if (ret) {
			pi->mvdd_control = false;
			return ret;
		}

		if (si_pi->mvdd_voltage_table.count == 0) {
			pi->mvdd_control = false;
			return -EINVAL;
		}

		if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
			si_trim_voltage_table_to_fit_state_table(adev,
								 SISLANDS_MAX_NO_VREG_STEPS,
								 &si_pi->mvdd_voltage_table);
	}
	if (si_pi->vddc_phase_shed_control) {
		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
							VOLTAGE_OBJ_PHASE_LUT,
							&si_pi->vddc_phase_shed_table);
		if (ret)
			si_pi->vddc_phase_shed_control = false;

		if ((si_pi->vddc_phase_shed_table.count == 0) ||
		    (si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS))
			si_pi->vddc_phase_shed_control = false;
	}

	return 0;
}

static void si_populate_smc_voltage_table(struct amdgpu_device *adev,
					  const struct atom_voltage_table *voltage_table,
					  SISLANDS_SMC_STATETABLE *table)
{
	unsigned int i;

	for (i = 0; i < voltage_table->count; i++)
		table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
}

static int si_populate_smc_voltage_tables(struct amdgpu_device *adev,
					  SISLANDS_SMC_STATETABLE *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	u8 i;

	if (si_pi->voltage_control_svi2) {
		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc,
					   si_pi->svc_gpio_id);
		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd,
					   si_pi->svd_gpio_id);
		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type,
					   2);
	} else {
		if (eg_pi->vddc_voltage_table.count) {
			si_populate_smc_voltage_table(adev, &eg_pi->vddc_voltage_table, table);
			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
				cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);

			for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
				if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
					table->maxVDDCIndexInPPTable = i;
					break;
				}
			}
		}

		if (eg_pi->vddci_voltage_table.count) {
			si_populate_smc_voltage_table(adev, &eg_pi->vddci_voltage_table, table);

			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
				cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
		}

		if (si_pi->mvdd_voltage_table.count) {
			si_populate_smc_voltage_table(adev, &si_pi->mvdd_voltage_table, table);

			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
				cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
		}

		if (si_pi->vddc_phase_shed_control) {
			if (si_validate_phase_shedding_tables(adev, &si_pi->vddc_phase_shed_table,
							      &adev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
				si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table);

				table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
					cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);

				si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
							   (u32)si_pi->vddc_phase_shed_table.phase_delay);
			} else {
				si_pi->vddc_phase_shed_control = false;
			}
		}
	}

	return 0;
}

static int si_populate_voltage_value(struct amdgpu_device *adev,
				     const struct atom_voltage_table *table,
				     u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	unsigned int i;

	for (i = 0; i < table->count; i++) {
		if (value <= table->entries[i].value) {
			voltage->index = (u8)i;
			voltage->value = cpu_to_be16(table->entries[i].value);
			break;
		}
	}

	if (i >= table->count)
		return -EINVAL;

	return 0;
}

static int si_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
				  SISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);

	if (pi->mvdd_control) {
		if (mclk <= pi->mvdd_split_frequency)
			voltage->index = 0;
		else
			voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1;

		voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value);
	}
	return 0;
}
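/* Map a VDDC value to its "standard" (leakage-corrected) voltage using the
 * CAC leakage table when the platform provides one; otherwise the raw value
 * is passed through unchanged.
 */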
static int si_get_std_voltage_value(struct amdgpu_device *adev,
				    SISLANDS_SMC_VOLTAGE_VALUE *voltage,
				    u16 *std_voltage)
{
	u16 v_index;
	bool voltage_found = false;
	*std_voltage = be16_to_cpu(voltage->value);

	if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
		if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) {
			if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
				return -EINVAL;

			for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
				if (be16_to_cpu(voltage->value) ==
				    (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
					voltage_found = true;
					if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
						*std_voltage =
							adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
					else
						*std_voltage =
							adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
					break;
				}
			}

			if (!voltage_found) {
				for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
					if (be16_to_cpu(voltage->value) <=
					    (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
						voltage_found = true;
						if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
							*std_voltage =
								adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
						else
							*std_voltage =
								adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
						break;
					}
				}
			}
		} else {
			if ((u32)voltage->index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
				*std_voltage =
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
		}
	}

	return 0;
}

static int si_populate_std_voltage_value(struct amdgpu_device *adev,
					 u16 value, u8 index,
					 SISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	voltage->index = index;
	voltage->value = cpu_to_be16(value);

	return 0;
}

static int si_populate_phase_shedding_value(struct amdgpu_device *adev,
					    const struct amdgpu_phase_shedding_limits_table *limits,
					    u16 voltage, u32 sclk, u32 mclk,
					    SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage)
{
	unsigned int i;

	for (i = 0; i < limits->count; i++) {
		if ((voltage <= limits->entries[i].voltage) &&
		    (sclk <= limits->entries[i].sclk) &&
		    (mclk <= limits->entries[i].mclk))
			break;
	}

	smc_voltage->phase_settings = (u8)i;

	return 0;
}

static int si_init_arb_table_index(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 tmp;
	int ret;

	ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
					    &tmp, si_pi->sram_end);
	if (ret)
		return ret;

	tmp &= 0x00FFFFFF;
	tmp |= MC_CG_ARB_FREQ_F1 << 24;

	return amdgpu_si_write_smc_sram_dword(adev, si_pi->arb_table_start,
					      tmp, si_pi->sram_end);
}

static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
{
	return ni_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}

static int si_reset_to_default(struct amdgpu_device *adev)
{
	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
static int si_force_switch_to_arb_f0(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 tmp;
	int ret;

	ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
					    &tmp, si_pi->sram_end);
	if (ret)
		return ret;

	tmp = (tmp >> 24) & 0xff;

	if (tmp == MC_CG_ARB_FREQ_F0)
		return 0;

	return ni_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
}

static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,
					    u32 engine_clock)
{
	u32 dram_rows;
	u32 dram_refresh_rate;
	u32 mc_arb_rfsh_rate;
	u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;

	if (tmp >= 4)
		dram_rows = 16384;
	else
		dram_rows = 1 << (tmp + 10);

	dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
	mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;

	return mc_arb_rfsh_rate;
}

static int si_populate_memory_timing_parameters(struct amdgpu_device *adev,
						struct rv7xx_pl *pl,
						SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs)
{
	u32 dram_timing;
	u32 dram_timing2;
	u32 burst_time;

	arb_regs->mc_arb_rfsh_rate =
		(u8)si_calculate_memory_refresh_rate(adev, pl->sclk);

	amdgpu_atombios_set_engine_dram_timings(adev, pl->sclk, pl->mclk);

	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;

	arb_regs->mc_arb_dram_timing  = cpu_to_be32(dram_timing);
	arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
	arb_regs->mc_arb_burst_time = (u8)burst_time;

	return 0;
}

static int si_do_program_memory_timing_parameters(struct amdgpu_device *adev,
						  struct amdgpu_ps *amdgpu_state,
						  unsigned int first_arb_set)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ps *state = si_get_ps(amdgpu_state);
	SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
	int i, ret = 0;

	for (i = 0; i < state->performance_level_count; i++) {
		ret = si_populate_memory_timing_parameters(adev, &state->performance_levels[i], &arb_regs);
		if (ret)
			break;
		ret = amdgpu_si_copy_bytes_to_smc(adev,
						  si_pi->arb_table_start +
						  offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
						  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i),
						  (u8 *)&arb_regs,
						  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
						  si_pi->sram_end);
		if (ret)
			break;
	}

	return ret;
}

static int si_program_memory_timing_parameters(struct amdgpu_device *adev,
					       struct amdgpu_ps *amdgpu_new_state)
{
	return si_do_program_memory_timing_parameters(adev, amdgpu_new_state,
						      SISLANDS_DRIVER_STATE_ARB_INDEX);
}

static int si_populate_initial_mvdd_value(struct amdgpu_device *adev,
					  struct SISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);

	if (pi->mvdd_control)
		return si_populate_voltage_value(adev, &si_pi->mvdd_voltage_table,
						 si_pi->mvdd_bootup_value, voltage);

	return 0;
}
static int si_populate_smc_initial_state(struct amdgpu_device *adev,
					 struct amdgpu_ps *amdgpu_initial_state,
					 SISLANDS_SMC_STATETABLE *table)
{
	struct si_ps *initial_state = si_get_ps(amdgpu_initial_state);
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 reg;
	int ret;

	table->initialState.levels[0].mclk.vDLL_CNTL =
		cpu_to_be32(si_pi->clock_registers.dll_cntl);
	table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
		cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
	table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
		cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
	table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
		cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
	table->initialState.levels[0].mclk.vMPLL_SS =
		cpu_to_be32(si_pi->clock_registers.mpll_ss1);
	table->initialState.levels[0].mclk.vMPLL_SS2 =
		cpu_to_be32(si_pi->clock_registers.mpll_ss2);

	table->initialState.levels[0].mclk.mclk_value =
		cpu_to_be32(initial_state->performance_levels[0].mclk);

	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
		cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
		cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);

	table->initialState.levels[0].sclk.sclk_value =
		cpu_to_be32(initial_state->performance_levels[0].sclk);

	table->initialState.levels[0].arbRefreshState =
		SISLANDS_INITIAL_STATE_ARB_INDEX;

	table->initialState.levels[0].ACIndex = 0;

	ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
					initial_state->performance_levels[0].vddc,
					&table->initialState.levels[0].vddc);

	if (!ret) {
		u16 std_vddc;

		ret = si_get_std_voltage_value(adev,
					       &table->initialState.levels[0].vddc,
					       &std_vddc);
		if (!ret)
			si_populate_std_voltage_value(adev, std_vddc,
						      table->initialState.levels[0].vddc.index,
						      &table->initialState.levels[0].std_vddc);
	}

	if (eg_pi->vddci_control)
		si_populate_voltage_value(adev,
					  &eg_pi->vddci_voltage_table,
					  initial_state->performance_levels[0].vddci,
					  &table->initialState.levels[0].vddci);

	if (si_pi->vddc_phase_shed_control)
		si_populate_phase_shedding_value(adev,
						 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
						 initial_state->performance_levels[0].vddc,
						 initial_state->performance_levels[0].sclk,
						 initial_state->performance_levels[0].mclk,
						 &table->initialState.levels[0].vddc);

	si_populate_initial_mvdd_value(adev, &table->initialState.levels[0].mvdd);

	reg = CG_R(0xffff) | CG_L(0);
	table->initialState.levels[0].aT = cpu_to_be32(reg);
	table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
	table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;

	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
		table->initialState.levels[0].strobeMode =
			si_get_strobe_mode_settings(adev,
						    initial_state->performance_levels[0].mclk);

		if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
			table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
		else
			table->initialState.levels[0].mcFlags = 0;
	}

	table->initialState.levelCount = 1;

	table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;

	table->initialState.levels[0].dpm2.MaxPS = 0;
	table->initialState.levels[0].dpm2.NearTDPDec = 0;
	table->initialState.levels[0].dpm2.AboveSafeInc = 0;
	table->initialState.levels[0].dpm2.BelowSafeInc = 0;
	table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;

	reg = MIN_POWER_MASK | MAX_POWER_MASK;
	table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);

	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
	table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);

	return 0;
}
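/* The ACPI state starts as a copy of the initial state, then forces the
 * memory DLLs into reset and zeroes the sclk/mclk values so the lowest-power
 * settings apply when the display is off.
 */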
static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
				      SISLANDS_SMC_STATETABLE *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
	u32 dll_cntl = si_pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
	u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
	u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
	u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
	u32 reg;
	int ret;

	table->ACPIState = table->initialState;

	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (pi->acpi_vddc) {
		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
						pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
		if (!ret) {
			u16 std_vddc;

			ret = si_get_std_voltage_value(adev,
						       &table->ACPIState.levels[0].vddc, &std_vddc);
			if (!ret)
				si_populate_std_voltage_value(adev, std_vddc,
							      table->ACPIState.levels[0].vddc.index,
							      &table->ACPIState.levels[0].std_vddc);
		}
		table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;

		if (si_pi->vddc_phase_shed_control) {
			si_populate_phase_shedding_value(adev,
							 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
							 pi->acpi_vddc,
							 0,
							 0,
							 &table->ACPIState.levels[0].vddc);
		}
	} else {
		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
						pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
		if (!ret) {
			u16 std_vddc;

			ret = si_get_std_voltage_value(adev,
						       &table->ACPIState.levels[0].vddc, &std_vddc);

			if (!ret)
				si_populate_std_voltage_value(adev, std_vddc,
							      table->ACPIState.levels[0].vddc.index,
							      &table->ACPIState.levels[0].std_vddc);
		}
		table->ACPIState.levels[0].gen2PCIE =
			(u8)r600_get_pcie_gen_support(adev,
						      si_pi->sys_pcie_mask,
						      si_pi->boot_pcie_gen,
						      AMDGPU_PCIE_GEN1);

		if (si_pi->vddc_phase_shed_control)
			si_populate_phase_shedding_value(adev,
							 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
							 pi->min_vddc_in_table,
							 0,
							 0,
							 &table->ACPIState.levels[0].vddc);
	}

	if (pi->acpi_vddc) {
		if (eg_pi->acpi_vddci)
			si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
						  eg_pi->acpi_vddci,
						  &table->ACPIState.levels[0].vddci);
	}

	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(4);

	table->ACPIState.levels[0].mclk.vDLL_CNTL =
		cpu_to_be32(dll_cntl);
	table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
		cpu_to_be32(mclk_pwrmgt_cntl);
	table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
		cpu_to_be32(mpll_ad_func_cntl);
	table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
		cpu_to_be32(mpll_dq_func_cntl);
	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
		cpu_to_be32(mpll_func_cntl);
	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
		cpu_to_be32(mpll_func_cntl_1);
	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
		cpu_to_be32(mpll_func_cntl_2);
	table->ACPIState.levels[0].mclk.vMPLL_SS =
		cpu_to_be32(si_pi->clock_registers.mpll_ss1);
	table->ACPIState.levels[0].mclk.vMPLL_SS2 =
		cpu_to_be32(si_pi->clock_registers.mpll_ss2);

	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
		cpu_to_be32(spll_func_cntl);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
		cpu_to_be32(spll_func_cntl_2);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
		cpu_to_be32(spll_func_cntl_3);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
		cpu_to_be32(spll_func_cntl_4);

	table->ACPIState.levels[0].mclk.mclk_value = 0;
	table->ACPIState.levels[0].sclk.sclk_value = 0;

	si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd);

	if (eg_pi->dynamic_ac_timing)
		table->ACPIState.levels[0].ACIndex = 0;

	table->ACPIState.levels[0].dpm2.MaxPS = 0;
	table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
	table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
	table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
	table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;

	reg = MIN_POWER_MASK | MAX_POWER_MASK;
	table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);

	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
	table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);

	return 0;
}
static int si_populate_ulv_state(struct amdgpu_device *adev,
				 SISLANDS_SMC_SWSTATE *state)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ulv_param *ulv = &si_pi->ulv;
	u32 sclk_in_sr = 1350; /* ??? */
	int ret;

	ret = si_convert_power_level_to_smc(adev, &ulv->pl,
					    &state->levels[0]);
	if (!ret) {
		if (eg_pi->sclk_deep_sleep) {
			if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
				state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
			else
				state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
		}
		if (ulv->one_pcie_lane_in_ulv)
			state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
		state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
		state->levels[0].ACIndex = 1;
		state->levels[0].std_vddc = state->levels[0].vddc;
		state->levelCount = 1;

		state->flags |= PPSMC_SWSTATE_FLAG_DC;
	}

	return ret;
}

static int si_program_ulv_memory_timing_parameters(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ulv_param *ulv = &si_pi->ulv;
	SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
	int ret;

	ret = si_populate_memory_timing_parameters(adev, &ulv->pl,
						   &arb_regs);
	if (ret)
		return ret;

	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay,
				   ulv->volt_change_delay);

	ret = amdgpu_si_copy_bytes_to_smc(adev,
					  si_pi->arb_table_start +
					  offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
					  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX,
					  (u8 *)&arb_regs,
					  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
					  si_pi->sram_end);

	return ret;
}

static void si_get_mvdd_configuration(struct amdgpu_device *adev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);

	pi->mvdd_split_frequency = 30000;
}
static int si_init_smc_table(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
	const struct si_ulv_param *ulv = &si_pi->ulv;
	SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable;
	int ret;
	u32 lane_width;
	u32 vr_hot_gpio;

	si_populate_smc_voltage_tables(adev, table);

	switch (adev->pm.int_thermal_type) {
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
		break;
	case THERMAL_TYPE_NONE:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
		break;
	default:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
		break;
	}

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) {
		if ((adev->pdev->device != 0x6818) && (adev->pdev->device != 0x6819))
			table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
	}

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
		table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
		vr_hot_gpio = adev->pm.dpm.backbias_response_time;
		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_vr_hot_gpio,
					   vr_hot_gpio);
	}

	ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table);
	if (ret)
		return ret;

	ret = si_populate_smc_acpi_state(adev, table);
	if (ret)
		return ret;

	table->driverState = table->initialState;

	ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state,
						     SISLANDS_INITIAL_STATE_ARB_INDEX);
	if (ret)
		return ret;

	if (ulv->supported && ulv->pl.vddc) {
		ret = si_populate_ulv_state(adev, &table->ULVState);
		if (ret)
			return ret;

		ret = si_program_ulv_memory_timing_parameters(adev);
		if (ret)
			return ret;

		WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
		WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);

		lane_width = amdgpu_get_pcie_lanes(adev);
		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width,
					   lane_width);
	} else {
		table->ULVState = table->initialState;
	}

	return amdgpu_si_copy_bytes_to_smc(adev, si_pi->state_table_start,
					   (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
					   si_pi->sram_end);
}
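/* Compute SPLL dividers for a target engine clock: fbdiv is derived from the
 * reference clock and post divider, then optional spread spectrum
 * (CLK_S/CLK_V) is layered on top when sclk_ss is enabled.
 */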
static int si_calculate_sclk_params(struct amdgpu_device *adev,
				    u32 engine_clock,
				    SISLANDS_SMC_SCLK_VALUE *sclk)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	struct atom_clock_dividers dividers;
	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
	u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2;
	u64 tmp;
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	int ret;

	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;

	tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
	do_div(tmp, reference_clock);
	fbdiv = (u32) tmp;

	spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
	spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(2);

	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
	spll_func_cntl_3 |= SPLL_DITHEN;

	if (pi->sclk_ss) {
		struct amdgpu_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~CLK_S_MASK;
			cg_spll_spread_spectrum |= CLK_S(clk_s);
			cg_spll_spread_spectrum |= SSEN;

			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
		}
	}

	sclk->sclk_value = engine_clock;
	sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
	sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
	sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
	sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
	sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;

	return 0;
}

static int si_populate_sclk_value(struct amdgpu_device *adev,
				  u32 engine_clock,
				  SISLANDS_SMC_SCLK_VALUE *sclk)
{
	SISLANDS_SMC_SCLK_VALUE sclk_tmp;
	int ret;

	ret = si_calculate_sclk_params(adev, engine_clock, &sclk_tmp);
	if (!ret) {
		sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
		sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
		sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
		sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
		sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
		sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
		sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
	}

	return ret;
}
static int si_populate_mclk_value(struct amdgpu_device *adev,
				  u32 engine_clock,
				  u32 memory_clock,
				  SISLANDS_SMC_MCLK_VALUE *mclk,
				  bool strobe_mode,
				  bool dll_state_on)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 dll_cntl = si_pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
	u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
	u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
	u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
	u32 mpll_ss1 = si_pi->clock_registers.mpll_ss1;
	u32 mpll_ss2 = si_pi->clock_registers.mpll_ss2;
	struct atom_mpll_param mpll_param;
	int ret;

	ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
	if (ret)
		return ret;

	mpll_func_cntl &= ~BWCTRL_MASK;
	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);

	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);

	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);

	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
			YCLK_POST_DIV(mpll_param.post_div);
	}

	if (pi->mclk_ss) {
		struct amdgpu_atom_ss ss;
		u32 freq_nom;
		u32 tmp;
		u32 reference_clock = adev->clock.mpll.reference_freq;

		if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
			freq_nom = memory_clock * 4;
		else
			freq_nom = memory_clock * 2;

		tmp = freq_nom / reference_clock;
		tmp = tmp * tmp;
		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
			u32 clks = reference_clock * 5 / ss.rate;
			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);

			mpll_ss1 &= ~CLKV_MASK;
			mpll_ss1 |= CLKV(clkv);

			mpll_ss2 &= ~CLKS_MASK;
			mpll_ss2 |= CLKS(clks);
		}
	}

	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);

	if (dll_state_on)
		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
	else
		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	mclk->mclk_value = cpu_to_be32(memory_clock);
	mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
	mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1);
	mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2);
	mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
	mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
	mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
	mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
	mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
	mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);

	return 0;
}

static void si_populate_smc_sp(struct amdgpu_device *adev,
			       struct amdgpu_ps *amdgpu_state,
			       SISLANDS_SMC_SWSTATE *smc_state)
{
	struct si_ps *ps = si_get_ps(amdgpu_state);
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	int i;

	for (i = 0; i < ps->performance_level_count - 1; i++)
		smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);

	smc_state->levels[ps->performance_level_count - 1].bSP =
		cpu_to_be32(pi->psp);
}
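/* Convert one driver performance level into the SMC hardware-level layout:
 * clocks, voltages, EDC/stutter memory flags and the PCIe gen are all
 * resolved here before the state is uploaded.
 */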
static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
					 struct rv7xx_pl *pl,
					 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	int ret;
	bool dll_state_on;
	u16 std_vddc;
	bool gmc_pg = false;

	if (eg_pi->pcie_performance_request &&
	    (si_pi->force_pcie_gen != AMDGPU_PCIE_GEN_INVALID))
		level->gen2PCIE = (u8)si_pi->force_pcie_gen;
	else
		level->gen2PCIE = (u8)pl->pcie_gen;

	ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk);
	if (ret)
		return ret;

	level->mcFlags = 0;

	if (pi->mclk_stutter_mode_threshold &&
	    (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
	    !eg_pi->uvd_enabled &&
	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
	    (adev->pm.dpm.new_active_crtc_count <= 2)) {
		level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;

		if (gmc_pg)
			level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
	}

	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
		if (pl->mclk > pi->mclk_edc_enable_threshold)
			level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;

		if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
			level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG;

		level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk);

		if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) {
			if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
			else
				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
		} else {
			dll_state_on = false;
		}
	} else {
		level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk);

		dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
	}

	ret = si_populate_mclk_value(adev,
				     pl->sclk,
				     pl->mclk,
				     &level->mclk,
				     (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on);
	if (ret)
		return ret;

	ret = si_populate_voltage_value(adev,
					&eg_pi->vddc_voltage_table,
					pl->vddc, &level->vddc);
	if (ret)
		return ret;

	ret = si_get_std_voltage_value(adev, &level->vddc, &std_vddc);
	if (ret)
		return ret;

	ret = si_populate_std_voltage_value(adev, std_vddc,
					    level->vddc.index, &level->std_vddc);
	if (ret)
		return ret;

	if (eg_pi->vddci_control) {
		ret = si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
						pl->vddci, &level->vddci);
		if (ret)
			return ret;
	}

	if (si_pi->vddc_phase_shed_control) {
		ret = si_populate_phase_shedding_value(adev,
						       &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
						       pl->vddc,
						       pl->sclk,
						       pl->mclk,
						       &level->vddc);
		if (ret)
			return ret;
	}

	level->MaxPoweredUpCU = si_pi->max_cu;

	ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd);

	return ret;
}

static int si_populate_smc_t(struct amdgpu_device *adev,
			     struct amdgpu_ps *amdgpu_state,
			     SISLANDS_SMC_SWSTATE *smc_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct si_ps *state = si_get_ps(amdgpu_state);
	u32 a_t;
	u32 t_l, t_h;
	u32 high_bsp;
	int i, ret;

	if (state->performance_level_count >= 9)
		return -EINVAL;

	if (state->performance_level_count < 2) {
		a_t = CG_R(0xffff) | CG_L(0);
		smc_state->levels[0].aT = cpu_to_be32(a_t);
		return 0;
	}

	smc_state->levels[0].aT = cpu_to_be32(0);

	for (i = 0; i <= state->performance_level_count - 2; i++) {
		ret = r600_calculate_at(
			(50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1),
			100 * R600_AH_DFLT,
			state->performance_levels[i + 1].sclk,
			state->performance_levels[i].sclk,
			&t_l,
			&t_h);

		if (ret) {
			t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
			t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
		}

		a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
		a_t |= CG_R(t_l * pi->bsp / 20000);
		smc_state->levels[i].aT = cpu_to_be32(a_t);

		high_bsp = (i == state->performance_level_count - 2) ?
			pi->pbsp : pi->bsp;
		a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
		smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
	}

	return 0;
}

static int si_disable_ulv(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ulv_param *ulv = &si_pi->ulv;

	if (ulv->supported)
		return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
			0 : -EINVAL;

	return 0;
}

static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
				       struct amdgpu_ps *amdgpu_state)
{
	const struct si_power_info *si_pi = si_get_pi(adev);
	const struct si_ulv_param *ulv = &si_pi->ulv;
	const struct si_ps *state = si_get_ps(amdgpu_state);
	int i;

	if (state->performance_levels[0].mclk != ulv->pl.mclk)
		return false;

	/* XXX validate against display requirements! */

	for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
		if (adev->clock.current_dispclk <=
		    adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
			if (ulv->pl.vddc <
			    adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
				return false;
		}
	}

	if ((amdgpu_state->vclk != 0) || (amdgpu_state->dclk != 0))
		return false;

	return true;
}
static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device *adev,
						       struct amdgpu_ps *amdgpu_new_state)
{
	const struct si_power_info *si_pi = si_get_pi(adev);
	const struct si_ulv_param *ulv = &si_pi->ulv;

	if (ulv->supported) {
		if (si_is_state_ulv_compatible(adev, amdgpu_new_state))
			return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
				0 : -EINVAL;
	}
	return 0;
}

static int si_convert_power_state_to_smc(struct amdgpu_device *adev,
					 struct amdgpu_ps *amdgpu_state,
					 SISLANDS_SMC_SWSTATE *smc_state)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ps *state = si_get_ps(amdgpu_state);
	int i, ret;
	u32 threshold;
	u32 sclk_in_sr = 1350; /* ??? */

	if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS)
		return -EINVAL;

	threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100;

	if (amdgpu_state->vclk && amdgpu_state->dclk) {
		eg_pi->uvd_enabled = true;
		if (eg_pi->smu_uvd_hs)
			smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD;
	} else {
		eg_pi->uvd_enabled = false;
	}

	if (state->dc_compatible)
		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;

	smc_state->levelCount = 0;
	for (i = 0; i < state->performance_level_count; i++) {
		if (eg_pi->sclk_deep_sleep) {
			if ((i == 0) || si_pi->sclk_deep_sleep_above_low) {
				if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
				else
					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
			}
		}

		ret = si_convert_power_level_to_smc(adev, &state->performance_levels[i],
						    &smc_state->levels[i]);
		smc_state->levels[i].arbRefreshState =
			(u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i);

		if (ret)
			return ret;

		if (ni_pi->enable_power_containment)
			smc_state->levels[i].displayWatermark =
				(state->performance_levels[i].sclk < threshold) ?
				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
		else
			smc_state->levels[i].displayWatermark = (i < 2) ?
				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;

		if (eg_pi->dynamic_ac_timing)
			smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
		else
			smc_state->levels[i].ACIndex = 0;

		smc_state->levelCount++;
	}

	si_write_smc_soft_register(adev,
				   SI_SMC_SOFT_REGISTER_watermark_threshold,
				   threshold / 512);

	si_populate_smc_sp(adev, amdgpu_state, smc_state);

	ret = si_populate_power_containment_values(adev, amdgpu_state, smc_state);
	if (ret)
		ni_pi->enable_power_containment = false;

	ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state);
	if (ret)
		ni_pi->enable_sq_ramping = false;

	return si_populate_smc_t(adev, amdgpu_state, smc_state);
}

static int si_upload_sw_state(struct amdgpu_device *adev,
			      struct amdgpu_ps *amdgpu_new_state)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ps *new_state = si_get_ps(amdgpu_new_state);
	int ret;
	u32 address = si_pi->state_table_start +
		offsetof(SISLANDS_SMC_STATETABLE, driverState);
	u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) +
		((new_state->performance_level_count - 1) *
		 sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL));
	SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState;

	memset(smc_state, 0, state_size);

	ret = si_convert_power_state_to_smc(adev, amdgpu_new_state, smc_state);
	if (ret)
		return ret;

	return amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
					   state_size, si_pi->sram_end);
}
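/* The ULV state lives in its own slot of the SMC state table and is only
 * uploaded when ULV is supported and has a valid VDDC.
 */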
static int si_upload_ulv_state(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ulv_param *ulv = &si_pi->ulv;
	int ret = 0;

	if (ulv->supported && ulv->pl.vddc) {
		u32 address = si_pi->state_table_start +
			offsetof(SISLANDS_SMC_STATETABLE, ULVState);
		SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
		u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);

		memset(smc_state, 0, state_size);

		ret = si_populate_ulv_state(adev, smc_state);
		if (!ret)
			ret = amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
							  state_size, si_pi->sram_end);
	}

	return ret;
}

static int si_upload_smc_data(struct amdgpu_device *adev)
{
	struct amdgpu_crtc *amdgpu_crtc = NULL;
	int i;

	if (adev->pm.dpm.new_active_crtc_count == 0)
		return 0;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
			amdgpu_crtc = adev->mode_info.crtcs[i];
			break;
		}
	}

	if (amdgpu_crtc == NULL)
		return 0;

	if (amdgpu_crtc->line_time <= 0)
		return 0;

	if (si_write_smc_soft_register(adev,
				       SI_SMC_SOFT_REGISTER_crtc_index,
				       amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
		return 0;

	if (si_write_smc_soft_register(adev,
				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
				       amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
		return 0;

	if (si_write_smc_soft_register(adev,
				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
				       amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
		return 0;

	return 0;
}

static int si_set_mc_special_registers(struct amdgpu_device *adev,
				       struct si_mc_reg_table *table)
{
	u8 i, j, k;
	u32 temp_reg;

	for (i = 0, j = table->last; i < table->last; i++) {
		if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
			return -EINVAL;
		switch (table->mc_reg_address[i].s1) {
		case MC_SEQ_MISC1:
			temp_reg = RREG32(MC_PMG_CMD_EMRS);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP;
			for (k = 0; k < table->num_entries; k++)
				table->mc_reg_table_entry[k].mc_data[j] =
					((temp_reg & 0xffff0000)) |
					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
			j++;

			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			temp_reg = RREG32(MC_PMG_CMD_MRS);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
				if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
			}
			j++;

			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD;
				for (k = 0; k < table->num_entries; k++)
					table->mc_reg_table_entry[k].mc_data[j] =
						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
				j++;

				if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
					return -EINVAL;
			}
			break;
		case MC_SEQ_RESERVE_M:
			temp_reg = RREG32(MC_PMG_CMD_MRS1);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP;
			for (k = 0; k < table->num_entries; k++)
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
			j++;

			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			break;
		default:
			break;
		}
	}

	table->last = j;

	return 0;
}
= MC_SEQ_PMG_CMD_EMRS_LP; break; case MC_PMG_CMD_MRS: *out_reg = MC_SEQ_PMG_CMD_MRS_LP; break; case MC_PMG_CMD_MRS1: *out_reg = MC_SEQ_PMG_CMD_MRS1_LP; break; case MC_SEQ_PMG_TIMING: *out_reg = MC_SEQ_PMG_TIMING_LP; break; case MC_PMG_CMD_MRS2: *out_reg = MC_SEQ_PMG_CMD_MRS2_LP; break; case MC_SEQ_WR_CTL_2: *out_reg = MC_SEQ_WR_CTL_2_LP; break; default: result = false; break; } return result; } static void si_set_valid_flag(struct si_mc_reg_table *table) { u8 i, j; for (i = 0; i < table->last; i++) { for (j = 1; j < table->num_entries; j++) { if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) { table->valid_flag |= 1 << i; break; } } } } static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table) { u32 i; u16 address; for (i = 0; i < table->last; i++) table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ? address : table->mc_reg_address[i].s1; } static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table, struct si_mc_reg_table *si_table) { u8 i, j; if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) return -EINVAL; if (table->num_entries > MAX_AC_TIMING_ENTRIES) return -EINVAL; for (i = 0; i < table->last; i++) si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; si_table->last = table->last; for (i = 0; i < table->num_entries; i++) { si_table->mc_reg_table_entry[i].mclk_max = table->mc_reg_table_entry[i].mclk_max; for (j = 0; j < table->last; j++) { si_table->mc_reg_table_entry[i].mc_data[j] = table->mc_reg_table_entry[i].mc_data[j]; } } si_table->num_entries = table->num_entries; return 0; } static int si_initialize_mc_reg_table(struct amdgpu_device *adev) { struct si_power_info *si_pi = si_get_pi(adev); struct atom_mc_reg_table *table; struct si_mc_reg_table *si_table = &si_pi->mc_reg_table; u8 module_index = rv770_get_memory_module_index(adev); int ret; table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL); if (!table) return -ENOMEM; WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING)); WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING)); WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING)); WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2)); WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS)); WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS)); WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1)); WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0)); WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1)); WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0)); WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1)); WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING)); WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2)); WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2)); ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table); if (ret) goto init_mc_done; ret = si_copy_vbios_mc_reg_table(table, si_table); if (ret) goto init_mc_done; si_set_s0_mc_reg_index(si_table); ret = si_set_mc_special_registers(adev, si_table); if (ret) goto init_mc_done; si_set_valid_flag(si_table); init_mc_done: kfree(table); return ret; } static void si_populate_mc_reg_addresses(struct amdgpu_device *adev, SMC_SIslands_MCRegisters *mc_reg_table) { struct si_power_info *si_pi = si_get_pi(adev); u32 i, j; for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) { if (si_pi->mc_reg_table.valid_flag & (1 << j)) { if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) break; mc_reg_table->address[i].s0 = cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0); 
mc_reg_table->address[i].s1 = cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1); i++; } } mc_reg_table->last = (u8)i; } static void si_convert_mc_registers(const struct si_mc_reg_entry *entry, SMC_SIslands_MCRegisterSet *data, u32 num_entries, u32 valid_flag) { u32 i, j; for(i = 0, j = 0; j < num_entries; j++) { if (valid_flag & (1 << j)) { data->value[i] = cpu_to_be32(entry->mc_data[j]); i++; } } } static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev, struct rv7xx_pl *pl, SMC_SIslands_MCRegisterSet *mc_reg_table_data) { struct si_power_info *si_pi = si_get_pi(adev); u32 i = 0; for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) { if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max) break; } if ((i == si_pi->mc_reg_table.num_entries) && (i > 0)) --i; si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i], mc_reg_table_data, si_pi->mc_reg_table.last, si_pi->mc_reg_table.valid_flag); } static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev, struct amdgpu_ps *amdgpu_state, SMC_SIslands_MCRegisters *mc_reg_table) { struct si_ps *state = si_get_ps(amdgpu_state); int i; for (i = 0; i < state->performance_level_count; i++) { si_convert_mc_reg_table_entry_to_smc(adev, &state->performance_levels[i], &mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]); } } static int si_populate_mc_reg_table(struct amdgpu_device *adev, struct amdgpu_ps *amdgpu_boot_state) { struct si_ps *boot_state = si_get_ps(amdgpu_boot_state); struct si_power_info *si_pi = si_get_pi(adev); struct si_ulv_param *ulv = &si_pi->ulv; SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table; memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters)); si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_seq_index, 1); si_populate_mc_reg_addresses(adev, smc_mc_reg_table); si_convert_mc_reg_table_entry_to_smc(adev, &boot_state->performance_levels[0], &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]); si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0], &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT], si_pi->mc_reg_table.last, si_pi->mc_reg_table.valid_flag); if (ulv->supported && ulv->pl.vddc != 0) si_convert_mc_reg_table_entry_to_smc(adev, &ulv->pl, &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]); else si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0], &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT], si_pi->mc_reg_table.last, si_pi->mc_reg_table.valid_flag); si_convert_mc_reg_table_to_smc(adev, amdgpu_boot_state, smc_mc_reg_table); return amdgpu_si_copy_bytes_to_smc(adev, si_pi->mc_reg_table_start, (u8 *)smc_mc_reg_table, sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end); } static int si_upload_mc_reg_table(struct amdgpu_device *adev, struct amdgpu_ps *amdgpu_new_state) { struct si_ps *new_state = si_get_ps(amdgpu_new_state); struct si_power_info *si_pi = si_get_pi(adev); u32 address = si_pi->mc_reg_table_start + offsetof(SMC_SIslands_MCRegisters, data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]); SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table; memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters)); si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table); return amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT], sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count, si_pi->sram_end); } static void 
si_enable_voltage_control(struct amdgpu_device *adev, bool enable) { if (enable) WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN); else WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN); } static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev, struct amdgpu_ps *amdgpu_state) { struct si_ps *state = si_get_ps(amdgpu_state); int i; u16 pcie_speed, max_speed = 0; for (i = 0; i < state->performance_level_count; i++) { pcie_speed = state->performance_levels[i].pcie_gen; if (max_speed < pcie_speed) max_speed = pcie_speed; } return max_speed; } static u16 si_get_current_pcie_speed(struct amdgpu_device *adev) { u32 speed_cntl; speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK; speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT; return (u16)speed_cntl; } static void si_request_link_speed_change_before_state_change(struct amdgpu_device *adev, struct amdgpu_ps *amdgpu_new_state, struct amdgpu_ps *amdgpu_current_state) { struct si_power_info *si_pi = si_get_pi(adev); enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state); enum amdgpu_pcie_gen current_link_speed; if (si_pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID) current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state); else current_link_speed = si_pi->force_pcie_gen; si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; si_pi->pspp_notify_required = false; if (target_link_speed > current_link_speed) { switch (target_link_speed) { #if defined(CONFIG_ACPI) case AMDGPU_PCIE_GEN3: if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0) break; si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2; if (current_link_speed == AMDGPU_PCIE_GEN2) break; case AMDGPU_PCIE_GEN2: if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) break; #endif default: si_pi->force_pcie_gen = si_get_current_pcie_speed(adev); break; } } else { if (target_link_speed < current_link_speed) si_pi->pspp_notify_required = true; } } static void si_notify_link_speed_change_after_state_change(struct amdgpu_device *adev, struct amdgpu_ps *amdgpu_new_state, struct amdgpu_ps *amdgpu_current_state) { struct si_power_info *si_pi = si_get_pi(adev); enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state); u8 request; if (si_pi->pspp_notify_required) { if (target_link_speed == AMDGPU_PCIE_GEN3) request = PCIE_PERF_REQ_PECI_GEN3; else if (target_link_speed == AMDGPU_PCIE_GEN2) request = PCIE_PERF_REQ_PECI_GEN2; else request = PCIE_PERF_REQ_PECI_GEN1; if ((request == PCIE_PERF_REQ_PECI_GEN1) && (si_get_current_pcie_speed(adev) > 0)) return; #if defined(CONFIG_ACPI) amdgpu_acpi_pcie_performance_request(adev, request, false); #endif } } #if 0 static int si_ds_request(struct amdgpu_device *adev, bool ds_status_on, u32 count_write) { struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); if (eg_pi->sclk_deep_sleep) { if (ds_status_on) return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) == PPSMC_Result_OK) ? 0 : -EINVAL; else return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ThrottleOVRDSCLKDS) == PPSMC_Result_OK) ? 
0 : -EINVAL; } return 0; } #endif static void si_set_max_cu_value(struct amdgpu_device *adev) { struct si_power_info *si_pi = si_get_pi(adev); if (adev->asic_type == CHIP_VERDE) { switch (adev->pdev->device) { case 0x6820: case 0x6825: case 0x6821: case 0x6823: case 0x6827: si_pi->max_cu = 10; break; case 0x682D: case 0x6824: case 0x682F: case 0x6826: si_pi->max_cu = 8; break; case 0x6828: case 0x6830: case 0x6831: case 0x6838: case 0x6839: case 0x683D: si_pi->max_cu = 10; break; case 0x683B: case 0x683F: case 0x6829: si_pi->max_cu = 8; break; default: si_pi->max_cu = 0; break; } } else { si_pi->max_cu = 0; } } static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device *adev, struct amdgpu_clock_voltage_dependency_table *table) { u32 i; int j; u16 leakage_voltage; if (table) { for (i = 0; i < table->count; i++) { switch (si_get_leakage_voltage_from_leakage_index(adev, table->entries[i].v, &leakage_voltage)) { case 0: table->entries[i].v = leakage_voltage; break; case -EAGAIN: return -EINVAL; case -EINVAL: default: break; } } for (j = (table->count - 2); j >= 0; j--) { table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ? table->entries[j].v : table->entries[j + 1].v; } } return 0; } static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device *adev) { int ret = 0; ret = si_patch_single_dependency_table_based_on_leakage(adev, &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk); if (ret) DRM_ERROR("Could not patch vddc_on_sclk leakage table\n"); ret = si_patch_single_dependency_table_based_on_leakage(adev, &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk); if (ret) DRM_ERROR("Could not patch vddc_on_mclk leakage table\n"); ret = si_patch_single_dependency_table_based_on_leakage(adev, &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk); if (ret) DRM_ERROR("Could not patch vddci_on_mclk leakage table\n"); return ret; } static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev, struct amdgpu_ps *amdgpu_new_state, struct amdgpu_ps *amdgpu_current_state) { u32 lane_width; u32 new_lane_width = (amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT; u32 current_lane_width = (amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT; if (new_lane_width != current_lane_width) { amdgpu_set_pcie_lanes(adev, new_lane_width); lane_width = amdgpu_get_pcie_lanes(adev); si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width); } } static void si_dpm_setup_asic(struct amdgpu_device *adev) { si_read_clock_registers(adev); si_enable_acpi_power_management(adev); } static int si_thermal_enable_alert(struct amdgpu_device *adev, bool enable) { u32 thermal_int = RREG32(CG_THERMAL_INT); if (enable) { PPSMC_Result result; thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); WREG32(CG_THERMAL_INT, thermal_int); result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt); if (result != PPSMC_Result_OK) { DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); return -EINVAL; } } else { thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; WREG32(CG_THERMAL_INT, thermal_int); } return 0; } static int si_thermal_set_temperature_range(struct amdgpu_device *adev, int min_temp, int max_temp) { int low_temp = 0 * 1000; int high_temp = 255 * 1000; if (low_temp < min_temp) low_temp = min_temp; if (high_temp > max_temp) high_temp = max_temp; if (high_temp < low_temp) { DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, 
high_temp); return -EINVAL; } WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK); WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK); WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK); adev->pm.dpm.thermal.min_temp = low_temp; adev->pm.dpm.thermal.max_temp = high_temp; return 0; } static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode) { struct si_power_info *si_pi = si_get_pi(adev); u32 tmp; if (si_pi->fan_ctrl_is_in_default_mode) { tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT; si_pi->fan_ctrl_default_mode = tmp; tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT; si_pi->t_min = tmp; si_pi->fan_ctrl_is_in_default_mode = false; } tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK; tmp |= TMIN(0); WREG32(CG_FDO_CTRL2, tmp); tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; tmp |= FDO_PWM_MODE(mode); WREG32(CG_FDO_CTRL2, tmp); } static int si_thermal_setup_fan_table(struct amdgpu_device *adev) { struct si_power_info *si_pi = si_get_pi(adev); PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE }; u32 duty100; u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2; u16 fdo_min, slope1, slope2; u32 reference_clock, tmp; int ret; u64 tmp64; if (!si_pi->fan_table_start) { adev->pm.dpm.fan.ucode_fan_control = false; return 0; } duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; if (duty100 == 0) { adev->pm.dpm.fan.ucode_fan_control = false; return 0; } tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100; do_div(tmp64, 10000); fdo_min = (u16)tmp64; t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min; t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med; pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min; pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med; slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100); fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100); fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100); fan_table.slope1 = cpu_to_be16(slope1); fan_table.slope2 = cpu_to_be16(slope2); fan_table.fdo_min = cpu_to_be16(fdo_min); fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst); fan_table.hys_up = cpu_to_be16(1); fan_table.hys_slope = cpu_to_be16(1); fan_table.temp_resp_lim = cpu_to_be16(5); reference_clock = amdgpu_asic_get_xclk(adev); fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay * reference_clock) / 1600); fan_table.fdo_max = cpu_to_be16((u16)duty100); tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT; fan_table.temp_src = (uint8_t)tmp; ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->fan_table_start, (u8 *)(&fan_table), sizeof(fan_table), si_pi->sram_end); if (ret) { DRM_ERROR("Failed to load fan table to the SMC."); adev->pm.dpm.fan.ucode_fan_control = false; } return ret; } static int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev) { struct si_power_info *si_pi = si_get_pi(adev); PPSMC_Result ret; ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StartFanControl); if (ret == PPSMC_Result_OK) { si_pi->fan_is_controlled_by_smc = true; return 0; } else { return -EINVAL; } } static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev) { struct si_power_info *si_pi = si_get_pi(adev); PPSMC_Result ret; ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StopFanControl); if 
(ret == PPSMC_Result_OK) { si_pi->fan_is_controlled_by_smc = false; return 0; } else { return -EINVAL; } } static int si_dpm_get_fan_speed_percent(struct amdgpu_device *adev, u32 *speed) { u32 duty, duty100; u64 tmp64; if (adev->pm.no_fan) return -ENOENT; duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT; if (duty100 == 0) return -EINVAL; tmp64 = (u64)duty * 100; do_div(tmp64, duty100); *speed = (u32)tmp64; if (*speed > 100) *speed = 100; return 0; } static int si_dpm_set_fan_speed_percent(struct amdgpu_device *adev, u32 speed) { struct si_power_info *si_pi = si_get_pi(adev); u32 tmp; u32 duty, duty100; u64 tmp64; if (adev->pm.no_fan) return -ENOENT; if (si_pi->fan_is_controlled_by_smc) return -EINVAL; if (speed > 100) return -EINVAL; duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; if (duty100 == 0) return -EINVAL; tmp64 = (u64)speed * duty100; do_div(tmp64, 100); duty = (u32)tmp64; tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK; tmp |= FDO_STATIC_DUTY(duty); WREG32(CG_FDO_CTRL0, tmp); return 0; } static void si_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode) { if (mode) { /* stop auto-manage */ if (adev->pm.dpm.fan.ucode_fan_control) si_fan_ctrl_stop_smc_fan_control(adev); si_fan_ctrl_set_static_mode(adev, mode); } else { /* restart auto-manage */ if (adev->pm.dpm.fan.ucode_fan_control) si_thermal_start_smc_fan_control(adev); else si_fan_ctrl_set_default_mode(adev); } } static u32 si_dpm_get_fan_control_mode(struct amdgpu_device *adev) { struct si_power_info *si_pi = si_get_pi(adev); u32 tmp; if (si_pi->fan_is_controlled_by_smc) return 0; tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK; return (tmp >> FDO_PWM_MODE_SHIFT); } #if 0 static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev, u32 *speed) { u32 tach_period; u32 xclk = amdgpu_asic_get_xclk(adev); if (adev->pm.no_fan) return -ENOENT; if (adev->pm.fan_pulses_per_revolution == 0) return -ENOENT; tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT; if (tach_period == 0) return -ENOENT; *speed = 60 * xclk * 10000 / tach_period; return 0; } static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev, u32 speed) { u32 tach_period, tmp; u32 xclk = amdgpu_asic_get_xclk(adev); if (adev->pm.no_fan) return -ENOENT; if (adev->pm.fan_pulses_per_revolution == 0) return -ENOENT; if ((speed < adev->pm.fan_min_rpm) || (speed > adev->pm.fan_max_rpm)) return -EINVAL; if (adev->pm.dpm.fan.ucode_fan_control) si_fan_ctrl_stop_smc_fan_control(adev); tach_period = 60 * xclk * 10000 / (8 * speed); tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK; tmp |= TARGET_PERIOD(tach_period); WREG32(CG_TACH_CTRL, tmp); si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM); return 0; } #endif static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev) { struct si_power_info *si_pi = si_get_pi(adev); u32 tmp; if (!si_pi->fan_ctrl_is_in_default_mode) { tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode); WREG32(CG_FDO_CTRL2, tmp); tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK; tmp |= TMIN(si_pi->t_min); WREG32(CG_FDO_CTRL2, tmp); si_pi->fan_ctrl_is_in_default_mode = true; } } static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev) { if (adev->pm.dpm.fan.ucode_fan_control) { si_fan_ctrl_start_smc_fan_control(adev); si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC); } } static void 
si_thermal_initialize(struct amdgpu_device *adev) { u32 tmp; if (adev->pm.fan_pulses_per_revolution) { tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK; tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution -1); WREG32(CG_TACH_CTRL, tmp); } tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK; tmp |= TACH_PWM_RESP_RATE(0x28); WREG32(CG_FDO_CTRL2, tmp); } static int si_thermal_start_thermal_controller(struct amdgpu_device *adev) { int ret; si_thermal_initialize(adev); ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); if (ret) return ret; ret = si_thermal_enable_alert(adev, true); if (ret) return ret; if (adev->pm.dpm.fan.ucode_fan_control) { ret = si_halt_smc(adev); if (ret) return ret; ret = si_thermal_setup_fan_table(adev); if (ret) return ret; ret = si_resume_smc(adev); if (ret) return ret; si_thermal_start_smc_fan_control(adev); } return 0; } static void si_thermal_stop_thermal_controller(struct amdgpu_device *adev) { if (!adev->pm.no_fan) { si_fan_ctrl_set_default_mode(adev); si_fan_ctrl_stop_smc_fan_control(adev); } } static int si_dpm_enable(struct amdgpu_device *adev) { struct rv7xx_power_info *pi = rv770_get_pi(adev); struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); struct si_power_info *si_pi = si_get_pi(adev); struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps; int ret; if (amdgpu_si_is_smc_running(adev)) return -EINVAL; if (pi->voltage_control || si_pi->voltage_control_svi2) si_enable_voltage_control(adev, true); if (pi->mvdd_control) si_get_mvdd_configuration(adev); if (pi->voltage_control || si_pi->voltage_control_svi2) { ret = si_construct_voltage_tables(adev); if (ret) { DRM_ERROR("si_construct_voltage_tables failed\n"); return ret; } } if (eg_pi->dynamic_ac_timing) { ret = si_initialize_mc_reg_table(adev); if (ret) eg_pi->dynamic_ac_timing = false; } if (pi->dynamic_ss) si_enable_spread_spectrum(adev, true); if (pi->thermal_protection) si_enable_thermal_protection(adev, true); si_setup_bsp(adev); si_program_git(adev); si_program_tp(adev); si_program_tpp(adev); si_program_sstp(adev); si_enable_display_gap(adev); si_program_vc(adev); ret = si_upload_firmware(adev); if (ret) { DRM_ERROR("si_upload_firmware failed\n"); return ret; } ret = si_process_firmware_header(adev); if (ret) { DRM_ERROR("si_process_firmware_header failed\n"); return ret; } ret = si_initial_switch_from_arb_f0_to_f1(adev); if (ret) { DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n"); return ret; } ret = si_init_smc_table(adev); if (ret) { DRM_ERROR("si_init_smc_table failed\n"); return ret; } ret = si_init_smc_spll_table(adev); if (ret) { DRM_ERROR("si_init_smc_spll_table failed\n"); return ret; } ret = si_init_arb_table_index(adev); if (ret) { DRM_ERROR("si_init_arb_table_index failed\n"); return ret; } if (eg_pi->dynamic_ac_timing) { ret = si_populate_mc_reg_table(adev, boot_ps); if (ret) { DRM_ERROR("si_populate_mc_reg_table failed\n"); return ret; } } ret = si_initialize_smc_cac_tables(adev); if (ret) { DRM_ERROR("si_initialize_smc_cac_tables failed\n"); return ret; } ret = si_initialize_hardware_cac_manager(adev); if (ret) { DRM_ERROR("si_initialize_hardware_cac_manager failed\n"); return ret; } ret = si_initialize_smc_dte_tables(adev); if (ret) { DRM_ERROR("si_initialize_smc_dte_tables failed\n"); return ret; } ret = si_populate_smc_tdp_limits(adev, boot_ps); if (ret) { DRM_ERROR("si_populate_smc_tdp_limits failed\n"); return ret; } ret = si_populate_smc_tdp_limits_2(adev, boot_ps); if (ret) { DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n"); 
return ret; } si_program_response_times(adev); si_program_ds_registers(adev); si_dpm_start_smc(adev); ret = si_notify_smc_display_change(adev, false); if (ret) { DRM_ERROR("si_notify_smc_display_change failed\n"); return ret; } si_enable_sclk_control(adev, true); si_start_dpm(adev); si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); si_thermal_start_thermal_controller(adev); ni_update_current_ps(adev, boot_ps); return 0; } static int si_set_temperature_range(struct amdgpu_device *adev) { int ret; ret = si_thermal_enable_alert(adev, false); if (ret) return ret; ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); if (ret) return ret; ret = si_thermal_enable_alert(adev, true); if (ret) return ret; return ret; } static void si_dpm_disable(struct amdgpu_device *adev) { struct rv7xx_power_info *pi = rv770_get_pi(adev); struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps; if (!amdgpu_si_is_smc_running(adev)) return; si_thermal_stop_thermal_controller(adev); si_disable_ulv(adev); si_clear_vc(adev); if (pi->thermal_protection) si_enable_thermal_protection(adev, false); si_enable_power_containment(adev, boot_ps, false); si_enable_smc_cac(adev, boot_ps, false); si_enable_spread_spectrum(adev, false); si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false); si_stop_dpm(adev); si_reset_to_default(adev); si_dpm_stop_smc(adev); si_force_switch_to_arb_f0(adev); ni_update_current_ps(adev, boot_ps); } static int si_dpm_pre_set_power_state(struct amdgpu_device *adev) { struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; struct amdgpu_ps *new_ps = &requested_ps; ni_update_requested_ps(adev, new_ps); si_apply_state_adjust_rules(adev, &eg_pi->requested_rps); return 0; } static int si_power_control_set_level(struct amdgpu_device *adev) { struct amdgpu_ps *new_ps = adev->pm.dpm.requested_ps; int ret; ret = si_restrict_performance_levels_before_switch(adev); if (ret) return ret; ret = si_halt_smc(adev); if (ret) return ret; ret = si_populate_smc_tdp_limits(adev, new_ps); if (ret) return ret; ret = si_populate_smc_tdp_limits_2(adev, new_ps); if (ret) return ret; ret = si_resume_smc(adev); if (ret) return ret; ret = si_set_sw_state(adev); if (ret) return ret; return 0; } static int si_dpm_set_power_state(struct amdgpu_device *adev) { struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); struct amdgpu_ps *new_ps = &eg_pi->requested_rps; struct amdgpu_ps *old_ps = &eg_pi->current_rps; int ret; ret = si_disable_ulv(adev); if (ret) { DRM_ERROR("si_disable_ulv failed\n"); return ret; } ret = si_restrict_performance_levels_before_switch(adev); if (ret) { DRM_ERROR("si_restrict_performance_levels_before_switch failed\n"); return ret; } if (eg_pi->pcie_performance_request) si_request_link_speed_change_before_state_change(adev, new_ps, old_ps); ni_set_uvd_clock_before_set_eng_clock(adev, new_ps, old_ps); ret = si_enable_power_containment(adev, new_ps, false); if (ret) { DRM_ERROR("si_enable_power_containment failed\n"); return ret; } ret = si_enable_smc_cac(adev, new_ps, false); if (ret) { DRM_ERROR("si_enable_smc_cac failed\n"); return ret; } ret = si_halt_smc(adev); if (ret) { DRM_ERROR("si_halt_smc failed\n"); return ret; } ret = si_upload_sw_state(adev, new_ps); if (ret) { DRM_ERROR("si_upload_sw_state failed\n"); return ret; } ret = si_upload_smc_data(adev); if (ret) { DRM_ERROR("si_upload_smc_data failed\n"); return ret; } ret = si_upload_ulv_state(adev); 
if (ret) { DRM_ERROR("si_upload_ulv_state failed\n"); return ret; } if (eg_pi->dynamic_ac_timing) { ret = si_upload_mc_reg_table(adev, new_ps); if (ret) { DRM_ERROR("si_upload_mc_reg_table failed\n"); return ret; } } ret = si_program_memory_timing_parameters(adev, new_ps); if (ret) { DRM_ERROR("si_program_memory_timing_parameters failed\n"); return ret; } si_set_pcie_lane_width_in_smc(adev, new_ps, old_ps); ret = si_resume_smc(adev); if (ret) { DRM_ERROR("si_resume_smc failed\n"); return ret; } ret = si_set_sw_state(adev); if (ret) { DRM_ERROR("si_set_sw_state failed\n"); return ret; } ni_set_uvd_clock_after_set_eng_clock(adev, new_ps, old_ps); if (eg_pi->pcie_performance_request) si_notify_link_speed_change_after_state_change(adev, new_ps, old_ps); ret = si_set_power_state_conditionally_enable_ulv(adev, new_ps); if (ret) { DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n"); return ret; } ret = si_enable_smc_cac(adev, new_ps, true); if (ret) { DRM_ERROR("si_enable_smc_cac failed\n"); return ret; } ret = si_enable_power_containment(adev, new_ps, true); if (ret) { DRM_ERROR("si_enable_power_containment failed\n"); return ret; } ret = si_power_control_set_level(adev); if (ret) { DRM_ERROR("si_power_control_set_level failed\n"); return ret; } return 0; } static void si_dpm_post_set_power_state(struct amdgpu_device *adev) { struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); struct amdgpu_ps *new_ps = &eg_pi->requested_rps; ni_update_current_ps(adev, new_ps); } #if 0 void si_dpm_reset_asic(struct amdgpu_device *adev) { si_restrict_performance_levels_before_switch(adev); si_disable_ulv(adev); si_set_boot_state(adev); } #endif static void si_dpm_display_configuration_changed(struct amdgpu_device *adev) { si_program_display_gap(adev); } static void si_parse_pplib_non_clock_info(struct amdgpu_device *adev, struct amdgpu_ps *rps, struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, u8 table_rev) { rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); rps->class = le16_to_cpu(non_clock_info->usClassification); rps->class2 = le16_to_cpu(non_clock_info->usClassification2); if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); } else if (r600_is_uvd_state(rps->class, rps->class2)) { rps->vclk = RV770_DEFAULT_VCLK_FREQ; rps->dclk = RV770_DEFAULT_DCLK_FREQ; } else { rps->vclk = 0; rps->dclk = 0; } if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) adev->pm.dpm.boot_ps = rps; if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) adev->pm.dpm.uvd_ps = rps; } static void si_parse_pplib_clock_info(struct amdgpu_device *adev, struct amdgpu_ps *rps, int index, union pplib_clock_info *clock_info) { struct rv7xx_power_info *pi = rv770_get_pi(adev); struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); struct si_power_info *si_pi = si_get_pi(adev); struct si_ps *ps = si_get_ps(rps); u16 leakage_voltage; struct rv7xx_pl *pl = &ps->performance_levels[index]; int ret; ps->performance_level_count = index + 1; pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow); pl->sclk |= clock_info->si.ucEngineClockHigh << 16; pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow); pl->mclk |= clock_info->si.ucMemoryClockHigh << 16; pl->vddc = le16_to_cpu(clock_info->si.usVDDC); pl->vddci = le16_to_cpu(clock_info->si.usVDDCI); pl->flags = le32_to_cpu(clock_info->si.ulFlags); pl->pcie_gen = r600_get_pcie_gen_support(adev, si_pi->sys_pcie_mask, si_pi->boot_pcie_gen, clock_info->si.ucPCIEGen); /* patch up vddc 
if necessary */ ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc, &leakage_voltage); if (ret == 0) pl->vddc = leakage_voltage; if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { pi->acpi_vddc = pl->vddc; eg_pi->acpi_vddci = pl->vddci; si_pi->acpi_pcie_gen = pl->pcie_gen; } if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) && index == 0) { /* XXX disable for A0 tahiti */ si_pi->ulv.supported = false; si_pi->ulv.pl = *pl; si_pi->ulv.one_pcie_lane_in_ulv = false; si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT; si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT; si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT; } if (pi->min_vddc_in_table > pl->vddc) pi->min_vddc_in_table = pl->vddc; if (pi->max_vddc_in_table < pl->vddc) pi->max_vddc_in_table = pl->vddc; /* patch up boot state */ if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { u16 vddc, vddci, mvdd; amdgpu_atombios_get_default_voltages(adev, &vddc, &vddci, &mvdd); pl->mclk = adev->clock.default_mclk; pl->sclk = adev->clock.default_sclk; pl->vddc = vddc; pl->vddci = vddci; si_pi->mvdd_bootup_value = mvdd; } if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk; adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk; adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc; adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci; } } union pplib_power_state { struct _ATOM_PPLIB_STATE v1; struct _ATOM_PPLIB_STATE_V2 v2; }; static int si_parse_power_table(struct amdgpu_device *adev) { struct amdgpu_mode_info *mode_info = &adev->mode_info; struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; union pplib_power_state *power_state; int i, j, k, non_clock_array_index, clock_array_index; union pplib_clock_info *clock_info; struct _StateArray *state_array; struct _ClockInfoArray *clock_info_array; struct _NonClockInfoArray *non_clock_info_array; union power_info *power_info; int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); u16 data_offset; u8 frev, crev; u8 *power_state_offset; struct si_ps *ps; if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) return -EINVAL; power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); amdgpu_add_thermal_controller(adev); state_array = (struct _StateArray *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usStateArrayOffset)); clock_info_array = (struct _ClockInfoArray *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); non_clock_info_array = (struct _NonClockInfoArray *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) * state_array->ucNumEntries, GFP_KERNEL); if (!adev->pm.dpm.ps) return -ENOMEM; power_state_offset = (u8 *)state_array->states; for (i = 0; i < state_array->ucNumEntries; i++) { u8 *idx; power_state = (union pplib_power_state *)power_state_offset; non_clock_array_index = power_state->v2.nonClockInfoIndex; non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) &non_clock_info_array->nonClockInfo[non_clock_array_index]; ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL); if (ps == NULL) { kfree(adev->pm.dpm.ps); return -ENOMEM; } adev->pm.dpm.ps[i].ps_priv = ps; si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], non_clock_info, 
non_clock_info_array->ucEntrySize); k = 0; idx = (u8 *)&power_state->v2.clockInfoIndex[0]; for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { clock_array_index = idx[j]; if (clock_array_index >= clock_info_array->ucNumEntries) continue; if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS) break; clock_info = (union pplib_clock_info *) ((u8 *)&clock_info_array->clockInfo[0] + (clock_array_index * clock_info_array->ucEntrySize)); si_parse_pplib_clock_info(adev, &adev->pm.dpm.ps[i], k, clock_info); k++; } power_state_offset += 2 + power_state->v2.ucNumDPMLevels; } adev->pm.dpm.num_ps = state_array->ucNumEntries; /* fill in the vce power states */ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { u32 sclk, mclk; clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; clock_info = (union pplib_clock_info *) &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; sclk = le16_to_cpu(clock_info->si.usEngineClockLow); sclk |= clock_info->si.ucEngineClockHigh << 16; mclk = le16_to_cpu(clock_info->si.usMemoryClockLow); mclk |= clock_info->si.ucMemoryClockHigh << 16; adev->pm.dpm.vce_states[i].sclk = sclk; adev->pm.dpm.vce_states[i].mclk = mclk; } return 0; } static int si_dpm_init(struct amdgpu_device *adev) { struct rv7xx_power_info *pi; struct evergreen_power_info *eg_pi; struct ni_power_info *ni_pi; struct si_power_info *si_pi; struct atom_clock_dividers dividers; int ret; u32 mask; si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL); if (si_pi == NULL) return -ENOMEM; adev->pm.dpm.priv = si_pi; ni_pi = &si_pi->ni; eg_pi = &ni_pi->eg; pi = &eg_pi->rv7xx; ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); if (ret) si_pi->sys_pcie_mask = 0; else si_pi->sys_pcie_mask = mask; si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev); si_set_max_cu_value(adev); rv770_get_max_vddc(adev); si_get_leakage_vddc(adev); si_patch_dependency_tables_based_on_leakage(adev); pi->acpi_vddc = 0; eg_pi->acpi_vddci = 0; pi->min_vddc_in_table = 0; pi->max_vddc_in_table = 0; ret = amdgpu_get_platform_caps(adev); if (ret) return ret; ret = amdgpu_parse_extended_power_table(adev); if (ret) return ret; ret = si_parse_power_table(adev); if (ret) return ret; adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL); if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { amdgpu_free_extended_power_table(adev); return -ENOMEM; } adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4; adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0; adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0; adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000; adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720; adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000; adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810; adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000; adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900; if (adev->pm.dpm.voltage_response_time == 0) adev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT; if (adev->pm.dpm.backbias_response_time == 0) adev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT; ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 0, false, &dividers); if (ret) pi->ref_div = dividers.ref_div + 1; else pi->ref_div = R600_REFERENCEDIVIDER_DFLT; 
eg_pi->smu_uvd_hs = false; pi->mclk_strobe_mode_threshold = 40000; if (si_is_special_1gb_platform(adev)) pi->mclk_stutter_mode_threshold = 0; else pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold; pi->mclk_edc_enable_threshold = 40000; eg_pi->mclk_edc_wr_enable_threshold = 40000; ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold; pi->voltage_control = amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_GPIO_LUT); if (!pi->voltage_control) { si_pi->voltage_control_svi2 = amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_SVID2); if (si_pi->voltage_control_svi2) amdgpu_atombios_get_svi2_info(adev, SET_VOLTAGE_TYPE_ASIC_VDDC, &si_pi->svd_gpio_id, &si_pi->svc_gpio_id); } pi->mvdd_control = amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_MVDDC, VOLTAGE_OBJ_GPIO_LUT); eg_pi->vddci_control = amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI, VOLTAGE_OBJ_GPIO_LUT); if (!eg_pi->vddci_control) si_pi->vddci_control_svi2 = amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI, VOLTAGE_OBJ_SVID2); si_pi->vddc_phase_shed_control = amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT); rv770_get_engine_memory_ss(adev); pi->asi = RV770_ASI_DFLT; pi->pasi = CYPRESS_HASI_DFLT; pi->vrc = SISLANDS_VRC_DFLT; pi->gfx_clock_gating = true; eg_pi->sclk_deep_sleep = true; si_pi->sclk_deep_sleep_above_low = false; if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE) pi->thermal_protection = true; else pi->thermal_protection = false; eg_pi->dynamic_ac_timing = true; eg_pi->light_sleep = true; #if defined(CONFIG_ACPI) eg_pi->pcie_performance_request = amdgpu_acpi_is_pcie_performance_request_supported(adev); #else eg_pi->pcie_performance_request = false; #endif si_pi->sram_end = SMC_RAM_END; adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4; adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000; adev->pm.dpm.dyn_state.vddc_vddci_delta = 200; adev->pm.dpm.dyn_state.valid_sclk_values.count = 0; adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL; adev->pm.dpm.dyn_state.valid_mclk_values.count = 0; adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL; si_initialize_powertune_defaults(adev); /* make sure dc limits are valid */ if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) || (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0)) adev->pm.dpm.dyn_state.max_clock_voltage_on_dc = adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; si_pi->fan_ctrl_is_in_default_mode = true; return 0; } static void si_dpm_fini(struct amdgpu_device *adev) { int i; if (adev->pm.dpm.ps) for (i = 0; i < adev->pm.dpm.num_ps; i++) kfree(adev->pm.dpm.ps[i].ps_priv); kfree(adev->pm.dpm.ps); kfree(adev->pm.dpm.priv); kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries); amdgpu_free_extended_power_table(adev); } static void si_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, struct seq_file *m) { struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); struct amdgpu_ps *rps = &eg_pi->current_rps; struct si_ps *ps = si_get_ps(rps); struct rv7xx_pl *pl; u32 current_index = (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >> CURRENT_STATE_INDEX_SHIFT; if (current_index >= ps->performance_level_count) { seq_printf(m, "invalid dpm profile %d\n", current_index); } else { pl = &ps->performance_levels[current_index]; seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u 
vddci: %u pcie gen: %u\n", current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); } } static int si_dpm_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, enum amdgpu_interrupt_state state) { u32 cg_thermal_int; switch (type) { case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: switch (state) { case AMDGPU_IRQ_STATE_DISABLE: cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); cg_thermal_int |= THERM_INT_MASK_HIGH; WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); break; case AMDGPU_IRQ_STATE_ENABLE: cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); cg_thermal_int &= ~THERM_INT_MASK_HIGH; WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); break; default: break; } break; case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: switch (state) { case AMDGPU_IRQ_STATE_DISABLE: cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); cg_thermal_int |= THERM_INT_MASK_LOW; WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); break; case AMDGPU_IRQ_STATE_ENABLE: cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); cg_thermal_int &= ~THERM_INT_MASK_LOW; WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); break; default: break; } break; default: break; } return 0; } static int si_dpm_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { bool queue_thermal = false; if (entry == NULL) return -EINVAL; switch (entry->src_id) { case 230: /* thermal low to high */ DRM_DEBUG("IH: thermal low to high\n"); adev->pm.dpm.thermal.high_to_low = false; queue_thermal = true; break; case 231: /* thermal high to low */ DRM_DEBUG("IH: thermal high to low\n"); adev->pm.dpm.thermal.high_to_low = true; queue_thermal = true; break; default: break; } if (queue_thermal) schedule_work(&adev->pm.dpm.thermal.work); return 0; } static int si_dpm_late_init(void *handle) { int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (!amdgpu_dpm) return 0; /* init the sysfs and debugfs files late */ ret = amdgpu_pm_sysfs_init(adev); if (ret) return ret; ret = si_set_temperature_range(adev); if (ret) return ret; #if 0 //TODO ? si_dpm_powergate_uvd(adev, true); #endif return 0; } /** * si_dpm_init_microcode - load ucode images from disk * * @adev: amdgpu_device pointer * * Use the firmware interface to load the ucode images into * the driver (not loaded into hw). * Returns 0 on success, error on failure. 
*/ static int si_dpm_init_microcode(struct amdgpu_device *adev) { const char *chip_name; char fw_name[30]; int err; DRM_DEBUG("\n"); switch (adev->asic_type) { case CHIP_TAHITI: chip_name = "tahiti"; break; case CHIP_PITCAIRN: if ((adev->pdev->revision == 0x81) || (adev->pdev->device == 0x6810) || (adev->pdev->device == 0x6811) || (adev->pdev->device == 0x6816) || (adev->pdev->device == 0x6817) || (adev->pdev->device == 0x6806)) chip_name = "pitcairn_k"; else chip_name = "pitcairn"; break; case CHIP_VERDE: if ((adev->pdev->revision == 0x81) || (adev->pdev->revision == 0x83) || (adev->pdev->revision == 0x87) || (adev->pdev->device == 0x6820) || (adev->pdev->device == 0x6821) || (adev->pdev->device == 0x6822) || (adev->pdev->device == 0x6823) || (adev->pdev->device == 0x682A) || (adev->pdev->device == 0x682B)) chip_name = "verde_k"; else chip_name = "verde"; break; case CHIP_OLAND: if ((adev->pdev->revision == 0xC7) || (adev->pdev->revision == 0x80) || (adev->pdev->revision == 0x81) || (adev->pdev->revision == 0x83) || (adev->pdev->revision == 0x87) || (adev->pdev->device == 0x6604) || (adev->pdev->device == 0x6605)) chip_name = "oland_k"; else chip_name = "oland"; break; case CHIP_HAINAN: if ((adev->pdev->revision == 0x81) || (adev->pdev->revision == 0x83) || (adev->pdev->revision == 0xC3) || (adev->pdev->device == 0x6664) || (adev->pdev->device == 0x6665) || (adev->pdev->device == 0x6667)) chip_name = "hainan_k"; else chip_name = "hainan"; break; default: BUG(); } snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); err = request_firmware(&adev->pm.fw, fw_name, adev->dev); if (err) goto out; err = amdgpu_ucode_validate(adev->pm.fw); out: if (err) { DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s\"\n", err, fw_name); release_firmware(adev->pm.fw); adev->pm.fw = NULL; } return err; } static int si_dpm_sw_init(void *handle) { int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq); if (ret) return ret; ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq); if (ret) return ret; /* default to balanced state */ adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO; adev->pm.default_sclk = adev->clock.default_sclk; adev->pm.default_mclk = adev->clock.default_mclk; adev->pm.current_sclk = adev->clock.default_sclk; adev->pm.current_mclk = adev->clock.default_mclk; adev->pm.int_thermal_type = THERMAL_TYPE_NONE; if (amdgpu_dpm == 0) return 0; ret = si_dpm_init_microcode(adev); if (ret) return ret; INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); mutex_lock(&adev->pm.mutex); ret = si_dpm_init(adev); if (ret) goto dpm_failed; adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; if (amdgpu_dpm == 1) amdgpu_pm_print_power_states(adev); mutex_unlock(&adev->pm.mutex); DRM_INFO("amdgpu: dpm initialized\n"); return 0; dpm_failed: si_dpm_fini(adev); mutex_unlock(&adev->pm.mutex); DRM_ERROR("amdgpu: dpm initialization failed\n"); return ret; } static int si_dpm_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; flush_work(&adev->pm.dpm.thermal.work); mutex_lock(&adev->pm.mutex); amdgpu_pm_sysfs_fini(adev); si_dpm_fini(adev); mutex_unlock(&adev->pm.mutex); return 0; } static int si_dpm_hw_init(void *handle) { int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (!amdgpu_dpm) return 0; 
mutex_lock(&adev->pm.mutex); si_dpm_setup_asic(adev); ret = si_dpm_enable(adev); if (ret) adev->pm.dpm_enabled = false; else adev->pm.dpm_enabled = true; mutex_unlock(&adev->pm.mutex); return ret; } static int si_dpm_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->pm.dpm_enabled) { mutex_lock(&adev->pm.mutex); si_dpm_disable(adev); mutex_unlock(&adev->pm.mutex); } return 0; } static int si_dpm_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->pm.dpm_enabled) { mutex_lock(&adev->pm.mutex); /* disable dpm */ si_dpm_disable(adev); /* reset the power state */ adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; mutex_unlock(&adev->pm.mutex); } return 0; } static int si_dpm_resume(void *handle) { int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->pm.dpm_enabled) { /* asic init will reset to the boot state */ mutex_lock(&adev->pm.mutex); si_dpm_setup_asic(adev); ret = si_dpm_enable(adev); if (ret) adev->pm.dpm_enabled = false; else adev->pm.dpm_enabled = true; mutex_unlock(&adev->pm.mutex); if (adev->pm.dpm_enabled) amdgpu_pm_compute_clocks(adev); } return 0; } static bool si_dpm_is_idle(void *handle) { /* XXX */ return true; } static int si_dpm_wait_for_idle(void *handle) { /* XXX */ return 0; } static int si_dpm_soft_reset(void *handle) { return 0; } static int si_dpm_set_clockgating_state(void *handle, enum amd_clockgating_state state) { return 0; } static int si_dpm_set_powergating_state(void *handle, enum amd_powergating_state state) { return 0; } /* get temperature in millidegrees */ static int si_dpm_get_temp(struct amdgpu_device *adev) { u32 temp; int actual_temp = 0; temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >> CTF_TEMP_SHIFT; if (temp & 0x200) actual_temp = 255; else actual_temp = temp & 0x1ff; actual_temp = (actual_temp * 1000); return actual_temp; } static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low) { struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps); if (low) return requested_state->performance_levels[0].sclk; else return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk; } static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low) { struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps); if (low) return requested_state->performance_levels[0].mclk; else return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk; } static void si_dpm_print_power_state(struct amdgpu_device *adev, struct amdgpu_ps *rps) { struct si_ps *ps = si_get_ps(rps); struct rv7xx_pl *pl; int i; amdgpu_dpm_print_class_info(rps->class, rps->class2); amdgpu_dpm_print_cap_info(rps->caps); DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); for (i = 0; i < ps->performance_level_count; i++) { pl = &ps->performance_levels[i]; if (adev->asic_type >= CHIP_TAHITI) DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); else DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n", i, pl->sclk, pl->mclk, pl->vddc, pl->vddci); } amdgpu_dpm_print_ps_status(adev, rps); } static int si_dpm_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; si_dpm_set_dpm_funcs(adev); 
si_dpm_set_irq_funcs(adev); return 0; } static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1, const struct rv7xx_pl *si_cpl2) { return ((si_cpl1->mclk == si_cpl2->mclk) && (si_cpl1->sclk == si_cpl2->sclk) && (si_cpl1->pcie_gen == si_cpl2->pcie_gen) && (si_cpl1->vddc == si_cpl2->vddc) && (si_cpl1->vddci == si_cpl2->vddci)); } static int si_check_state_equal(struct amdgpu_device *adev, struct amdgpu_ps *cps, struct amdgpu_ps *rps, bool *equal) { struct si_ps *si_cps; struct si_ps *si_rps; int i; if (adev == NULL || cps == NULL || rps == NULL || equal == NULL) return -EINVAL; si_cps = si_get_ps(cps); si_rps = si_get_ps(rps); if (si_cps == NULL) { printk("si_cps is NULL\n"); *equal = false; return 0; } if (si_cps->performance_level_count != si_rps->performance_level_count) { *equal = false; return 0; } for (i = 0; i < si_cps->performance_level_count; i++) { if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]), &(si_rps->performance_levels[i]))) { *equal = false; return 0; } } /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk)); *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk)); return 0; } const struct amd_ip_funcs si_dpm_ip_funcs = { .name = "si_dpm", .early_init = si_dpm_early_init, .late_init = si_dpm_late_init, .sw_init = si_dpm_sw_init, .sw_fini = si_dpm_sw_fini, .hw_init = si_dpm_hw_init, .hw_fini = si_dpm_hw_fini, .suspend = si_dpm_suspend, .resume = si_dpm_resume, .is_idle = si_dpm_is_idle, .wait_for_idle = si_dpm_wait_for_idle, .soft_reset = si_dpm_soft_reset, .set_clockgating_state = si_dpm_set_clockgating_state, .set_powergating_state = si_dpm_set_powergating_state, }; static const struct amdgpu_dpm_funcs si_dpm_funcs = { .get_temperature = &si_dpm_get_temp, .pre_set_power_state = &si_dpm_pre_set_power_state, .set_power_state = &si_dpm_set_power_state, .post_set_power_state = &si_dpm_post_set_power_state, .display_configuration_changed = &si_dpm_display_configuration_changed, .get_sclk = &si_dpm_get_sclk, .get_mclk = &si_dpm_get_mclk, .print_power_state = &si_dpm_print_power_state, .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level, .force_performance_level = &si_dpm_force_performance_level, .vblank_too_short = &si_dpm_vblank_too_short, .set_fan_control_mode = &si_dpm_set_fan_control_mode, .get_fan_control_mode = &si_dpm_get_fan_control_mode, .set_fan_speed_percent = &si_dpm_set_fan_speed_percent, .get_fan_speed_percent = &si_dpm_get_fan_speed_percent, .check_state_equal = &si_check_state_equal, .get_vce_clock_state = amdgpu_get_vce_clock_state, }; static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev) { if (adev->pm.funcs == NULL) adev->pm.funcs = &si_dpm_funcs; } static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = { .set = si_dpm_set_interrupt_state, .process = si_dpm_process_interrupt, }; static void si_dpm_set_irq_funcs(struct amdgpu_device *adev) { adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs; } const struct amdgpu_ip_block_version si_dpm_ip_block = { .type = AMD_IP_BLOCK_TYPE_SMC, .major = 6, .minor = 0, .rev = 0, .funcs = &si_dpm_ip_funcs, };
gpl-2.0
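The fan-speed helpers in the si_dpm driver above (si_dpm_get_fan_speed_percent / si_dpm_set_fan_speed_percent) scale between a 0-100 percent request and the hardware duty cycle by widening to 64 bits before multiplying and then dividing with do_div(). The following is a minimal userspace C sketch of that scaling only, with plain 64-bit division standing in for do_div() and an assumed FMAX_DUTY100 value of 255; it is not the driver's code and reads no registers.

/* Sketch of the percent <-> duty-cycle scaling used by the fan helpers
 * above. The kernel uses do_div() on a u64; ordinary 64-bit division is
 * the userspace equivalent. duty100 here is hypothetical sample data,
 * not a real CG_FDO_CTRL1 read. */
#include <stdint.h>
#include <stdio.h>

static uint32_t percent_to_duty(uint32_t speed, uint32_t duty100)
{
	/* widen before multiplying so speed * duty100 cannot overflow 32 bits */
	uint64_t tmp = (uint64_t)speed * duty100;
	return (uint32_t)(tmp / 100);
}

static uint32_t duty_to_percent(uint32_t duty, uint32_t duty100)
{
	uint64_t tmp = (uint64_t)duty * 100;
	uint32_t pct = (uint32_t)(tmp / duty100);
	return pct > 100 ? 100 : pct;	/* clamp, as the driver does */
}

int main(void)
{
	uint32_t duty100 = 255;		/* assumed FMAX_DUTY100 value */
	uint32_t duty = percent_to_duty(40, duty100);
	printf("40%% -> duty %u -> %u%%\n", duty, duty_to_percent(duty, duty100));
	return 0;
}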
huan5765/codelite
sdk/codelite_indexer/ethread_win.cpp
18
1450
#if defined(__WXMSW__)
#include "ethread_win.h"
#include <process.h>
#include "ethread.h"

static unsigned __stdcall startFunc(void* arg)
{
	eThread *thread = reinterpret_cast<eThread*>(arg);
	if(thread) {
		// call the thread main loop
		thread->start();
	}
	// terminate the thread
	_endthreadex( 0 );
	return 0;
}

eThreadImpl::eThreadImpl()
	: m_stopEvent(INVALID_HANDLE_VALUE)
	, m_handle(INVALID_HANDLE_VALUE)
{
	m_stopEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
}

eThreadImpl::~eThreadImpl()
{
	CloseHandle(m_stopEvent);
	m_stopEvent = INVALID_HANDLE_VALUE;
}

void eThreadImpl::run(eThread *thread)
{
	m_handle = (HANDLE)_beginthreadex (
	               NULL,        // default security
	               0,           // default stack size
	               &startFunc,  // entry point
	               thread,
	               0,
	               (unsigned int *)&m_tid );
}

bool eThreadImpl::testDestroy()
{
	DWORD dwRet = WaitForSingleObject(m_stopEvent, 0);
	if(dwRet == WAIT_OBJECT_0) {
		// the event is signaled
		return true;
	}
	return false;
}

void eThreadImpl::requestStop()
{
	SetEvent(m_stopEvent);
}

void eThreadImpl::wait(long timeout)
{
	if(m_handle != INVALID_HANDLE_VALUE){
		if(WaitForSingleObject(m_handle, timeout) == WAIT_OBJECT_0){
			CloseHandle(m_handle);
			m_handle = INVALID_HANDLE_VALUE;
		}
	}
}
#endif // __WXMSW__
gpl-2.0
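ethread_win.cpp above implements cooperative cancellation with a Win32 auto-reset event: requestStop() signals the event and the worker polls it through WaitForSingleObject() with a zero timeout (testDestroy()). A minimal self-contained C sketch of the same pattern follows; worker_main and the sleep intervals are illustrative additions, not part of the original file.

/* Stop-event pattern from ethread_win.cpp, reduced to a standalone
 * Win32 C program. Build with a Windows toolchain. */
#include <windows.h>
#include <process.h>
#include <stdio.h>

static HANDLE g_stop_event;

static unsigned __stdcall worker_main(void *arg)
{
	(void)arg;
	/* WaitForSingleObject with a 0 timeout is a non-blocking poll,
	 * exactly like testDestroy() above */
	while (WaitForSingleObject(g_stop_event, 0) != WAIT_OBJECT_0) {
		/* ... one unit of work ... */
		Sleep(50);
	}
	_endthreadex(0);
	return 0;
}

int main(void)
{
	unsigned tid;
	HANDLE h;

	g_stop_event = CreateEvent(NULL, FALSE, FALSE, NULL); /* auto-reset */
	h = (HANDLE)_beginthreadex(NULL, 0, worker_main, NULL, 0, &tid);

	Sleep(200);
	SetEvent(g_stop_event);           /* requestStop() equivalent */
	WaitForSingleObject(h, INFINITE); /* wait() equivalent */
	CloseHandle(h);
	CloseHandle(g_stop_event);
	puts("worker stopped");
	return 0;
}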
micchie/mptcp
arch/arm/mach-orion5x/terastation_pro2-setup.c
274
9477
/*
 * Buffalo Terastation Pro II/Live Board Setup
 *
 * Maintainer: Sylver Bruneau <sylver.bruneau@googlemail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/mtd/physmap.h>
#include <linux/mv643xx_eth.h>
#include <linux/i2c.h>
#include <linux/serial_reg.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
#include <mach/orion5x.h>
#include "common.h"
#include "mpp.h"

/*****************************************************************************
 * Terastation Pro 2/Live Info
 ****************************************************************************/

/*
 * Terastation Pro 2 hardware :
 * - Marvell 88F5281-D0
 * - Marvell 88SX6042 SATA controller (PCI)
 * - Marvell 88E1118 Gigabit Ethernet PHY
 * - 256KB NOR flash
 * - 128MB of DDR RAM
 * - PCIe port (not equipped)
 */

/*
 * 256K NOR flash Device bus boot chip select
 */
#define TSP2_NOR_BOOT_BASE	0xf4000000
#define TSP2_NOR_BOOT_SIZE	SZ_256K

/*****************************************************************************
 * 256KB NOR Flash on BOOT Device
 ****************************************************************************/
static struct physmap_flash_data tsp2_nor_flash_data = {
	.width		= 1,
};

static struct resource tsp2_nor_flash_resource = {
	.flags = IORESOURCE_MEM,
	.start = TSP2_NOR_BOOT_BASE,
	.end   = TSP2_NOR_BOOT_BASE + TSP2_NOR_BOOT_SIZE - 1,
};

static struct platform_device tsp2_nor_flash = {
	.name		= "physmap-flash",
	.id		= 0,
	.dev		= {
		.platform_data	= &tsp2_nor_flash_data,
	},
	.num_resources	= 1,
	.resource	= &tsp2_nor_flash_resource,
};

/*****************************************************************************
 * PCI
 ****************************************************************************/
#define TSP2_PCI_SLOT0_OFFS		7
#define TSP2_PCI_SLOT0_IRQ_PIN		11

void __init tsp2_pci_preinit(void)
{
	int pin;

	/*
	 * Configure PCI GPIO IRQ pins
	 */
	pin = TSP2_PCI_SLOT0_IRQ_PIN;
	if (gpio_request(pin, "PCI Int1") == 0) {
		if (gpio_direction_input(pin) == 0) {
			irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
		} else {
			printk(KERN_ERR "tsp2_pci_preinit failed "
					"to set_irq_type pin %d\n", pin);
			gpio_free(pin);
		}
	} else {
		printk(KERN_ERR "tsp2_pci_preinit failed to "
				"gpio_request %d\n", pin);
	}
}

static int __init tsp2_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	int irq;

	/*
	 * Check for devices with hard-wired IRQs.
	 */
	irq = orion5x_pci_map_irq(dev, slot, pin);
	if (irq != -1)
		return irq;

	/*
	 * PCI IRQs are connected via GPIOs.
	 */
	if (slot == TSP2_PCI_SLOT0_OFFS)
		return gpio_to_irq(TSP2_PCI_SLOT0_IRQ_PIN);

	return -1;
}

static struct hw_pci tsp2_pci __initdata = {
	.nr_controllers	= 2,
	.preinit	= tsp2_pci_preinit,
	.setup		= orion5x_pci_sys_setup,
	.scan		= orion5x_pci_sys_scan_bus,
	.map_irq	= tsp2_pci_map_irq,
};

static int __init tsp2_pci_init(void)
{
	if (machine_is_terastation_pro2())
		pci_common_init(&tsp2_pci);

	return 0;
}

subsys_initcall(tsp2_pci_init);

/*****************************************************************************
 * Ethernet
 ****************************************************************************/
static struct mv643xx_eth_platform_data tsp2_eth_data = {
	.phy_addr	= 0,
};

/*****************************************************************************
 * RTC 5C372a on I2C bus
 ****************************************************************************/
#define TSP2_RTC_GPIO	9

static struct i2c_board_info __initdata tsp2_i2c_rtc = {
	I2C_BOARD_INFO("rs5c372a", 0x32),
};

/*****************************************************************************
 * Terastation Pro II specific power off method via UART1-attached
 * microcontroller
 ****************************************************************************/
#define UART1_REG(x)	(UART1_VIRT_BASE + ((UART_##x) << 2))

static int tsp2_miconread(unsigned char *buf, int count)
{
	int i;
	int timeout;

	for (i = 0; i < count; i++) {
		timeout = 10;

		while (!(readl(UART1_REG(LSR)) & UART_LSR_DR)) {
			if (--timeout == 0)
				break;
			udelay(1000);
		}

		if (timeout == 0)
			break;
		buf[i] = readl(UART1_REG(RX));
	}

	/* return read bytes */
	return i;
}

static int tsp2_miconwrite(const unsigned char *buf, int count)
{
	int i = 0;

	while (count--) {
		while (!(readl(UART1_REG(LSR)) & UART_LSR_THRE))
			barrier();
		writel(buf[i++], UART1_REG(TX));
	}

	return 0;
}

static int tsp2_miconsend(const unsigned char *data, int count)
{
	int i;
	unsigned char checksum = 0;
	unsigned char recv_buf[40];
	unsigned char send_buf[40];
	unsigned char correct_ack[3];
	int retry = 2;

	/* Generate checksum */
	for (i = 0; i < count; i++)
		checksum -= data[i];

	do {
		/* Send data */
		tsp2_miconwrite(data, count);

		/* send checksum */
		tsp2_miconwrite(&checksum, 1);

		if (tsp2_miconread(recv_buf, sizeof(recv_buf)) <= 3) {
			printk(KERN_ERR ">%s: receive failed.\n", __func__);

			/* send preamble to clear the receive buffer */
			memset(&send_buf, 0xff, sizeof(send_buf));
			tsp2_miconwrite(send_buf, sizeof(send_buf));

			/* make dummy reads */
			mdelay(100);
			tsp2_miconread(recv_buf, sizeof(recv_buf));
		} else {
			/* Generate expected ack */
			correct_ack[0] = 0x01;
			correct_ack[1] = data[1];
			correct_ack[2] = 0x00;

			/* checksum Check */
			if ((recv_buf[0] + recv_buf[1] + recv_buf[2] +
			     recv_buf[3]) & 0xFF) {
				printk(KERN_ERR ">%s: Checksum Error : "
					"Received data[%02x, %02x, %02x, %02x]"
					"\n", __func__, recv_buf[0],
					recv_buf[1], recv_buf[2],
					recv_buf[3]);
			} else {
				/* Check Received Data */
				if (correct_ack[0] == recv_buf[0] &&
				    correct_ack[1] == recv_buf[1] &&
				    correct_ack[2] == recv_buf[2]) {
					/* Interval for next command */
					mdelay(10);

					/* Receive ACK */
					return 0;
				}
			}
			/* Received NAK or illegal Data */
			printk(KERN_ERR ">%s: Error : NAK or Illegal Data "
					"Received\n", __func__);
		}
	} while (retry--);

	/* Interval for next command */
	mdelay(10);

	return -1;
}

static void tsp2_power_off(void)
{
	const unsigned char watchdogkill[]	= {0x01, 0x35, 0x00};
	const unsigned char shutdownwait[]	= {0x00, 0x0c};
	const unsigned char poweroff[]		= {0x00, 0x06};
	/* 38400 baud divisor */
	const unsigned divisor = ((orion5x_tclk + (8 * 38400)) / (16 * 38400));

	pr_info("%s: triggering power-off...\n", __func__);

	/* hijack uart1 and reset into sane state (38400,8n1,even parity) */
	writel(0x83, UART1_REG(LCR));
	writel(divisor & 0xff, UART1_REG(DLL));
	writel((divisor >> 8) & 0xff, UART1_REG(DLM));
	writel(0x1b, UART1_REG(LCR));
	writel(0x00, UART1_REG(IER));
	writel(0x07, UART1_REG(FCR));
	writel(0x00, UART1_REG(MCR));

	/* Send the commands to shutdown the Terastation Pro II */
	tsp2_miconsend(watchdogkill, sizeof(watchdogkill));
	tsp2_miconsend(shutdownwait, sizeof(shutdownwait));
	tsp2_miconsend(poweroff, sizeof(poweroff));
}

/*****************************************************************************
 * General Setup
 ****************************************************************************/
static unsigned int tsp2_mpp_modes[] __initdata = {
	MPP0_PCIE_RST_OUTn,
	MPP1_UNUSED,
	MPP2_UNUSED,
	MPP3_UNUSED,
	MPP4_NAND,		/* BOOT NAND Flash REn */
	MPP5_NAND,		/* BOOT NAND Flash WEn */
	MPP6_NAND,		/* BOOT NAND Flash HREn[0] */
	MPP7_NAND,		/* BOOT NAND Flash WEn[0] */
	MPP8_GPIO,		/* MICON int */
	MPP9_GPIO,		/* RTC int */
	MPP10_UNUSED,
	MPP11_GPIO,		/* PCI Int A */
	MPP12_UNUSED,
	MPP13_GPIO,		/* UPS on UART0 enable */
	MPP14_GPIO,		/* UPS low battery detection */
	MPP15_UNUSED,
	MPP16_UART,		/* UART1 RXD */
	MPP17_UART,		/* UART1 TXD */
	MPP18_UART,		/* UART1 CTSn */
	MPP19_UART,		/* UART1 RTSn */
	0,
};

static void __init tsp2_init(void)
{
	/*
	 * Setup basic Orion functions. Need to be called early.
	 */
	orion5x_init();

	orion5x_mpp_conf(tsp2_mpp_modes);

	/*
	 * Configure peripherals.
	 */
	orion5x_setup_dev_boot_win(TSP2_NOR_BOOT_BASE,
				   TSP2_NOR_BOOT_SIZE);
	platform_device_register(&tsp2_nor_flash);

	orion5x_ehci0_init();
	orion5x_eth_init(&tsp2_eth_data);
	orion5x_i2c_init();
	orion5x_uart0_init();
	orion5x_uart1_init();

	/* Get RTC IRQ and register the chip */
	if (gpio_request(TSP2_RTC_GPIO, "rtc") == 0) {
		if (gpio_direction_input(TSP2_RTC_GPIO) == 0)
			tsp2_i2c_rtc.irq = gpio_to_irq(TSP2_RTC_GPIO);
		else
			gpio_free(TSP2_RTC_GPIO);
	}
	if (tsp2_i2c_rtc.irq == 0)
		pr_warning("tsp2_init: failed to get RTC IRQ\n");
	i2c_register_board_info(0, &tsp2_i2c_rtc, 1);

	/* register Terastation Pro II specific power-off method */
	pm_power_off = tsp2_power_off;
}

MACHINE_START(TERASTATION_PRO2, "Buffalo Terastation Pro II/Live")
	/* Maintainer: Sylver Bruneau <sylver.bruneau@googlemail.com> */
	.atag_offset	= 0x100,
	.init_machine	= tsp2_init,
	.map_io		= orion5x_map_io,
	.init_early	= orion5x_init_early,
	.init_irq	= orion5x_init_irq,
	.timer		= &orion5x_timer,
	.fixup		= tag_fixup_mem32,
	.restart	= orion5x_restart,
MACHINE_END
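/*
 * Illustrative sketch, not part of the original file: tsp2_miconsend() above
 * uses a two's-complement checksum, i.e. the trailing byte that makes the sum
 * of all transmitted bytes come out to zero mod 256. This hypothetical
 * standalone userspace replica demonstrates the same convention; only the
 * arithmetic is taken from the driver, the rest is demo scaffolding.
 */
#include <stdio.h>

static unsigned char micon_checksum(const unsigned char *data, int count)
{
	unsigned char checksum = 0;
	int i;

	/* subtracting every byte yields the negated sum mod 256 */
	for (i = 0; i < count; i++)
		checksum -= data[i];
	return checksum;
}

int main(void)
{
	const unsigned char poweroff[] = {0x00, 0x06};
	unsigned char sum = micon_checksum(poweroff, 2);

	/* 0x00 + 0x06 + 0xfa == 0x100, so the receiver's (sum & 0xff) is 0 */
	printf("checksum byte: 0x%02x\n", sum);
	return 0;
}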
gpl-2.0
djbw/linux
drivers/power/supply/collie_battery.c
530
10297
/*
 * Battery and Power Management code for the Sharp SL-5x00
 *
 * Copyright (C) 2009 Thomas Kunze
 *
 * based on tosa_battery.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/power_supply.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/mfd/ucb1x00.h>

#include <asm/mach/sharpsl_param.h>
#include <asm/mach-types.h>
#include <mach/collie.h>

static DEFINE_MUTEX(bat_lock); /* protects gpio pins */
static struct work_struct bat_work;
static struct ucb1x00 *ucb;

struct collie_bat {
	int status;
	struct power_supply *psy;
	int full_chrg;

	struct mutex work_lock; /* protects data */

	bool (*is_present)(struct collie_bat *bat);
	int gpio_full;
	int gpio_charge_on;

	int technology;

	int gpio_bat;
	int adc_bat;
	int adc_bat_divider;
	int bat_max;
	int bat_min;

	int gpio_temp;
	int adc_temp;
	int adc_temp_divider;
};

static struct collie_bat collie_bat_main;

static unsigned long collie_read_bat(struct collie_bat *bat)
{
	unsigned long value = 0;

	if (bat->gpio_bat < 0 || bat->adc_bat < 0)
		return 0;

	mutex_lock(&bat_lock);
	gpio_set_value(bat->gpio_bat, 1);
	msleep(5);
	ucb1x00_adc_enable(ucb);
	value = ucb1x00_adc_read(ucb, bat->adc_bat, UCB_SYNC);
	ucb1x00_adc_disable(ucb);
	gpio_set_value(bat->gpio_bat, 0);
	mutex_unlock(&bat_lock);
	value = value * 1000000 / bat->adc_bat_divider;

	return value;
}

static unsigned long collie_read_temp(struct collie_bat *bat)
{
	unsigned long value = 0;

	if (bat->gpio_temp < 0 || bat->adc_temp < 0)
		return 0;

	mutex_lock(&bat_lock);
	gpio_set_value(bat->gpio_temp, 1);
	msleep(5);
	ucb1x00_adc_enable(ucb);
	value = ucb1x00_adc_read(ucb, bat->adc_temp, UCB_SYNC);
	ucb1x00_adc_disable(ucb);
	gpio_set_value(bat->gpio_temp, 0);
	mutex_unlock(&bat_lock);

	value = value * 10000 / bat->adc_temp_divider;

	return value;
}

static int collie_bat_get_property(struct power_supply *psy,
			    enum power_supply_property psp,
			    union power_supply_propval *val)
{
	int ret = 0;
	struct collie_bat *bat = power_supply_get_drvdata(psy);

	if (bat->is_present && !bat->is_present(bat)
			&& psp != POWER_SUPPLY_PROP_PRESENT) {
		return -ENODEV;
	}

	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
		val->intval = bat->status;
		break;
	case POWER_SUPPLY_PROP_TECHNOLOGY:
		val->intval = bat->technology;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		val->intval = collie_read_bat(bat);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
		if (bat->full_chrg == -1)
			val->intval = bat->bat_max;
		else
			val->intval = bat->full_chrg;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
		val->intval = bat->bat_max;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
		val->intval = bat->bat_min;
		break;
	case POWER_SUPPLY_PROP_TEMP:
		val->intval = collie_read_temp(bat);
		break;
	case POWER_SUPPLY_PROP_PRESENT:
		val->intval = bat->is_present ? bat->is_present(bat) : 1;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void collie_bat_external_power_changed(struct power_supply *psy)
{
	schedule_work(&bat_work);
}

static irqreturn_t collie_bat_gpio_isr(int irq, void *data)
{
	pr_info("collie_bat_gpio irq\n");
	schedule_work(&bat_work);
	return IRQ_HANDLED;
}

static void collie_bat_update(struct collie_bat *bat)
{
	int old;
	struct power_supply *psy = bat->psy;

	mutex_lock(&bat->work_lock);

	old = bat->status;

	if (bat->is_present && !bat->is_present(bat)) {
		printk(KERN_NOTICE "%s not present\n", psy->desc->name);
		bat->status = POWER_SUPPLY_STATUS_UNKNOWN;
		bat->full_chrg = -1;
	} else if (power_supply_am_i_supplied(psy)) {
		if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING) {
			gpio_set_value(bat->gpio_charge_on, 1);
			mdelay(15);
		}

		if (gpio_get_value(bat->gpio_full)) {
			if (old == POWER_SUPPLY_STATUS_CHARGING ||
					bat->full_chrg == -1)
				bat->full_chrg = collie_read_bat(bat);

			gpio_set_value(bat->gpio_charge_on, 0);
			bat->status = POWER_SUPPLY_STATUS_FULL;
		} else {
			gpio_set_value(bat->gpio_charge_on, 1);
			bat->status = POWER_SUPPLY_STATUS_CHARGING;
		}
	} else {
		gpio_set_value(bat->gpio_charge_on, 0);
		bat->status = POWER_SUPPLY_STATUS_DISCHARGING;
	}

	if (old != bat->status)
		power_supply_changed(psy);

	mutex_unlock(&bat->work_lock);
}

static void collie_bat_work(struct work_struct *work)
{
	collie_bat_update(&collie_bat_main);
}

static enum power_supply_property collie_bat_main_props[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_TECHNOLOGY,
	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
	POWER_SUPPLY_PROP_VOLTAGE_MAX,
	POWER_SUPPLY_PROP_PRESENT,
	POWER_SUPPLY_PROP_TEMP,
};

static enum power_supply_property collie_bat_bu_props[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_TECHNOLOGY,
	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
	POWER_SUPPLY_PROP_VOLTAGE_MAX,
	POWER_SUPPLY_PROP_PRESENT,
};

static const struct power_supply_desc collie_bat_main_desc = {
	.name		= "main-battery",
	.type		= POWER_SUPPLY_TYPE_BATTERY,
	.properties	= collie_bat_main_props,
	.num_properties	= ARRAY_SIZE(collie_bat_main_props),
	.get_property	= collie_bat_get_property,
	.external_power_changed = collie_bat_external_power_changed,
	.use_for_apm	= 1,
};

static struct collie_bat collie_bat_main = {
	.status = POWER_SUPPLY_STATUS_DISCHARGING,
	.full_chrg = -1,
	.psy = NULL,

	.gpio_full = COLLIE_GPIO_CO,
	.gpio_charge_on = COLLIE_GPIO_CHARGE_ON,

	.technology = POWER_SUPPLY_TECHNOLOGY_LIPO,

	.gpio_bat = COLLIE_GPIO_MBAT_ON,
	.adc_bat = UCB_ADC_INP_AD1,
	.adc_bat_divider = 155,
	.bat_max = 4310000,
	.bat_min = 1551 * 1000000 / 414,

	.gpio_temp = COLLIE_GPIO_TMP_ON,
	.adc_temp = UCB_ADC_INP_AD0,
	.adc_temp_divider = 10000,
};

static const struct power_supply_desc collie_bat_bu_desc = {
	.name		= "backup-battery",
	.type		= POWER_SUPPLY_TYPE_BATTERY,
	.properties	= collie_bat_bu_props,
	.num_properties	= ARRAY_SIZE(collie_bat_bu_props),
	.get_property	= collie_bat_get_property,
	.external_power_changed = collie_bat_external_power_changed,
};

static struct collie_bat collie_bat_bu = {
	.status = POWER_SUPPLY_STATUS_UNKNOWN,
	.full_chrg = -1,
	.psy = NULL,

	.gpio_full = -1,
	.gpio_charge_on = -1,

	.technology = POWER_SUPPLY_TECHNOLOGY_LiMn,

	.gpio_bat = COLLIE_GPIO_BBAT_ON,
	.adc_bat = UCB_ADC_INP_AD1,
	.adc_bat_divider = 155,
	.bat_max = 3000000,
	.bat_min = 1900000,

	.gpio_temp = -1,
	.adc_temp = -1,
	.adc_temp_divider = -1,
};

static struct gpio collie_batt_gpios[] = {
	{ COLLIE_GPIO_CO,	    GPIOF_IN,		"main battery full" },
	{ COLLIE_GPIO_MAIN_BAT_LOW, GPIOF_IN,		"main battery low" },
	{ COLLIE_GPIO_CHARGE_ON,    GPIOF_OUT_INIT_LOW,	"main charge on" },
	{ COLLIE_GPIO_MBAT_ON,	    GPIOF_OUT_INIT_LOW,	"main battery" },
	{ COLLIE_GPIO_TMP_ON,	    GPIOF_OUT_INIT_LOW,	"main battery temp" },
	{ COLLIE_GPIO_BBAT_ON,	    GPIOF_OUT_INIT_LOW,	"backup battery" },
};

#ifdef CONFIG_PM
static int wakeup_enabled;

static int collie_bat_suspend(struct ucb1x00_dev *dev)
{
	/* flush all pending status updates */
	flush_work(&bat_work);

	if (device_may_wakeup(&dev->ucb->dev) &&
	    collie_bat_main.status == POWER_SUPPLY_STATUS_CHARGING)
		wakeup_enabled = !enable_irq_wake(gpio_to_irq(COLLIE_GPIO_CO));
	else
		wakeup_enabled = 0;

	return 0;
}

static int collie_bat_resume(struct ucb1x00_dev *dev)
{
	if (wakeup_enabled)
		disable_irq_wake(gpio_to_irq(COLLIE_GPIO_CO));

	/* things may have changed while we were away */
	schedule_work(&bat_work);
	return 0;
}
#else
#define collie_bat_suspend NULL
#define collie_bat_resume NULL
#endif

static int collie_bat_probe(struct ucb1x00_dev *dev)
{
	int ret;
	struct power_supply_config psy_main_cfg = {}, psy_bu_cfg = {};

	if (!machine_is_collie())
		return -ENODEV;

	ucb = dev->ucb;

	ret = gpio_request_array(collie_batt_gpios,
				 ARRAY_SIZE(collie_batt_gpios));
	if (ret)
		return ret;

	mutex_init(&collie_bat_main.work_lock);

	INIT_WORK(&bat_work, collie_bat_work);

	psy_main_cfg.drv_data = &collie_bat_main;
	collie_bat_main.psy = power_supply_register(&dev->ucb->dev,
						    &collie_bat_main_desc,
						    &psy_main_cfg);
	if (IS_ERR(collie_bat_main.psy)) {
		ret = PTR_ERR(collie_bat_main.psy);
		goto err_psy_reg_main;
	}

	psy_bu_cfg.drv_data = &collie_bat_bu;
	collie_bat_bu.psy = power_supply_register(&dev->ucb->dev,
						  &collie_bat_bu_desc,
						  &psy_bu_cfg);
	if (IS_ERR(collie_bat_bu.psy)) {
		ret = PTR_ERR(collie_bat_bu.psy);
		goto err_psy_reg_bu;
	}

	ret = request_irq(gpio_to_irq(COLLIE_GPIO_CO),
				collie_bat_gpio_isr,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				"main full", &collie_bat_main);
	if (ret)
		goto err_irq;

	device_init_wakeup(&ucb->dev, 1);
	schedule_work(&bat_work);

	return 0;

err_irq:
	power_supply_unregister(collie_bat_bu.psy);
err_psy_reg_bu:
	power_supply_unregister(collie_bat_main.psy);
err_psy_reg_main:

	/* see comment in collie_bat_remove */
	cancel_work_sync(&bat_work);
	gpio_free_array(collie_batt_gpios, ARRAY_SIZE(collie_batt_gpios));
	return ret;
}

static void collie_bat_remove(struct ucb1x00_dev *dev)
{
	free_irq(gpio_to_irq(COLLIE_GPIO_CO), &collie_bat_main);
	power_supply_unregister(collie_bat_bu.psy);
	power_supply_unregister(collie_bat_main.psy);

	/*
	 * Now cancel the bat_work. We won't get any more schedules,
	 * since all sources (isr and external_power_changed) are
	 * unregistered now.
	 */
	cancel_work_sync(&bat_work);
	gpio_free_array(collie_batt_gpios, ARRAY_SIZE(collie_batt_gpios));
}

static struct ucb1x00_driver collie_bat_driver = {
	.add		= collie_bat_probe,
	.remove		= collie_bat_remove,
	.suspend	= collie_bat_suspend,
	.resume		= collie_bat_resume,
};

static int __init collie_bat_init(void)
{
	return ucb1x00_register_driver(&collie_bat_driver);
}

static void __exit collie_bat_exit(void)
{
	ucb1x00_unregister_driver(&collie_bat_driver);
}

module_init(collie_bat_init);
module_exit(collie_bat_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Thomas Kunze");
MODULE_DESCRIPTION("Collie battery driver");
gpl-2.0
Angor00/linux-aura-hd-android-2.6.35.3
kernel/gcov/fs.c
530
16270
/*
 *  This code exports profiling data as debugfs files to userspace.
 *
 *    Copyright IBM Corp. 2009
 *    Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
 *
 *    Uses gcc-internal data definitions.
 *    Based on the gcov-kernel patch by:
 *		 Hubertus Franke <frankeh@us.ibm.com>
 *		 Nigel Hinds <nhinds@us.ibm.com>
 *		 Rajan Ravindran <rajancr@us.ibm.com>
 *		 Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
 *		 Paul Larson
 *		 Yi CDL Yang
 */

#define pr_fmt(fmt)	"gcov: " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include "gcov.h"

/**
 * struct gcov_node - represents a debugfs entry
 * @list: list head for child node list
 * @children: child nodes
 * @all: list head for list of all nodes
 * @parent: parent node
 * @info: associated profiling data structure if not a directory
 * @ghost: when an object file containing profiling data is unloaded we keep a
 *         copy of the profiling data here to allow collecting coverage data
 *         for cleanup code. Such a node is called a "ghost".
 * @dentry: main debugfs entry, either a directory or data file
 * @links: associated symbolic links
 * @name: data file basename
 *
 * struct gcov_node represents an entity within the gcov/ subdirectory
 * of debugfs. There are directory and data file nodes. The latter represent
 * the actual synthesized data file plus any associated symbolic links which
 * are needed by the gcov tool to work correctly.
 */
struct gcov_node {
	struct list_head list;
	struct list_head children;
	struct list_head all;
	struct gcov_node *parent;
	struct gcov_info *info;
	struct gcov_info *ghost;
	struct dentry *dentry;
	struct dentry **links;
	char name[0];
};

static const char objtree[] = OBJTREE;
static const char srctree[] = SRCTREE;
static struct gcov_node root_node;
static struct dentry *reset_dentry;
static LIST_HEAD(all_head);
static DEFINE_MUTEX(node_lock);

/* If non-zero, keep copies of profiling data for unloaded modules. */
static int gcov_persist = 1;

static int __init gcov_persist_setup(char *str)
{
	unsigned long val;

	if (strict_strtoul(str, 0, &val)) {
		pr_warning("invalid gcov_persist parameter '%s'\n", str);
		return 0;
	}
	gcov_persist = val;
	pr_info("setting gcov_persist to %d\n", gcov_persist);

	return 1;
}
__setup("gcov_persist=", gcov_persist_setup);

/*
 * seq_file.start() implementation for gcov data files. Note that the
 * gcov_iterator interface is designed to be more restrictive than seq_file
 * (no start from arbitrary position, etc.), to simplify the iterator
 * implementation.
 */
static void *gcov_seq_start(struct seq_file *seq, loff_t *pos)
{
	loff_t i;

	gcov_iter_start(seq->private);
	for (i = 0; i < *pos; i++) {
		if (gcov_iter_next(seq->private))
			return NULL;
	}
	return seq->private;
}

/* seq_file.next() implementation for gcov data files. */
static void *gcov_seq_next(struct seq_file *seq, void *data, loff_t *pos)
{
	struct gcov_iterator *iter = data;

	if (gcov_iter_next(iter))
		return NULL;
	(*pos)++;

	return iter;
}

/* seq_file.show() implementation for gcov data files. */
static int gcov_seq_show(struct seq_file *seq, void *data)
{
	struct gcov_iterator *iter = data;

	if (gcov_iter_write(iter, seq))
		return -EINVAL;
	return 0;
}

static void gcov_seq_stop(struct seq_file *seq, void *data)
{
	/* Unused. */
}

static const struct seq_operations gcov_seq_ops = {
	.start	= gcov_seq_start,
	.next	= gcov_seq_next,
	.show	= gcov_seq_show,
	.stop	= gcov_seq_stop,
};

/*
 * Return the profiling data set for a given node. This can either be the
 * original profiling data structure or a duplicate (also called "ghost")
 * in case the associated object file has been unloaded.
 */
static struct gcov_info *get_node_info(struct gcov_node *node)
{
	if (node->info)
		return node->info;

	return node->ghost;
}

/*
 * open() implementation for gcov data files. Create a copy of the profiling
 * data set and initialize the iterator and seq_file interface.
 */
static int gcov_seq_open(struct inode *inode, struct file *file)
{
	struct gcov_node *node = inode->i_private;
	struct gcov_iterator *iter;
	struct seq_file *seq;
	struct gcov_info *info;
	int rc = -ENOMEM;

	mutex_lock(&node_lock);
	/*
	 * Read from a profiling data copy to minimize reference tracking
	 * complexity and concurrent access.
	 */
	info = gcov_info_dup(get_node_info(node));
	if (!info)
		goto out_unlock;
	iter = gcov_iter_new(info);
	if (!iter)
		goto err_free_info;
	rc = seq_open(file, &gcov_seq_ops);
	if (rc)
		goto err_free_iter_info;
	seq = file->private_data;
	seq->private = iter;
out_unlock:
	mutex_unlock(&node_lock);
	return rc;

err_free_iter_info:
	gcov_iter_free(iter);
err_free_info:
	gcov_info_free(info);
	goto out_unlock;
}

/*
 * release() implementation for gcov data files. Release resources allocated
 * by open().
 */
static int gcov_seq_release(struct inode *inode, struct file *file)
{
	struct gcov_iterator *iter;
	struct gcov_info *info;
	struct seq_file *seq;

	seq = file->private_data;
	iter = seq->private;
	info = gcov_iter_get_info(iter);
	gcov_iter_free(iter);
	gcov_info_free(info);
	seq_release(inode, file);

	return 0;
}

/*
 * Find a node by the associated data file name. Needs to be called with
 * node_lock held.
 */
static struct gcov_node *get_node_by_name(const char *name)
{
	struct gcov_node *node;
	struct gcov_info *info;

	list_for_each_entry(node, &all_head, all) {
		info = get_node_info(node);
		if (info && (strcmp(info->filename, name) == 0))
			return node;
	}

	return NULL;
}

static void remove_node(struct gcov_node *node);

/*
 * write() implementation for gcov data files. Reset profiling data for the
 * associated file. If the object file has been unloaded (i.e. this is
 * a "ghost" node), remove the debug fs node as well.
 */
static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
			      size_t len, loff_t *pos)
{
	struct seq_file *seq;
	struct gcov_info *info;
	struct gcov_node *node;

	seq = file->private_data;
	info = gcov_iter_get_info(seq->private);
	mutex_lock(&node_lock);
	node = get_node_by_name(info->filename);
	if (node) {
		/* Reset counts or remove node for unloaded modules. */
		if (node->ghost)
			remove_node(node);
		else
			gcov_info_reset(node->info);
	}
	/* Reset counts for open file. */
	gcov_info_reset(info);
	mutex_unlock(&node_lock);

	return len;
}

/*
 * Given a string <path> representing a file path of format:
 *   path/to/file.gcda
 * construct and return a new string:
 *   <dir/>path/to/file.<ext>
 */
static char *link_target(const char *dir, const char *path, const char *ext)
{
	char *target;
	char *old_ext;
	char *copy;

	copy = kstrdup(path, GFP_KERNEL);
	if (!copy)
		return NULL;
	old_ext = strrchr(copy, '.');
	if (old_ext)
		*old_ext = '\0';
	if (dir)
		target = kasprintf(GFP_KERNEL, "%s/%s.%s", dir, copy, ext);
	else
		target = kasprintf(GFP_KERNEL, "%s.%s", copy, ext);
	kfree(copy);

	return target;
}

/*
 * Construct a string representing the symbolic link target for the given
 * gcov data file name and link type. Depending on the link type and the
 * location of the data file, the link target can either point to a
 * subdirectory of srctree, objtree or in an external location.
 */
static char *get_link_target(const char *filename, const struct gcov_link *ext)
{
	const char *rel;
	char *result;

	if (strncmp(filename, objtree, strlen(objtree)) == 0) {
		rel = filename + strlen(objtree) + 1;
		if (ext->dir == SRC_TREE)
			result = link_target(srctree, rel, ext->ext);
		else
			result = link_target(objtree, rel, ext->ext);
	} else {
		/* External compilation. */
		result = link_target(NULL, filename, ext->ext);
	}

	return result;
}

#define SKEW_PREFIX	".tmp_"

/*
 * For a filename .tmp_filename.ext return filename.ext. Needed to compensate
 * for filename skewing caused by the mod-versioning mechanism.
 */
static const char *deskew(const char *basename)
{
	if (strncmp(basename, SKEW_PREFIX, sizeof(SKEW_PREFIX) - 1) == 0)
		return basename + sizeof(SKEW_PREFIX) - 1;
	return basename;
}

/*
 * Create links to additional files (usually .c and .gcno files) which the
 * gcov tool expects to find in the same directory as the gcov data file.
 */
static void add_links(struct gcov_node *node, struct dentry *parent)
{
	char *basename;
	char *target;
	int num;
	int i;

	for (num = 0; gcov_link[num].ext; num++)
		/* Nothing. */;
	node->links = kcalloc(num, sizeof(struct dentry *), GFP_KERNEL);
	if (!node->links)
		return;
	for (i = 0; i < num; i++) {
		target = get_link_target(get_node_info(node)->filename,
					 &gcov_link[i]);
		if (!target)
			goto out_err;
		basename = strrchr(target, '/');
		if (!basename)
			goto out_err;
		basename++;
		node->links[i] = debugfs_create_symlink(deskew(basename),
							parent, target);
		if (!node->links[i])
			goto out_err;
		kfree(target);
	}

	return;
out_err:
	kfree(target);
	while (i-- > 0)
		debugfs_remove(node->links[i]);
	kfree(node->links);
	node->links = NULL;
}

static const struct file_operations gcov_data_fops = {
	.open		= gcov_seq_open,
	.release	= gcov_seq_release,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.write		= gcov_seq_write,
};

/* Basic initialization of a new node. */
static void init_node(struct gcov_node *node, struct gcov_info *info,
		      const char *name, struct gcov_node *parent)
{
	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->children);
	INIT_LIST_HEAD(&node->all);
	node->info = info;
	node->parent = parent;
	if (name)
		strcpy(node->name, name);
}

/*
 * Create a new node and associated debugfs entry. Needs to be called with
 * node_lock held.
 */
static struct gcov_node *new_node(struct gcov_node *parent,
				  struct gcov_info *info, const char *name)
{
	struct gcov_node *node;

	node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL);
	if (!node) {
		pr_warning("out of memory\n");
		return NULL;
	}
	init_node(node, info, name, parent);
	/* Differentiate between gcov data file nodes and directory nodes. */
	if (info) {
		node->dentry = debugfs_create_file(deskew(node->name), 0600,
					parent->dentry, node, &gcov_data_fops);
	} else
		node->dentry = debugfs_create_dir(node->name, parent->dentry);
	if (!node->dentry) {
		pr_warning("could not create file\n");
		kfree(node);
		return NULL;
	}
	if (info)
		add_links(node, parent->dentry);
	list_add(&node->list, &parent->children);
	list_add(&node->all, &all_head);

	return node;
}

/* Remove symbolic links associated with node. */
static void remove_links(struct gcov_node *node)
{
	int i;

	if (!node->links)
		return;
	for (i = 0; gcov_link[i].ext; i++)
		debugfs_remove(node->links[i]);
	kfree(node->links);
	node->links = NULL;
}

/*
 * Remove node from all lists and debugfs and release associated resources.
 * Needs to be called with node_lock held.
 */
static void release_node(struct gcov_node *node)
{
	list_del(&node->list);
	list_del(&node->all);
	debugfs_remove(node->dentry);
	remove_links(node);
	if (node->ghost)
		gcov_info_free(node->ghost);
	kfree(node);
}

/* Release node and empty parents. Needs to be called with node_lock held. */
static void remove_node(struct gcov_node *node)
{
	struct gcov_node *parent;

	while ((node != &root_node) && list_empty(&node->children)) {
		parent = node->parent;
		release_node(node);
		node = parent;
	}
}

/*
 * Find child node with given basename. Needs to be called with node_lock
 * held.
 */
static struct gcov_node *get_child_by_name(struct gcov_node *parent,
					   const char *name)
{
	struct gcov_node *node;

	list_for_each_entry(node, &parent->children, list) {
		if (strcmp(node->name, name) == 0)
			return node;
	}

	return NULL;
}

/*
 * write() implementation for reset file. Reset all profiling data to zero
 * and remove ghost nodes.
 */
static ssize_t reset_write(struct file *file, const char __user *addr,
			   size_t len, loff_t *pos)
{
	struct gcov_node *node;

	mutex_lock(&node_lock);
restart:
	list_for_each_entry(node, &all_head, all) {
		if (node->info)
			gcov_info_reset(node->info);
		else if (list_empty(&node->children)) {
			remove_node(node);
			/* Several nodes may have gone - restart loop. */
			goto restart;
		}
	}
	mutex_unlock(&node_lock);

	return len;
}

/* read() implementation for reset file. Unused. */
static ssize_t reset_read(struct file *file, char __user *addr, size_t len,
			  loff_t *pos)
{
	/* Allow read operation so that a recursive copy won't fail. */
	return 0;
}

static const struct file_operations gcov_reset_fops = {
	.write	= reset_write,
	.read	= reset_read,
};

/*
 * Create a node for a given profiling data set and add it to all lists and
 * debugfs. Needs to be called with node_lock held.
 */
static void add_node(struct gcov_info *info)
{
	char *filename;
	char *curr;
	char *next;
	struct gcov_node *parent;
	struct gcov_node *node;

	filename = kstrdup(info->filename, GFP_KERNEL);
	if (!filename)
		return;
	parent = &root_node;
	/* Create directory nodes along the path. */
	for (curr = filename; (next = strchr(curr, '/')); curr = next + 1) {
		if (curr == next)
			continue;
		*next = 0;
		if (strcmp(curr, ".") == 0)
			continue;
		if (strcmp(curr, "..") == 0) {
			if (!parent->parent)
				goto err_remove;
			parent = parent->parent;
			continue;
		}
		node = get_child_by_name(parent, curr);
		if (!node) {
			node = new_node(parent, NULL, curr);
			if (!node)
				goto err_remove;
		}
		parent = node;
	}
	/* Create file node. */
	node = new_node(parent, info, curr);
	if (!node)
		goto err_remove;
out:
	kfree(filename);
	return;

err_remove:
	remove_node(parent);
	goto out;
}

/*
 * The profiling data set associated with this node is being unloaded. Store a
 * copy of the profiling data and turn this node into a "ghost".
 */
static int ghost_node(struct gcov_node *node)
{
	node->ghost = gcov_info_dup(node->info);
	if (!node->ghost) {
		pr_warning("could not save data for '%s' (out of memory)\n",
			   node->info->filename);
		return -ENOMEM;
	}
	node->info = NULL;

	return 0;
}

/*
 * Profiling data for this node has been loaded again. Add profiling data
 * from previous instantiation and turn this node into a regular node.
 */
static void revive_node(struct gcov_node *node, struct gcov_info *info)
{
	if (gcov_info_is_compatible(node->ghost, info))
		gcov_info_add(info, node->ghost);
	else {
		pr_warning("discarding saved data for '%s' (version changed)\n",
			   info->filename);
	}
	gcov_info_free(node->ghost);
	node->ghost = NULL;
	node->info = info;
}

/*
 * Callback to create/remove profiling files when code compiled with
 * -fprofile-arcs is loaded/unloaded.
 */
void gcov_event(enum gcov_action action, struct gcov_info *info)
{
	struct gcov_node *node;

	mutex_lock(&node_lock);
	node = get_node_by_name(info->filename);
	switch (action) {
	case GCOV_ADD:
		/* Add new node or revive ghost. */
		if (!node) {
			add_node(info);
			break;
		}
		if (gcov_persist)
			revive_node(node, info);
		else {
			pr_warning("could not add '%s' (already exists)\n",
				   info->filename);
		}
		break;
	case GCOV_REMOVE:
		/* Remove node or turn into ghost. */
		if (!node) {
			pr_warning("could not remove '%s' (not found)\n",
				   info->filename);
			break;
		}
		if (gcov_persist) {
			if (!ghost_node(node))
				break;
		}
		remove_node(node);
		break;
	}
	mutex_unlock(&node_lock);
}

/* Create debugfs entries. */
static __init int gcov_fs_init(void)
{
	int rc = -EIO;

	init_node(&root_node, NULL, NULL, NULL);
	/*
	 * /sys/kernel/debug/gcov will be parent for the reset control file
	 * and all profiling files.
	 */
	root_node.dentry = debugfs_create_dir("gcov", NULL);
	if (!root_node.dentry)
		goto err_remove;
	/*
	 * Create reset file which resets all profiling counts when written
	 * to.
	 */
	reset_dentry = debugfs_create_file("reset", 0600, root_node.dentry,
					   NULL, &gcov_reset_fops);
	if (!reset_dentry)
		goto err_remove;
	/* Replay previous events to get our fs hierarchy up-to-date. */
	gcov_enable_events();

	return 0;

err_remove:
	pr_err("init failed\n");
	if (root_node.dentry)
		debugfs_remove(root_node.dentry);

	return rc;
}
device_initcall(gcov_fs_init);
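/*
 * Illustrative userspace sketch, not part of the original file: the "reset"
 * control file created by gcov_fs_init() above accepts any write and zeroes
 * all counters via reset_write(). The path assumes the default debugfs mount
 * at /sys/kernel/debug.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/gcov/reset", "w");

	if (!f) {
		perror("open gcov reset file");
		return 1;
	}
	/* any data triggers reset_write(); the value itself is ignored */
	fputs("0\n", f);
	fclose(f);
	return 0;
}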
gpl-2.0
kratos1988/operating_systems
drivers/hwmon/tmp401.c
530
19007
/* tmp401.c
 *
 * Copyright (C) 2007,2008 Hans de Goede <hdegoede@redhat.com>
 * Preliminary tmp411 support by:
 * Gabriel Konat, Sander Leget, Wouter Willems
 * Copyright (C) 2009 Andre Prendel <andre.prendel@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Driver for the Texas Instruments TMP401 SMBUS temperature sensor IC.
 *
 * Note this IC is in some aspect similar to the LM90, but it has quite a
 * few differences too, for example the local temp has a higher resolution
 * and thus has 16 bits registers for its value and limit instead of 8 bits.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>

/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END };

/* Insmod parameters */
I2C_CLIENT_INSMOD_2(tmp401, tmp411);

/*
 * The TMP401 registers, note some registers have different addresses for
 * reading and writing
 */
#define TMP401_STATUS				0x02
#define TMP401_CONFIG_READ			0x03
#define TMP401_CONFIG_WRITE			0x09
#define TMP401_CONVERSION_RATE_READ		0x04
#define TMP401_CONVERSION_RATE_WRITE		0x0A
#define TMP401_TEMP_CRIT_HYST			0x21
#define TMP401_CONSECUTIVE_ALERT		0x22
#define TMP401_MANUFACTURER_ID_REG		0xFE
#define TMP401_DEVICE_ID_REG			0xFF
#define TMP411_N_FACTOR_REG			0x18

static const u8 TMP401_TEMP_MSB[2]			= { 0x00, 0x01 };
static const u8 TMP401_TEMP_LSB[2]			= { 0x15, 0x10 };
static const u8 TMP401_TEMP_LOW_LIMIT_MSB_READ[2]	= { 0x06, 0x08 };
static const u8 TMP401_TEMP_LOW_LIMIT_MSB_WRITE[2]	= { 0x0C, 0x0E };
static const u8 TMP401_TEMP_LOW_LIMIT_LSB[2]		= { 0x17, 0x14 };
static const u8 TMP401_TEMP_HIGH_LIMIT_MSB_READ[2]	= { 0x05, 0x07 };
static const u8 TMP401_TEMP_HIGH_LIMIT_MSB_WRITE[2]	= { 0x0B, 0x0D };
static const u8 TMP401_TEMP_HIGH_LIMIT_LSB[2]		= { 0x16, 0x13 };
/* These are called the THERM limit / hysteresis / mask in the datasheet */
static const u8 TMP401_TEMP_CRIT_LIMIT[2]		= { 0x20, 0x19 };

static const u8 TMP411_TEMP_LOWEST_MSB[2]		= { 0x30, 0x34 };
static const u8 TMP411_TEMP_LOWEST_LSB[2]		= { 0x31, 0x35 };
static const u8 TMP411_TEMP_HIGHEST_MSB[2]		= { 0x32, 0x36 };
static const u8 TMP411_TEMP_HIGHEST_LSB[2]		= { 0x33, 0x37 };

/* Flags */
#define TMP401_CONFIG_RANGE		0x04
#define TMP401_CONFIG_SHUTDOWN		0x40
#define TMP401_STATUS_LOCAL_CRIT	0x01
#define TMP401_STATUS_REMOTE_CRIT	0x02
#define TMP401_STATUS_REMOTE_OPEN	0x04
#define TMP401_STATUS_REMOTE_LOW	0x08
#define TMP401_STATUS_REMOTE_HIGH	0x10
#define TMP401_STATUS_LOCAL_LOW		0x20
#define TMP401_STATUS_LOCAL_HIGH	0x40

/* Manufacturer / Device ID's */
#define TMP401_MANUFACTURER_ID	0x55
#define TMP401_DEVICE_ID	0x11
#define TMP411_DEVICE_ID	0x12

/*
 * Functions declarations
 */
static int tmp401_probe(struct i2c_client *client,
			const struct i2c_device_id *id);
static int tmp401_detect(struct i2c_client *client, int kind,
			 struct i2c_board_info *info);
static int tmp401_remove(struct i2c_client *client);
static struct tmp401_data *tmp401_update_device(struct device *dev);

/*
 * Driver data (common to all clients)
 */
static const struct i2c_device_id tmp401_id[] = {
	{ "tmp401", tmp401 },
	{ "tmp411", tmp411 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, tmp401_id);

static struct i2c_driver tmp401_driver = {
	.class		= I2C_CLASS_HWMON,
	.driver = {
		.name	= "tmp401",
	},
	.probe		= tmp401_probe,
	.remove		= tmp401_remove,
	.id_table	= tmp401_id,
	.detect		= tmp401_detect,
	.address_data	= &addr_data,
};

/*
 * Client data (each client gets its own)
 */
struct tmp401_data {
	struct device *hwmon_dev;
	struct mutex update_lock;
	char valid; /* zero until following fields are valid */
	unsigned long last_updated; /* in jiffies */
	int kind;

	/* register values */
	u8 status;
	u8 config;
	u16 temp[2];
	u16 temp_low[2];
	u16 temp_high[2];
	u8 temp_crit[2];
	u8 temp_crit_hyst;
	u16 temp_lowest[2];
	u16 temp_highest[2];
};

/*
 * Sysfs attr show / store functions
 */
static int tmp401_register_to_temp(u16 reg, u8 config)
{
	int temp = reg;

	if (config & TMP401_CONFIG_RANGE)
		temp -= 64 * 256;

	return (temp * 625 + 80) / 160;
}

static u16 tmp401_temp_to_register(long temp, u8 config)
{
	if (config & TMP401_CONFIG_RANGE) {
		temp = SENSORS_LIMIT(temp, -64000, 191000);
		temp += 64000;
	} else
		temp = SENSORS_LIMIT(temp, 0, 127000);

	return (temp * 160 + 312) / 625;
}

static int tmp401_crit_register_to_temp(u8 reg, u8 config)
{
	int temp = reg;

	if (config & TMP401_CONFIG_RANGE)
		temp -= 64;

	return temp * 1000;
}

static u8 tmp401_crit_temp_to_register(long temp, u8 config)
{
	if (config & TMP401_CONFIG_RANGE) {
		temp = SENSORS_LIMIT(temp, -64000, 191000);
		temp += 64000;
	} else
		temp = SENSORS_LIMIT(temp, 0, 127000);

	return (temp + 500) / 1000;
}

static ssize_t show_temp_value(struct device *dev,
	struct device_attribute *devattr, char *buf)
{
	int index = to_sensor_dev_attr(devattr)->index;
	struct tmp401_data *data = tmp401_update_device(dev);

	return sprintf(buf, "%d\n",
		tmp401_register_to_temp(data->temp[index], data->config));
}

static ssize_t show_temp_min(struct device *dev,
	struct device_attribute *devattr, char *buf)
{
	int index = to_sensor_dev_attr(devattr)->index;
	struct tmp401_data *data = tmp401_update_device(dev);

	return sprintf(buf, "%d\n",
		tmp401_register_to_temp(data->temp_low[index], data->config));
}

static ssize_t show_temp_max(struct device *dev,
	struct device_attribute *devattr, char *buf)
{
	int index = to_sensor_dev_attr(devattr)->index;
	struct tmp401_data *data = tmp401_update_device(dev);

	return sprintf(buf, "%d\n",
		tmp401_register_to_temp(data->temp_high[index], data->config));
}

static ssize_t show_temp_crit(struct device *dev,
	struct device_attribute *devattr, char *buf)
{
	int index = to_sensor_dev_attr(devattr)->index;
	struct tmp401_data *data = tmp401_update_device(dev);

	return sprintf(buf, "%d\n",
		tmp401_crit_register_to_temp(data->temp_crit[index],
					     data->config));
}

static ssize_t show_temp_crit_hyst(struct device *dev,
	struct device_attribute *devattr, char *buf)
{
	int temp, index = to_sensor_dev_attr(devattr)->index;
	struct tmp401_data *data = tmp401_update_device(dev);

	mutex_lock(&data->update_lock);
	temp = tmp401_crit_register_to_temp(data->temp_crit[index],
					    data->config);
	temp -= data->temp_crit_hyst * 1000;
	mutex_unlock(&data->update_lock);

	return sprintf(buf, "%d\n", temp);
}

static ssize_t show_temp_lowest(struct device *dev,
	struct device_attribute *devattr, char *buf)
{
	int index = to_sensor_dev_attr(devattr)->index;
	struct tmp401_data *data = tmp401_update_device(dev);

	return sprintf(buf, "%d\n",
		tmp401_register_to_temp(data->temp_lowest[index],
					data->config));
}

static ssize_t show_temp_highest(struct device *dev,
	struct device_attribute *devattr, char *buf)
{
	int index = to_sensor_dev_attr(devattr)->index;
	struct tmp401_data *data = tmp401_update_device(dev);

	return sprintf(buf, "%d\n",
		tmp401_register_to_temp(data->temp_highest[index],
					data->config));
}

static ssize_t show_status(struct device *dev,
	struct device_attribute *devattr, char *buf)
{
	int mask = to_sensor_dev_attr(devattr)->index;
	struct tmp401_data *data = tmp401_update_device(dev);

	if (data->status & mask)
		return sprintf(buf, "1\n");
	else
		return sprintf(buf, "0\n");
}

static ssize_t store_temp_min(struct device *dev, struct device_attribute
	*devattr, const char *buf, size_t count)
{
	int index = to_sensor_dev_attr(devattr)->index;
	struct tmp401_data *data = tmp401_update_device(dev);
	long val;
	u16 reg;

	if (strict_strtol(buf, 10, &val))
		return -EINVAL;

	reg = tmp401_temp_to_register(val, data->config);

	mutex_lock(&data->update_lock);

	i2c_smbus_write_byte_data(to_i2c_client(dev),
		TMP401_TEMP_LOW_LIMIT_MSB_WRITE[index], reg >> 8);
	i2c_smbus_write_byte_data(to_i2c_client(dev),
		TMP401_TEMP_LOW_LIMIT_LSB[index], reg & 0xFF);

	data->temp_low[index] = reg;

	mutex_unlock(&data->update_lock);

	return count;
}

static ssize_t store_temp_max(struct device *dev, struct device_attribute
	*devattr, const char *buf, size_t count)
{
	int index = to_sensor_dev_attr(devattr)->index;
	struct tmp401_data *data = tmp401_update_device(dev);
	long val;
	u16 reg;

	if (strict_strtol(buf, 10, &val))
		return -EINVAL;

	reg = tmp401_temp_to_register(val, data->config);

	mutex_lock(&data->update_lock);

	i2c_smbus_write_byte_data(to_i2c_client(dev),
		TMP401_TEMP_HIGH_LIMIT_MSB_WRITE[index], reg >> 8);
	i2c_smbus_write_byte_data(to_i2c_client(dev),
		TMP401_TEMP_HIGH_LIMIT_LSB[index], reg & 0xFF);

	data->temp_high[index] = reg;

	mutex_unlock(&data->update_lock);

	return count;
}

static ssize_t store_temp_crit(struct device *dev, struct device_attribute
	*devattr, const char *buf, size_t count)
{
	int index = to_sensor_dev_attr(devattr)->index;
	struct tmp401_data *data = tmp401_update_device(dev);
	long val;
	u8 reg;

	if (strict_strtol(buf, 10, &val))
		return -EINVAL;

	reg = tmp401_crit_temp_to_register(val, data->config);

	mutex_lock(&data->update_lock);

	i2c_smbus_write_byte_data(to_i2c_client(dev),
		TMP401_TEMP_CRIT_LIMIT[index], reg);

	data->temp_crit[index] = reg;

	mutex_unlock(&data->update_lock);

	return count;
}

static ssize_t store_temp_crit_hyst(struct device *dev, struct
	device_attribute *devattr, const char *buf, size_t count)
{
	int temp, index = to_sensor_dev_attr(devattr)->index;
	struct tmp401_data *data = tmp401_update_device(dev);
	long val;
	u8 reg;

	if (strict_strtol(buf, 10, &val))
		return -EINVAL;

	if (data->config & TMP401_CONFIG_RANGE)
		val = SENSORS_LIMIT(val, -64000, 191000);
	else
		val = SENSORS_LIMIT(val, 0, 127000);

	mutex_lock(&data->update_lock);
	temp = tmp401_crit_register_to_temp(data->temp_crit[index],
					    data->config);
	val = SENSORS_LIMIT(val, temp - 255000, temp);
	reg = ((temp - val) + 500) / 1000;

	i2c_smbus_write_byte_data(to_i2c_client(dev),
		TMP401_TEMP_CRIT_HYST, reg);

	data->temp_crit_hyst = reg;

	mutex_unlock(&data->update_lock);

	return count;
}

/*
 * Resets the historical measurements of minimum and maximum temperatures.
 * This is done by writing any value to any of the minimum/maximum registers
 * (0x30-0x37).
 */
static ssize_t reset_temp_history(struct device *dev,
	struct device_attribute	*devattr, const char *buf, size_t count)
{
	long val;

	if (strict_strtol(buf, 10, &val))
		return -EINVAL;

	if (val != 1) {
		dev_err(dev, "temp_reset_history value %ld not"
			" supported. Use 1 to reset the history!\n", val);
		return -EINVAL;
	}
	i2c_smbus_write_byte_data(to_i2c_client(dev),
		TMP411_TEMP_LOWEST_MSB[0], val);

	return count;
}

static struct sensor_device_attribute tmp401_attr[] = {
	SENSOR_ATTR(temp1_input, 0444, show_temp_value, NULL, 0),
	SENSOR_ATTR(temp1_min, 0644, show_temp_min, store_temp_min, 0),
	SENSOR_ATTR(temp1_max, 0644, show_temp_max, store_temp_max, 0),
	SENSOR_ATTR(temp1_crit, 0644, show_temp_crit, store_temp_crit, 0),
	SENSOR_ATTR(temp1_crit_hyst, 0644, show_temp_crit_hyst,
		    store_temp_crit_hyst, 0),
	SENSOR_ATTR(temp1_min_alarm, 0444, show_status, NULL,
		    TMP401_STATUS_LOCAL_LOW),
	SENSOR_ATTR(temp1_max_alarm, 0444, show_status, NULL,
		    TMP401_STATUS_LOCAL_HIGH),
	SENSOR_ATTR(temp1_crit_alarm, 0444, show_status, NULL,
		    TMP401_STATUS_LOCAL_CRIT),
	SENSOR_ATTR(temp2_input, 0444, show_temp_value, NULL, 1),
	SENSOR_ATTR(temp2_min, 0644, show_temp_min, store_temp_min, 1),
	SENSOR_ATTR(temp2_max, 0644, show_temp_max, store_temp_max, 1),
	SENSOR_ATTR(temp2_crit, 0644, show_temp_crit, store_temp_crit, 1),
	SENSOR_ATTR(temp2_crit_hyst, 0444, show_temp_crit_hyst, NULL, 1),
	SENSOR_ATTR(temp2_fault, 0444, show_status, NULL,
		    TMP401_STATUS_REMOTE_OPEN),
	SENSOR_ATTR(temp2_min_alarm, 0444, show_status, NULL,
		    TMP401_STATUS_REMOTE_LOW),
	SENSOR_ATTR(temp2_max_alarm, 0444, show_status, NULL,
		    TMP401_STATUS_REMOTE_HIGH),
	SENSOR_ATTR(temp2_crit_alarm, 0444, show_status, NULL,
		    TMP401_STATUS_REMOTE_CRIT),
};

/*
 * Additional features of the TMP411 chip.
 * The TMP411 stores the minimum and maximum
 * temperature measured since power-on, chip-reset, or
 * minimum and maximum register reset for both the local
 * and remote channels.
 */
static struct sensor_device_attribute tmp411_attr[] = {
	SENSOR_ATTR(temp1_highest, 0444, show_temp_highest, NULL, 0),
	SENSOR_ATTR(temp1_lowest, 0444, show_temp_lowest, NULL, 0),
	SENSOR_ATTR(temp2_highest, 0444, show_temp_highest, NULL, 1),
	SENSOR_ATTR(temp2_lowest, 0444, show_temp_lowest, NULL, 1),
	SENSOR_ATTR(temp_reset_history, 0200, NULL, reset_temp_history, 0),
};

/*
 * Begin non sysfs callback code (aka Real code)
 */
static void tmp401_init_client(struct i2c_client *client)
{
	int config, config_orig;

	/* Set the conversion rate to 2 Hz */
	i2c_smbus_write_byte_data(client, TMP401_CONVERSION_RATE_WRITE, 5);

	/* Start conversions (disable shutdown if necessary) */
	config = i2c_smbus_read_byte_data(client, TMP401_CONFIG_READ);
	if (config < 0) {
		dev_warn(&client->dev, "Initialization failed!\n");
		return;
	}

	config_orig = config;
	config &= ~TMP401_CONFIG_SHUTDOWN;

	if (config != config_orig)
		i2c_smbus_write_byte_data(client, TMP401_CONFIG_WRITE, config);
}

static int tmp401_detect(struct i2c_client *client, int kind,
			 struct i2c_board_info *info)
{
	struct i2c_adapter *adapter = client->adapter;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;

	/* Detect and identify the chip */
	if (kind <= 0) {
		u8 reg;

		reg = i2c_smbus_read_byte_data(client,
					       TMP401_MANUFACTURER_ID_REG);
		if (reg != TMP401_MANUFACTURER_ID)
			return -ENODEV;

		reg = i2c_smbus_read_byte_data(client, TMP401_DEVICE_ID_REG);

		switch (reg) {
		case TMP401_DEVICE_ID:
			kind = tmp401;
			break;
		case TMP411_DEVICE_ID:
			kind = tmp411;
			break;
		default:
			return -ENODEV;
		}

		reg = i2c_smbus_read_byte_data(client, TMP401_CONFIG_READ);
		if (reg & 0x1b)
			return -ENODEV;

		reg = i2c_smbus_read_byte_data(client,
					       TMP401_CONVERSION_RATE_READ);
		/* Datasheet says: 0x1-0x6 */
		if (reg > 15)
			return -ENODEV;
	}
	strlcpy(info->type, tmp401_id[kind - 1].name, I2C_NAME_SIZE);

	return 0;
}

static int tmp401_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	int i, err = 0;
	struct tmp401_data *data;
	const char *names[] = { "TMP401", "TMP411" };

	data = kzalloc(sizeof(struct tmp401_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	i2c_set_clientdata(client, data);
	mutex_init(&data->update_lock);
	data->kind = id->driver_data;

	/* Initialize the TMP401 chip */
	tmp401_init_client(client);

	/* Register sysfs hooks */
	for (i = 0; i < ARRAY_SIZE(tmp401_attr); i++) {
		err = device_create_file(&client->dev,
					 &tmp401_attr[i].dev_attr);
		if (err)
			goto exit_remove;
	}

	/* Register additional tmp411 sysfs hooks */
	if (data->kind == tmp411) {
		for (i = 0; i < ARRAY_SIZE(tmp411_attr); i++) {
			err = device_create_file(&client->dev,
						 &tmp411_attr[i].dev_attr);
			if (err)
				goto exit_remove;
		}
	}

	data->hwmon_dev = hwmon_device_register(&client->dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		data->hwmon_dev = NULL;
		goto exit_remove;
	}

	dev_info(&client->dev, "Detected TI %s chip\n",
		 names[data->kind - 1]);

	return 0;

exit_remove:
	tmp401_remove(client); /* will also free data for us */
	return err;
}

static int tmp401_remove(struct i2c_client *client)
{
	struct tmp401_data *data = i2c_get_clientdata(client);
	int i;

	if (data->hwmon_dev)
		hwmon_device_unregister(data->hwmon_dev);

	for (i = 0; i < ARRAY_SIZE(tmp401_attr); i++)
		device_remove_file(&client->dev, &tmp401_attr[i].dev_attr);

	if (data->kind == tmp411) {
		for (i = 0; i < ARRAY_SIZE(tmp411_attr); i++)
			device_remove_file(&client->dev,
					   &tmp411_attr[i].dev_attr);
	}

	kfree(data);
	return 0;
}

static struct tmp401_data *tmp401_update_device_reg16(
	struct i2c_client *client, struct tmp401_data *data)
{
	int i;

	for (i = 0; i < 2; i++) {
		/*
		 * High byte must be read first immediately followed
		 * by the low byte
		 */
		data->temp[i] = i2c_smbus_read_byte_data(client,
						TMP401_TEMP_MSB[i]) << 8;
		data->temp[i] |= i2c_smbus_read_byte_data(client,
						TMP401_TEMP_LSB[i]);
		data->temp_low[i] = i2c_smbus_read_byte_data(client,
					TMP401_TEMP_LOW_LIMIT_MSB_READ[i]) << 8;
		data->temp_low[i] |= i2c_smbus_read_byte_data(client,
					TMP401_TEMP_LOW_LIMIT_LSB[i]);
		data->temp_high[i] = i2c_smbus_read_byte_data(client,
					TMP401_TEMP_HIGH_LIMIT_MSB_READ[i]) << 8;
		data->temp_high[i] |= i2c_smbus_read_byte_data(client,
					TMP401_TEMP_HIGH_LIMIT_LSB[i]);
		data->temp_crit[i] = i2c_smbus_read_byte_data(client,
					TMP401_TEMP_CRIT_LIMIT[i]);

		if (data->kind == tmp411) {
			data->temp_lowest[i] = i2c_smbus_read_byte_data(client,
						TMP411_TEMP_LOWEST_MSB[i]) << 8;
			data->temp_lowest[i] |= i2c_smbus_read_byte_data(
					client, TMP411_TEMP_LOWEST_LSB[i]);

			data->temp_highest[i] = i2c_smbus_read_byte_data(
					client, TMP411_TEMP_HIGHEST_MSB[i]) << 8;
			data->temp_highest[i] |= i2c_smbus_read_byte_data(
					client, TMP411_TEMP_HIGHEST_LSB[i]);
		}
	}
	return data;
}

static struct tmp401_data *tmp401_update_device(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct tmp401_data *data = i2c_get_clientdata(client);

	mutex_lock(&data->update_lock);

	if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
		data->status = i2c_smbus_read_byte_data(client, TMP401_STATUS);
		data->config = i2c_smbus_read_byte_data(client,
						TMP401_CONFIG_READ);
		tmp401_update_device_reg16(client, data);

		data->temp_crit_hyst = i2c_smbus_read_byte_data(client,
						TMP401_TEMP_CRIT_HYST);

		data->last_updated = jiffies;
		data->valid = 1;
	}

	mutex_unlock(&data->update_lock);

	return data;
}

static int __init tmp401_init(void)
{
	return i2c_add_driver(&tmp401_driver);
}

static void __exit tmp401_exit(void)
{
	i2c_del_driver(&tmp401_driver);
}

MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("Texas Instruments TMP401 temperature sensor driver");
MODULE_LICENSE("GPL");

module_init(tmp401_init);
module_exit(tmp401_exit);
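/*
 * Illustrative sketch, not part of the original file: the conversion helper
 * tmp401_register_to_temp() above maps the 16-bit temperature register to
 * millidegrees Celsius (0.0625 degC per LSB of the high 12 bits). This is a
 * standalone userspace replica of the standard-range math with a sample
 * value; only the formula comes from the driver.
 */
#include <stdio.h>

/* same math as tmp401_register_to_temp(), standard (not extended) range */
static int register_to_mdeg(unsigned short reg)
{
	return ((int)reg * 625 + 80) / 160;
}

int main(void)
{
	/* MSB 0x19 (25 degC), LSB 0x00: 0x1900 -> 25000 mdegC */
	printf("%d\n", register_to_mdeg(0x1900));
	return 0;
}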
gpl-2.0
Perferom/android_kernel_lge_msm7x27-3.0.x
drivers/net/wireless/bcmdhd/dhd_custom_gpio.c
786
8158
/*
 * Customer code to add GPIO control during WLAN start/stop
 * Copyright (C) 1999-2011, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 * $Id: dhd_custom_gpio.c,v 1.2.42.1 2010-10-19 00:41:09 Exp $
 */

#include <typedefs.h>
#include <linuxver.h>
#include <osl.h>
#include <bcmutils.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <wlioctl.h>
#include <wl_iw.h>

#define WL_ERROR(x) printf x
#define WL_TRACE(x)

#ifdef CUSTOMER_HW
extern void bcm_wlan_power_off(int);
extern void bcm_wlan_power_on(int);
#endif /* CUSTOMER_HW */
#if defined(CUSTOMER_HW2)
#ifdef CONFIG_WIFI_CONTROL_FUNC
int wifi_set_power(int on, unsigned long msec);
int wifi_get_irq_number(unsigned long *irq_flags_ptr);
int wifi_get_mac_addr(unsigned char *buf);
void *wifi_get_country_code(char *ccode);
#else
int wifi_set_power(int on, unsigned long msec) { return -1; }
int wifi_get_irq_number(unsigned long *irq_flags_ptr) { return -1; }
int wifi_get_mac_addr(unsigned char *buf) { return -1; }
void *wifi_get_country_code(char *ccode) { return NULL; }
#endif /* CONFIG_WIFI_CONTROL_FUNC */
#endif /* CUSTOMER_HW2 */

#if defined(OOB_INTR_ONLY)

#if defined(BCMLXSDMMC)
extern int sdioh_mmc_irq(int irq);
#endif /* (BCMLXSDMMC) */

#ifdef CUSTOMER_HW3
#include <mach/gpio.h>
#endif

/* Customer specific Host GPIO definition */
static int dhd_oob_gpio_num = -1;

module_param(dhd_oob_gpio_num, int, 0644);
MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number");

/* This function will return:
 * 1) return :  Host gpio interrupt number per customer platform
 * 2) irq_flags_ptr : Type of Host interrupt as Level or Edge
 *
 * NOTE :
 * Customer should check his platform definitions
 * and his Host Interrupt spec
 * to figure out the proper setting for his platform.
 * Broadcom provides just reference settings as example.
 *
 */
int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr)
{
	int host_oob_irq = 0;

#ifdef CUSTOMER_HW2
	host_oob_irq = wifi_get_irq_number(irq_flags_ptr);

#else
#if defined(CUSTOM_OOB_GPIO_NUM)
	if (dhd_oob_gpio_num < 0) {
		dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM;
	}
#endif /* CUSTOMER_HW2 */

	if (dhd_oob_gpio_num < 0) {
		WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined \n",
			__FUNCTION__));
		return (dhd_oob_gpio_num);
	}

	WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n",
		__FUNCTION__, dhd_oob_gpio_num));

#if defined CUSTOMER_HW
	host_oob_irq = MSM_GPIO_TO_INT(dhd_oob_gpio_num);
#elif defined CUSTOMER_HW3
	gpio_request(dhd_oob_gpio_num, "oob irq");
	host_oob_irq = gpio_to_irq(dhd_oob_gpio_num);
	gpio_direction_input(dhd_oob_gpio_num);
#endif /* CUSTOMER_HW */
#endif /* CUSTOMER_HW2 */

	return (host_oob_irq);
}
#endif /* defined(OOB_INTR_ONLY) */

/* Customer function to control hw specific wlan gpios */
void
dhd_customer_gpio_wlan_ctrl(int onoff)
{
	switch (onoff) {
	case WLAN_RESET_OFF:
		WL_TRACE(("%s: call customer specific GPIO to insert WLAN RESET\n",
			__FUNCTION__));
#ifdef CUSTOMER_HW
		bcm_wlan_power_off(2);
#endif /* CUSTOMER_HW */
#ifdef CUSTOMER_HW2
		wifi_set_power(0, 0);
#endif
		WL_ERROR(("=========== WLAN placed in RESET ========\n"));
		break;

	case WLAN_RESET_ON:
		WL_TRACE(("%s: call customer specific GPIO to remove WLAN RESET\n",
			__FUNCTION__));
#ifdef CUSTOMER_HW
		bcm_wlan_power_on(2);
#endif /* CUSTOMER_HW */
#ifdef CUSTOMER_HW2
		wifi_set_power(1, 0);
#endif
		WL_ERROR(("=========== WLAN going back to live ========\n"));
		break;

	case WLAN_POWER_OFF:
		WL_TRACE(("%s: call customer specific GPIO to turn off WL_REG_ON\n",
			__FUNCTION__));
#ifdef CUSTOMER_HW
		bcm_wlan_power_off(1);
#endif /* CUSTOMER_HW */
		break;

	case WLAN_POWER_ON:
		WL_TRACE(("%s: call customer specific GPIO to turn on WL_REG_ON\n",
			__FUNCTION__));
#ifdef CUSTOMER_HW
		bcm_wlan_power_on(1);
		/* Let customer power get stable */
		OSL_DELAY(200);
#endif /* CUSTOMER_HW */
		break;
	}
}

#ifdef GET_CUSTOM_MAC_ENABLE
/* Function to get custom MAC address */
int
dhd_custom_get_mac_address(unsigned char *buf)
{
	int ret = 0;

	WL_TRACE(("%s Enter\n", __FUNCTION__));
	if (!buf)
		return -EINVAL;

	/* Customer access to MAC address stored outside of DHD driver */
#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	ret = wifi_get_mac_addr(buf);
#endif

#ifdef EXAMPLE_GET_MAC
	/* EXAMPLE code */
	{
		struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}};
		bcopy((char *)&ea_example, buf, sizeof(struct ether_addr));
	}
#endif /* EXAMPLE_GET_MAC */

	return ret;
}
#endif /* GET_CUSTOM_MAC_ENABLE */

/* Customized Locale table : OPTIONAL feature */
const struct cntry_locales_custom translate_custom_table[] = {
/* Table should be filled out based on custom platform regulatory requirement */
#ifdef EXAMPLE_TABLE
	{"",   "XY", 4},  /* Universal if Country code is unknown or empty */
	{"US", "US", 69}, /* input ISO "US" to : US regrev 69 */
	{"CA", "US", 69}, /* input ISO "CA" to : US regrev 69 */
	{"EU", "EU", 5},  /* European union countries to : EU regrev 05 */
	{"AT", "EU", 5},
	{"BE", "EU", 5},
	{"BG", "EU", 5},
	{"CY", "EU", 5},
	{"CZ", "EU", 5},
	{"DK", "EU", 5},
	{"EE", "EU", 5},
	{"FI", "EU", 5},
	{"FR", "EU", 5},
	{"DE", "EU", 5},
	{"GR", "EU", 5},
	{"HU", "EU", 5},
	{"IE", "EU", 5},
	{"IT", "EU", 5},
	{"LV", "EU", 5},
	{"LI", "EU", 5},
	{"LT", "EU", 5},
	{"LU", "EU", 5},
	{"MT", "EU", 5},
	{"NL", "EU", 5},
	{"PL", "EU", 5},
	{"PT", "EU", 5},
	{"RO", "EU", 5},
	{"SK", "EU", 5},
	{"SI", "EU", 5},
	{"ES", "EU", 5},
	{"SE", "EU", 5},
	{"GB", "EU", 5},
	{"KR", "XY", 3},
	{"AU", "XY", 3},
	{"CN", "XY", 3}, /* input ISO "CN" to : XY regrev 03 */
	{"TW", "XY", 3},
	{"AR", "XY", 3},
	{"MX", "XY", 3},
	{"IL", "IL", 0},
	{"CH", "CH", 0},
	{"TR", "TR", 0},
	{"NO", "NO", 0},
#endif /* EXAMPLE_TABLE */
};

/* Customized Locale convertor
*  input : ISO 3166-1 country abbreviation
*  output: customized cspec
*/
void get_customized_country_code(char *country_iso_code, wl_country_t *cspec)
{
#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))

	struct cntry_locales_custom *cloc_ptr;

	if (!cspec)
		return;

	cloc_ptr = wifi_get_country_code(country_iso_code);
	if (cloc_ptr) {
		strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ);
		cspec->rev = cloc_ptr->custom_locale_rev;
	}
	return;
#else
	int size, i;

	size = ARRAYSIZE(translate_custom_table);

	if (cspec == 0)
		return;

	if (size == 0)
		return;

	for (i = 0; i < size; i++) {
		if (strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) {
			memcpy(cspec->ccode,
				translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ);
			cspec->rev = translate_custom_table[i].custom_locale_rev;
			return;
		}
	}
#ifdef EXAMPLE_TABLE
	/* if no country code matched return first universal code from translate_custom_table */
	memcpy(cspec->ccode, translate_custom_table[0].custom_locale, WLC_CNTRY_BUF_SZ);
	cspec->rev = translate_custom_table[0].custom_locale_rev;
#endif /* EXAMPLE_TABLE */
	return;
#endif /* defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
}
gpl-2.0
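The fallback path of get_customized_country_code() above is a plain linear scan of translate_custom_table, with entry 0 doubling as the universal default. A minimal standalone sketch of the same lookup; struct locale_entry and lookup_locale() are hypothetical stand-ins for the driver's cntry_locales_custom and wl_country_t machinery, and the sample rows mirror the EXAMPLE_TABLE values:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for struct cntry_locales_custom. */
struct locale_entry {
	const char *iso_abbrev;
	const char *custom_locale;
	int rev;
};

static const struct locale_entry table[] = {
	{"",   "XY", 4},	/* universal default, as in EXAMPLE_TABLE */
	{"US", "US", 69},
	{"DE", "EU", 5},
};

/* Linear scan; fall back to the first (universal) entry on a miss,
 * which is what the driver does when EXAMPLE_TABLE is defined. */
static const struct locale_entry *lookup_locale(const char *iso)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (strcmp(iso, table[i].iso_abbrev) == 0)
			return &table[i];
	return &table[0];
}

int main(void)
{
	const struct locale_entry *e = lookup_locale("DE");

	printf("DE -> %s regrev %d\n", e->custom_locale, e->rev);	/* EU regrev 5 */
	return 0;
}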
jigpu/input
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
786
1552
/* * Copyright 2013 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs */ #include "ram.h" int nv10_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram) { struct nvkm_device *device = fb->subdev.device; u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000; u32 cfg0 = nvkm_rd32(device, 0x100200); enum nvkm_ram_type type; if (cfg0 & 0x00000001) type = NVKM_RAM_TYPE_DDR1; else type = NVKM_RAM_TYPE_SDRAM; return nvkm_ram_new_(&nv04_ram_func, fb, type, size, 0, pram); }
gpl-2.0
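nv10_ram_new() above recovers everything it needs from two priv registers: the top byte of 0x10020c holds the VRAM size (kept as a byte count by masking with 0xff000000 rather than shifting), and bit 0 of 0x100200 distinguishes DDR1 from SDRAM. A hedged standalone illustration of that decode; rd32() and its register contents are invented here purely so the masks can be exercised:

#include <stdio.h>
#include <stdint.h>

/* Invented register contents, for illustration only. */
static uint32_t rd32(uint32_t reg)
{
	switch (reg) {
	case 0x10020c: return 0x08000123;	/* top byte encodes 128 MiB */
	case 0x100200: return 0x00000001;	/* bit 0 set -> DDR1 */
	default:       return 0;
	}
}

int main(void)
{
	uint32_t size = rd32(0x10020c) & 0xff000000;	/* same mask as nv10_ram_new() */
	uint32_t cfg0 = rd32(0x100200);

	printf("vram: %u MiB, type: %s\n", size >> 20,
	       (cfg0 & 0x00000001) ? "DDR1" : "SDRAM");	/* vram: 128 MiB, type: DDR1 */
	return 0;
}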
Multirom-mi4i/android_kernel_xiaomi_ferrari
arch/mips/mm/gup.c
2066
7697
/* * Lockless get_user_pages_fast for MIPS * * Copyright (C) 2008 Nick Piggin * Copyright (C) 2008 Novell Inc. * Copyright (C) 2011 Ralf Baechle */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/vmstat.h> #include <linux/highmem.h> #include <linux/swap.h> #include <linux/hugetlb.h> #include <asm/pgtable.h> static inline pte_t gup_get_pte(pte_t *ptep) { #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) pte_t pte; retry: pte.pte_low = ptep->pte_low; smp_rmb(); pte.pte_high = ptep->pte_high; smp_rmb(); if (unlikely(pte.pte_low != ptep->pte_low)) goto retry; return pte; #else return ACCESS_ONCE(*ptep); #endif } static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { pte_t *ptep = pte_offset_map(&pmd, addr); do { pte_t pte = gup_get_pte(ptep); struct page *page; if (!pte_present(pte) || pte_special(pte) || (write && !pte_write(pte))) { pte_unmap(ptep); return 0; } VM_BUG_ON(!pfn_valid(pte_pfn(pte))); page = pte_page(pte); get_page(page); SetPageReferenced(page); pages[*nr] = page; (*nr)++; } while (ptep++, addr += PAGE_SIZE, addr != end); pte_unmap(ptep - 1); return 1; } static inline void get_head_page_multiple(struct page *page, int nr) { VM_BUG_ON(page != compound_head(page)); VM_BUG_ON(page_count(page) == 0); atomic_add(nr, &page->_count); SetPageReferenced(page); } static int gup_huge_pmd(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { pte_t pte = *(pte_t *)&pmd; struct page *head, *page; int refs; if (write && !pte_write(pte)) return 0; /* hugepages are never "special" */ VM_BUG_ON(pte_special(pte)); VM_BUG_ON(!pfn_valid(pte_pfn(pte))); refs = 0; head = pte_page(pte); page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); do { VM_BUG_ON(compound_head(page) != head); pages[*nr] = page; if (PageTail(page)) get_huge_page_tail(page); (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); get_head_page_multiple(head, refs); return 1; } static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pmd_t *pmdp; pmdp = pmd_offset(&pud, addr); do { pmd_t pmd = *pmdp; next = pmd_addr_end(addr, end); /* * The pmd_trans_splitting() check below explains why * pmdp_splitting_flush has to flush the tlb, to stop * this gup-fast code from running while we set the * splitting bit in the pmd. Returning zero will take * the slow path that will call wait_split_huge_page() * if the pmd is still in splitting state. gup-fast * can't because it has irq disabled and * wait_split_huge_page() would never return as the * tlb flush IPI wouldn't run. 
*/ if (pmd_none(pmd) || pmd_trans_splitting(pmd)) return 0; if (unlikely(pmd_huge(pmd))) { if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) return 0; } else { if (!gup_pte_range(pmd, addr, next, write, pages, nr)) return 0; } } while (pmdp++, addr = next, addr != end); return 1; } static int gup_huge_pud(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { pte_t pte = *(pte_t *)&pud; struct page *head, *page; int refs; if (write && !pte_write(pte)) return 0; /* hugepages are never "special" */ VM_BUG_ON(pte_special(pte)); VM_BUG_ON(!pfn_valid(pte_pfn(pte))); refs = 0; head = pte_page(pte); page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT); do { VM_BUG_ON(compound_head(page) != head); pages[*nr] = page; if (PageTail(page)) get_huge_page_tail(page); (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); get_head_page_multiple(head, refs); return 1; } static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pud_t *pudp; pudp = pud_offset(&pgd, addr); do { pud_t pud = *pudp; next = pud_addr_end(addr, end); if (pud_none(pud)) return 0; if (unlikely(pud_huge(pud))) { if (!gup_huge_pud(pud, addr, next, write, pages, nr)) return 0; } else { if (!gup_pmd_range(pud, addr, next, write, pages, nr)) return 0; } } while (pudp++, addr = next, addr != end); return 1; } /* * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall * back to the regular GUP. */ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { struct mm_struct *mm = current->mm; unsigned long addr, len, end; unsigned long next; unsigned long flags; pgd_t *pgdp; int nr = 0; start &= PAGE_MASK; addr = start; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, (void __user *)start, len))) return 0; /* * XXX: batch / limit 'nr', to avoid large irq off latency * needs some instrumenting to determine the common sizes used by * important workloads (e.g. DB2), and whether limiting the batch * size will decrease performance. * * It seems like we're in the clear for the moment. Direct-IO is * the main guy that batches up lots of get_user_pages, and even * they are limited to 64-at-a-time which is not so many. */ /* * This doesn't prevent pagetable teardown, but does prevent * the pagetables and pages from being freed. * * So long as we atomically load page table pointers versus teardown, * we can follow the address down to the page and take a ref on it. */ local_irq_save(flags); pgdp = pgd_offset(mm, addr); do { pgd_t pgd = *pgdp; next = pgd_addr_end(addr, end); if (pgd_none(pgd)) break; if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) break; } while (pgdp++, addr = next, addr != end); local_irq_restore(flags); return nr; } /** * get_user_pages_fast() - pin user pages in memory * @start: starting user address * @nr_pages: number of pages from start to pin * @write: whether pages will be written to * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. * * Attempt to pin user pages in memory without taking mm->mmap_sem. * If not successful, it will fall back to taking the lock and * calling get_user_pages(). * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages * were pinned, returns -errno.
*/ int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { struct mm_struct *mm = current->mm; unsigned long addr, len, end; unsigned long next; pgd_t *pgdp; int ret, nr = 0; start &= PAGE_MASK; addr = start; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; if (end < start) goto slow_irqon; /* XXX: batch / limit 'nr' */ local_irq_disable(); pgdp = pgd_offset(mm, addr); do { pgd_t pgd = *pgdp; next = pgd_addr_end(addr, end); if (pgd_none(pgd)) goto slow; if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) goto slow; } while (pgdp++, addr = next, addr != end); local_irq_enable(); VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT); return nr; slow: local_irq_enable(); slow_irqon: /* Try to get the remaining pages with get_user_pages */ start += nr << PAGE_SHIFT; pages += nr; down_read(&mm->mmap_sem); ret = get_user_pages(current, mm, start, (end - start) >> PAGE_SHIFT, write, 0, pages, NULL); up_read(&mm->mmap_sem); /* Have to be a bit careful with return values */ if (nr > 0) { if (ret < 0) ret = nr; else ret += nr; } return ret; }
gpl-2.0
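The interesting trick in gup.c above is gup_get_pte(): on 32-bit kernels with 64-bit PTEs the two halves cannot be loaded atomically, so the code reads the low word, issues a read barrier, reads the high word, and retries if the low word changed underneath it. A userspace analogue of that read-retry pattern, sketched with C11 atomics standing in for the kernel's smp_rmb(); like the kernel code, it assumes any concurrent update changes the low word:

#include <stdatomic.h>
#include <stdint.h>

/* Stand-in for a two-word PTE that cannot be loaded in one access. */
struct wide_val {
	_Atomic uint32_t lo;
	_Atomic uint32_t hi;
};

/* Lockless consistent read: retry whenever the low word moved while
 * the high word was being fetched. */
static uint64_t read_wide(struct wide_val *v)
{
	uint32_t lo, hi;

	do {
		lo = atomic_load_explicit(&v->lo, memory_order_acquire);
		hi = atomic_load_explicit(&v->hi, memory_order_acquire);
	} while (lo != atomic_load_explicit(&v->lo, memory_order_relaxed));

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	struct wide_val v = { 0x11111111u, 0x22222222u };

	return read_wide(&v) != 0x2222222211111111ull;	/* 0 on success */
}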
aeroevan/android_kernel_moto_shamu
drivers/hwmon/twl4030-madc-hwmon.c
2834
4687
/* * * TWL4030 MADC Hwmon driver - This driver monitors the real time * conversion of analog signals like battery temperature, * battery type, battery level etc. The user can ask for a conversion on a * particular channel using the sysfs nodes. * * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ * J Keerthy <j-keerthy@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/i2c/twl.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/i2c/twl4030-madc.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/stddef.h> #include <linux/sysfs.h> #include <linux/err.h> #include <linux/types.h> /* * sysfs hook function */ static ssize_t madc_read(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct twl4030_madc_request req = { .channels = 1 << attr->index, .method = TWL4030_MADC_SW2, .type = TWL4030_MADC_WAIT, }; long val; val = twl4030_madc_conversion(&req); if (val < 0) return val; return sprintf(buf, "%d\n", req.rbuf[attr->index]); } /* sysfs nodes to read individual channels from user side */ static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, madc_read, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, madc_read, NULL, 1); static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, madc_read, NULL, 2); static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, madc_read, NULL, 3); static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, madc_read, NULL, 4); static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, madc_read, NULL, 5); static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, madc_read, NULL, 6); static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, madc_read, NULL, 7); static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, madc_read, NULL, 8); static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, madc_read, NULL, 9); static SENSOR_DEVICE_ATTR(curr10_input, S_IRUGO, madc_read, NULL, 10); static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, madc_read, NULL, 11); static SENSOR_DEVICE_ATTR(in12_input, S_IRUGO, madc_read, NULL, 12); static SENSOR_DEVICE_ATTR(in15_input, S_IRUGO, madc_read, NULL, 15); static struct attribute *twl4030_madc_attributes[] = { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, &sensor_dev_attr_in7_input.dev_attr.attr, &sensor_dev_attr_in8_input.dev_attr.attr, &sensor_dev_attr_in9_input.dev_attr.attr, &sensor_dev_attr_curr10_input.dev_attr.attr, &sensor_dev_attr_in11_input.dev_attr.attr, &sensor_dev_attr_in12_input.dev_attr.attr, &sensor_dev_attr_in15_input.dev_attr.attr, NULL }; static const struct attribute_group twl4030_madc_group = { .attrs = twl4030_madc_attributes, };
static int twl4030_madc_hwmon_probe(struct platform_device *pdev) { int ret; struct device *hwmon; ret = sysfs_create_group(&pdev->dev.kobj, &twl4030_madc_group); if (ret) goto err_sysfs; hwmon = hwmon_device_register(&pdev->dev); if (IS_ERR(hwmon)) { dev_err(&pdev->dev, "hwmon_device_register failed.\n"); ret = PTR_ERR(hwmon); goto err_reg; } return 0; err_reg: sysfs_remove_group(&pdev->dev.kobj, &twl4030_madc_group); err_sysfs: return ret; } static int twl4030_madc_hwmon_remove(struct platform_device *pdev) { hwmon_device_unregister(&pdev->dev); sysfs_remove_group(&pdev->dev.kobj, &twl4030_madc_group); return 0; } static struct platform_driver twl4030_madc_hwmon_driver = { .probe = twl4030_madc_hwmon_probe, .remove = twl4030_madc_hwmon_remove, .driver = { .name = "twl4030_madc_hwmon", .owner = THIS_MODULE, }, }; module_platform_driver(twl4030_madc_hwmon_driver); MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("J Keerthy"); MODULE_ALIAS("platform:twl4030_madc_hwmon");
gpl-2.0
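Worth noting about the driver above: madc_read() is a single callback shared by all fourteen attributes; the ADC channel number travels in the sensor_device_attribute index and becomes one bit in req.channels. A stripped-down illustration of that one-handler-many-attributes dispatch, with hypothetical names (chan_attr, read_channel) in place of the hwmon machinery:

#include <stdio.h>

/* Hypothetical stand-in for sensor_device_attribute. */
struct chan_attr {
	const char *name;
	int index;
};

static int read_channel(const struct chan_attr *attr)
{
	unsigned int channels = 1u << attr->index;	/* same encoding as req.channels */

	printf("%s -> request mask 0x%04x\n", attr->name, channels);
	return 0;
}

int main(void)
{
	struct chan_attr temp1 = { "temp1_input", 1 };
	struct chan_attr in12  = { "in12_input", 12 };

	read_channel(&temp1);	/* mask 0x0002 */
	read_channel(&in12);	/* mask 0x1000 */
	return 0;
}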
mifl/android_kernel_pantech_ef34k
fs/xfs/linux-2.6/xfs_quotaops.c
2834
3151
/* * Copyright (c) 2008, Christoph Hellwig * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_sb.h" #include "xfs_inum.h" #include "xfs_log.h" #include "xfs_ag.h" #include "xfs_mount.h" #include "xfs_quota.h" #include "xfs_trans.h" #include "xfs_bmap_btree.h" #include "xfs_inode.h" #include "quota/xfs_qm.h" #include <linux/quota.h> STATIC int xfs_quota_type(int type) { switch (type) { case USRQUOTA: return XFS_DQ_USER; case GRPQUOTA: return XFS_DQ_GROUP; default: return XFS_DQ_PROJ; } } STATIC int xfs_fs_get_xstate( struct super_block *sb, struct fs_quota_stat *fqs) { struct xfs_mount *mp = XFS_M(sb); if (!XFS_IS_QUOTA_RUNNING(mp)) return -ENOSYS; return -xfs_qm_scall_getqstat(mp, fqs); } STATIC int xfs_fs_set_xstate( struct super_block *sb, unsigned int uflags, int op) { struct xfs_mount *mp = XFS_M(sb); unsigned int flags = 0; if (sb->s_flags & MS_RDONLY) return -EROFS; if (op != Q_XQUOTARM && !XFS_IS_QUOTA_RUNNING(mp)) return -ENOSYS; if (uflags & FS_QUOTA_UDQ_ACCT) flags |= XFS_UQUOTA_ACCT; if (uflags & FS_QUOTA_PDQ_ACCT) flags |= XFS_PQUOTA_ACCT; if (uflags & FS_QUOTA_GDQ_ACCT) flags |= XFS_GQUOTA_ACCT; if (uflags & FS_QUOTA_UDQ_ENFD) flags |= XFS_UQUOTA_ENFD; if (uflags & (FS_QUOTA_PDQ_ENFD|FS_QUOTA_GDQ_ENFD)) flags |= XFS_OQUOTA_ENFD; switch (op) { case Q_XQUOTAON: return -xfs_qm_scall_quotaon(mp, flags); case Q_XQUOTAOFF: if (!XFS_IS_QUOTA_ON(mp)) return -EINVAL; return -xfs_qm_scall_quotaoff(mp, flags); case Q_XQUOTARM: if (XFS_IS_QUOTA_ON(mp)) return -EINVAL; return -xfs_qm_scall_trunc_qfiles(mp, flags); } return -EINVAL; } STATIC int xfs_fs_get_dqblk( struct super_block *sb, int type, qid_t id, struct fs_disk_quota *fdq) { struct xfs_mount *mp = XFS_M(sb); if (!XFS_IS_QUOTA_RUNNING(mp)) return -ENOSYS; if (!XFS_IS_QUOTA_ON(mp)) return -ESRCH; return -xfs_qm_scall_getquota(mp, id, xfs_quota_type(type), fdq); } STATIC int xfs_fs_set_dqblk( struct super_block *sb, int type, qid_t id, struct fs_disk_quota *fdq) { struct xfs_mount *mp = XFS_M(sb); if (sb->s_flags & MS_RDONLY) return -EROFS; if (!XFS_IS_QUOTA_RUNNING(mp)) return -ENOSYS; if (!XFS_IS_QUOTA_ON(mp)) return -ESRCH; return -xfs_qm_scall_setqlim(mp, id, xfs_quota_type(type), fdq); } const struct quotactl_ops xfs_quotactl_operations = { .get_xstate = xfs_fs_get_xstate, .set_xstate = xfs_fs_set_xstate, .get_dqblk = xfs_fs_get_dqblk, .set_dqblk = xfs_fs_set_dqblk, };
gpl-2.0
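xfs_fs_set_xstate() above translates the generic FS_QUOTA_* flag bits into the XFS-internal XFS_*QUOTA_* bits one test at a time. The same translation can be written table-driven; a small sketch with invented bit values (the real constants live in the XFS and quota headers):

#include <stdio.h>

/* Invented values for illustration; not the real header constants. */
#define FS_QUOTA_UDQ_ACCT	(1u << 0)
#define FS_QUOTA_PDQ_ACCT	(1u << 1)
#define FS_QUOTA_GDQ_ACCT	(1u << 2)
#define XFS_UQUOTA_ACCT		(1u << 8)
#define XFS_PQUOTA_ACCT		(1u << 9)
#define XFS_GQUOTA_ACCT		(1u << 10)

static const struct { unsigned int in, out; } quota_flag_map[] = {
	{ FS_QUOTA_UDQ_ACCT, XFS_UQUOTA_ACCT },
	{ FS_QUOTA_PDQ_ACCT, XFS_PQUOTA_ACCT },
	{ FS_QUOTA_GDQ_ACCT, XFS_GQUOTA_ACCT },
};

static unsigned int translate_quota_flags(unsigned int uflags)
{
	unsigned int flags = 0;
	size_t i;

	for (i = 0; i < sizeof(quota_flag_map) / sizeof(quota_flag_map[0]); i++)
		if (uflags & quota_flag_map[i].in)
			flags |= quota_flag_map[i].out;
	return flags;
}

int main(void)
{
	unsigned int f = translate_quota_flags(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);

	printf("0x%x\n", f);	/* 0x500 */
	return 0;
}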
mkasick/android_kernel_samsung_d2vzw
drivers/media/dvb/ttpci/av7110_v4l.c
3090
28008
/* * av7110_v4l.c: av7110 video4linux interface for DVB and Siemens DVB-C analog module * * Copyright (C) 1999-2002 Ralph Metzler * & Marcus Metzler for convergence integrated media GmbH * * originally based on code by: * Copyright (C) 1998,1999 Christian Theiss <mistert@rz.fh-augsburg.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * the project's page is at http://www.linuxtv.org/ */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/timer.h> #include <linux/poll.h> #include "av7110.h" #include "av7110_hw.h" #include "av7110_av.h" int msp_writereg(struct av7110 *av7110, u8 dev, u16 reg, u16 val) { u8 msg[5] = { dev, reg >> 8, reg & 0xff, val >> 8 , val & 0xff }; struct i2c_msg msgs = { .flags = 0, .len = 5, .buf = msg }; switch (av7110->adac_type) { case DVB_ADAC_MSP34x0: msgs.addr = 0x40; break; case DVB_ADAC_MSP34x5: msgs.addr = 0x42; break; default: return 0; } if (i2c_transfer(&av7110->i2c_adap, &msgs, 1) != 1) { dprintk(1, "dvb-ttpci: failed @ card %d, %u = %u\n", av7110->dvb_adapter.num, reg, val); return -EIO; } return 0; } static int msp_readreg(struct av7110 *av7110, u8 dev, u16 reg, u16 *val) { u8 msg1[3] = { dev, reg >> 8, reg & 0xff }; u8 msg2[2]; struct i2c_msg msgs[2] = { { .flags = 0 , .len = 3, .buf = msg1 }, { .flags = I2C_M_RD, .len = 2, .buf = msg2 } }; switch (av7110->adac_type) { case DVB_ADAC_MSP34x0: msgs[0].addr = 0x40; msgs[1].addr = 0x40; break; case DVB_ADAC_MSP34x5: msgs[0].addr = 0x42; msgs[1].addr = 0x42; break; default: return 0; } if (i2c_transfer(&av7110->i2c_adap, &msgs[0], 2) != 2) { dprintk(1, "dvb-ttpci: failed @ card %d, %u\n", av7110->dvb_adapter.num, reg); return -EIO; } *val = (msg2[0] << 8) | msg2[1]; return 0; } static struct v4l2_input inputs[4] = { { .index = 0, .name = "DVB", .type = V4L2_INPUT_TYPE_CAMERA, .audioset = 1, .tuner = 0, /* ignored */ .std = V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, .status = 0, .capabilities = V4L2_IN_CAP_STD, }, { .index = 1, .name = "Television", .type = V4L2_INPUT_TYPE_TUNER, .audioset = 2, .tuner = 0, .std = V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, .status = 0, .capabilities = V4L2_IN_CAP_STD, }, { .index = 2, .name = "Video", .type = V4L2_INPUT_TYPE_CAMERA, .audioset = 0, .tuner = 0, .std = V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, .status = 0, .capabilities = V4L2_IN_CAP_STD, }, { .index = 3, .name = "Y/C", .type = V4L2_INPUT_TYPE_CAMERA, .audioset = 0, .tuner = 0, .std = V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, .status = 0, .capabilities = V4L2_IN_CAP_STD, } }; static int ves1820_writereg(struct saa7146_dev *dev, u8 addr, u8 reg, u8 data) { struct av7110 *av7110 = dev->ext_priv; u8 buf[] = { 0x00, reg, data }; struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = buf, .len = 3 }; dprintk(4, "dev: %p\n", dev); if (1 != i2c_transfer(&av7110->i2c_adap, &msg, 1)) 
return -1; return 0; } static int tuner_write(struct saa7146_dev *dev, u8 addr, u8 data [4]) { struct av7110 *av7110 = dev->ext_priv; struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = data, .len = 4 }; dprintk(4, "dev: %p\n", dev); if (1 != i2c_transfer(&av7110->i2c_adap, &msg, 1)) return -1; return 0; } static int ves1820_set_tv_freq(struct saa7146_dev *dev, u32 freq) { u32 div; u8 config; u8 buf[4]; dprintk(4, "freq: 0x%08x\n", freq); /* magic number: 614. tuning with the frequency given by v4l2 is always off by 614*62.5 = 38375 kHz...*/ div = freq + 614; buf[0] = (div >> 8) & 0x7f; buf[1] = div & 0xff; buf[2] = 0x8e; if (freq < (u32) (16 * 168.25)) config = 0xa0; else if (freq < (u32) (16 * 447.25)) config = 0x90; else config = 0x30; config &= ~0x02; buf[3] = config; return tuner_write(dev, 0x61, buf); } static int stv0297_set_tv_freq(struct saa7146_dev *dev, u32 freq) { struct av7110 *av7110 = (struct av7110*)dev->ext_priv; u32 div; u8 data[4]; div = (freq + 38900000 + 31250) / 62500; data[0] = (div >> 8) & 0x7f; data[1] = div & 0xff; data[2] = 0xce; if (freq < 45000000) return -EINVAL; else if (freq < 137000000) data[3] = 0x01; else if (freq < 403000000) data[3] = 0x02; else if (freq < 860000000) data[3] = 0x04; else return -EINVAL; if (av7110->fe->ops.i2c_gate_ctrl) av7110->fe->ops.i2c_gate_ctrl(av7110->fe, 1); return tuner_write(dev, 0x63, data); } static struct saa7146_standard analog_standard[]; static struct saa7146_standard dvb_standard[]; static struct saa7146_standard standard[]; static struct v4l2_audio msp3400_v4l2_audio = { .index = 0, .name = "Television", .capability = V4L2_AUDCAP_STEREO }; static int av7110_dvb_c_switch(struct saa7146_fh *fh) { struct saa7146_dev *dev = fh->dev; struct saa7146_vv *vv = dev->vv_data; struct av7110 *av7110 = (struct av7110*)dev->ext_priv; u16 adswitch; int source, sync, err; dprintk(4, "%p\n", av7110); if ((vv->video_status & STATUS_OVERLAY) != 0) { vv->ov_suspend = vv->video_fh; err = saa7146_stop_preview(vv->video_fh); /* side effect: video_status is now 0, video_fh is NULL */ if (err != 0) { dprintk(2, "suspending video failed\n"); vv->ov_suspend = NULL; } } if (0 != av7110->current_input) { dprintk(1, "switching to analog TV:\n"); adswitch = 1; source = SAA7146_HPS_SOURCE_PORT_B; sync = SAA7146_HPS_SYNC_PORT_B; memcpy(standard, analog_standard, sizeof(struct saa7146_standard) * 2); switch (av7110->current_input) { case 1: dprintk(1, "switching SAA7113 to Analog Tuner Input.\n"); msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0000); // loudspeaker source msp_writereg(av7110, MSP_WR_DSP, 0x0009, 0x0000); // headphone source msp_writereg(av7110, MSP_WR_DSP, 0x000a, 0x0000); // SCART 1 source msp_writereg(av7110, MSP_WR_DSP, 0x000e, 0x3000); // FM matrix, mono msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x4f00); // loudspeaker + headphone msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x4f00); // SCART 1 volume if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820) { if (ves1820_writereg(dev, 0x09, 0x0f, 0x60)) dprintk(1, "setting band in demodulator failed.\n"); } else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) { saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI); // TDA9819 pin9(STD) saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI); // TDA9819 pin30(VIF) } if (i2c_writereg(av7110, 0x48, 0x02, 0xd0) != 1) dprintk(1, "saa7113 write failed @ card %d", av7110->dvb_adapter.num); break; case 2: dprintk(1, "switching SAA7113 to Video AV CVBS Input.\n"); if (i2c_writereg(av7110, 0x48, 0x02, 0xd2) != 1) dprintk(1, "saa7113 write failed @ card %d", 
av7110->dvb_adapter.num); break; case 3: dprintk(1, "switching SAA7113 to Video AV Y/C Input.\n"); if (i2c_writereg(av7110, 0x48, 0x02, 0xd9) != 1) dprintk(1, "saa7113 write failed @ card %d", av7110->dvb_adapter.num); break; default: dprintk(1, "switching SAA7113 to Input: AV7110: SAA7113: invalid input.\n"); } } else { adswitch = 0; source = SAA7146_HPS_SOURCE_PORT_A; sync = SAA7146_HPS_SYNC_PORT_A; memcpy(standard, dvb_standard, sizeof(struct saa7146_standard) * 2); dprintk(1, "switching DVB mode\n"); msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0220); // loudspeaker source msp_writereg(av7110, MSP_WR_DSP, 0x0009, 0x0220); // headphone source msp_writereg(av7110, MSP_WR_DSP, 0x000a, 0x0220); // SCART 1 source msp_writereg(av7110, MSP_WR_DSP, 0x000e, 0x3000); // FM matrix, mono msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x7f00); // loudspeaker + headphone msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x7f00); // SCART 1 volume if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820) { if (ves1820_writereg(dev, 0x09, 0x0f, 0x20)) dprintk(1, "setting band in demodulator failed.\n"); } else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) { saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTLO); // TDA9819 pin9(STD) saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO); // TDA9819 pin30(VIF) } } /* hmm, this does not do anything!? */ if (av7110_fw_cmd(av7110, COMTYPE_AUDIODAC, ADSwitch, 1, adswitch)) dprintk(1, "ADSwitch error\n"); saa7146_set_hps_source_and_sync(dev, source, sync); if (vv->ov_suspend != NULL) { saa7146_start_preview(vv->ov_suspend); vv->ov_suspend = NULL; } return 0; } static int vidioc_g_tuner(struct file *file, void *fh, struct v4l2_tuner *t) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct av7110 *av7110 = (struct av7110 *)dev->ext_priv; u16 stereo_det; s8 stereo; dprintk(2, "VIDIOC_G_TUNER: %d\n", t->index); if (!av7110->analog_tuner_flags || t->index != 0) return -EINVAL; memset(t, 0, sizeof(*t)); strcpy((char *)t->name, "Television"); t->type = V4L2_TUNER_ANALOG_TV; t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2 | V4L2_TUNER_CAP_SAP; t->rangelow = 772; /* 48.25 MHZ / 62.5 kHz = 772, see fi1216mk2-specs, page 2 */ t->rangehigh = 13684; /* 855.25 MHz / 62.5 kHz = 13684 */ /* FIXME: add the real signal strength here */ t->signal = 0xffff; t->afc = 0; /* FIXME: standard / stereo detection is still broken */ msp_readreg(av7110, MSP_RD_DEM, 0x007e, &stereo_det); dprintk(1, "VIDIOC_G_TUNER: msp3400 TV standard detection: 0x%04x\n", stereo_det); msp_readreg(av7110, MSP_RD_DSP, 0x0018, &stereo_det); dprintk(1, "VIDIOC_G_TUNER: msp3400 stereo detection: 0x%04x\n", stereo_det); stereo = (s8)(stereo_det >> 8); if (stereo > 0x10) { /* stereo */ t->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO; t->audmode = V4L2_TUNER_MODE_STEREO; } else if (stereo < -0x10) { /* bilingual */ t->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; t->audmode = V4L2_TUNER_MODE_LANG1; } else /* mono */ t->rxsubchans = V4L2_TUNER_SUB_MONO; return 0; } static int vidioc_s_tuner(struct file *file, void *fh, struct v4l2_tuner *t) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct av7110 *av7110 = (struct av7110 *)dev->ext_priv; u16 fm_matrix, src; dprintk(2, "VIDIOC_S_TUNER: %d\n", t->index); if (!av7110->analog_tuner_flags || av7110->current_input != 1) return -EINVAL; switch (t->audmode) { case V4L2_TUNER_MODE_STEREO: dprintk(2, "VIDIOC_S_TUNER: V4L2_TUNER_MODE_STEREO\n"); fm_matrix = 0x3001; /* stereo */ src = 0x0020; 
break; case V4L2_TUNER_MODE_LANG1_LANG2: dprintk(2, "VIDIOC_S_TUNER: V4L2_TUNER_MODE_LANG1_LANG2\n"); fm_matrix = 0x3000; /* bilingual */ src = 0x0020; break; case V4L2_TUNER_MODE_LANG1: dprintk(2, "VIDIOC_S_TUNER: V4L2_TUNER_MODE_LANG1\n"); fm_matrix = 0x3000; /* mono */ src = 0x0000; break; case V4L2_TUNER_MODE_LANG2: dprintk(2, "VIDIOC_S_TUNER: V4L2_TUNER_MODE_LANG2\n"); fm_matrix = 0x3000; /* mono */ src = 0x0010; break; default: /* case V4L2_TUNER_MODE_MONO: */ dprintk(2, "VIDIOC_S_TUNER: TDA9840_SET_MONO\n"); fm_matrix = 0x3000; /* mono */ src = 0x0030; break; } msp_writereg(av7110, MSP_WR_DSP, 0x000e, fm_matrix); msp_writereg(av7110, MSP_WR_DSP, 0x0008, src); msp_writereg(av7110, MSP_WR_DSP, 0x0009, src); msp_writereg(av7110, MSP_WR_DSP, 0x000a, src); return 0; } static int vidioc_g_frequency(struct file *file, void *fh, struct v4l2_frequency *f) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct av7110 *av7110 = (struct av7110 *)dev->ext_priv; dprintk(2, "VIDIOC_G_FREQ: freq:0x%08x.\n", f->frequency); if (!av7110->analog_tuner_flags || av7110->current_input != 1) return -EINVAL; memset(f, 0, sizeof(*f)); f->type = V4L2_TUNER_ANALOG_TV; f->frequency = av7110->current_freq; return 0; } static int vidioc_s_frequency(struct file *file, void *fh, struct v4l2_frequency *f) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct av7110 *av7110 = (struct av7110 *)dev->ext_priv; dprintk(2, "VIDIOC_S_FREQUENCY: freq:0x%08x.\n", f->frequency); if (!av7110->analog_tuner_flags || av7110->current_input != 1) return -EINVAL; if (V4L2_TUNER_ANALOG_TV != f->type) return -EINVAL; msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0xffe0); /* fast mute */ msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0xffe0); /* tune in desired frequency */ if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820) ves1820_set_tv_freq(dev, f->frequency); else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) stv0297_set_tv_freq(dev, f->frequency); av7110->current_freq = f->frequency; msp_writereg(av7110, MSP_WR_DSP, 0x0015, 0x003f); /* start stereo detection */ msp_writereg(av7110, MSP_WR_DSP, 0x0015, 0x0000); msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x4f00); /* loudspeaker + headphone */ msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x4f00); /* SCART 1 volume */ return 0; } static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct av7110 *av7110 = (struct av7110 *)dev->ext_priv; dprintk(2, "VIDIOC_ENUMINPUT: %d\n", i->index); if (av7110->analog_tuner_flags) { if (i->index >= 4) return -EINVAL; } else { if (i->index != 0) return -EINVAL; } memcpy(i, &inputs[i->index], sizeof(struct v4l2_input)); return 0; } static int vidioc_g_input(struct file *file, void *fh, unsigned int *input) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct av7110 *av7110 = (struct av7110 *)dev->ext_priv; *input = av7110->current_input; dprintk(2, "VIDIOC_G_INPUT: %d\n", *input); return 0; } static int vidioc_s_input(struct file *file, void *fh, unsigned int input) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct av7110 *av7110 = (struct av7110 *)dev->ext_priv; dprintk(2, "VIDIOC_S_INPUT: %d\n", input); if (!av7110->analog_tuner_flags) return 0; if (input >= 4) return -EINVAL; av7110->current_input = input; return av7110_dvb_c_switch(fh); } static int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a) { dprintk(2, "VIDIOC_G_AUDIO: %d\n", a->index); if (a->index != 0) return -EINVAL; memcpy(a, 
&msp3400_v4l2_audio, sizeof(struct v4l2_audio)); return 0; } static int vidioc_s_audio(struct file *file, void *fh, struct v4l2_audio *a) { dprintk(2, "VIDIOC_S_AUDIO: %d\n", a->index); return 0; } static int vidioc_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced_vbi_cap *cap) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct av7110 *av7110 = (struct av7110 *)dev->ext_priv; dprintk(2, "VIDIOC_G_SLICED_VBI_CAP\n"); if (cap->type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) return -EINVAL; if (FW_VERSION(av7110->arm_app) >= 0x2623) { cap->service_set = V4L2_SLICED_WSS_625; cap->service_lines[0][23] = V4L2_SLICED_WSS_625; } return 0; } static int vidioc_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *f) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct av7110 *av7110 = (struct av7110 *)dev->ext_priv; dprintk(2, "VIDIOC_G_FMT:\n"); if (FW_VERSION(av7110->arm_app) < 0x2623) return -EINVAL; memset(&f->fmt.sliced, 0, sizeof f->fmt.sliced); if (av7110->wssMode) { f->fmt.sliced.service_set = V4L2_SLICED_WSS_625; f->fmt.sliced.service_lines[0][23] = V4L2_SLICED_WSS_625; f->fmt.sliced.io_size = sizeof(struct v4l2_sliced_vbi_data); } return 0; } static int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *f) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct av7110 *av7110 = (struct av7110 *)dev->ext_priv; dprintk(2, "VIDIOC_S_FMT\n"); if (FW_VERSION(av7110->arm_app) < 0x2623) return -EINVAL; if (f->fmt.sliced.service_set != V4L2_SLICED_WSS_625 && f->fmt.sliced.service_lines[0][23] != V4L2_SLICED_WSS_625) { memset(&f->fmt.sliced, 0, sizeof(f->fmt.sliced)); /* WSS controlled by firmware */ av7110->wssMode = 0; av7110->wssData = 0; return av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 1, 0); } else { memset(&f->fmt.sliced, 0, sizeof(f->fmt.sliced)); f->fmt.sliced.service_set = V4L2_SLICED_WSS_625; f->fmt.sliced.service_lines[0][23] = V4L2_SLICED_WSS_625; f->fmt.sliced.io_size = sizeof(struct v4l2_sliced_vbi_data); /* WSS controlled by userspace */ av7110->wssMode = 1; av7110->wssData = 0; } return 0; } static int av7110_vbi_reset(struct file *file) { struct saa7146_fh *fh = file->private_data; struct saa7146_dev *dev = fh->dev; struct av7110 *av7110 = (struct av7110*) dev->ext_priv; dprintk(2, "%s\n", __func__); av7110->wssMode = 0; av7110->wssData = 0; if (FW_VERSION(av7110->arm_app) < 0x2623) return 0; else return av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 1, 0); } static ssize_t av7110_vbi_write(struct file *file, const char __user *data, size_t count, loff_t *ppos) { struct saa7146_fh *fh = file->private_data; struct saa7146_dev *dev = fh->dev; struct av7110 *av7110 = (struct av7110*) dev->ext_priv; struct v4l2_sliced_vbi_data d; int rc; dprintk(2, "%s\n", __func__); if (FW_VERSION(av7110->arm_app) < 0x2623 || !av7110->wssMode || count != sizeof d) return -EINVAL; if (copy_from_user(&d, data, count)) return -EFAULT; if ((d.id != 0 && d.id != V4L2_SLICED_WSS_625) || d.field != 0 || d.line != 23) return -EINVAL; if (d.id) av7110->wssData = ((d.data[1] << 8) & 0x3f00) | d.data[0]; else av7110->wssData = 0x8000; rc = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 2, 1, av7110->wssData); return (rc < 0) ? 
rc : count; } /**************************************************************************** * INITIALIZATION ****************************************************************************/ static u8 saa7113_init_regs[] = { 0x02, 0xd0, 0x03, 0x23, 0x04, 0x00, 0x05, 0x00, 0x06, 0xe9, 0x07, 0x0d, 0x08, 0x98, 0x09, 0x02, 0x0a, 0x80, 0x0b, 0x40, 0x0c, 0x40, 0x0d, 0x00, 0x0e, 0x01, 0x0f, 0x7c, 0x10, 0x48, 0x11, 0x0c, 0x12, 0x8b, 0x13, 0x1a, 0x14, 0x00, 0x15, 0x00, 0x16, 0x00, 0x17, 0x00, 0x18, 0x00, 0x19, 0x00, 0x1a, 0x00, 0x1b, 0x00, 0x1c, 0x00, 0x1d, 0x00, 0x1e, 0x00, 0x41, 0x77, 0x42, 0x77, 0x43, 0x77, 0x44, 0x77, 0x45, 0x77, 0x46, 0x77, 0x47, 0x77, 0x48, 0x77, 0x49, 0x77, 0x4a, 0x77, 0x4b, 0x77, 0x4c, 0x77, 0x4d, 0x77, 0x4e, 0x77, 0x4f, 0x77, 0x50, 0x77, 0x51, 0x77, 0x52, 0x77, 0x53, 0x77, 0x54, 0x77, 0x55, 0x77, 0x56, 0x77, 0x57, 0xff, 0xff }; static struct saa7146_ext_vv av7110_vv_data_st; static struct saa7146_ext_vv av7110_vv_data_c; int av7110_init_analog_module(struct av7110 *av7110) { u16 version1, version2; if (i2c_writereg(av7110, 0x80, 0x0, 0x80) == 1 && i2c_writereg(av7110, 0x80, 0x0, 0) == 1) { printk("dvb-ttpci: DVB-C analog module @ card %d detected, initializing MSP3400\n", av7110->dvb_adapter.num); av7110->adac_type = DVB_ADAC_MSP34x0; } else if (i2c_writereg(av7110, 0x84, 0x0, 0x80) == 1 && i2c_writereg(av7110, 0x84, 0x0, 0) == 1) { printk("dvb-ttpci: DVB-C analog module @ card %d detected, initializing MSP3415\n", av7110->dvb_adapter.num); av7110->adac_type = DVB_ADAC_MSP34x5; } else return -ENODEV; msleep(100); // the probing above resets the msp... msp_readreg(av7110, MSP_RD_DSP, 0x001e, &version1); msp_readreg(av7110, MSP_RD_DSP, 0x001f, &version2); dprintk(1, "dvb-ttpci: @ card %d MSP34xx version 0x%04x 0x%04x\n", av7110->dvb_adapter.num, version1, version2); msp_writereg(av7110, MSP_WR_DSP, 0x0013, 0x0c00); msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x7f00); // loudspeaker + headphone msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0220); // loudspeaker source msp_writereg(av7110, MSP_WR_DSP, 0x0009, 0x0220); // headphone source msp_writereg(av7110, MSP_WR_DSP, 0x0004, 0x7f00); // loudspeaker volume msp_writereg(av7110, MSP_WR_DSP, 0x000a, 0x0220); // SCART 1 source msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x7f00); // SCART 1 volume msp_writereg(av7110, MSP_WR_DSP, 0x000d, 0x1900); // prescale SCART if (i2c_writereg(av7110, 0x48, 0x01, 0x00)!=1) { INFO(("saa7113 not accessible.\n")); } else { u8 *i = saa7113_init_regs; if ((av7110->dev->pci->subsystem_vendor == 0x110a) && (av7110->dev->pci->subsystem_device == 0x0000)) { /* Fujitsu/Siemens DVB-Cable */ av7110->analog_tuner_flags |= ANALOG_TUNER_VES1820; } else if ((av7110->dev->pci->subsystem_vendor == 0x13c2) && (av7110->dev->pci->subsystem_device == 0x0002)) { /* Hauppauge/TT DVB-C premium */ av7110->analog_tuner_flags |= ANALOG_TUNER_VES1820; } else if ((av7110->dev->pci->subsystem_vendor == 0x13c2) && (av7110->dev->pci->subsystem_device == 0x000A)) { /* Hauppauge/TT DVB-C premium */ av7110->analog_tuner_flags |= ANALOG_TUNER_STV0297; } /* setup for DVB by default */ if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820) { if (ves1820_writereg(av7110->dev, 0x09, 0x0f, 0x20)) dprintk(1, "setting band in demodulator failed.\n"); } else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) { saa7146_setgpio(av7110->dev, 1, SAA7146_GPIO_OUTLO); // TDA9819 pin9(STD) saa7146_setgpio(av7110->dev, 3, SAA7146_GPIO_OUTLO); // TDA9819 pin30(VIF) } /* init the saa7113 */ while (*i != 0xff) { if (i2c_writereg(av7110, 0x48, i[0], i[1]) 
!= 1) { dprintk(1, "saa7113 initialization failed @ card %d", av7110->dvb_adapter.num); break; } i += 2; } /* setup msp for analog sound: B/G Dual-FM */ msp_writereg(av7110, MSP_WR_DEM, 0x00bb, 0x02d0); // AD_CV msp_writereg(av7110, MSP_WR_DEM, 0x0001, 3); // FIR1 msp_writereg(av7110, MSP_WR_DEM, 0x0001, 18); // FIR1 msp_writereg(av7110, MSP_WR_DEM, 0x0001, 27); // FIR1 msp_writereg(av7110, MSP_WR_DEM, 0x0001, 48); // FIR1 msp_writereg(av7110, MSP_WR_DEM, 0x0001, 66); // FIR1 msp_writereg(av7110, MSP_WR_DEM, 0x0001, 72); // FIR1 msp_writereg(av7110, MSP_WR_DEM, 0x0005, 4); // FIR2 msp_writereg(av7110, MSP_WR_DEM, 0x0005, 64); // FIR2 msp_writereg(av7110, MSP_WR_DEM, 0x0005, 0); // FIR2 msp_writereg(av7110, MSP_WR_DEM, 0x0005, 3); // FIR2 msp_writereg(av7110, MSP_WR_DEM, 0x0005, 18); // FIR2 msp_writereg(av7110, MSP_WR_DEM, 0x0005, 27); // FIR2 msp_writereg(av7110, MSP_WR_DEM, 0x0005, 48); // FIR2 msp_writereg(av7110, MSP_WR_DEM, 0x0005, 66); // FIR2 msp_writereg(av7110, MSP_WR_DEM, 0x0005, 72); // FIR2 msp_writereg(av7110, MSP_WR_DEM, 0x0083, 0xa000); // MODE_REG msp_writereg(av7110, MSP_WR_DEM, 0x0093, 0x00aa); // DCO1_LO 5.74MHz msp_writereg(av7110, MSP_WR_DEM, 0x009b, 0x04fc); // DCO1_HI msp_writereg(av7110, MSP_WR_DEM, 0x00a3, 0x038e); // DCO2_LO 5.5MHz msp_writereg(av7110, MSP_WR_DEM, 0x00ab, 0x04c6); // DCO2_HI msp_writereg(av7110, MSP_WR_DEM, 0x0056, 0); // LOAD_REG 1/2 } memcpy(standard, dvb_standard, sizeof(struct saa7146_standard) * 2); /* set dd1 stream a & b */ saa7146_write(av7110->dev, DD1_STREAM_B, 0x00000000); saa7146_write(av7110->dev, DD1_INIT, 0x03000700); saa7146_write(av7110->dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26)); return 0; } int av7110_init_v4l(struct av7110 *av7110) { struct saa7146_dev* dev = av7110->dev; struct saa7146_ext_vv *vv_data; int ret; /* special case DVB-C: these cards have an analog tuner plus need some special handling, so we have separate saa7146_ext_vv data for these... */ if (av7110->analog_tuner_flags) vv_data = &av7110_vv_data_c; else vv_data = &av7110_vv_data_st; ret = saa7146_vv_init(dev, vv_data); if (ret) { ERR(("cannot init capture device. skipping.\n")); return -ENODEV; } vv_data->ops.vidioc_enum_input = vidioc_enum_input; vv_data->ops.vidioc_g_input = vidioc_g_input; vv_data->ops.vidioc_s_input = vidioc_s_input; vv_data->ops.vidioc_g_tuner = vidioc_g_tuner; vv_data->ops.vidioc_s_tuner = vidioc_s_tuner; vv_data->ops.vidioc_g_frequency = vidioc_g_frequency; vv_data->ops.vidioc_s_frequency = vidioc_s_frequency; vv_data->ops.vidioc_g_audio = vidioc_g_audio; vv_data->ops.vidioc_s_audio = vidioc_s_audio; vv_data->ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap; vv_data->ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out; vv_data->ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out; if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_GRABBER)) { ERR(("cannot register capture device. skipping.\n")); saa7146_vv_release(dev); return -ENODEV; } if (saa7146_register_device(&av7110->vbi_dev, dev, "av7110", VFL_TYPE_VBI)) ERR(("cannot register vbi v4l2 device. skipping.\n")); return 0; } int av7110_exit_v4l(struct av7110 *av7110) { struct saa7146_dev* dev = av7110->dev; saa7146_unregister_device(&av7110->v4l_dev, av7110->dev); saa7146_unregister_device(&av7110->vbi_dev, av7110->dev); saa7146_vv_release(dev); return 0; } /* FIXME: these values are experimental values that look better than the values from the latest "official" driver -- at least for me... 
(MiHu) */ static struct saa7146_standard standard[] = { { .name = "PAL", .id = V4L2_STD_PAL_BG, .v_offset = 0x15, .v_field = 288, .h_offset = 0x48, .h_pixels = 708, .v_max_out = 576, .h_max_out = 768, }, { .name = "NTSC", .id = V4L2_STD_NTSC, .v_offset = 0x10, .v_field = 244, .h_offset = 0x40, .h_pixels = 708, .v_max_out = 480, .h_max_out = 640, } }; static struct saa7146_standard analog_standard[] = { { .name = "PAL", .id = V4L2_STD_PAL_BG, .v_offset = 0x1b, .v_field = 288, .h_offset = 0x08, .h_pixels = 708, .v_max_out = 576, .h_max_out = 768, }, { .name = "NTSC", .id = V4L2_STD_NTSC, .v_offset = 0x10, .v_field = 244, .h_offset = 0x40, .h_pixels = 708, .v_max_out = 480, .h_max_out = 640, } }; static struct saa7146_standard dvb_standard[] = { { .name = "PAL", .id = V4L2_STD_PAL_BG, .v_offset = 0x14, .v_field = 288, .h_offset = 0x48, .h_pixels = 708, .v_max_out = 576, .h_max_out = 768, }, { .name = "NTSC", .id = V4L2_STD_NTSC, .v_offset = 0x10, .v_field = 244, .h_offset = 0x40, .h_pixels = 708, .v_max_out = 480, .h_max_out = 640, } }; static int std_callback(struct saa7146_dev* dev, struct saa7146_standard *std) { struct av7110 *av7110 = (struct av7110*) dev->ext_priv; if (std->id & V4L2_STD_PAL) { av7110->vidmode = AV7110_VIDEO_MODE_PAL; av7110_set_vidmode(av7110, av7110->vidmode); } else if (std->id & V4L2_STD_NTSC) { av7110->vidmode = AV7110_VIDEO_MODE_NTSC; av7110_set_vidmode(av7110, av7110->vidmode); } else return -1; return 0; } static struct saa7146_ext_vv av7110_vv_data_st = { .inputs = 1, .audios = 1, .capabilities = V4L2_CAP_SLICED_VBI_OUTPUT, .flags = 0, .stds = &standard[0], .num_stds = ARRAY_SIZE(standard), .std_callback = &std_callback, .vbi_fops.open = av7110_vbi_reset, .vbi_fops.release = av7110_vbi_reset, .vbi_fops.write = av7110_vbi_write, }; static struct saa7146_ext_vv av7110_vv_data_c = { .inputs = 1, .audios = 1, .capabilities = V4L2_CAP_TUNER | V4L2_CAP_SLICED_VBI_OUTPUT, .flags = SAA7146_USE_PORT_B_FOR_VBI, .stds = &standard[0], .num_stds = ARRAY_SIZE(standard), .std_callback = &std_callback, .vbi_fops.open = av7110_vbi_reset, .vbi_fops.release = av7110_vbi_reset, .vbi_fops.write = av7110_vbi_write, };
gpl-2.0
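A quick check on the tuner arithmetic in stv0297_set_tv_freq() above: the PLL divider is (freq + 38900000 + 31250) / 62500, i.e. the RF frequency plus the 38.9 MHz IF, rounded to the nearest 62.5 kHz step (the +31250 is the half-step rounding term). Worked through for an assumed 474 MHz DVB-C channel:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t freq = 474000000;	/* assumed channel, 474 MHz */
	uint32_t div  = (freq + 38900000 + 31250) / 62500;

	printf("div = %u\n", div);		/* 8206 */
	printf("pll = %u Hz\n", div * 62500);	/* 512875000, within half a step of 512.9 MHz */
	return 0;
}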
vic-nation/kernel_goghvmu
drivers/macintosh/windfarm_pm121.c
3346
25397
/* * Windfarm PowerMac thermal control. iMac G5 iSight * * (c) Copyright 2007 Étienne Bersac <bersace@gmail.com> * * Bits & pieces from windfarm_pm81.c by (c) Copyright 2005 Benjamin * Herrenschmidt, IBM Corp. <benh@kernel.crashing.org> * * Released under the terms of the GNU GPL v2. * * * * PowerMac12,1 * ============ * * * The algorithm used is the PID control algorithm, used the same way * the published Darwin code does, using the same values that are * present in the Darwin 8.10 snapshot property lists (note however * that none of the code has been re-used, it's a complete * re-implementation). * * There are two models using PowerMac12,1. Model 2 is the iMac G5 iSight * 17" while Model 3 is the iMac G5 20". They both have the same * controls, with a tiny difference: the control-ids of hard-drive-fan * and cpu-fan are swapped. * * * Target Correction : * * controls have a target correction calculated as : * * new_min = ((((average_power * slope) >> 16) + offset) >> 16) + min_value * new_value = max(new_value, max(new_min, 0)) * * OD Fan control correction. * * # model_id: 2 * offset : -19563152 * slope : 1956315 * * # model_id: 3 * offset : -15650652 * slope : 1565065 * * HD Fan control correction. * * # model_id: 2 * offset : -15650652 * slope : 1565065 * * # model_id: 3 * offset : -19563152 * slope : 1956315 * * CPU Fan control correction. * * # model_id: 2 * offset : -25431900 * slope : 2543190 * * # model_id: 3 * offset : -15650652 * slope : 1565065 * * * Target rubber-banding : * * Some controls have a target correction which depends on another * control value. The correction is computed in the following way : * * new_min = ref_value * slope + offset * * ref_value is the value of the reference control. If new_min is * greater than 0, then we correct the target value using : * * new_target = max (new_target, new_min >> 16) * * * # model_id : 2 * control : cpu-fan * ref : optical-drive-fan * offset : -15650652 * slope : 1565065 * * # model_id : 3 * control : optical-drive-fan * ref : hard-drive-fan * offset : -32768000 * slope : 65536 * * * In order to have the most efficient correction with those * dependencies, we must trigger the HD loop before the OD loop, and * the OD loop before the CPU loop. * * * The various control loops found in the Darwin config file are: * * HD Fan control loop. * * # model_id: 2 * control : hard-drive-fan * sensor : hard-drive-temp * PID params : G_d = 0x00000000 * G_p = 0x002D70A3 * G_r = 0x00019999 * History = 2 entries * Input target = 0x370000 * Interval = 5s * * # model_id: 3 * control : hard-drive-fan * sensor : hard-drive-temp * PID params : G_d = 0x00000000 * G_p = 0x002170A3 * G_r = 0x00019999 * History = 2 entries * Input target = 0x370000 * Interval = 5s * * OD Fan control loop. * * # model_id: 2 * control : optical-drive-fan * sensor : optical-drive-temp * PID params : G_d = 0x00000000 * G_p = 0x001FAE14 * G_r = 0x00019999 * History = 2 entries * Input target = 0x320000 * Interval = 5s * * # model_id: 3 * control : optical-drive-fan * sensor : optical-drive-temp * PID params : G_d = 0x00000000 * G_p = 0x001FAE14 * G_r = 0x00019999 * History = 2 entries * Input target = 0x320000 * Interval = 5s * * GPU Fan control loop.
* * # model_id: 2 * control : hard-drive-fan * sensor : gpu-temp * PID params : G_d = 0x00000000 * G_p = 0x002A6666 * G_r = 0x00019999 * History = 2 entries * Input target = 0x5A0000 * Interval = 5s * * # model_id: 3 * control : cpu-fan * sensor : gpu-temp * PID params : G_d = 0x00000000 * G_p = 0x0010CCCC * G_r = 0x00019999 * History = 2 entries * Input target = 0x500000 * Interval = 5s * * KODIAK (aka northbridge) Fan control loop. * * # model_id: 2 * control : optical-drive-fan * sensor : north-bridge-temp * PID params : G_d = 0x00000000 * G_p = 0x003BD70A * G_r = 0x00019999 * History = 2 entries * Input target = 0x550000 * Interval = 5s * * # model_id: 3 * control : hard-drive-fan * sensor : north-bridge-temp * PID params : G_d = 0x00000000 * G_p = 0x0030F5C2 * G_r = 0x00019999 * History = 2 entries * Input target = 0x550000 * Interval = 5s * * CPU Fan control loop. * * control : cpu-fan * sensors : cpu-temp, cpu-power * PID params : from SDB partition * * * CPU Slew control loop. * * control : cpufreq-clamp * sensor : cpu-temp * */ #undef DEBUG #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/kmod.h> #include <linux/device.h> #include <linux/platform_device.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/io.h> #include <asm/system.h> #include <asm/sections.h> #include <asm/smu.h> #include "windfarm.h" #include "windfarm_pid.h" #define VERSION "0.3" static int pm121_mach_model; /* machine model id */ /* Controls & sensors */ static struct wf_sensor *sensor_cpu_power; static struct wf_sensor *sensor_cpu_temp; static struct wf_sensor *sensor_cpu_voltage; static struct wf_sensor *sensor_cpu_current; static struct wf_sensor *sensor_gpu_temp; static struct wf_sensor *sensor_north_bridge_temp; static struct wf_sensor *sensor_hard_drive_temp; static struct wf_sensor *sensor_optical_drive_temp; static struct wf_sensor *sensor_incoming_air_temp; /* unused ! */ enum { FAN_CPU, FAN_HD, FAN_OD, CPUFREQ, N_CONTROLS }; static struct wf_control *controls[N_CONTROLS] = {}; /* Set to kick the control loop into life */ static int pm121_all_controls_ok, pm121_all_sensors_ok, pm121_started; enum { FAILURE_FAN = 1 << 0, FAILURE_SENSOR = 1 << 1, FAILURE_OVERTEMP = 1 << 2 }; /* All sys loops. Note the HD before the OD loop in order to have it run before. 
*/ enum { LOOP_GPU, /* control = hd or cpu, but luckily, it doesn't matter */ LOOP_HD, /* control = hd */ LOOP_KODIAK, /* control = hd or od */ LOOP_OD, /* control = od */ N_LOOPS }; static const char *loop_names[N_LOOPS] = { "GPU", "HD", "KODIAK", "OD", }; #define PM121_NUM_CONFIGS 2 static unsigned int pm121_failure_state; static int pm121_readjust, pm121_skipping; static s32 average_power; struct pm121_correction { int offset; int slope; }; static struct pm121_correction corrections[N_CONTROLS][PM121_NUM_CONFIGS] = { /* FAN_OD */ { /* MODEL 2 */ { .offset = -19563152, .slope = 1956315 }, /* MODEL 3 */ { .offset = -15650652, .slope = 1565065 }, }, /* FAN_HD */ { /* MODEL 2 */ { .offset = -15650652, .slope = 1565065 }, /* MODEL 3 */ { .offset = -19563152, .slope = 1956315 }, }, /* FAN_CPU */ { /* MODEL 2 */ { .offset = -25431900, .slope = 2543190 }, /* MODEL 3 */ { .offset = -15650652, .slope = 1565065 }, }, /* CPUFREQ has no correction (and is not implemented at all) */ }; struct pm121_connection { unsigned int control_id; unsigned int ref_id; struct pm121_correction correction; }; static struct pm121_connection pm121_connections[] = { /* MODEL 2 */ { .control_id = FAN_CPU, .ref_id = FAN_OD, { .offset = -32768000, .slope = 65536 } }, /* MODEL 3 */ { .control_id = FAN_OD, .ref_id = FAN_HD, { .offset = -32768000, .slope = 65536 } }, }; /* pointer to the current model connection */ static struct pm121_connection *pm121_connection; /* * ****** System Fans Control Loop ****** * */ /* Since each loop handles only one control and we want to avoid * writing virtual control, we store the control correction with the * loop params. Some data are not set, there are common to all loop * and thus, hardcoded. */ struct pm121_sys_param { /* purely informative since we use mach_model-2 as index */ int model_id; struct wf_sensor **sensor; /* use sensor_id instead ? 
*/ s32 gp, itarget; unsigned int control_id; }; static struct pm121_sys_param pm121_sys_all_params[N_LOOPS][PM121_NUM_CONFIGS] = { /* GPU Fan control loop */ { { .model_id = 2, .sensor = &sensor_gpu_temp, .gp = 0x002A6666, .itarget = 0x5A0000, .control_id = FAN_HD, }, { .model_id = 3, .sensor = &sensor_gpu_temp, .gp = 0x0010CCCC, .itarget = 0x500000, .control_id = FAN_CPU, }, }, /* HD Fan control loop */ { { .model_id = 2, .sensor = &sensor_hard_drive_temp, .gp = 0x002D70A3, .itarget = 0x370000, .control_id = FAN_HD, }, { .model_id = 3, .sensor = &sensor_hard_drive_temp, .gp = 0x002170A3, .itarget = 0x370000, .control_id = FAN_HD, }, }, /* KODIAK Fan control loop */ { { .model_id = 2, .sensor = &sensor_north_bridge_temp, .gp = 0x003BD70A, .itarget = 0x550000, .control_id = FAN_OD, }, { .model_id = 3, .sensor = &sensor_north_bridge_temp, .gp = 0x0030F5C2, .itarget = 0x550000, .control_id = FAN_HD, }, }, /* OD Fan control loop */ { { .model_id = 2, .sensor = &sensor_optical_drive_temp, .gp = 0x001FAE14, .itarget = 0x320000, .control_id = FAN_OD, }, { .model_id = 3, .sensor = &sensor_optical_drive_temp, .gp = 0x001FAE14, .itarget = 0x320000, .control_id = FAN_OD, }, }, }; /* the hardcoded values */ #define PM121_SYS_GD 0x00000000 #define PM121_SYS_GR 0x00019999 #define PM121_SYS_HISTORY_SIZE 2 #define PM121_SYS_INTERVAL 5 /* State data used by the system fans control loop */ struct pm121_sys_state { int ticks; s32 setpoint; struct wf_pid_state pid; }; struct pm121_sys_state *pm121_sys_state[N_LOOPS] = {}; /* * ****** CPU Fans Control Loop ****** * */ #define PM121_CPU_INTERVAL 1 /* State data used by the cpu fans control loop */ struct pm121_cpu_state { int ticks; s32 setpoint; struct wf_cpu_pid_state pid; }; static struct pm121_cpu_state *pm121_cpu_state; /* * ***** Implementation ***** * */ /* correction the value using the output-low-bound correction algo */ static s32 pm121_correct(s32 new_setpoint, unsigned int control_id, s32 min) { s32 new_min; struct pm121_correction *correction; correction = &corrections[control_id][pm121_mach_model - 2]; new_min = (average_power * correction->slope) >> 16; new_min += correction->offset; new_min = (new_min >> 16) + min; return max3(new_setpoint, new_min, 0); } static s32 pm121_connect(unsigned int control_id, s32 setpoint) { s32 new_min, value, new_setpoint; if (pm121_connection->control_id == control_id) { controls[control_id]->ops->get_value(controls[control_id], &value); new_min = value * pm121_connection->correction.slope; new_min += pm121_connection->correction.offset; if (new_min > 0) { new_setpoint = max(setpoint, (new_min >> 16)); if (new_setpoint != setpoint) { pr_debug("pm121: %s depending on %s, " "corrected from %d to %d RPM\n", controls[control_id]->name, controls[pm121_connection->ref_id]->name, (int) setpoint, (int) new_setpoint); } } else new_setpoint = setpoint; } /* no connection */ else new_setpoint = setpoint; return new_setpoint; } /* FAN LOOPS */ static void pm121_create_sys_fans(int loop_id) { struct pm121_sys_param *param = NULL; struct wf_pid_param pid_param; struct wf_control *control = NULL; int i; /* First, locate the params for this model */ for (i = 0; i < PM121_NUM_CONFIGS; i++) { if (pm121_sys_all_params[loop_id][i].model_id == pm121_mach_model) { param = &(pm121_sys_all_params[loop_id][i]); break; } } /* No params found, put fans to max */ if (param == NULL) { printk(KERN_WARNING "pm121: %s fan config not found " " for this machine model\n", loop_names[loop_id]); goto fail; } control = controls[param->control_id]; /* 
Alloc & initialize state */ pm121_sys_state[loop_id] = kmalloc(sizeof(struct pm121_sys_state), GFP_KERNEL); if (pm121_sys_state[loop_id] == NULL) { printk(KERN_WARNING "pm121: Memory allocation error\n"); goto fail; } pm121_sys_state[loop_id]->ticks = 1; /* Fill PID params */ pid_param.gd = PM121_SYS_GD; pid_param.gp = param->gp; pid_param.gr = PM121_SYS_GR; pid_param.interval = PM121_SYS_INTERVAL; pid_param.history_len = PM121_SYS_HISTORY_SIZE; pid_param.itarget = param->itarget; pid_param.min = control->ops->get_min(control); pid_param.max = control->ops->get_max(control); wf_pid_init(&pm121_sys_state[loop_id]->pid, &pid_param); pr_debug("pm121: %s Fan control loop initialized.\n" " itarged=%d.%03d, min=%d RPM, max=%d RPM\n", loop_names[loop_id], FIX32TOPRINT(pid_param.itarget), pid_param.min, pid_param.max); return; fail: /* note that this is not optimal since another loop may still control the same control */ printk(KERN_WARNING "pm121: failed to set up %s loop " "setting \"%s\" to max speed.\n", loop_names[loop_id], control->name); if (control) wf_control_set_max(control); } static void pm121_sys_fans_tick(int loop_id) { struct pm121_sys_param *param; struct pm121_sys_state *st; struct wf_sensor *sensor; struct wf_control *control; s32 temp, new_setpoint; int rc; param = &(pm121_sys_all_params[loop_id][pm121_mach_model-2]); st = pm121_sys_state[loop_id]; sensor = *(param->sensor); control = controls[param->control_id]; if (--st->ticks != 0) { if (pm121_readjust) goto readjust; return; } st->ticks = PM121_SYS_INTERVAL; rc = sensor->ops->get_value(sensor, &temp); if (rc) { printk(KERN_WARNING "windfarm: %s sensor error %d\n", sensor->name, rc); pm121_failure_state |= FAILURE_SENSOR; return; } pr_debug("pm121: %s Fan tick ! %s: %d.%03d\n", loop_names[loop_id], sensor->name, FIX32TOPRINT(temp)); new_setpoint = wf_pid_run(&st->pid, temp); /* correction */ new_setpoint = pm121_correct(new_setpoint, param->control_id, st->pid.param.min); /* linked corretion */ new_setpoint = pm121_connect(param->control_id, new_setpoint); if (new_setpoint == st->setpoint) return; st->setpoint = new_setpoint; pr_debug("pm121: %s corrected setpoint: %d RPM\n", control->name, (int)new_setpoint); readjust: if (control && pm121_failure_state == 0) { rc = control->ops->set_value(control, st->setpoint); if (rc) { printk(KERN_WARNING "windfarm: %s fan error %d\n", control->name, rc); pm121_failure_state |= FAILURE_FAN; } } } /* CPU LOOP */ static void pm121_create_cpu_fans(void) { struct wf_cpu_pid_param pid_param; const struct smu_sdbp_header *hdr; struct smu_sdbp_cpupiddata *piddata; struct smu_sdbp_fvt *fvt; struct wf_control *fan_cpu; s32 tmax, tdelta, maxpow, powadj; fan_cpu = controls[FAN_CPU]; /* First, locate the PID params in SMU SBD */ hdr = smu_get_sdb_partition(SMU_SDB_CPUPIDDATA_ID, NULL); if (hdr == 0) { printk(KERN_WARNING "pm121: CPU PID fan config not found.\n"); goto fail; } piddata = (struct smu_sdbp_cpupiddata *)&hdr[1]; /* Get the FVT params for operating point 0 (the only supported one * for now) in order to get tmax */ hdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL); if (hdr) { fvt = (struct smu_sdbp_fvt *)&hdr[1]; tmax = ((s32)fvt->maxtemp) << 16; } else tmax = 0x5e0000; /* 94 degree default */ /* Alloc & initialize state */ pm121_cpu_state = kmalloc(sizeof(struct pm121_cpu_state), GFP_KERNEL); if (pm121_cpu_state == NULL) goto fail; pm121_cpu_state->ticks = 1; /* Fill PID params */ pid_param.interval = PM121_CPU_INTERVAL; pid_param.history_len = piddata->history_len; if 
(pid_param.history_len > WF_CPU_PID_MAX_HISTORY) { printk(KERN_WARNING "pm121: History size overflow on " "CPU control loop (%d)\n", piddata->history_len); pid_param.history_len = WF_CPU_PID_MAX_HISTORY; } pid_param.gd = piddata->gd; pid_param.gp = piddata->gp; pid_param.gr = piddata->gr / pid_param.history_len; tdelta = ((s32)piddata->target_temp_delta) << 16; maxpow = ((s32)piddata->max_power) << 16; powadj = ((s32)piddata->power_adj) << 16; pid_param.tmax = tmax; pid_param.ttarget = tmax - tdelta; pid_param.pmaxadj = maxpow - powadj; pid_param.min = fan_cpu->ops->get_min(fan_cpu); pid_param.max = fan_cpu->ops->get_max(fan_cpu); wf_cpu_pid_init(&pm121_cpu_state->pid, &pid_param); pr_debug("pm121: CPU Fan control initialized.\n"); pr_debug(" ttarget=%d.%03d, tmax=%d.%03d, min=%d RPM, max=%d RPM,\n", FIX32TOPRINT(pid_param.ttarget), FIX32TOPRINT(pid_param.tmax), pid_param.min, pid_param.max); return; fail: printk(KERN_WARNING "pm121: CPU fan config not found, max fan speed\n"); if (controls[CPUFREQ]) wf_control_set_max(controls[CPUFREQ]); if (fan_cpu) wf_control_set_max(fan_cpu); } static void pm121_cpu_fans_tick(struct pm121_cpu_state *st) { s32 new_setpoint, temp, power; struct wf_control *fan_cpu = NULL; int rc; if (--st->ticks != 0) { if (pm121_readjust) goto readjust; return; } st->ticks = PM121_CPU_INTERVAL; fan_cpu = controls[FAN_CPU]; rc = sensor_cpu_temp->ops->get_value(sensor_cpu_temp, &temp); if (rc) { printk(KERN_WARNING "pm121: CPU temp sensor error %d\n", rc); pm121_failure_state |= FAILURE_SENSOR; return; } rc = sensor_cpu_power->ops->get_value(sensor_cpu_power, &power); if (rc) { printk(KERN_WARNING "pm121: CPU power sensor error %d\n", rc); pm121_failure_state |= FAILURE_SENSOR; return; } pr_debug("pm121: CPU Fans tick ! CPU temp: %d.%03d°C, power: %d.%03d\n", FIX32TOPRINT(temp), FIX32TOPRINT(power)); if (temp > st->pid.param.tmax) pm121_failure_state |= FAILURE_OVERTEMP; new_setpoint = wf_cpu_pid_run(&st->pid, power, temp); /* correction */ new_setpoint = pm121_correct(new_setpoint, FAN_CPU, st->pid.param.min); /* connected correction */ new_setpoint = pm121_connect(FAN_CPU, new_setpoint); if (st->setpoint == new_setpoint) return; st->setpoint = new_setpoint; pr_debug("pm121: CPU corrected setpoint: %d RPM\n", (int)new_setpoint); readjust: if (fan_cpu && pm121_failure_state == 0) { rc = fan_cpu->ops->set_value(fan_cpu, st->setpoint); if (rc) { printk(KERN_WARNING "pm121: %s fan error %d\n", fan_cpu->name, rc); pm121_failure_state |= FAILURE_FAN; } } } /* * ****** Common ****** * */ static void pm121_tick(void) { unsigned int last_failure = pm121_failure_state; unsigned int new_failure; s32 total_power; int i; if (!pm121_started) { pr_debug("pm121: creating control loops !\n"); for (i = 0; i < N_LOOPS; i++) pm121_create_sys_fans(i); pm121_create_cpu_fans(); pm121_started = 1; } /* skipping ticks */ if (pm121_skipping && --pm121_skipping) return; /* compute average power */ total_power = 0; for (i = 0; i < pm121_cpu_state->pid.param.history_len; i++) total_power += pm121_cpu_state->pid.powers[i]; average_power = total_power / pm121_cpu_state->pid.param.history_len; pm121_failure_state = 0; for (i = 0 ; i < N_LOOPS; i++) { if (pm121_sys_state[i]) pm121_sys_fans_tick(i); } if (pm121_cpu_state) pm121_cpu_fans_tick(pm121_cpu_state); pm121_readjust = 0; new_failure = pm121_failure_state & ~last_failure; /* If entering failure mode, clamp cpufreq and ramp all * fans to full speed. 
*/ if (pm121_failure_state && !last_failure) { for (i = 0; i < N_CONTROLS; i++) { if (controls[i]) wf_control_set_max(controls[i]); } } /* If leaving failure mode, unclamp cpufreq and readjust * all fans on next iteration */ if (!pm121_failure_state && last_failure) { if (controls[CPUFREQ]) wf_control_set_min(controls[CPUFREQ]); pm121_readjust = 1; } /* Overtemp condition detected, notify and start skipping a couple * ticks to let the temperature go down */ if (new_failure & FAILURE_OVERTEMP) { wf_set_overtemp(); pm121_skipping = 2; } /* We only clear the overtemp condition if overtemp is cleared * _and_ no other failure is present. Since a sensor error will * clear the overtemp condition (can't measure temperature) at * the control loop levels, but we don't want to keep it clear * here in this case */ if (new_failure == 0 && last_failure & FAILURE_OVERTEMP) wf_clear_overtemp(); } static struct wf_control* pm121_register_control(struct wf_control *ct, const char *match, unsigned int id) { if (controls[id] == NULL && !strcmp(ct->name, match)) { if (wf_get_control(ct) == 0) controls[id] = ct; } return controls[id]; } static void pm121_new_control(struct wf_control *ct) { int all = 1; if (pm121_all_controls_ok) return; all = pm121_register_control(ct, "optical-drive-fan", FAN_OD) && all; all = pm121_register_control(ct, "hard-drive-fan", FAN_HD) && all; all = pm121_register_control(ct, "cpu-fan", FAN_CPU) && all; all = pm121_register_control(ct, "cpufreq-clamp", CPUFREQ) && all; if (all) pm121_all_controls_ok = 1; } static struct wf_sensor* pm121_register_sensor(struct wf_sensor *sensor, const char *match, struct wf_sensor **var) { if (*var == NULL && !strcmp(sensor->name, match)) { if (wf_get_sensor(sensor) == 0) *var = sensor; } return *var; } static void pm121_new_sensor(struct wf_sensor *sr) { int all = 1; if (pm121_all_sensors_ok) return; all = pm121_register_sensor(sr, "cpu-temp", &sensor_cpu_temp) && all; all = pm121_register_sensor(sr, "cpu-current", &sensor_cpu_current) && all; all = pm121_register_sensor(sr, "cpu-voltage", &sensor_cpu_voltage) && all; all = pm121_register_sensor(sr, "cpu-power", &sensor_cpu_power) && all; all = pm121_register_sensor(sr, "hard-drive-temp", &sensor_hard_drive_temp) && all; all = pm121_register_sensor(sr, "optical-drive-temp", &sensor_optical_drive_temp) && all; all = pm121_register_sensor(sr, "incoming-air-temp", &sensor_incoming_air_temp) && all; all = pm121_register_sensor(sr, "north-bridge-temp", &sensor_north_bridge_temp) && all; all = pm121_register_sensor(sr, "gpu-temp", &sensor_gpu_temp) && all; if (all) pm121_all_sensors_ok = 1; } static int pm121_notify(struct notifier_block *self, unsigned long event, void *data) { switch (event) { case WF_EVENT_NEW_CONTROL: pr_debug("pm121: new control %s detected\n", ((struct wf_control *)data)->name); pm121_new_control(data); break; case WF_EVENT_NEW_SENSOR: pr_debug("pm121: new sensor %s detected\n", ((struct wf_sensor *)data)->name); pm121_new_sensor(data); break; case WF_EVENT_TICK: if (pm121_all_controls_ok && pm121_all_sensors_ok) pm121_tick(); break; } return 0; } static struct notifier_block pm121_events = { .notifier_call = pm121_notify, }; static int pm121_init_pm(void) { const struct smu_sdbp_header *hdr; hdr = smu_get_sdb_partition(SMU_SDB_SENSORTREE_ID, NULL); if (hdr != 0) { struct smu_sdbp_sensortree *st = (struct smu_sdbp_sensortree *)&hdr[1]; pm121_mach_model = st->model_id; } pm121_connection = &pm121_connections[pm121_mach_model - 2]; printk(KERN_INFO "pm121: Initializing for iMac G5 
iSight model ID %d\n", pm121_mach_model); return 0; } static int pm121_probe(struct platform_device *ddev) { wf_register_client(&pm121_events); return 0; } static int __devexit pm121_remove(struct platform_device *ddev) { wf_unregister_client(&pm121_events); return 0; } static struct platform_driver pm121_driver = { .probe = pm121_probe, .remove = __devexit_p(pm121_remove), .driver = { .name = "windfarm", .bus = &platform_bus_type, }, }; static int __init pm121_init(void) { int rc = -ENODEV; if (of_machine_is_compatible("PowerMac12,1")) rc = pm121_init_pm(); if (rc == 0) { request_module("windfarm_smu_controls"); request_module("windfarm_smu_sensors"); request_module("windfarm_smu_sat"); request_module("windfarm_lm75_sensor"); request_module("windfarm_max6690_sensor"); request_module("windfarm_cpufreq_clamp"); platform_driver_register(&pm121_driver); } return rc; } static void __exit pm121_exit(void) { platform_driver_unregister(&pm121_driver); } module_init(pm121_init); module_exit(pm121_exit); MODULE_AUTHOR("Étienne Bersac <bersace@gmail.com>"); MODULE_DESCRIPTION("Thermal control logic for iMac G5 (iSight)"); MODULE_LICENSE("GPL");
gpl-2.0
thewisenerd/android_kernel_xiaomi_armani
arch/um/drivers/ubd_kern.c
4626
34299
/* * Copyright (C) 2000 Jeff Dike (jdike@karaya.com) * Licensed under the GPL */ /* 2001-09-28...2002-04-17 * Partition stuff by James_McMechan@hotmail.com * old style ubd by setting UBD_SHIFT to 0 * 2002-09-27...2002-10-18 massive tinkering for 2.5 * partitions have changed in 2.5 * 2003-01-29 more tinkering for 2.5.59-1 * This should now address the sysfs problems and has * the symlink for devfs to allow for booting with * the common /dev/ubd/discX/... names rather than * only /dev/ubdN/discN this version also has lots of * clean ups preparing for ubd-many. * James McMechan */ #define UBD_SHIFT 4 #include <linux/module.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/ata.h> #include <linux/hdreg.h> #include <linux/cdrom.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/ctype.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/platform_device.h> #include <linux/scatterlist.h> #include <asm/tlbflush.h> #include "kern_util.h" #include "mconsole_kern.h" #include "init.h" #include "irq_kern.h" #include "ubd.h" #include "os.h" #include "cow.h" enum ubd_req { UBD_READ, UBD_WRITE }; struct io_thread_req { struct request *req; enum ubd_req op; int fds[2]; unsigned long offsets[2]; unsigned long long offset; unsigned long length; char *buffer; int sectorsize; unsigned long sector_mask; unsigned long long cow_offset; unsigned long bitmap_words[2]; int error; }; static inline int ubd_test_bit(__u64 bit, unsigned char *data) { __u64 n; int bits, off; bits = sizeof(data[0]) * 8; n = bit / bits; off = bit % bits; return (data[n] & (1 << off)) != 0; } static inline void ubd_set_bit(__u64 bit, unsigned char *data) { __u64 n; int bits, off; bits = sizeof(data[0]) * 8; n = bit / bits; off = bit % bits; data[n] |= (1 << off); } /*End stuff from ubd_user.h*/ #define DRIVER_NAME "uml-blkdev" static DEFINE_MUTEX(ubd_lock); static DEFINE_MUTEX(ubd_mutex); /* replaces BKL, might not be needed */ static int ubd_open(struct block_device *bdev, fmode_t mode); static int ubd_release(struct gendisk *disk, fmode_t mode); static int ubd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo); #define MAX_DEV (16) static const struct block_device_operations ubd_blops = { .owner = THIS_MODULE, .open = ubd_open, .release = ubd_release, .ioctl = ubd_ioctl, .getgeo = ubd_getgeo, }; /* Protected by ubd_lock */ static int fake_major = UBD_MAJOR; static struct gendisk *ubd_gendisk[MAX_DEV]; static struct gendisk *fake_gendisk[MAX_DEV]; #ifdef CONFIG_BLK_DEV_UBD_SYNC #define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 1, .c = 0, \ .cl = 1 }) #else #define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 0, .c = 0, \ .cl = 1 }) #endif static struct openflags global_openflags = OPEN_FLAGS; struct cow { /* backing file name */ char *file; /* backing file fd */ int fd; unsigned long *bitmap; unsigned long bitmap_len; int bitmap_offset; int data_offset; }; #define MAX_SG 64 struct ubd { struct list_head restart; /* name (and fd, below) of the file opened for writing, either the * backing or the cow file. 
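 * With a COW configuration, file/fd refer to the COW file itself, while cow.file and cow.fd (opened read-only) refer to the backing file; see ubd_open_dev() below.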
*/ char *file; int count; int fd; __u64 size; struct openflags boot_openflags; struct openflags openflags; unsigned shared:1; unsigned no_cow:1; struct cow cow; struct platform_device pdev; struct request_queue *queue; spinlock_t lock; struct scatterlist sg[MAX_SG]; struct request *request; int start_sg, end_sg; sector_t rq_pos; }; #define DEFAULT_COW { \ .file = NULL, \ .fd = -1, \ .bitmap = NULL, \ .bitmap_offset = 0, \ .data_offset = 0, \ } #define DEFAULT_UBD { \ .file = NULL, \ .count = 0, \ .fd = -1, \ .size = -1, \ .boot_openflags = OPEN_FLAGS, \ .openflags = OPEN_FLAGS, \ .no_cow = 0, \ .shared = 0, \ .cow = DEFAULT_COW, \ .lock = __SPIN_LOCK_UNLOCKED(ubd_devs.lock), \ .request = NULL, \ .start_sg = 0, \ .end_sg = 0, \ .rq_pos = 0, \ } /* Protected by ubd_lock */ static struct ubd ubd_devs[MAX_DEV] = { [0 ... MAX_DEV - 1] = DEFAULT_UBD }; /* Only changed by fake_ide_setup which is a setup */ static int fake_ide = 0; static struct proc_dir_entry *proc_ide_root = NULL; static struct proc_dir_entry *proc_ide = NULL; static void make_proc_ide(void) { proc_ide_root = proc_mkdir("ide", NULL); proc_ide = proc_mkdir("ide0", proc_ide_root); } static int fake_ide_media_proc_show(struct seq_file *m, void *v) { seq_puts(m, "disk\n"); return 0; } static int fake_ide_media_proc_open(struct inode *inode, struct file *file) { return single_open(file, fake_ide_media_proc_show, NULL); } static const struct file_operations fake_ide_media_proc_fops = { .owner = THIS_MODULE, .open = fake_ide_media_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void make_ide_entries(const char *dev_name) { struct proc_dir_entry *dir, *ent; char name[64]; if(proc_ide_root == NULL) make_proc_ide(); dir = proc_mkdir(dev_name, proc_ide); if(!dir) return; ent = proc_create("media", S_IRUGO, dir, &fake_ide_media_proc_fops); if(!ent) return; snprintf(name, sizeof(name), "ide0/%s", dev_name); proc_symlink(dev_name, proc_ide_root, name); } static int fake_ide_setup(char *str) { fake_ide = 1; return 1; } __setup("fake_ide", fake_ide_setup); __uml_help(fake_ide_setup, "fake_ide\n" " Create ide0 entries that map onto ubd devices.\n\n" ); static int parse_unit(char **ptr) { char *str = *ptr, *end; int n = -1; if(isdigit(*str)) { n = simple_strtoul(str, &end, 0); if(end == str) return -1; *ptr = end; } else if (('a' <= *str) && (*str <= 'z')) { n = *str - 'a'; str++; *ptr = str; } return n; } /* If *index_out == -1 at exit, the passed option was a general one; * otherwise, the str pointer is used (and owned) inside ubd_devs array, so it * should not be freed on exit. 
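 * For example (mirroring the __uml_help text for "ubd" below), "0=root_fs" attaches a plain file to device 0, while "0=cow_file,backing_file" attaches a COW file together with its backing file.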
*/ static int ubd_setup_common(char *str, int *index_out, char **error_out) { struct ubd *ubd_dev; struct openflags flags = global_openflags; char *backing_file; int n, err = 0, i; if(index_out) *index_out = -1; n = *str; if(n == '='){ char *end; int major; str++; if(!strcmp(str, "sync")){ global_openflags = of_sync(global_openflags); goto out1; } err = -EINVAL; major = simple_strtoul(str, &end, 0); if((*end != '\0') || (end == str)){ *error_out = "Didn't parse major number"; goto out1; } mutex_lock(&ubd_lock); if (fake_major != UBD_MAJOR) { *error_out = "Can't assign a fake major twice"; goto out1; } fake_major = major; printk(KERN_INFO "Setting extra ubd major number to %d\n", major); err = 0; out1: mutex_unlock(&ubd_lock); return err; } n = parse_unit(&str); if(n < 0){ *error_out = "Couldn't parse device number"; return -EINVAL; } if(n >= MAX_DEV){ *error_out = "Device number out of range"; return 1; } err = -EBUSY; mutex_lock(&ubd_lock); ubd_dev = &ubd_devs[n]; if(ubd_dev->file != NULL){ *error_out = "Device is already configured"; goto out; } if (index_out) *index_out = n; err = -EINVAL; for (i = 0; i < sizeof("rscd="); i++) { switch (*str) { case 'r': flags.w = 0; break; case 's': flags.s = 1; break; case 'd': ubd_dev->no_cow = 1; break; case 'c': ubd_dev->shared = 1; break; case '=': str++; goto break_loop; default: *error_out = "Expected '=' or flag letter " "(r, s, c, or d)"; goto out; } str++; } if (*str == '=') *error_out = "Too many flags specified"; else *error_out = "Missing '='"; goto out; break_loop: backing_file = strchr(str, ','); if (backing_file == NULL) backing_file = strchr(str, ':'); if(backing_file != NULL){ if(ubd_dev->no_cow){ *error_out = "Can't specify both 'd' and a cow file"; goto out; } else { *backing_file = '\0'; backing_file++; } } err = 0; ubd_dev->file = str; ubd_dev->cow.file = backing_file; ubd_dev->boot_openflags = flags; out: mutex_unlock(&ubd_lock); return err; } static int ubd_setup(char *str) { char *error; int err; err = ubd_setup_common(str, NULL, &error); if(err) printk(KERN_ERR "Failed to initialize device with \"%s\" : " "%s\n", str, error); return 1; } __setup("ubd", ubd_setup); __uml_help(ubd_setup, "ubd<n><flags>=<filename>[(:|,)<filename2>]\n" " This is used to associate a device with a file in the underlying\n" " filesystem. When specifying two filenames, the first one is the\n" " COW name and the second is the backing file name. As separator you can\n" " use either a ':' or a ',': the first one allows writing things like;\n" " ubd0=~/Uml/root_cow:~/Uml/root_backing_file\n" " while with a ',' the shell would not expand the 2nd '~'.\n" " When using only one filename, UML will detect whether to treat it like\n" " a COW file or a backing file. To override this detection, add the 'd'\n" " flag:\n" " ubd0d=BackingFile\n" " Usually, there is a filesystem in the file, but \n" " that's not required. Swap devices containing swap files can be\n" " specified like this. Also, a file which doesn't contain a\n" " filesystem can have its contents read in the virtual \n" " machine by running 'dd' on the device. <n> must be in the range\n" " 0 to 7. Appending an 'r' to the number will cause that device\n" " to be mounted read-only. For example ubd1r=./ext_fs. 
Appending\n" " an 's' will cause data to be written to disk on the host immediately.\n" " 'c' will cause the device to be treated as being shared between multiple\n" " UMLs and file locking will be turned off - this is appropriate for a\n" " cluster filesystem and inappropriate at almost all other times.\n\n" ); static int udb_setup(char *str) { printk("udb%s specified on command line is almost certainly a ubd -> " "udb TYPO\n", str); return 1; } __setup("udb", udb_setup); __uml_help(udb_setup, "udb\n" " This option is here solely to catch ubd -> udb typos, which can be\n" " next to impossible to catch visually unless you specifically look for\n" " them. The only result of any option starting with 'udb' is an error\n" " in the boot output.\n\n" ); static void do_ubd_request(struct request_queue * q); /* Only changed by ubd_driver_init, which is an initcall. */ static int thread_fd = -1; static LIST_HEAD(restart); /* XXX - move this inside ubd_intr. */ /* Called without dev->lock held, and only in interrupt context. */ static void ubd_handler(void) { struct io_thread_req *req; struct ubd *ubd; struct list_head *list, *next_ele; unsigned long flags; int n; while(1){ n = os_read_file(thread_fd, &req, sizeof(struct io_thread_req *)); if(n != sizeof(req)){ if(n == -EAGAIN) break; printk(KERN_ERR "spurious interrupt in ubd_handler, " "err = %d\n", -n); return; } blk_end_request(req->req, 0, req->length); kfree(req); } reactivate_fd(thread_fd, UBD_IRQ); list_for_each_safe(list, next_ele, &restart){ ubd = container_of(list, struct ubd, restart); list_del_init(&ubd->restart); spin_lock_irqsave(&ubd->lock, flags); do_ubd_request(ubd->queue); spin_unlock_irqrestore(&ubd->lock, flags); } } static irqreturn_t ubd_intr(int irq, void *dev) { ubd_handler(); return IRQ_HANDLED; } /* Only changed by ubd_driver_init, which is an initcall. 
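 * io_pid is the host pid of the I/O helper thread started from ubd_driver_init(); kill_io_thread() signals it when UML exits.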
*/ static int io_pid = -1; static void kill_io_thread(void) { if(io_pid != -1) os_kill_process(io_pid, 1); } __uml_exitcall(kill_io_thread); static inline int ubd_file_size(struct ubd *ubd_dev, __u64 *size_out) { char *file; int fd; int err; __u32 version; __u32 align; char *backing_file; time_t mtime; unsigned long long size; int sector_size; int bitmap_offset; if (ubd_dev->file && ubd_dev->cow.file) { file = ubd_dev->cow.file; goto out; } fd = os_open_file(ubd_dev->file, global_openflags, 0); if (fd < 0) return fd; err = read_cow_header(file_reader, &fd, &version, &backing_file, \ &mtime, &size, &sector_size, &align, &bitmap_offset); os_close_file(fd); if(err == -EINVAL) file = ubd_dev->file; else file = backing_file; out: return os_file_size(file, size_out); } static int read_cow_bitmap(int fd, void *buf, int offset, int len) { int err; err = os_seek_file(fd, offset); if (err < 0) return err; err = os_read_file(fd, buf, len); if (err < 0) return err; return 0; } static int backing_file_mismatch(char *file, __u64 size, time_t mtime) { unsigned long modtime; unsigned long long actual; int err; err = os_file_modtime(file, &modtime); if (err < 0) { printk(KERN_ERR "Failed to get modification time of backing " "file \"%s\", err = %d\n", file, -err); return err; } err = os_file_size(file, &actual); if (err < 0) { printk(KERN_ERR "Failed to get size of backing file \"%s\", " "err = %d\n", file, -err); return err; } if (actual != size) { /*__u64 can be a long on AMD64 and with %lu GCC complains; so * the typecast.*/ printk(KERN_ERR "Size mismatch (%llu vs %llu) of COW header " "vs backing file\n", (unsigned long long) size, actual); return -EINVAL; } if (modtime != mtime) { printk(KERN_ERR "mtime mismatch (%ld vs %ld) of COW header vs " "backing file\n", mtime, modtime); return -EINVAL; } return 0; } static int path_requires_switch(char *from_cmdline, char *from_cow, char *cow) { struct uml_stat buf1, buf2; int err; if (from_cmdline == NULL) return 0; if (!strcmp(from_cmdline, from_cow)) return 0; err = os_stat_file(from_cmdline, &buf1); if (err < 0) { printk(KERN_ERR "Couldn't stat '%s', err = %d\n", from_cmdline, -err); return 0; } err = os_stat_file(from_cow, &buf2); if (err < 0) { printk(KERN_ERR "Couldn't stat '%s', err = %d\n", from_cow, -err); return 1; } if ((buf1.ust_dev == buf2.ust_dev) && (buf1.ust_ino == buf2.ust_ino)) return 0; printk(KERN_ERR "Backing file mismatch - \"%s\" requested, " "\"%s\" specified in COW header of \"%s\"\n", from_cmdline, from_cow, cow); return 1; } static int open_ubd_file(char *file, struct openflags *openflags, int shared, char **backing_file_out, int *bitmap_offset_out, unsigned long *bitmap_len_out, int *data_offset_out, int *create_cow_out) { time_t mtime; unsigned long long size; __u32 version, align; char *backing_file; int fd, err, sectorsize, asked_switch, mode = 0644; fd = os_open_file(file, *openflags, mode); if (fd < 0) { if ((fd == -ENOENT) && (create_cow_out != NULL)) *create_cow_out = 1; if (!openflags->w || ((fd != -EROFS) && (fd != -EACCES))) return fd; openflags->w = 0; fd = os_open_file(file, *openflags, mode); if (fd < 0) return fd; } if (shared) printk(KERN_INFO "Not locking \"%s\" on the host\n", file); else { err = os_lock_file(fd, openflags->w); if (err < 0) { printk(KERN_ERR "Failed to lock '%s', err = %d\n", file, -err); goto out_close; } } /* Successful return case! 
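 * From here on the open fd is returned; when a valid COW header was parsed, *backing_file_out and the bitmap/data offsets are also filled in for the caller via cow_sizes().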
*/ if (backing_file_out == NULL) return fd; err = read_cow_header(file_reader, &fd, &version, &backing_file, &mtime, &size, &sectorsize, &align, bitmap_offset_out); if (err && (*backing_file_out != NULL)) { printk(KERN_ERR "Failed to read COW header from COW file " "\"%s\", errno = %d\n", file, -err); goto out_close; } if (err) return fd; asked_switch = path_requires_switch(*backing_file_out, backing_file, file); /* Allow switching only if no mismatch. */ if (asked_switch && !backing_file_mismatch(*backing_file_out, size, mtime)) { printk(KERN_ERR "Switching backing file to '%s'\n", *backing_file_out); err = write_cow_header(file, fd, *backing_file_out, sectorsize, align, &size); if (err) { printk(KERN_ERR "Switch failed, errno = %d\n", -err); goto out_close; } } else { *backing_file_out = backing_file; err = backing_file_mismatch(*backing_file_out, size, mtime); if (err) goto out_close; } cow_sizes(version, size, sectorsize, align, *bitmap_offset_out, bitmap_len_out, data_offset_out); return fd; out_close: os_close_file(fd); return err; } static int create_cow_file(char *cow_file, char *backing_file, struct openflags flags, int sectorsize, int alignment, int *bitmap_offset_out, unsigned long *bitmap_len_out, int *data_offset_out) { int err, fd; flags.c = 1; fd = open_ubd_file(cow_file, &flags, 0, NULL, NULL, NULL, NULL, NULL); if (fd < 0) { err = fd; printk(KERN_ERR "Open of COW file '%s' failed, errno = %d\n", cow_file, -err); goto out; } err = init_cow_file(fd, cow_file, backing_file, sectorsize, alignment, bitmap_offset_out, bitmap_len_out, data_offset_out); if (!err) return fd; os_close_file(fd); out: return err; } static void ubd_close_dev(struct ubd *ubd_dev) { os_close_file(ubd_dev->fd); if(ubd_dev->cow.file == NULL) return; os_close_file(ubd_dev->cow.fd); vfree(ubd_dev->cow.bitmap); ubd_dev->cow.bitmap = NULL; } static int ubd_open_dev(struct ubd *ubd_dev) { struct openflags flags; char **back_ptr; int err, create_cow, *create_ptr; int fd; ubd_dev->openflags = ubd_dev->boot_openflags; create_cow = 0; create_ptr = (ubd_dev->cow.file != NULL) ? &create_cow : NULL; back_ptr = ubd_dev->no_cow ? 
NULL : &ubd_dev->cow.file; fd = open_ubd_file(ubd_dev->file, &ubd_dev->openflags, ubd_dev->shared, back_ptr, &ubd_dev->cow.bitmap_offset, &ubd_dev->cow.bitmap_len, &ubd_dev->cow.data_offset, create_ptr); if((fd == -ENOENT) && create_cow){ fd = create_cow_file(ubd_dev->file, ubd_dev->cow.file, ubd_dev->openflags, 1 << 9, PAGE_SIZE, &ubd_dev->cow.bitmap_offset, &ubd_dev->cow.bitmap_len, &ubd_dev->cow.data_offset); if(fd >= 0){ printk(KERN_INFO "Creating \"%s\" as COW file for " "\"%s\"\n", ubd_dev->file, ubd_dev->cow.file); } } if(fd < 0){ printk("Failed to open '%s', errno = %d\n", ubd_dev->file, -fd); return fd; } ubd_dev->fd = fd; if(ubd_dev->cow.file != NULL){ blk_queue_max_hw_sectors(ubd_dev->queue, 8 * sizeof(long)); err = -ENOMEM; ubd_dev->cow.bitmap = vmalloc(ubd_dev->cow.bitmap_len); if(ubd_dev->cow.bitmap == NULL){ printk(KERN_ERR "Failed to vmalloc COW bitmap\n"); goto error; } flush_tlb_kernel_vm(); err = read_cow_bitmap(ubd_dev->fd, ubd_dev->cow.bitmap, ubd_dev->cow.bitmap_offset, ubd_dev->cow.bitmap_len); if(err < 0) goto error; flags = ubd_dev->openflags; flags.w = 0; err = open_ubd_file(ubd_dev->cow.file, &flags, ubd_dev->shared, NULL, NULL, NULL, NULL, NULL); if(err < 0) goto error; ubd_dev->cow.fd = err; } return 0; error: os_close_file(ubd_dev->fd); return err; } static void ubd_device_release(struct device *dev) { struct ubd *ubd_dev = dev_get_drvdata(dev); blk_cleanup_queue(ubd_dev->queue); *ubd_dev = ((struct ubd) DEFAULT_UBD); } static int ubd_disk_register(int major, u64 size, int unit, struct gendisk **disk_out) { struct gendisk *disk; disk = alloc_disk(1 << UBD_SHIFT); if(disk == NULL) return -ENOMEM; disk->major = major; disk->first_minor = unit << UBD_SHIFT; disk->fops = &ubd_blops; set_capacity(disk, size / 512); if (major == UBD_MAJOR) sprintf(disk->disk_name, "ubd%c", 'a' + unit); else sprintf(disk->disk_name, "ubd_fake%d", unit); /* sysfs register (not for ide fake devices) */ if (major == UBD_MAJOR) { ubd_devs[unit].pdev.id = unit; ubd_devs[unit].pdev.name = DRIVER_NAME; ubd_devs[unit].pdev.dev.release = ubd_device_release; dev_set_drvdata(&ubd_devs[unit].pdev.dev, &ubd_devs[unit]); platform_device_register(&ubd_devs[unit].pdev); disk->driverfs_dev = &ubd_devs[unit].pdev.dev; } disk->private_data = &ubd_devs[unit]; disk->queue = ubd_devs[unit].queue; add_disk(disk); *disk_out = disk; return 0; } #define ROUND_BLOCK(n) ((n + ((1 << 9) - 1)) & (-1 << 9)) static int ubd_add(int n, char **error_out) { struct ubd *ubd_dev = &ubd_devs[n]; int err = 0; if(ubd_dev->file == NULL) goto out; err = ubd_file_size(ubd_dev, &ubd_dev->size); if(err < 0){ *error_out = "Couldn't determine size of device's file"; goto out; } ubd_dev->size = ROUND_BLOCK(ubd_dev->size); INIT_LIST_HEAD(&ubd_dev->restart); sg_init_table(ubd_dev->sg, MAX_SG); err = -ENOMEM; ubd_dev->queue = blk_init_queue(do_ubd_request, &ubd_dev->lock); if (ubd_dev->queue == NULL) { *error_out = "Failed to initialize device queue"; goto out; } ubd_dev->queue->queuedata = ubd_dev; blk_queue_max_segments(ubd_dev->queue, MAX_SG); err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]); if(err){ *error_out = "Failed to register device"; goto out_cleanup; } if (fake_major != UBD_MAJOR) ubd_disk_register(fake_major, ubd_dev->size, n, &fake_gendisk[n]); /* * Perhaps this should also be under the "if (fake_major)" above * using the fake_disk->disk_name */ if (fake_ide) make_ide_entries(ubd_gendisk[n]->disk_name); err = 0; out: return err; out_cleanup: blk_cleanup_queue(ubd_dev->queue); goto out; } static int 
ubd_config(char *str, char **error_out) { int n, ret; /* This string is possibly broken up and stored, so it's only * freed if ubd_setup_common fails, or if only general options * were set. */ str = kstrdup(str, GFP_KERNEL); if (str == NULL) { *error_out = "Failed to allocate memory"; return -ENOMEM; } ret = ubd_setup_common(str, &n, error_out); if (ret) goto err_free; if (n == -1) { ret = 0; goto err_free; } mutex_lock(&ubd_lock); ret = ubd_add(n, error_out); if (ret) ubd_devs[n].file = NULL; mutex_unlock(&ubd_lock); out: return ret; err_free: kfree(str); goto out; } static int ubd_get_config(char *name, char *str, int size, char **error_out) { struct ubd *ubd_dev; int n, len = 0; n = parse_unit(&name); if((n >= MAX_DEV) || (n < 0)){ *error_out = "ubd_get_config : device number out of range"; return -1; } ubd_dev = &ubd_devs[n]; mutex_lock(&ubd_lock); if(ubd_dev->file == NULL){ CONFIG_CHUNK(str, size, len, "", 1); goto out; } CONFIG_CHUNK(str, size, len, ubd_dev->file, 0); if(ubd_dev->cow.file != NULL){ CONFIG_CHUNK(str, size, len, ",", 0); CONFIG_CHUNK(str, size, len, ubd_dev->cow.file, 1); } else CONFIG_CHUNK(str, size, len, "", 1); out: mutex_unlock(&ubd_lock); return len; } static int ubd_id(char **str, int *start_out, int *end_out) { int n; n = parse_unit(str); *start_out = 0; *end_out = MAX_DEV - 1; return n; } static int ubd_remove(int n, char **error_out) { struct gendisk *disk = ubd_gendisk[n]; struct ubd *ubd_dev; int err = -ENODEV; mutex_lock(&ubd_lock); ubd_dev = &ubd_devs[n]; if(ubd_dev->file == NULL) goto out; /* you cannot remove an open disk */ err = -EBUSY; if(ubd_dev->count > 0) goto out; ubd_gendisk[n] = NULL; if(disk != NULL){ del_gendisk(disk); put_disk(disk); } if(fake_gendisk[n] != NULL){ del_gendisk(fake_gendisk[n]); put_disk(fake_gendisk[n]); fake_gendisk[n] = NULL; } err = 0; platform_device_unregister(&ubd_dev->pdev); out: mutex_unlock(&ubd_lock); return err; } /* All these are called by mconsole in process context and without * ubd-specific locks. The structure itself is const except for .list. */ static struct mc_device ubd_mc = { .list = LIST_HEAD_INIT(ubd_mc.list), .name = "ubd", .config = ubd_config, .get_config = ubd_get_config, .id = ubd_id, .remove = ubd_remove, }; static int __init ubd_mc_init(void) { mconsole_register_dev(&ubd_mc); return 0; } __initcall(ubd_mc_init); static int __init ubd0_init(void) { struct ubd *ubd_dev = &ubd_devs[0]; mutex_lock(&ubd_lock); if(ubd_dev->file == NULL) ubd_dev->file = "root_fs"; mutex_unlock(&ubd_lock); return 0; } __initcall(ubd0_init); /* Used in ubd_init, which is an initcall */ static struct platform_driver ubd_driver = { .driver = { .name = DRIVER_NAME, }, }; static int __init ubd_init(void) { char *error; int i, err; if (register_blkdev(UBD_MAJOR, "ubd")) return -1; if (fake_major != UBD_MAJOR) { char name[sizeof("ubd_nnn\0")]; snprintf(name, sizeof(name), "ubd_%d", fake_major); if (register_blkdev(fake_major, "ubd")) return -1; } platform_driver_register(&ubd_driver); mutex_lock(&ubd_lock); for (i = 0; i < MAX_DEV; i++){ err = ubd_add(i, &error); if(err) printk(KERN_ERR "Failed to initialize ubd device %d : " "%s\n", i, error); } mutex_unlock(&ubd_lock); return 0; } late_initcall(ubd_init); static int __init ubd_driver_init(void){ unsigned long stack; int err; /* Set by CONFIG_BLK_DEV_UBD_SYNC or ubd=sync.*/ if(global_openflags.s){ printk(KERN_INFO "ubd: Synchronous mode\n"); /* Letting ubd=sync be like using ubd#s= instead of ubd#= is * enough. So use the io thread anyway. 
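 * (the "s" flag only changes how the backing file is opened on the host; the request path through the I/O thread is the same either way)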
*/ } stack = alloc_stack(0, 0); io_pid = start_io_thread(stack + PAGE_SIZE - sizeof(void *), &thread_fd); if(io_pid < 0){ printk(KERN_ERR "ubd : Failed to start I/O thread (errno = %d) - " "falling back to synchronous I/O\n", -io_pid); io_pid = -1; return 0; } err = um_request_irq(UBD_IRQ, thread_fd, IRQ_READ, ubd_intr, 0, "ubd", ubd_devs); if(err != 0) printk(KERN_ERR "um_request_irq failed - errno = %d\n", -err); return 0; } device_initcall(ubd_driver_init); static int ubd_open(struct block_device *bdev, fmode_t mode) { struct gendisk *disk = bdev->bd_disk; struct ubd *ubd_dev = disk->private_data; int err = 0; mutex_lock(&ubd_mutex); if(ubd_dev->count == 0){ err = ubd_open_dev(ubd_dev); if(err){ printk(KERN_ERR "%s: Can't open \"%s\": errno = %d\n", disk->disk_name, ubd_dev->file, -err); goto out; } } ubd_dev->count++; set_disk_ro(disk, !ubd_dev->openflags.w); /* This should no more be needed. And it didn't work anyway to exclude * read-write remounting of filesystems.*/ /*if((mode & FMODE_WRITE) && !ubd_dev->openflags.w){ if(--ubd_dev->count == 0) ubd_close_dev(ubd_dev); err = -EROFS; }*/ out: mutex_unlock(&ubd_mutex); return err; } static int ubd_release(struct gendisk *disk, fmode_t mode) { struct ubd *ubd_dev = disk->private_data; mutex_lock(&ubd_mutex); if(--ubd_dev->count == 0) ubd_close_dev(ubd_dev); mutex_unlock(&ubd_mutex); return 0; } static void cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask, __u64 *cow_offset, unsigned long *bitmap, __u64 bitmap_offset, unsigned long *bitmap_words, __u64 bitmap_len) { __u64 sector = io_offset >> 9; int i, update_bitmap = 0; for(i = 0; i < length >> 9; i++){ if(cow_mask != NULL) ubd_set_bit(i, (unsigned char *) cow_mask); if(ubd_test_bit(sector + i, (unsigned char *) bitmap)) continue; update_bitmap = 1; ubd_set_bit(sector + i, (unsigned char *) bitmap); } if(!update_bitmap) return; *cow_offset = sector / (sizeof(unsigned long) * 8); /* This takes care of the case where we're exactly at the end of the * device, and *cow_offset + 1 is off the end. So, just back it up * by one word. Thanks to Lynn Kerby for the fix and James McMechan * for the original diagnosis. */ if (*cow_offset == (DIV_ROUND_UP(bitmap_len, sizeof(unsigned long)) - 1)) (*cow_offset)--; bitmap_words[0] = bitmap[*cow_offset]; bitmap_words[1] = bitmap[*cow_offset + 1]; *cow_offset *= sizeof(unsigned long); *cow_offset += bitmap_offset; } static void cowify_req(struct io_thread_req *req, unsigned long *bitmap, __u64 bitmap_offset, __u64 bitmap_len) { __u64 sector = req->offset >> 9; int i; if(req->length > (sizeof(req->sector_mask) * 8) << 9) panic("Operation too long"); if(req->op == UBD_READ) { for(i = 0; i < req->length >> 9; i++){ if(ubd_test_bit(sector + i, (unsigned char *) bitmap)) ubd_set_bit(i, (unsigned char *) &req->sector_mask); } } else cowify_bitmap(req->offset, req->length, &req->sector_mask, &req->cow_offset, bitmap, bitmap_offset, req->bitmap_words, bitmap_len); } /* Called with dev->lock held */ static void prepare_request(struct request *req, struct io_thread_req *io_req, unsigned long long offset, int page_offset, int len, struct page *page) { struct gendisk *disk = req->rq_disk; struct ubd *ubd_dev = disk->private_data; io_req->req = req; io_req->fds[0] = (ubd_dev->cow.file != NULL) ? ubd_dev->cow.fd : ubd_dev->fd; io_req->fds[1] = ubd_dev->fd; io_req->cow_offset = -1; io_req->offset = offset; io_req->length = len; io_req->error = 0; io_req->sector_mask = 0; io_req->op = (rq_data_dir(req) == READ) ? 
UBD_READ : UBD_WRITE; io_req->offsets[0] = 0; io_req->offsets[1] = ubd_dev->cow.data_offset; io_req->buffer = page_address(page) + page_offset; io_req->sectorsize = 1 << 9; if(ubd_dev->cow.file != NULL) cowify_req(io_req, ubd_dev->cow.bitmap, ubd_dev->cow.bitmap_offset, ubd_dev->cow.bitmap_len); } /* Called with dev->lock held */ static void do_ubd_request(struct request_queue *q) { struct io_thread_req *io_req; struct request *req; int n; while(1){ struct ubd *dev = q->queuedata; if(dev->end_sg == 0){ struct request *req = blk_fetch_request(q); if(req == NULL) return; dev->request = req; dev->rq_pos = blk_rq_pos(req); dev->start_sg = 0; dev->end_sg = blk_rq_map_sg(q, req, dev->sg); } req = dev->request; while(dev->start_sg < dev->end_sg){ struct scatterlist *sg = &dev->sg[dev->start_sg]; io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC); if(io_req == NULL){ if(list_empty(&dev->restart)) list_add(&dev->restart, &restart); return; } prepare_request(req, io_req, (unsigned long long)dev->rq_pos << 9, sg->offset, sg->length, sg_page(sg)); n = os_write_file(thread_fd, &io_req, sizeof(struct io_thread_req *)); if(n != sizeof(struct io_thread_req *)){ if(n != -EAGAIN) printk("write to io thread failed, " "errno = %d\n", -n); else if(list_empty(&dev->restart)) list_add(&dev->restart, &restart); kfree(io_req); return; } dev->rq_pos += sg->length >> 9; dev->start_sg++; } dev->end_sg = 0; dev->request = NULL; } } static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct ubd *ubd_dev = bdev->bd_disk->private_data; geo->heads = 128; geo->sectors = 32; geo->cylinders = ubd_dev->size / (128 * 32 * 512); return 0; } static int ubd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct ubd *ubd_dev = bdev->bd_disk->private_data; u16 ubd_id[ATA_ID_WORDS]; switch (cmd) { struct cdrom_volctrl volume; case HDIO_GET_IDENTITY: memset(&ubd_id, 0, ATA_ID_WORDS * 2); ubd_id[ATA_ID_CYLS] = ubd_dev->size / (128 * 32 * 512); ubd_id[ATA_ID_HEADS] = 128; ubd_id[ATA_ID_SECTORS] = 32; if(copy_to_user((char __user *) arg, (char *) &ubd_id, sizeof(ubd_id))) return -EFAULT; return 0; case CDROMVOLREAD: if(copy_from_user(&volume, (char __user *) arg, sizeof(volume))) return -EFAULT; volume.channel0 = 255; volume.channel1 = 255; volume.channel2 = 255; volume.channel3 = 255; if(copy_to_user((char __user *) arg, &volume, sizeof(volume))) return -EFAULT; return 0; } return -EINVAL; } static int update_bitmap(struct io_thread_req *req) { int n; if(req->cow_offset == -1) return 0; n = os_seek_file(req->fds[1], req->cow_offset); if(n < 0){ printk("do_io - bitmap lseek failed : err = %d\n", -n); return 1; } n = os_write_file(req->fds[1], &req->bitmap_words, sizeof(req->bitmap_words)); if(n != sizeof(req->bitmap_words)){ printk("do_io - bitmap update failed, err = %d fd = %d\n", -n, req->fds[1]); return 1; } return 0; } static void do_io(struct io_thread_req *req) { char *buf; unsigned long len; int n, nsectors, start, end, bit; int err; __u64 off; nsectors = req->length / req->sectorsize; start = 0; do { bit = ubd_test_bit(start, (unsigned char *) &req->sector_mask); end = start; while((end < nsectors) && (ubd_test_bit(end, (unsigned char *) &req->sector_mask) == bit)) end++; off = req->offset + req->offsets[bit] + start * req->sectorsize; len = (end - start) * req->sectorsize; buf = &req->buffer[start * req->sectorsize]; err = os_seek_file(req->fds[bit], off); if(err < 0){ printk("do_io - lseek failed : err = %d\n", -err); req->error = 1; return; } if(req->op == 
UBD_READ){ n = 0; do { buf = &buf[n]; len -= n; n = os_read_file(req->fds[bit], buf, len); if (n < 0) { printk("do_io - read failed, err = %d " "fd = %d\n", -n, req->fds[bit]); req->error = 1; return; } } while((n < len) && (n != 0)); if (n < len) memset(&buf[n], 0, len - n); } else { n = os_write_file(req->fds[bit], buf, len); if(n != len){ printk("do_io - write failed err = %d " "fd = %d\n", -n, req->fds[bit]); req->error = 1; return; } } start = end; } while(start < nsectors); req->error = update_bitmap(req); } /* Changed in start_io_thread, which is serialized by being called only * from ubd_init, which is an initcall. */ int kernel_fd = -1; /* Only changed by the io thread. XXX: currently unused. */ static int io_count = 0; int io_thread(void *arg) { struct io_thread_req *req; int n; ignore_sigwinch_sig(); while(1){ n = os_read_file(kernel_fd, &req, sizeof(struct io_thread_req *)); if(n != sizeof(struct io_thread_req *)){ if(n < 0) printk("io_thread - read failed, fd = %d, " "err = %d\n", kernel_fd, -n); else { printk("io_thread - short read, fd = %d, " "length = %d\n", kernel_fd, n); } continue; } io_count++; do_io(req); n = os_write_file(kernel_fd, &req, sizeof(struct io_thread_req *)); if(n != sizeof(struct io_thread_req *)) printk("io_thread - write failed, fd = %d, err = %d\n", kernel_fd, -n); } return 0; }
gpl-2.0
barome/AK-onePone
arch/sh/kernel/cpu/sh3/pinmux-sh7720.c
9490
44015
/* * SH7720 Pinmux * * Copyright (C) 2008 Magnus Damm * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/gpio.h> #include <cpu/sh7720.h> enum { PINMUX_RESERVED = 0, PINMUX_DATA_BEGIN, PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA, PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA, PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA, PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA, PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA, PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA, PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA, PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA, PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA, PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA, PTG6_DATA, PTG5_DATA, PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA, PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA, PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA, PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA, PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA, PTL3_DATA, PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA, PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA, PTP4_DATA, PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA, PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA, PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA, PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA, PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA, PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA, PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA, PINMUX_DATA_END, PINMUX_INPUT_BEGIN, PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN, PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN, PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN, PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN, PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN, PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN, PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN, PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN, PTE6_IN, PTE5_IN, PTE4_IN, PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN, PTF6_IN, PTF5_IN, PTF4_IN, PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN, PTG6_IN, PTG5_IN, PTG4_IN, PTG3_IN, PTG2_IN, PTG1_IN, PTG0_IN, PTH6_IN, PTH5_IN, PTH4_IN, PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN, PTJ6_IN, PTJ5_IN, PTJ4_IN, PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN, PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN, PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN, PTL3_IN, PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN, PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN, PTP4_IN, PTP3_IN, PTP2_IN, PTP1_IN, PTP0_IN, PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN, PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN, PTS4_IN, PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN, PTT4_IN, PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN, PTU4_IN, PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN, PTV4_IN, PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN, PINMUX_INPUT_END, PINMUX_INPUT_PULLUP_BEGIN, PTA7_IN_PU, PTA6_IN_PU, PTA5_IN_PU, PTA4_IN_PU, PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU, PTB7_IN_PU, PTB6_IN_PU, PTB5_IN_PU, PTB4_IN_PU, PTB3_IN_PU, PTB2_IN_PU, PTB1_IN_PU, PTB0_IN_PU, PTC7_IN_PU, PTC6_IN_PU, PTC5_IN_PU, PTC4_IN_PU, PTC3_IN_PU, PTC2_IN_PU, PTC1_IN_PU, PTC0_IN_PU, PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU, PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTD0_IN_PU, PTE4_IN_PU, PTE3_IN_PU, PTE2_IN_PU, PTE1_IN_PU, PTE0_IN_PU, PTF0_IN_PU, PTG6_IN_PU, PTG5_IN_PU, PTG4_IN_PU, PTG3_IN_PU, PTG2_IN_PU, PTG1_IN_PU, PTG0_IN_PU, PTH6_IN_PU, PTH5_IN_PU, PTH4_IN_PU, PTH3_IN_PU, PTH2_IN_PU, PTH1_IN_PU, PTH0_IN_PU, PTJ6_IN_PU, PTJ5_IN_PU, PTJ4_IN_PU, PTJ3_IN_PU, PTJ2_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU, PTK3_IN_PU, PTK2_IN_PU, PTK1_IN_PU, PTK0_IN_PU, 
PTL7_IN_PU, PTL6_IN_PU, PTL5_IN_PU, PTL4_IN_PU, PTL3_IN_PU, PTM7_IN_PU, PTM6_IN_PU, PTM5_IN_PU, PTM4_IN_PU, PTM3_IN_PU, PTM2_IN_PU, PTM1_IN_PU, PTM0_IN_PU, PTP4_IN_PU, PTP3_IN_PU, PTP2_IN_PU, PTP1_IN_PU, PTP0_IN_PU, PTR7_IN_PU, PTR6_IN_PU, PTR5_IN_PU, PTR4_IN_PU, PTR3_IN_PU, PTR2_IN_PU, PTR1_IN_PU, PTR0_IN_PU, PTS4_IN_PU, PTS3_IN_PU, PTS2_IN_PU, PTS1_IN_PU, PTS0_IN_PU, PTT4_IN_PU, PTT3_IN_PU, PTT2_IN_PU, PTT1_IN_PU, PTT0_IN_PU, PTU4_IN_PU, PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU, PTV4_IN_PU, PTV3_IN_PU, PTV2_IN_PU, PTV1_IN_PU, PTV0_IN_PU, PINMUX_INPUT_PULLUP_END, PINMUX_OUTPUT_BEGIN, PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT, PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT, PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT, PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT, PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT, PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT, PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT, PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT, PTE4_OUT, PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT, PTF0_OUT, PTG6_OUT, PTG5_OUT, PTG4_OUT, PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT, PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT, PTJ6_OUT, PTJ5_OUT, PTJ4_OUT, PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT, PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT, PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT, PTL3_OUT, PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT, PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT, PTP4_OUT, PTP3_OUT, PTP2_OUT, PTP1_OUT, PTP0_OUT, PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT, PTR3_OUT, PTR2_OUT, PTR1_OUT, PTR0_OUT, PTS4_OUT, PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT, PTT4_OUT, PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT, PTU4_OUT, PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT, PTV4_OUT, PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT, PINMUX_OUTPUT_END, PINMUX_FUNCTION_BEGIN, PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN, PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN, PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN, PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN, PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN, PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN, PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN, PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN, PTE6_FN, PTE5_FN, PTE4_FN, PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN, PTF6_FN, PTF5_FN, PTF4_FN, PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN, PTG6_FN, PTG5_FN, PTG4_FN, PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN, PTH6_FN, PTH5_FN, PTH4_FN, PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN, PTJ6_FN, PTJ5_FN, PTJ4_FN, PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN, PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN, PTL7_FN, PTL6_FN, PTL5_FN, PTL4_FN, PTL3_FN, PTM7_FN, PTM6_FN, PTM5_FN, PTM4_FN, PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN, PTP4_FN, PTP3_FN, PTP2_FN, PTP1_FN, PTP0_FN, PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN, PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN, PTS4_FN, PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN, PTT4_FN, PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN, PTU4_FN, PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN, PTV4_FN, PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN, PSELA_1_0_00, PSELA_1_0_01, PSELA_1_0_10, PSELA_3_2_00, PSELA_3_2_01, PSELA_3_2_10, PSELA_3_2_11, PSELA_5_4_00, PSELA_5_4_01, PSELA_5_4_10, PSELA_5_4_11, PSELA_7_6_00, PSELA_7_6_01, PSELA_7_6_10, PSELA_9_8_00, PSELA_9_8_01, PSELA_9_8_10, PSELA_11_10_00, PSELA_11_10_01, PSELA_11_10_10, PSELA_13_12_00, PSELA_13_12_10, PSELA_15_14_00, PSELA_15_14_10, PSELB_9_8_00, PSELB_9_8_11, PSELB_11_10_00, PSELB_11_10_01, PSELB_11_10_10, PSELB_11_10_11, PSELB_13_12_00, PSELB_13_12_01, PSELB_13_12_10, PSELB_13_12_11, PSELB_15_14_00, PSELB_15_14_11, PSELC_9_8_00, PSELC_9_8_10, PSELC_11_10_00, PSELC_11_10_10, PSELC_13_12_00, PSELC_13_12_01, PSELC_13_12_10, PSELC_15_14_00, PSELC_15_14_01, PSELC_15_14_10, PSELD_1_0_00, PSELD_1_0_10, PSELD_11_10_00, PSELD_11_10_01, PSELD_15_14_00, 
PSELD_15_14_01, PSELD_15_14_10, PINMUX_FUNCTION_END, PINMUX_MARK_BEGIN, D31_MARK, D30_MARK, D29_MARK, D28_MARK, D27_MARK, D26_MARK, D25_MARK, D24_MARK, D23_MARK, D22_MARK, D21_MARK, D20_MARK, D19_MARK, D18_MARK, D17_MARK, D16_MARK, IOIS16_MARK, RAS_MARK, CAS_MARK, CKE_MARK, CS5B_CE1A_MARK, CS6B_CE1B_MARK, A25_MARK, A24_MARK, A23_MARK, A22_MARK, A21_MARK, A20_MARK, A19_MARK, A0_MARK, REFOUT_MARK, IRQOUT_MARK, LCD_DATA15_MARK, LCD_DATA14_MARK, LCD_DATA13_MARK, LCD_DATA12_MARK, LCD_DATA11_MARK, LCD_DATA10_MARK, LCD_DATA9_MARK, LCD_DATA8_MARK, LCD_DATA7_MARK, LCD_DATA6_MARK, LCD_DATA5_MARK, LCD_DATA4_MARK, LCD_DATA3_MARK, LCD_DATA2_MARK, LCD_DATA1_MARK, LCD_DATA0_MARK, LCD_M_DISP_MARK, LCD_CL1_MARK, LCD_CL2_MARK, LCD_DON_MARK, LCD_FLM_MARK, LCD_VEPWC_MARK, LCD_VCPWC_MARK, AFE_RXIN_MARK, AFE_RDET_MARK, AFE_FS_MARK, AFE_TXOUT_MARK, AFE_SCLK_MARK, AFE_RLYCNT_MARK, AFE_HC1_MARK, IIC_SCL_MARK, IIC_SDA_MARK, DA1_MARK, DA0_MARK, AN3_MARK, AN2_MARK, AN1_MARK, AN0_MARK, ADTRG_MARK, USB1D_RCV_MARK, USB1D_TXSE0_MARK, USB1D_TXDPLS_MARK, USB1D_DMNS_MARK, USB1D_DPLS_MARK, USB1D_SPEED_MARK, USB1D_TXENL_MARK, USB2_PWR_EN_MARK, USB1_PWR_EN_USBF_UPLUP_MARK, USB1D_SUSPEND_MARK, IRQ5_MARK, IRQ4_MARK, IRQ3_IRL3_MARK, IRQ2_IRL2_MARK, IRQ1_IRL1_MARK, IRQ0_IRL0_MARK, PCC_REG_MARK, PCC_DRV_MARK, PCC_BVD2_MARK, PCC_BVD1_MARK, PCC_CD2_MARK, PCC_CD1_MARK, PCC_RESET_MARK, PCC_RDY_MARK, PCC_VS2_MARK, PCC_VS1_MARK, AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK, AUDCK_MARK, AUDSYNC_MARK, ASEBRKAK_MARK, TRST_MARK, TMS_MARK, TDO_MARK, TDI_MARK, TCK_MARK, DACK1_MARK, DREQ1_MARK, DACK0_MARK, DREQ0_MARK, TEND1_MARK, TEND0_MARK, SIOF0_SYNC_MARK, SIOF0_MCLK_MARK, SIOF0_TXD_MARK, SIOF0_RXD_MARK, SIOF0_SCK_MARK, SIOF1_SYNC_MARK, SIOF1_MCLK_MARK, SIOF1_TXD_MARK, SIOF1_RXD_MARK, SIOF1_SCK_MARK, SCIF0_TXD_MARK, SCIF0_RXD_MARK, SCIF0_RTS_MARK, SCIF0_CTS_MARK, SCIF0_SCK_MARK, SCIF1_TXD_MARK, SCIF1_RXD_MARK, SCIF1_RTS_MARK, SCIF1_CTS_MARK, SCIF1_SCK_MARK, TPU_TO1_MARK, TPU_TO0_MARK, TPU_TI3B_MARK, TPU_TI3A_MARK, TPU_TI2B_MARK, TPU_TI2A_MARK, TPU_TO3_MARK, TPU_TO2_MARK, SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK, MMC_DAT_MARK, MMC_CMD_MARK, MMC_CLK_MARK, MMC_VDDON_MARK, MMC_ODMOD_MARK, STATUS0_MARK, STATUS1_MARK, PINMUX_MARK_END, }; static pinmux_enum_t pinmux_data[] = { /* PTA GPIO */ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT, PTA7_IN_PU), PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT, PTA6_IN_PU), PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT, PTA5_IN_PU), PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT, PTA4_IN_PU), PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT, PTA3_IN_PU), PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT, PTA2_IN_PU), PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT, PTA1_IN_PU), PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT, PTA0_IN_PU), /* PTB GPIO */ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT, PTB7_IN_PU), PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT, PTB6_IN_PU), PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT, PTB5_IN_PU), PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT, PTB4_IN_PU), PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT, PTB3_IN_PU), PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT, PTB2_IN_PU), PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT, PTB1_IN_PU), PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT, PTB0_IN_PU), /* PTC GPIO */ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT, PTC7_IN_PU), PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT, PTC6_IN_PU), PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT, PTC5_IN_PU), PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT, PTC4_IN_PU), PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT, PTC3_IN_PU), PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT, PTC2_IN_PU), PINMUX_DATA(PTC1_DATA, PTC1_IN, 
PTC1_OUT, PTC1_IN_PU), PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT, PTC0_IN_PU), /* PTD GPIO */ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT, PTD7_IN_PU), PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT, PTD6_IN_PU), PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT, PTD5_IN_PU), PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT, PTD4_IN_PU), PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT, PTD3_IN_PU), PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT, PTD2_IN_PU), PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT, PTD1_IN_PU), PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT, PTD0_IN_PU), /* PTE GPIO */ PINMUX_DATA(PTE6_DATA, PTE6_IN), PINMUX_DATA(PTE5_DATA, PTE5_IN), PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT, PTE4_IN_PU), PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT, PTE3_IN_PU), PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT, PTE2_IN_PU), PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT, PTE1_IN_PU), PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT, PTE0_IN_PU), /* PTF GPIO */ PINMUX_DATA(PTF6_DATA, PTF6_IN), PINMUX_DATA(PTF5_DATA, PTF5_IN), PINMUX_DATA(PTF4_DATA, PTF4_IN), PINMUX_DATA(PTF3_DATA, PTF3_IN), PINMUX_DATA(PTF2_DATA, PTF2_IN), PINMUX_DATA(PTF1_DATA, PTF1_IN), PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT, PTF0_IN_PU), /* PTG GPIO */ PINMUX_DATA(PTG6_DATA, PTG6_IN, PTG6_OUT, PTG6_IN_PU), PINMUX_DATA(PTG5_DATA, PTG5_IN, PTG5_OUT, PTG5_IN_PU), PINMUX_DATA(PTG4_DATA, PTG4_IN, PTG4_OUT, PTG4_IN_PU), PINMUX_DATA(PTG3_DATA, PTG3_IN, PTG3_OUT, PTG3_IN_PU), PINMUX_DATA(PTG2_DATA, PTG2_IN, PTG2_OUT, PTG2_IN_PU), PINMUX_DATA(PTG1_DATA, PTG1_IN, PTG1_OUT, PTG1_IN_PU), PINMUX_DATA(PTG0_DATA, PTG0_IN, PTG0_OUT, PTG0_IN_PU), /* PTH GPIO */ PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT, PTH6_IN_PU), PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT, PTH5_IN_PU), PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT, PTH4_IN_PU), PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT, PTH3_IN_PU), PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT, PTH2_IN_PU), PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT, PTH1_IN_PU), PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT, PTH0_IN_PU), /* PTJ GPIO */ PINMUX_DATA(PTJ6_DATA, PTJ6_IN, PTJ6_OUT, PTJ6_IN_PU), PINMUX_DATA(PTJ5_DATA, PTJ5_IN, PTJ5_OUT, PTJ5_IN_PU), PINMUX_DATA(PTJ4_DATA, PTJ4_IN, PTJ4_OUT, PTJ4_IN_PU), PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT, PTJ3_IN_PU), PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT, PTJ2_IN_PU), PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT, PTJ1_IN_PU), PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT, PTJ0_IN_PU), /* PTK GPIO */ PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT, PTK3_IN_PU), PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT, PTK2_IN_PU), PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT, PTK1_IN_PU), PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT, PTK0_IN_PU), /* PTL GPIO */ PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT, PTL7_IN_PU), PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT, PTL6_IN_PU), PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT, PTL5_IN_PU), PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT, PTL4_IN_PU), PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT, PTL3_IN_PU), /* PTM GPIO */ PINMUX_DATA(PTM7_DATA, PTM7_IN, PTM7_OUT, PTM7_IN_PU), PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT, PTM6_IN_PU), PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT, PTM5_IN_PU), PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT, PTM4_IN_PU), PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT, PTM3_IN_PU), PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT, PTM2_IN_PU), PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT, PTM1_IN_PU), PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT, PTM0_IN_PU), /* PTP GPIO */ PINMUX_DATA(PTP4_DATA, PTP4_IN, PTP4_OUT, PTP4_IN_PU), PINMUX_DATA(PTP3_DATA, PTP3_IN, PTP3_OUT, PTP3_IN_PU), PINMUX_DATA(PTP2_DATA, PTP2_IN, PTP2_OUT, PTP2_IN_PU), PINMUX_DATA(PTP1_DATA, PTP1_IN, 
PTP1_OUT, PTP1_IN_PU), PINMUX_DATA(PTP0_DATA, PTP0_IN, PTP0_OUT, PTP0_IN_PU), /* PTR GPIO */ PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT, PTR7_IN_PU), PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT, PTR6_IN_PU), PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT, PTR5_IN_PU), PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT, PTR4_IN_PU), PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_OUT, PTR3_IN_PU), PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_OUT, PTR2_IN_PU), PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT, PTR1_IN_PU), PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT, PTR0_IN_PU), /* PTS GPIO */ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT, PTS4_IN_PU), PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT, PTS3_IN_PU), PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT, PTS2_IN_PU), PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT, PTS1_IN_PU), PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT, PTS0_IN_PU), /* PTT GPIO */ PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT, PTT4_IN_PU), PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT, PTT3_IN_PU), PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT, PTT2_IN_PU), PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT, PTT1_IN_PU), PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT, PTT0_IN_PU), /* PTU GPIO */ PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT, PTU4_IN_PU), PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT, PTU3_IN_PU), PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT, PTU2_IN_PU), PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT, PTU1_IN_PU), PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT, PTU0_IN_PU), /* PTV GPIO */ PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT, PTV4_IN_PU), PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT, PTV3_IN_PU), PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT, PTV2_IN_PU), PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT, PTV1_IN_PU), PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT, PTV0_IN_PU), /* PTA FN */ PINMUX_DATA(D23_MARK, PTA7_FN), PINMUX_DATA(D22_MARK, PTA6_FN), PINMUX_DATA(D21_MARK, PTA5_FN), PINMUX_DATA(D20_MARK, PTA4_FN), PINMUX_DATA(D19_MARK, PTA3_FN), PINMUX_DATA(D18_MARK, PTA2_FN), PINMUX_DATA(D17_MARK, PTA1_FN), PINMUX_DATA(D16_MARK, PTA0_FN), /* PTB FN */ PINMUX_DATA(D31_MARK, PTB7_FN), PINMUX_DATA(D30_MARK, PTB6_FN), PINMUX_DATA(D29_MARK, PTB5_FN), PINMUX_DATA(D28_MARK, PTB4_FN), PINMUX_DATA(D27_MARK, PTB3_FN), PINMUX_DATA(D26_MARK, PTB2_FN), PINMUX_DATA(D25_MARK, PTB1_FN), PINMUX_DATA(D24_MARK, PTB0_FN), /* PTC FN */ PINMUX_DATA(LCD_DATA7_MARK, PTC7_FN), PINMUX_DATA(LCD_DATA6_MARK, PTC6_FN), PINMUX_DATA(LCD_DATA5_MARK, PTC5_FN), PINMUX_DATA(LCD_DATA4_MARK, PTC4_FN), PINMUX_DATA(LCD_DATA3_MARK, PTC3_FN), PINMUX_DATA(LCD_DATA2_MARK, PTC2_FN), PINMUX_DATA(LCD_DATA1_MARK, PTC1_FN), PINMUX_DATA(LCD_DATA0_MARK, PTC0_FN), /* PTD FN */ PINMUX_DATA(LCD_DATA15_MARK, PTD7_FN), PINMUX_DATA(LCD_DATA14_MARK, PTD6_FN), PINMUX_DATA(LCD_DATA13_MARK, PTD5_FN), PINMUX_DATA(LCD_DATA12_MARK, PTD4_FN), PINMUX_DATA(LCD_DATA11_MARK, PTD3_FN), PINMUX_DATA(LCD_DATA10_MARK, PTD2_FN), PINMUX_DATA(LCD_DATA9_MARK, PTD1_FN), PINMUX_DATA(LCD_DATA8_MARK, PTD0_FN), /* PTE FN */ PINMUX_DATA(IIC_SCL_MARK, PSELB_9_8_00, PTE6_FN), PINMUX_DATA(AFE_RXIN_MARK, PSELB_9_8_11, PTE6_FN), PINMUX_DATA(IIC_SDA_MARK, PSELB_9_8_00, PTE5_FN), PINMUX_DATA(AFE_RDET_MARK, PSELB_9_8_11, PTE5_FN), PINMUX_DATA(LCD_M_DISP_MARK, PTE4_FN), PINMUX_DATA(LCD_CL1_MARK, PTE3_FN), PINMUX_DATA(LCD_CL2_MARK, PTE2_FN), PINMUX_DATA(LCD_DON_MARK, PTE1_FN), PINMUX_DATA(LCD_FLM_MARK, PTE0_FN), /* PTF FN */ PINMUX_DATA(DA1_MARK, PTF6_FN), PINMUX_DATA(DA0_MARK, PTF5_FN), PINMUX_DATA(AN3_MARK, PTF4_FN), PINMUX_DATA(AN2_MARK, PTF3_FN), PINMUX_DATA(AN1_MARK, PTF2_FN), PINMUX_DATA(AN0_MARK, PTF1_FN), PINMUX_DATA(ADTRG_MARK, PTF0_FN), /* PTG FN */ PINMUX_DATA(USB1D_RCV_MARK, 
PSELA_3_2_00, PTG6_FN), PINMUX_DATA(AFE_FS_MARK, PSELA_3_2_01, PTG6_FN), PINMUX_DATA(PCC_REG_MARK, PSELA_3_2_10, PTG6_FN), PINMUX_DATA(IRQ5_MARK, PSELA_3_2_11, PTG6_FN), PINMUX_DATA(USB1D_TXSE0_MARK, PSELA_5_4_00, PTG5_FN), PINMUX_DATA(AFE_TXOUT_MARK, PSELA_5_4_01, PTG5_FN), PINMUX_DATA(PCC_DRV_MARK, PSELA_5_4_10, PTG5_FN), PINMUX_DATA(IRQ4_MARK, PSELA_5_4_11, PTG5_FN), PINMUX_DATA(USB1D_TXDPLS_MARK, PSELA_7_6_00, PTG4_FN), PINMUX_DATA(AFE_SCLK_MARK, PSELA_7_6_01, PTG4_FN), PINMUX_DATA(IOIS16_MARK, PSELA_7_6_10, PTG4_FN), PINMUX_DATA(USB1D_DMNS_MARK, PSELA_9_8_00, PTG3_FN), PINMUX_DATA(AFE_RLYCNT_MARK, PSELA_9_8_01, PTG3_FN), PINMUX_DATA(PCC_BVD2_MARK, PSELA_9_8_10, PTG3_FN), PINMUX_DATA(USB1D_DPLS_MARK, PSELA_11_10_00, PTG2_FN), PINMUX_DATA(AFE_HC1_MARK, PSELA_11_10_01, PTG2_FN), PINMUX_DATA(PCC_BVD1_MARK, PSELA_11_10_10, PTG2_FN), PINMUX_DATA(USB1D_SPEED_MARK, PSELA_13_12_00, PTG1_FN), PINMUX_DATA(PCC_CD2_MARK, PSELA_13_12_10, PTG1_FN), PINMUX_DATA(USB1D_TXENL_MARK, PSELA_15_14_00, PTG0_FN), PINMUX_DATA(PCC_CD1_MARK, PSELA_15_14_10, PTG0_FN), /* PTH FN */ PINMUX_DATA(RAS_MARK, PTH6_FN), PINMUX_DATA(CAS_MARK, PTH5_FN), PINMUX_DATA(CKE_MARK, PTH4_FN), PINMUX_DATA(STATUS1_MARK, PTH3_FN), PINMUX_DATA(STATUS0_MARK, PTH2_FN), PINMUX_DATA(USB2_PWR_EN_MARK, PTH1_FN), PINMUX_DATA(USB1_PWR_EN_USBF_UPLUP_MARK, PTH0_FN), /* PTJ FN */ PINMUX_DATA(AUDCK_MARK, PTJ6_FN), PINMUX_DATA(ASEBRKAK_MARK, PTJ5_FN), PINMUX_DATA(AUDATA3_MARK, PTJ4_FN), PINMUX_DATA(AUDATA2_MARK, PTJ3_FN), PINMUX_DATA(AUDATA1_MARK, PTJ2_FN), PINMUX_DATA(AUDATA0_MARK, PTJ1_FN), PINMUX_DATA(AUDSYNC_MARK, PTJ0_FN), /* PTK FN */ PINMUX_DATA(PCC_RESET_MARK, PTK3_FN), PINMUX_DATA(PCC_RDY_MARK, PTK2_FN), PINMUX_DATA(PCC_VS2_MARK, PTK1_FN), PINMUX_DATA(PCC_VS1_MARK, PTK0_FN), /* PTL FN */ PINMUX_DATA(TRST_MARK, PTL7_FN), PINMUX_DATA(TMS_MARK, PTL6_FN), PINMUX_DATA(TDO_MARK, PTL5_FN), PINMUX_DATA(TDI_MARK, PTL4_FN), PINMUX_DATA(TCK_MARK, PTL3_FN), /* PTM FN */ PINMUX_DATA(DREQ1_MARK, PTM7_FN), PINMUX_DATA(DREQ0_MARK, PTM6_FN), PINMUX_DATA(DACK1_MARK, PTM5_FN), PINMUX_DATA(DACK0_MARK, PTM4_FN), PINMUX_DATA(TEND1_MARK, PTM3_FN), PINMUX_DATA(TEND0_MARK, PTM2_FN), PINMUX_DATA(CS5B_CE1A_MARK, PTM1_FN), PINMUX_DATA(CS6B_CE1B_MARK, PTM0_FN), /* PTP FN */ PINMUX_DATA(USB1D_SUSPEND_MARK, PSELA_1_0_00, PTP4_FN), PINMUX_DATA(REFOUT_MARK, PSELA_1_0_01, PTP4_FN), PINMUX_DATA(IRQOUT_MARK, PSELA_1_0_10, PTP4_FN), PINMUX_DATA(IRQ3_IRL3_MARK, PTP3_FN), PINMUX_DATA(IRQ2_IRL2_MARK, PTP2_FN), PINMUX_DATA(IRQ1_IRL1_MARK, PTP1_FN), PINMUX_DATA(IRQ0_IRL0_MARK, PTP0_FN), /* PTR FN */ PINMUX_DATA(A25_MARK, PTR7_FN), PINMUX_DATA(A24_MARK, PTR6_FN), PINMUX_DATA(A23_MARK, PTR5_FN), PINMUX_DATA(A22_MARK, PTR4_FN), PINMUX_DATA(A21_MARK, PTR3_FN), PINMUX_DATA(A20_MARK, PTR2_FN), PINMUX_DATA(A19_MARK, PTR1_FN), PINMUX_DATA(A0_MARK, PTR0_FN), /* PTS FN */ PINMUX_DATA(SIOF0_SYNC_MARK, PTS4_FN), PINMUX_DATA(SIOF0_MCLK_MARK, PTS3_FN), PINMUX_DATA(SIOF0_TXD_MARK, PTS2_FN), PINMUX_DATA(SIOF0_RXD_MARK, PTS1_FN), PINMUX_DATA(SIOF0_SCK_MARK, PTS0_FN), /* PTT FN */ PINMUX_DATA(SCIF0_CTS_MARK, PSELB_15_14_00, PTT4_FN), PINMUX_DATA(TPU_TO1_MARK, PSELB_15_14_11, PTT4_FN), PINMUX_DATA(SCIF0_RTS_MARK, PSELB_15_14_00, PTT3_FN), PINMUX_DATA(TPU_TO0_MARK, PSELB_15_14_11, PTT3_FN), PINMUX_DATA(SCIF0_TXD_MARK, PTT2_FN), PINMUX_DATA(SCIF0_RXD_MARK, PTT1_FN), PINMUX_DATA(SCIF0_SCK_MARK, PTT0_FN), /* PTU FN */ PINMUX_DATA(SIOF1_SYNC_MARK, PTU4_FN), PINMUX_DATA(SIOF1_MCLK_MARK, PSELD_11_10_00, PTU3_FN), PINMUX_DATA(TPU_TI3B_MARK, PSELD_11_10_01, PTU3_FN), PINMUX_DATA(SIOF1_TXD_MARK, 
PSELD_15_14_00, PTU2_FN), PINMUX_DATA(TPU_TI3A_MARK, PSELD_15_14_01, PTU2_FN), PINMUX_DATA(MMC_DAT_MARK, PSELD_15_14_10, PTU2_FN), PINMUX_DATA(SIOF1_RXD_MARK, PSELC_13_12_00, PTU1_FN), PINMUX_DATA(TPU_TI2B_MARK, PSELC_13_12_01, PTU1_FN), PINMUX_DATA(MMC_CMD_MARK, PSELC_13_12_10, PTU1_FN), PINMUX_DATA(SIOF1_SCK_MARK, PSELC_15_14_00, PTU0_FN), PINMUX_DATA(TPU_TI2A_MARK, PSELC_15_14_01, PTU0_FN), PINMUX_DATA(MMC_CLK_MARK, PSELC_15_14_10, PTU0_FN), /* PTV FN */ PINMUX_DATA(SCIF1_CTS_MARK, PSELB_11_10_00, PTV4_FN), PINMUX_DATA(TPU_TO3_MARK, PSELB_11_10_01, PTV4_FN), PINMUX_DATA(MMC_VDDON_MARK, PSELB_11_10_10, PTV4_FN), PINMUX_DATA(LCD_VEPWC_MARK, PSELB_11_10_11, PTV4_FN), PINMUX_DATA(SCIF1_RTS_MARK, PSELB_13_12_00, PTV3_FN), PINMUX_DATA(TPU_TO2_MARK, PSELB_13_12_01, PTV3_FN), PINMUX_DATA(MMC_ODMOD_MARK, PSELB_13_12_10, PTV3_FN), PINMUX_DATA(LCD_VCPWC_MARK, PSELB_13_12_11, PTV3_FN), PINMUX_DATA(SCIF1_TXD_MARK, PSELC_9_8_00, PTV2_FN), PINMUX_DATA(SIM_D_MARK, PSELC_9_8_10, PTV2_FN), PINMUX_DATA(SCIF1_RXD_MARK, PSELC_11_10_00, PTV1_FN), PINMUX_DATA(SIM_RST_MARK, PSELC_11_10_10, PTV1_FN), PINMUX_DATA(SCIF1_SCK_MARK, PSELD_1_0_00, PTV0_FN), PINMUX_DATA(SIM_CLK_MARK, PSELD_1_0_10, PTV0_FN), }; static struct pinmux_gpio pinmux_gpios[] = { /* PTA */ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA), PINMUX_GPIO(GPIO_PTA6, PTA6_DATA), PINMUX_GPIO(GPIO_PTA5, PTA5_DATA), PINMUX_GPIO(GPIO_PTA4, PTA4_DATA), PINMUX_GPIO(GPIO_PTA3, PTA3_DATA), PINMUX_GPIO(GPIO_PTA2, PTA2_DATA), PINMUX_GPIO(GPIO_PTA1, PTA1_DATA), PINMUX_GPIO(GPIO_PTA0, PTA0_DATA), /* PTB */ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA), PINMUX_GPIO(GPIO_PTB6, PTB6_DATA), PINMUX_GPIO(GPIO_PTB5, PTB5_DATA), PINMUX_GPIO(GPIO_PTB4, PTB4_DATA), PINMUX_GPIO(GPIO_PTB3, PTB3_DATA), PINMUX_GPIO(GPIO_PTB2, PTB2_DATA), PINMUX_GPIO(GPIO_PTB1, PTB1_DATA), PINMUX_GPIO(GPIO_PTB0, PTB0_DATA), /* PTC */ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA), PINMUX_GPIO(GPIO_PTC6, PTC6_DATA), PINMUX_GPIO(GPIO_PTC5, PTC5_DATA), PINMUX_GPIO(GPIO_PTC4, PTC4_DATA), PINMUX_GPIO(GPIO_PTC3, PTC3_DATA), PINMUX_GPIO(GPIO_PTC2, PTC2_DATA), PINMUX_GPIO(GPIO_PTC1, PTC1_DATA), PINMUX_GPIO(GPIO_PTC0, PTC0_DATA), /* PTD */ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA), PINMUX_GPIO(GPIO_PTD6, PTD6_DATA), PINMUX_GPIO(GPIO_PTD5, PTD5_DATA), PINMUX_GPIO(GPIO_PTD4, PTD4_DATA), PINMUX_GPIO(GPIO_PTD3, PTD3_DATA), PINMUX_GPIO(GPIO_PTD2, PTD2_DATA), PINMUX_GPIO(GPIO_PTD1, PTD1_DATA), PINMUX_GPIO(GPIO_PTD0, PTD0_DATA), /* PTE */ PINMUX_GPIO(GPIO_PTE6, PTE6_DATA), PINMUX_GPIO(GPIO_PTE5, PTE5_DATA), PINMUX_GPIO(GPIO_PTE4, PTE4_DATA), PINMUX_GPIO(GPIO_PTE3, PTE3_DATA), PINMUX_GPIO(GPIO_PTE2, PTE2_DATA), PINMUX_GPIO(GPIO_PTE1, PTE1_DATA), PINMUX_GPIO(GPIO_PTE0, PTE0_DATA), /* PTF */ PINMUX_GPIO(GPIO_PTF6, PTF6_DATA), PINMUX_GPIO(GPIO_PTF5, PTF5_DATA), PINMUX_GPIO(GPIO_PTF4, PTF4_DATA), PINMUX_GPIO(GPIO_PTF3, PTF3_DATA), PINMUX_GPIO(GPIO_PTF2, PTF2_DATA), PINMUX_GPIO(GPIO_PTF1, PTF1_DATA), PINMUX_GPIO(GPIO_PTF0, PTF0_DATA), /* PTG */ PINMUX_GPIO(GPIO_PTG6, PTG6_DATA), PINMUX_GPIO(GPIO_PTG5, PTG5_DATA), PINMUX_GPIO(GPIO_PTG4, PTG4_DATA), PINMUX_GPIO(GPIO_PTG3, PTG3_DATA), PINMUX_GPIO(GPIO_PTG2, PTG2_DATA), PINMUX_GPIO(GPIO_PTG1, PTG1_DATA), PINMUX_GPIO(GPIO_PTG0, PTG0_DATA), /* PTH */ PINMUX_GPIO(GPIO_PTH6, PTH6_DATA), PINMUX_GPIO(GPIO_PTH5, PTH5_DATA), PINMUX_GPIO(GPIO_PTH4, PTH4_DATA), PINMUX_GPIO(GPIO_PTH3, PTH3_DATA), PINMUX_GPIO(GPIO_PTH2, PTH2_DATA), PINMUX_GPIO(GPIO_PTH1, PTH1_DATA), PINMUX_GPIO(GPIO_PTH0, PTH0_DATA), /* PTJ */ PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA), PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA), PINMUX_GPIO(GPIO_PTJ4, PTJ4_DATA), 
PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA), PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA), PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA), PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA), /* PTK */ PINMUX_GPIO(GPIO_PTK3, PTK3_DATA), PINMUX_GPIO(GPIO_PTK2, PTK2_DATA), PINMUX_GPIO(GPIO_PTK1, PTK1_DATA), PINMUX_GPIO(GPIO_PTK0, PTK0_DATA), /* PTL */ PINMUX_GPIO(GPIO_PTL7, PTL7_DATA), PINMUX_GPIO(GPIO_PTL6, PTL6_DATA), PINMUX_GPIO(GPIO_PTL5, PTL5_DATA), PINMUX_GPIO(GPIO_PTL4, PTL4_DATA), PINMUX_GPIO(GPIO_PTL3, PTL3_DATA), /* PTM */ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA), PINMUX_GPIO(GPIO_PTM6, PTM6_DATA), PINMUX_GPIO(GPIO_PTM5, PTM5_DATA), PINMUX_GPIO(GPIO_PTM4, PTM4_DATA), PINMUX_GPIO(GPIO_PTM3, PTM3_DATA), PINMUX_GPIO(GPIO_PTM2, PTM2_DATA), PINMUX_GPIO(GPIO_PTM1, PTM1_DATA), PINMUX_GPIO(GPIO_PTM0, PTM0_DATA), /* PTP */ PINMUX_GPIO(GPIO_PTP4, PTP4_DATA), PINMUX_GPIO(GPIO_PTP3, PTP3_DATA), PINMUX_GPIO(GPIO_PTP2, PTP2_DATA), PINMUX_GPIO(GPIO_PTP1, PTP1_DATA), PINMUX_GPIO(GPIO_PTP0, PTP0_DATA), /* PTR */ PINMUX_GPIO(GPIO_PTR7, PTR7_DATA), PINMUX_GPIO(GPIO_PTR6, PTR6_DATA), PINMUX_GPIO(GPIO_PTR5, PTR5_DATA), PINMUX_GPIO(GPIO_PTR4, PTR4_DATA), PINMUX_GPIO(GPIO_PTR3, PTR3_DATA), PINMUX_GPIO(GPIO_PTR2, PTR2_DATA), PINMUX_GPIO(GPIO_PTR1, PTR1_DATA), PINMUX_GPIO(GPIO_PTR0, PTR0_DATA), /* PTS */ PINMUX_GPIO(GPIO_PTS4, PTS4_DATA), PINMUX_GPIO(GPIO_PTS3, PTS3_DATA), PINMUX_GPIO(GPIO_PTS2, PTS2_DATA), PINMUX_GPIO(GPIO_PTS1, PTS1_DATA), PINMUX_GPIO(GPIO_PTS0, PTS0_DATA), /* PTT */ PINMUX_GPIO(GPIO_PTT4, PTT4_DATA), PINMUX_GPIO(GPIO_PTT3, PTT3_DATA), PINMUX_GPIO(GPIO_PTT2, PTT2_DATA), PINMUX_GPIO(GPIO_PTT1, PTT1_DATA), PINMUX_GPIO(GPIO_PTT0, PTT0_DATA), /* PTU */ PINMUX_GPIO(GPIO_PTU4, PTU4_DATA), PINMUX_GPIO(GPIO_PTU3, PTU3_DATA), PINMUX_GPIO(GPIO_PTU2, PTU2_DATA), PINMUX_GPIO(GPIO_PTU1, PTU1_DATA), PINMUX_GPIO(GPIO_PTU0, PTU0_DATA), /* PTV */ PINMUX_GPIO(GPIO_PTV4, PTV4_DATA), PINMUX_GPIO(GPIO_PTV3, PTV3_DATA), PINMUX_GPIO(GPIO_PTV2, PTV2_DATA), PINMUX_GPIO(GPIO_PTV1, PTV1_DATA), PINMUX_GPIO(GPIO_PTV0, PTV0_DATA), /* BSC */ PINMUX_GPIO(GPIO_FN_D31, D31_MARK), PINMUX_GPIO(GPIO_FN_D30, D30_MARK), PINMUX_GPIO(GPIO_FN_D29, D29_MARK), PINMUX_GPIO(GPIO_FN_D28, D28_MARK), PINMUX_GPIO(GPIO_FN_D27, D27_MARK), PINMUX_GPIO(GPIO_FN_D26, D26_MARK), PINMUX_GPIO(GPIO_FN_D25, D25_MARK), PINMUX_GPIO(GPIO_FN_D24, D24_MARK), PINMUX_GPIO(GPIO_FN_D23, D23_MARK), PINMUX_GPIO(GPIO_FN_D22, D22_MARK), PINMUX_GPIO(GPIO_FN_D21, D21_MARK), PINMUX_GPIO(GPIO_FN_D20, D20_MARK), PINMUX_GPIO(GPIO_FN_D19, D19_MARK), PINMUX_GPIO(GPIO_FN_D18, D18_MARK), PINMUX_GPIO(GPIO_FN_D17, D17_MARK), PINMUX_GPIO(GPIO_FN_D16, D16_MARK), PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK), PINMUX_GPIO(GPIO_FN_RAS, RAS_MARK), PINMUX_GPIO(GPIO_FN_CAS, CAS_MARK), PINMUX_GPIO(GPIO_FN_CKE, CKE_MARK), PINMUX_GPIO(GPIO_FN_CS5B_CE1A, CS5B_CE1A_MARK), PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK), PINMUX_GPIO(GPIO_FN_A25, A25_MARK), PINMUX_GPIO(GPIO_FN_A24, A24_MARK), PINMUX_GPIO(GPIO_FN_A23, A23_MARK), PINMUX_GPIO(GPIO_FN_A22, A22_MARK), PINMUX_GPIO(GPIO_FN_A21, A21_MARK), PINMUX_GPIO(GPIO_FN_A20, A20_MARK), PINMUX_GPIO(GPIO_FN_A19, A19_MARK), PINMUX_GPIO(GPIO_FN_A0, A0_MARK), PINMUX_GPIO(GPIO_FN_REFOUT, REFOUT_MARK), PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK), /* LCDC */ PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK), 
PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK), PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK), PINMUX_GPIO(GPIO_FN_LCD_CL1, LCD_CL1_MARK), PINMUX_GPIO(GPIO_FN_LCD_CL2, LCD_CL2_MARK), PINMUX_GPIO(GPIO_FN_LCD_DON, LCD_DON_MARK), PINMUX_GPIO(GPIO_FN_LCD_FLM, LCD_FLM_MARK), PINMUX_GPIO(GPIO_FN_LCD_VEPWC, LCD_VEPWC_MARK), PINMUX_GPIO(GPIO_FN_LCD_VCPWC, LCD_VCPWC_MARK), /* AFEIF */ PINMUX_GPIO(GPIO_FN_AFE_RXIN, AFE_RXIN_MARK), PINMUX_GPIO(GPIO_FN_AFE_RDET, AFE_RDET_MARK), PINMUX_GPIO(GPIO_FN_AFE_FS, AFE_FS_MARK), PINMUX_GPIO(GPIO_FN_AFE_TXOUT, AFE_TXOUT_MARK), PINMUX_GPIO(GPIO_FN_AFE_SCLK, AFE_SCLK_MARK), PINMUX_GPIO(GPIO_FN_AFE_RLYCNT, AFE_RLYCNT_MARK), PINMUX_GPIO(GPIO_FN_AFE_HC1, AFE_HC1_MARK), /* IIC */ PINMUX_GPIO(GPIO_FN_IIC_SCL, IIC_SCL_MARK), PINMUX_GPIO(GPIO_FN_IIC_SDA, IIC_SDA_MARK), /* DAC */ PINMUX_GPIO(GPIO_FN_DA1, DA1_MARK), PINMUX_GPIO(GPIO_FN_DA0, DA0_MARK), /* ADC */ PINMUX_GPIO(GPIO_FN_AN3, AN3_MARK), PINMUX_GPIO(GPIO_FN_AN2, AN2_MARK), PINMUX_GPIO(GPIO_FN_AN1, AN1_MARK), PINMUX_GPIO(GPIO_FN_AN0, AN0_MARK), PINMUX_GPIO(GPIO_FN_ADTRG, ADTRG_MARK), /* USB */ PINMUX_GPIO(GPIO_FN_USB1D_RCV, USB1D_RCV_MARK), PINMUX_GPIO(GPIO_FN_USB1D_TXSE0, USB1D_TXSE0_MARK), PINMUX_GPIO(GPIO_FN_USB1D_TXDPLS, USB1D_TXDPLS_MARK), PINMUX_GPIO(GPIO_FN_USB1D_DMNS, USB1D_DMNS_MARK), PINMUX_GPIO(GPIO_FN_USB1D_DPLS, USB1D_DPLS_MARK), PINMUX_GPIO(GPIO_FN_USB1D_SPEED, USB1D_SPEED_MARK), PINMUX_GPIO(GPIO_FN_USB1D_TXENL, USB1D_TXENL_MARK), PINMUX_GPIO(GPIO_FN_USB2_PWR_EN, USB2_PWR_EN_MARK), PINMUX_GPIO(GPIO_FN_USB1_PWR_EN_USBF_UPLUP, USB1_PWR_EN_USBF_UPLUP_MARK), PINMUX_GPIO(GPIO_FN_USB1D_SUSPEND, USB1D_SUSPEND_MARK), /* INTC */ PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK), PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK), PINMUX_GPIO(GPIO_FN_IRQ3_IRL3, IRQ3_IRL3_MARK), PINMUX_GPIO(GPIO_FN_IRQ2_IRL2, IRQ2_IRL2_MARK), PINMUX_GPIO(GPIO_FN_IRQ1_IRL1, IRQ1_IRL1_MARK), PINMUX_GPIO(GPIO_FN_IRQ0_IRL0, IRQ0_IRL0_MARK), /* PCC */ PINMUX_GPIO(GPIO_FN_PCC_REG, PCC_REG_MARK), PINMUX_GPIO(GPIO_FN_PCC_DRV, PCC_DRV_MARK), PINMUX_GPIO(GPIO_FN_PCC_BVD2, PCC_BVD2_MARK), PINMUX_GPIO(GPIO_FN_PCC_BVD1, PCC_BVD1_MARK), PINMUX_GPIO(GPIO_FN_PCC_CD2, PCC_CD2_MARK), PINMUX_GPIO(GPIO_FN_PCC_CD1, PCC_CD1_MARK), PINMUX_GPIO(GPIO_FN_PCC_RESET, PCC_RESET_MARK), PINMUX_GPIO(GPIO_FN_PCC_RDY, PCC_RDY_MARK), PINMUX_GPIO(GPIO_FN_PCC_VS2, PCC_VS2_MARK), PINMUX_GPIO(GPIO_FN_PCC_VS1, PCC_VS1_MARK), /* HUDI */ PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK), PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK), PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK), PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK), PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK), PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK), PINMUX_GPIO(GPIO_FN_ASEBRKAK, ASEBRKAK_MARK), PINMUX_GPIO(GPIO_FN_TRST, TRST_MARK), PINMUX_GPIO(GPIO_FN_TMS, TMS_MARK), PINMUX_GPIO(GPIO_FN_TDO, TDO_MARK), PINMUX_GPIO(GPIO_FN_TDI, TDI_MARK), PINMUX_GPIO(GPIO_FN_TCK, TCK_MARK), /* DMAC */ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK), PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK), PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK), PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK), PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK), PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK), /* SIOF0 */ PINMUX_GPIO(GPIO_FN_SIOF0_SYNC, 
SIOF0_SYNC_MARK), PINMUX_GPIO(GPIO_FN_SIOF0_MCLK, SIOF0_MCLK_MARK), PINMUX_GPIO(GPIO_FN_SIOF0_TXD, SIOF0_TXD_MARK), PINMUX_GPIO(GPIO_FN_SIOF0_RXD, SIOF0_RXD_MARK), PINMUX_GPIO(GPIO_FN_SIOF0_SCK, SIOF0_SCK_MARK), /* SIOF1 */ PINMUX_GPIO(GPIO_FN_SIOF1_SYNC, SIOF1_SYNC_MARK), PINMUX_GPIO(GPIO_FN_SIOF1_MCLK, SIOF1_MCLK_MARK), PINMUX_GPIO(GPIO_FN_SIOF1_TXD, SIOF1_TXD_MARK), PINMUX_GPIO(GPIO_FN_SIOF1_RXD, SIOF1_RXD_MARK), PINMUX_GPIO(GPIO_FN_SIOF1_SCK, SIOF1_SCK_MARK), /* SCIF0 */ PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK), /* SCIF1 */ PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF1_RTS, SCIF1_RTS_MARK), PINMUX_GPIO(GPIO_FN_SCIF1_CTS, SCIF1_CTS_MARK), PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK), /* TPU */ PINMUX_GPIO(GPIO_FN_TPU_TO1, TPU_TO1_MARK), PINMUX_GPIO(GPIO_FN_TPU_TO0, TPU_TO0_MARK), PINMUX_GPIO(GPIO_FN_TPU_TI3B, TPU_TI3B_MARK), PINMUX_GPIO(GPIO_FN_TPU_TI3A, TPU_TI3A_MARK), PINMUX_GPIO(GPIO_FN_TPU_TI2B, TPU_TI2B_MARK), PINMUX_GPIO(GPIO_FN_TPU_TI2A, TPU_TI2A_MARK), PINMUX_GPIO(GPIO_FN_TPU_TO3, TPU_TO3_MARK), PINMUX_GPIO(GPIO_FN_TPU_TO2, TPU_TO2_MARK), /* SIM */ PINMUX_GPIO(GPIO_FN_SIM_D, SIM_D_MARK), PINMUX_GPIO(GPIO_FN_SIM_CLK, SIM_CLK_MARK), PINMUX_GPIO(GPIO_FN_SIM_RST, SIM_RST_MARK), /* MMC */ PINMUX_GPIO(GPIO_FN_MMC_DAT, MMC_DAT_MARK), PINMUX_GPIO(GPIO_FN_MMC_CMD, MMC_CMD_MARK), PINMUX_GPIO(GPIO_FN_MMC_CLK, MMC_CLK_MARK), PINMUX_GPIO(GPIO_FN_MMC_VDDON, MMC_VDDON_MARK), PINMUX_GPIO(GPIO_FN_MMC_ODMOD, MMC_ODMOD_MARK), /* SYSC */ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK), PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK), }; static struct pinmux_cfg_reg pinmux_config_regs[] = { { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) { PTA7_FN, PTA7_OUT, PTA7_IN_PU, PTA7_IN, PTA6_FN, PTA6_OUT, PTA6_IN_PU, PTA6_IN, PTA5_FN, PTA5_OUT, PTA5_IN_PU, PTA5_IN, PTA4_FN, PTA4_OUT, PTA4_IN_PU, PTA4_IN, PTA3_FN, PTA3_OUT, PTA3_IN_PU, PTA3_IN, PTA2_FN, PTA2_OUT, PTA2_IN_PU, PTA2_IN, PTA1_FN, PTA1_OUT, PTA1_IN_PU, PTA1_IN, PTA0_FN, PTA0_OUT, PTA0_IN_PU, PTA0_IN } }, { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) { PTB7_FN, PTB7_OUT, PTB7_IN_PU, PTB7_IN, PTB6_FN, PTB6_OUT, PTB6_IN_PU, PTB6_IN, PTB5_FN, PTB5_OUT, PTB5_IN_PU, PTB5_IN, PTB4_FN, PTB4_OUT, PTB4_IN_PU, PTB4_IN, PTB3_FN, PTB3_OUT, PTB3_IN_PU, PTB3_IN, PTB2_FN, PTB2_OUT, PTB2_IN_PU, PTB2_IN, PTB1_FN, PTB1_OUT, PTB1_IN_PU, PTB1_IN, PTB0_FN, PTB0_OUT, PTB0_IN_PU, PTB0_IN } }, { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) { PTC7_FN, PTC7_OUT, PTC7_IN_PU, PTC7_IN, PTC6_FN, PTC6_OUT, PTC6_IN_PU, PTC6_IN, PTC5_FN, PTC5_OUT, PTC5_IN_PU, PTC5_IN, PTC4_FN, PTC4_OUT, PTC4_IN_PU, PTC4_IN, PTC3_FN, PTC3_OUT, PTC3_IN_PU, PTC3_IN, PTC2_FN, PTC2_OUT, PTC2_IN_PU, PTC2_IN, PTC1_FN, PTC1_OUT, PTC1_IN_PU, PTC1_IN, PTC0_FN, PTC0_OUT, PTC0_IN_PU, PTC0_IN } }, { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) { PTD7_FN, PTD7_OUT, PTD7_IN_PU, PTD7_IN, PTD6_FN, PTD6_OUT, PTD6_IN_PU, PTD6_IN, PTD5_FN, PTD5_OUT, PTD5_IN_PU, PTD5_IN, PTD4_FN, PTD4_OUT, PTD4_IN_PU, PTD4_IN, PTD3_FN, PTD3_OUT, PTD3_IN_PU, PTD3_IN, PTD2_FN, PTD2_OUT, PTD2_IN_PU, PTD2_IN, PTD1_FN, PTD1_OUT, PTD1_IN_PU, PTD1_IN, PTD0_FN, PTD0_OUT, PTD0_IN_PU, PTD0_IN } }, { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) { 0, 0, 0, 0, PTE6_FN, 0, 0, PTE6_IN, PTE5_FN, 0, 0, PTE5_IN, PTE4_FN, PTE4_OUT, PTE4_IN_PU, PTE4_IN, PTE3_FN, PTE3_OUT, PTE3_IN_PU, PTE3_IN, PTE2_FN, 
PTE2_OUT, PTE2_IN_PU, PTE2_IN, PTE1_FN, PTE1_OUT, PTE1_IN_PU, PTE1_IN, PTE0_FN, PTE0_OUT, PTE0_IN_PU, PTE0_IN } }, { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) { 0, 0, 0, 0, PTF6_FN, 0, 0, PTF6_IN, PTF5_FN, 0, 0, PTF5_IN, PTF4_FN, 0, 0, PTF4_IN, PTF3_FN, 0, 0, PTF3_IN, PTF2_FN, 0, 0, PTF2_IN, PTF1_FN, 0, 0, PTF1_IN, PTF0_FN, 0, 0, PTF0_IN } }, { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) { 0, 0, 0, 0, PTG6_FN, PTG6_OUT, PTG6_IN_PU, PTG6_IN, PTG5_FN, PTG5_OUT, PTG5_IN_PU, PTG5_IN, PTG4_FN, PTG4_OUT, PTG4_IN_PU, PTG4_IN, PTG3_FN, PTG3_OUT, PTG3_IN_PU, PTG3_IN, PTG2_FN, PTG2_OUT, PTG2_IN_PU, PTG2_IN, PTG1_FN, PTG1_OUT, PTG1_IN_PU, PTG1_IN, PTG0_FN, PTG0_OUT, PTG0_IN_PU, PTG0_IN } }, { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) { 0, 0, 0, 0, PTH6_FN, PTH6_OUT, PTH6_IN_PU, PTH6_IN, PTH5_FN, PTH5_OUT, PTH5_IN_PU, PTH5_IN, PTH4_FN, PTH4_OUT, PTH4_IN_PU, PTH4_IN, PTH3_FN, PTH3_OUT, PTH3_IN_PU, PTH3_IN, PTH2_FN, PTH2_OUT, PTH2_IN_PU, PTH2_IN, PTH1_FN, PTH1_OUT, PTH1_IN_PU, PTH1_IN, PTH0_FN, PTH0_OUT, PTH0_IN_PU, PTH0_IN } }, { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) { 0, 0, 0, 0, PTJ6_FN, PTJ6_OUT, PTJ6_IN_PU, PTJ6_IN, PTJ5_FN, PTJ5_OUT, PTJ5_IN_PU, PTJ5_IN, PTJ4_FN, PTJ4_OUT, PTJ4_IN_PU, PTJ4_IN, PTJ3_FN, PTJ3_OUT, PTJ3_IN_PU, PTJ3_IN, PTJ2_FN, PTJ2_OUT, PTJ2_IN_PU, PTJ2_IN, PTJ1_FN, PTJ1_OUT, PTJ1_IN_PU, PTJ1_IN, PTJ0_FN, PTJ0_OUT, PTJ0_IN_PU, PTJ0_IN } }, { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PTK3_FN, PTK3_OUT, PTK3_IN_PU, PTK3_IN, PTK2_FN, PTK2_OUT, PTK2_IN_PU, PTK2_IN, PTK1_FN, PTK1_OUT, PTK1_IN_PU, PTK1_IN, PTK0_FN, PTK0_OUT, PTK0_IN_PU, PTK0_IN } }, { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) { PTL7_FN, PTL7_OUT, PTL7_IN_PU, PTL7_IN, PTL6_FN, PTL6_OUT, PTL6_IN_PU, PTL6_IN, PTL5_FN, PTL5_OUT, PTL5_IN_PU, PTL5_IN, PTL4_FN, PTL4_OUT, PTL4_IN_PU, PTL4_IN, PTL3_FN, PTL3_OUT, PTL3_IN_PU, PTL3_IN, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) { PTM7_FN, PTM7_OUT, PTM7_IN_PU, PTM7_IN, PTM6_FN, PTM6_OUT, PTM6_IN_PU, PTM6_IN, PTM5_FN, PTM5_OUT, PTM5_IN_PU, PTM5_IN, PTM4_FN, PTM4_OUT, PTM4_IN_PU, PTM4_IN, PTM3_FN, PTM3_OUT, PTM3_IN_PU, PTM3_IN, PTM2_FN, PTM2_OUT, PTM2_IN_PU, PTM2_IN, PTM1_FN, PTM1_OUT, PTM1_IN_PU, PTM1_IN, PTM0_FN, PTM0_OUT, PTM0_IN_PU, PTM0_IN } }, { PINMUX_CFG_REG("PPCR", 0xa4050118, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PTP4_FN, PTP4_OUT, PTP4_IN_PU, PTP4_IN, PTP3_FN, PTP3_OUT, PTP3_IN_PU, PTP3_IN, PTP2_FN, PTP2_OUT, PTP2_IN_PU, PTP2_IN, PTP1_FN, PTP1_OUT, PTP1_IN_PU, PTP1_IN, PTP0_FN, PTP0_OUT, PTP0_IN_PU, PTP0_IN } }, { PINMUX_CFG_REG("PRCR", 0xa405011a, 16, 2) { PTR7_FN, PTR7_OUT, PTR7_IN_PU, PTR7_IN, PTR6_FN, PTR6_OUT, PTR6_IN_PU, PTR6_IN, PTR5_FN, PTR5_OUT, PTR5_IN_PU, PTR5_IN, PTR4_FN, PTR4_OUT, PTR4_IN_PU, PTR4_IN, PTR3_FN, PTR3_OUT, PTR3_IN_PU, PTR3_IN, PTR2_FN, PTR2_OUT, PTR2_IN_PU, PTR2_IN, PTR1_FN, PTR1_OUT, PTR1_IN_PU, PTR1_IN, PTR0_FN, PTR0_OUT, PTR0_IN_PU, PTR0_IN } }, { PINMUX_CFG_REG("PSCR", 0xa405011c, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PTS4_FN, PTS4_OUT, PTS4_IN_PU, PTS4_IN, PTS3_FN, PTS3_OUT, PTS3_IN_PU, PTS3_IN, PTS2_FN, PTS2_OUT, PTS2_IN_PU, PTS2_IN, PTS1_FN, PTS1_OUT, PTS1_IN_PU, PTS1_IN, PTS0_FN, PTS0_OUT, PTS0_IN_PU, PTS0_IN } }, { PINMUX_CFG_REG("PTCR", 0xa405011e, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PTT4_FN, PTT4_OUT, PTT4_IN_PU, PTT4_IN, PTT3_FN, PTT3_OUT, PTT3_IN_PU, PTT3_IN, PTT2_FN, PTT2_OUT, PTT2_IN_PU, PTT2_IN, PTT1_FN, PTT1_OUT, PTT1_IN_PU, PTT1_IN, PTT0_FN, PTT0_OUT, PTT0_IN_PU, PTT0_IN } }, { PINMUX_CFG_REG("PUCR", 0xa4050120, 16, 
2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PTU4_FN, PTU4_OUT, PTU4_IN_PU, PTU4_IN, PTU3_FN, PTU3_OUT, PTU3_IN_PU, PTU3_IN, PTU2_FN, PTU2_OUT, PTU2_IN_PU, PTU2_IN, PTU1_FN, PTU1_OUT, PTU1_IN_PU, PTU1_IN, PTU0_FN, PTU0_OUT, PTU0_IN_PU, PTU0_IN } }, { PINMUX_CFG_REG("PVCR", 0xa4050122, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PTV4_FN, PTV4_OUT, PTV4_IN_PU, PTV4_IN, PTV3_FN, PTV3_OUT, PTV3_IN_PU, PTV3_IN, PTV2_FN, PTV2_OUT, PTV2_IN_PU, PTV2_IN, PTV1_FN, PTV1_OUT, PTV1_IN_PU, PTV1_IN, PTV0_FN, PTV0_OUT, PTV0_IN_PU, PTV0_IN } }, {} }; static struct pinmux_data_reg pinmux_data_regs[] = { { PINMUX_DATA_REG("PADR", 0xa4050140, 8) { PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA, PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA } }, { PINMUX_DATA_REG("PBDR", 0xa4050142, 8) { PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA, PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA } }, { PINMUX_DATA_REG("PCDR", 0xa4050144, 8) { PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA, PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA } }, { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) { PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA, PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA } }, { PINMUX_DATA_REG("PEDR", 0xa4050148, 8) { 0, PTE6_DATA, PTE5_DATA, PTE4_DATA, PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA } }, { PINMUX_DATA_REG("PFDR", 0xa405014a, 8) { 0, PTF6_DATA, PTF5_DATA, PTF4_DATA, PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA } }, { PINMUX_DATA_REG("PGDR", 0xa405014c, 8) { 0, PTG6_DATA, PTG5_DATA, PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA } }, { PINMUX_DATA_REG("PHDR", 0xa405014e, 8) { 0, PTH6_DATA, PTH5_DATA, PTH4_DATA, PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA } }, { PINMUX_DATA_REG("PJDR", 0xa4050150, 8) { 0, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA, PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA } }, { PINMUX_DATA_REG("PKDR", 0xa4050152, 8) { 0, 0, 0, 0, PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA } }, { PINMUX_DATA_REG("PLDR", 0xa4050154, 8) { PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA, PTL3_DATA, 0, 0, 0 } }, { PINMUX_DATA_REG("PMDR", 0xa4050156, 8) { PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA, PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA } }, { PINMUX_DATA_REG("PPDR", 0xa4050158, 8) { 0, 0, 0, PTP4_DATA, PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA } }, { PINMUX_DATA_REG("PRDR", 0xa405015a, 8) { PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA, PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA } }, { PINMUX_DATA_REG("PSDR", 0xa405015c, 8) { 0, 0, 0, PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA } }, { PINMUX_DATA_REG("PTDR", 0xa405015e, 8) { 0, 0, 0, PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA } }, { PINMUX_DATA_REG("PUDR", 0xa4050160, 8) { 0, 0, 0, PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA } }, { PINMUX_DATA_REG("PVDR", 0xa4050162, 8) { 0, 0, 0, PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA } }, { }, }; static struct pinmux_info sh7720_pinmux_info = { .name = "sh7720_pfc", .reserved_id = PINMUX_RESERVED, .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END }, .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END }, .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END }, .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END }, .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END }, .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, .first_gpio = GPIO_PTA7, .last_gpio = GPIO_FN_STATUS1, .gpios = pinmux_gpios, .cfg_regs = pinmux_config_regs, .data_regs = pinmux_data_regs, .gpio_data = pinmux_data, .gpio_data_size = ARRAY_SIZE(pinmux_data), }; static int __init plat_pinmux_setup(void) { return register_pinmux(&sh7720_pinmux_info); } 
arch_initcall(plat_pinmux_setup);
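Once register_pinmux() has run, board code drives the tables above through the generic GPIO API: requesting a GPIO_FN_* number selects the peripheral function mux for that pin, while the plain GPIO_PT* numbers give ordinary port I/O. A minimal, hypothetical board-setup sketch (the pins are chosen for illustration only; per the PTT FN table above, SCIF0_TXD sits on PTT2):

#include <linux/gpio.h>

static int __init board_pins_init(void)
{
	int err;

	/* hand PTT2 over to its SCIF0_TXD peripheral function */
	err = gpio_request(GPIO_FN_SCIF0_TXD, NULL);
	if (err)
		return err;

	/* use PTA0 as a plain GPIO output, initially driven low */
	err = gpio_request(GPIO_PTA0, NULL);
	if (err)
		return err;

	return gpio_direction_output(GPIO_PTA0, 0);
}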
gpl-2.0
scotthartbti/android_kernel_motorola_msm8992
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_qmath.c
10002
8107
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "phy_qmath.h" /* * Description: This function performs 16 bit unsigned multiplication. * To fit the output into 16 bits the 32 bit multiplication result is right * shifted by 16 bits. */ u16 qm_mulu16(u16 op1, u16 op2) { return (u16) (((u32) op1 * (u32) op2) >> 16); } /* * Description: This function performs 16 bit multiplication and returns the * result in 16 bits. To fit the multiplication result into 16 bits the * multiplication result is right shifted by 15 bits. Right shifting 15 bits * instead of 16 bits is done to remove the extra sign bit formed by the * multiplication. When both 16 bit inputs are 0x8000 the output is saturated * to 0x7fffffff. */ s16 qm_muls16(s16 op1, s16 op2) { s32 result; if (op1 == (s16) 0x8000 && op2 == (s16) 0x8000) result = 0x7fffffff; else result = ((s32) (op1) * (s32) (op2)); return (s16) (result >> 15); } /* * Description: This function adds two 32 bit numbers and returns the 32 bit * result. If the result overflows 32 bits, the output is saturated to * 32 bits. */ s32 qm_add32(s32 op1, s32 op2) { s32 result; result = op1 + op2; if (op1 < 0 && op2 < 0 && result > 0) result = 0x80000000; else if (op1 > 0 && op2 > 0 && result < 0) result = 0x7fffffff; return result; } /* * Description: This function adds two 16 bit numbers and returns the 16 bit * result. If the result overflows 16 bits, the output is saturated to * 16 bits. */ s16 qm_add16(s16 op1, s16 op2) { s16 result; s32 temp = (s32) op1 + (s32) op2; if (temp > (s32) 0x7fff) result = (s16) 0x7fff; else if (temp < (s32) 0xffff8000) result = (s16) 0xffff8000; else result = (s16) temp; return result; } /* * Description: This function performs 16 bit subtraction and returns the * 16 bit result. If the result overflows 16 bits, the output is saturated to * 16 bits. */ s16 qm_sub16(s16 op1, s16 op2) { s16 result; s32 temp = (s32) op1 - (s32) op2; if (temp > (s32) 0x7fff) result = (s16) 0x7fff; else if (temp < (s32) 0xffff8000) result = (s16) 0xffff8000; else result = (s16) temp; return result; } /* * Description: This function performs a 32 bit saturated left shift when the * specified shift is +ve, and a 32 bit right shift when the specified shift * is -ve. It returns the result of the shift operation. */ s32 qm_shl32(s32 op, int shift) { int i; s32 result; result = op; if (shift > 31) shift = 31; else if (shift < -31) shift = -31; if (shift >= 0) { for (i = 0; i < shift; i++) result = qm_add32(result, result); } else { result = result >> (-shift); } return result; } /* * Description: This function performs a 16 bit saturated left shift when the * specified shift is +ve, and a 16 bit right shift when the specified shift * is -ve. It returns the result of the shift operation. 
*/ s16 qm_shl16(s16 op, int shift) { int i; s16 result; result = op; if (shift > 15) shift = 15; else if (shift < -15) shift = -15; if (shift > 0) { for (i = 0; i < shift; i++) result = qm_add16(result, result); } else { result = result >> (-shift); } return result; } /* * Description: This function performs a 16 bit right shift when shift is +ve * and a 16 bit saturated left shift when shift is -ve. It returns the result * of the shift operation. */ s16 qm_shr16(s16 op, int shift) { return qm_shl16(op, -shift); } /* * Description: This function returns the number of redundant sign bits in a * 32 bit number. Example: qm_norm32(0x00000080) = 23 */ s16 qm_norm32(s32 op) { u16 u16extraSignBits; if (op == 0) { return 31; } else { u16extraSignBits = 0; while ((op >> 31) == (op >> 30)) { u16extraSignBits++; op = op << 1; } } return u16extraSignBits; } /* This table is log2(1+(i/32)) where i=[0:1:31], in q.15 format */ static const s16 log_table[] = { 0, 1455, 2866, 4236, 5568, 6863, 8124, 9352, 10549, 11716, 12855, 13968, 15055, 16117, 17156, 18173, 19168, 20143, 21098, 22034, 22952, 23852, 24736, 25604, 26455, 27292, 28114, 28922, 29717, 30498, 31267, 32024 }; #define LOG_TABLE_SIZE 32 /* log_table size */ #define LOG2_LOG_TABLE_SIZE 5 /* log2(log_table size) */ #define Q_LOG_TABLE 15 /* qformat of log_table */ #define LOG10_2 19728 /* log10(2) in q.16 */ /* * Description: * This routine takes the input number N and its q format qN and computes * log10(N). It first normalizes the input N; N is then in mag*(2^x) format, * where mag is a number in the range 2^30..(2^31 - 1). * Then log2(mag * 2^x) = log2(mag) + x is computed. From that * log10(mag * 2^x) = log2(mag * 2^x) * log10(2) is computed. * This routine looks up the log2 value in the table considering * LOG2_LOG_TABLE_SIZE+1 MSBs. As the MSB is always 1, only the next * LOG2_LOG_TABLE_SIZE MSBs are used for the table lookup. The next 16 MSBs * are used for interpolation. * Inputs: * N - number whose log10 is to be found. * qN - q format of N * log10N - address where log10(N) will be written. * qLog10N - address where the log10N qformat will be written. * Note/Problem: * For accurate results the input should be in normalized or near normalized * form. */ void qm_log10(s32 N, s16 qN, s16 *log10N, s16 *qLog10N) { s16 s16norm, s16tableIndex, s16errorApproximation; u16 u16offset; s32 s32log; /* normalize N. */ s16norm = qm_norm32(N); N = N << s16norm; /* The qformat of N after normalization. * -30 is added to treat the number as being between 1.0 and 2.0, * i.e. after adding the -30 to the qformat the decimal point will be * just right of the MSB. (i.e. after the sign bit and 1st MSB), i.e. * at the right side of the 30th bit. */ qN = qN + s16norm - 30; /* take the table index as the LOG2_LOG_TABLE_SIZE bits right of the * MSB */ s16tableIndex = (s16) (N >> (32 - (2 + LOG2_LOG_TABLE_SIZE))); /* remove the MSB. the MSB is always 1 after normalization. */ s16tableIndex = s16tableIndex & (s16) ((1 << LOG2_LOG_TABLE_SIZE) - 1); /* remove the (1+LOG2_LOG_TABLE_SIZE) MSBs in N. */ N = N & ((1 << (32 - (2 + LOG2_LOG_TABLE_SIZE))) - 1); /* take the offset as the 16 MSBs after the table index. */ u16offset = (u16) (N >> (32 - (2 + LOG2_LOG_TABLE_SIZE + 16))); /* look up the log value in the table. */ s32log = log_table[s16tableIndex]; /* q.15 format */ /* interpolate using the offset. q.15 format. 
*/ s16errorApproximation = (s16) qm_mulu16(u16offset, (u16) (log_table[s16tableIndex + 1] - log_table[s16tableIndex])); /* q.15 format */ s32log = qm_add16((s16) s32log, s16errorApproximation); /* adjust for the qformat of the N as * log2(mag * 2^x) = log2(mag) + x */ s32log = qm_add32(s32log, ((s32) -qN) << 15); /* q.15 format */ /* normalize the result. */ s16norm = qm_norm32(s32log); /* bring all the important bits into lower 16 bits */ /* q.15+s16norm-16 format */ s32log = qm_shl32(s32log, s16norm - 16); /* compute the log10(N) by multiplying log2(N) with log10(2). * as log10(mag * 2^x) = log2(mag * 2^x) * log10(2) * log10N in q.15+s16norm-16+1 (LOG10_2 is in q.16) */ *log10N = qm_muls16((s16) s32log, (s16) LOG10_2); /* write the q format of the result. */ *qLog10N = 15 + s16norm - 16 + 1; return; }
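As a sanity check on qm_log10() above, consider a hypothetical caller computing log10(1.5). 1.5 in q.30 is 0x60000000, which is already normalized, so the lookup lands exactly on log_table[16] = 19168 with a zero interpolation offset; tracing the remaining arithmetic gives log10N = 11540 with qLog10N = 16:

	s16 log10N, qLog10N;

	/* 1.5 in q.30; the result is log10N / 2^qLog10N */
	qm_log10(0x60000000, 30, &log10N, &qLog10N);
	/* log10N == 11540, qLog10N == 16: 11540 / 65536 ~= 0.1761,
	 * matching log10(1.5) ~= 0.17609 */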
gpl-2.0
xcstacy/flo-kernel
drivers/input/mouse/inport.c
14610
5446
/* * Copyright (c) 1999-2001 Vojtech Pavlik * * Based on the work of: * Teemu Rantanen Derrick Cole * Peter Cervasio Christoph Niemann * Philip Blundell Russell King * Bob Harris */ /* * Inport (ATI XL and Microsoft) busmouse driver for Linux */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/input.h> #include <asm/io.h> #include <asm/irq.h> MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION("Inport (ATI XL and Microsoft) busmouse driver"); MODULE_LICENSE("GPL"); #define INPORT_BASE 0x23c #define INPORT_EXTENT 4 #define INPORT_CONTROL_PORT INPORT_BASE + 0 #define INPORT_DATA_PORT INPORT_BASE + 1 #define INPORT_SIGNATURE_PORT INPORT_BASE + 2 #define INPORT_REG_BTNS 0x00 #define INPORT_REG_X 0x01 #define INPORT_REG_Y 0x02 #define INPORT_REG_MODE 0x07 #define INPORT_RESET 0x80 #ifdef CONFIG_MOUSE_ATIXL #define INPORT_NAME "ATI XL Mouse" #define INPORT_VENDOR 0x0002 #define INPORT_SPEED_30HZ 0x01 #define INPORT_SPEED_50HZ 0x02 #define INPORT_SPEED_100HZ 0x03 #define INPORT_SPEED_200HZ 0x04 #define INPORT_MODE_BASE INPORT_SPEED_100HZ #define INPORT_MODE_IRQ 0x08 #else #define INPORT_NAME "Microsoft InPort Mouse" #define INPORT_VENDOR 0x0001 #define INPORT_MODE_BASE 0x10 #define INPORT_MODE_IRQ 0x01 #endif #define INPORT_MODE_HOLD 0x20 #define INPORT_IRQ 5 static int inport_irq = INPORT_IRQ; module_param_named(irq, inport_irq, uint, 0); MODULE_PARM_DESC(irq, "IRQ number (5=default)"); static struct input_dev *inport_dev; static irqreturn_t inport_interrupt(int irq, void *dev_id) { unsigned char buttons; outb(INPORT_REG_MODE, INPORT_CONTROL_PORT); outb(INPORT_MODE_HOLD | INPORT_MODE_IRQ | INPORT_MODE_BASE, INPORT_DATA_PORT); outb(INPORT_REG_X, INPORT_CONTROL_PORT); input_report_rel(inport_dev, REL_X, inb(INPORT_DATA_PORT)); outb(INPORT_REG_Y, INPORT_CONTROL_PORT); input_report_rel(inport_dev, REL_Y, inb(INPORT_DATA_PORT)); outb(INPORT_REG_BTNS, INPORT_CONTROL_PORT); buttons = inb(INPORT_DATA_PORT); input_report_key(inport_dev, BTN_MIDDLE, buttons & 1); input_report_key(inport_dev, BTN_LEFT, buttons & 2); input_report_key(inport_dev, BTN_RIGHT, buttons & 4); outb(INPORT_REG_MODE, INPORT_CONTROL_PORT); outb(INPORT_MODE_IRQ | INPORT_MODE_BASE, INPORT_DATA_PORT); input_sync(inport_dev); return IRQ_HANDLED; } static int inport_open(struct input_dev *dev) { if (request_irq(inport_irq, inport_interrupt, 0, "inport", NULL)) return -EBUSY; outb(INPORT_REG_MODE, INPORT_CONTROL_PORT); outb(INPORT_MODE_IRQ | INPORT_MODE_BASE, INPORT_DATA_PORT); return 0; } static void inport_close(struct input_dev *dev) { outb(INPORT_REG_MODE, 
INPORT_CONTROL_PORT); outb(INPORT_MODE_BASE, INPORT_DATA_PORT); free_irq(inport_irq, NULL); } static int __init inport_init(void) { unsigned char a, b, c; int err; if (!request_region(INPORT_BASE, INPORT_EXTENT, "inport")) { printk(KERN_ERR "inport.c: Can't allocate ports at %#x\n", INPORT_BASE); return -EBUSY; } a = inb(INPORT_SIGNATURE_PORT); b = inb(INPORT_SIGNATURE_PORT); c = inb(INPORT_SIGNATURE_PORT); if (a == b || a != c) { printk(KERN_INFO "inport.c: Didn't find InPort mouse at %#x\n", INPORT_BASE); err = -ENODEV; goto err_release_region; } inport_dev = input_allocate_device(); if (!inport_dev) { printk(KERN_ERR "inport.c: Not enough memory for input device\n"); err = -ENOMEM; goto err_release_region; } inport_dev->name = INPORT_NAME; inport_dev->phys = "isa023c/input0"; inport_dev->id.bustype = BUS_ISA; inport_dev->id.vendor = INPORT_VENDOR; inport_dev->id.product = 0x0001; inport_dev->id.version = 0x0100; inport_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); inport_dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT); inport_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); inport_dev->open = inport_open; inport_dev->close = inport_close; outb(INPORT_RESET, INPORT_CONTROL_PORT); outb(INPORT_REG_MODE, INPORT_CONTROL_PORT); outb(INPORT_MODE_BASE, INPORT_DATA_PORT); err = input_register_device(inport_dev); if (err) goto err_free_dev; return 0; err_free_dev: input_free_device(inport_dev); err_release_region: release_region(INPORT_BASE, INPORT_EXTENT); return err; } static void __exit inport_exit(void) { input_unregister_device(inport_dev); release_region(INPORT_BASE, INPORT_EXTENT); } module_init(inport_init); module_exit(inport_exit);
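The detection in inport_init() reads INPORT_SIGNATURE_PORT three times and bails out when a == b || a != c. Inverting that test makes the intent clearer: the probe succeeds only when adjacent reads differ but reads two apart match, which is consistent with a signature register that alternates between two bytes. As a stand-alone predicate (a hypothetical helper, not part of the driver):

static bool inport_present(void)
{
	unsigned char a = inb(INPORT_SIGNATURE_PORT);
	unsigned char b = inb(INPORT_SIGNATURE_PORT);
	unsigned char c = inb(INPORT_SIGNATURE_PORT);

	/* adjacent reads differ, alternate reads match */
	return a != b && a == c;
}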
gpl-2.0
emceethemouth/kernel_msm
drivers/mtd/maps/map_funcs.c
14866
1078
/* * Out-of-line map I/O functions for simple maps when CONFIG_COMPLEX_MAPPINGS * is enabled. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mtd/map.h> #include <linux/mtd/xip.h> static map_word __xipram simple_map_read(struct map_info *map, unsigned long ofs) { return inline_map_read(map, ofs); } static void __xipram simple_map_write(struct map_info *map, const map_word datum, unsigned long ofs) { inline_map_write(map, datum, ofs); } static void __xipram simple_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) { inline_map_copy_from(map, to, from, len); } static void __xipram simple_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) { inline_map_copy_to(map, to, from, len); } void simple_map_init(struct map_info *map) { BUG_ON(!map_bankwidth_supported(map->bankwidth)); map->read = simple_map_read; map->write = simple_map_write; map->copy_from = simple_map_copy_from; map->copy_to = simple_map_copy_to; } EXPORT_SYMBOL(simple_map_init); MODULE_LICENSE("GPL");
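A typical consumer of simple_map_init() fills in the geometry and virtual mapping of a flash window and leaves the accessors to this helper. A hypothetical sketch (the name, base address and size are made up; on real hardware they come from the board or platform data):

#include <linux/io.h>
#include <linux/mtd/map.h>

static struct map_info board_flash_map = {
	.name      = "board-flash",	/* illustrative */
	.phys      = 0x08000000,	/* assumed window base */
	.size      = 0x00800000,	/* assumed 8 MiB window */
	.bankwidth = 2,			/* 16-bit bus */
};

static int __init board_flash_init(void)
{
	board_flash_map.virt = ioremap(board_flash_map.phys,
				       board_flash_map.size);
	if (!board_flash_map.virt)
		return -ENOMEM;
	simple_map_init(&board_flash_map);
	/* ...then probe, e.g. do_map_probe("cfi_probe", &board_flash_map) */
	return 0;
}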
gpl-2.0
ycaihua/fastsocket
kernel/tools/perf/builtin-inject.c
19
11521
/* * builtin-inject.c * * Builtin inject command: Examine the live mode (stdin) event stream * and repipe it to stdout while optionally injecting additional * events into it. */ #include "builtin.h" #include "perf.h" #include "util/color.h" #include "util/evlist.h" #include "util/evsel.h" #include "util/session.h" #include "util/tool.h" #include "util/debug.h" #include "util/build-id.h" #include "util/parse-options.h" #include <linux/list.h> struct perf_inject { struct perf_tool tool; bool build_ids; bool sched_stat; const char *input_name; int pipe_output, output; u64 bytes_written; struct list_head samples; }; struct event_entry { struct list_head node; u32 tid; union perf_event event[0]; }; static int perf_event__repipe_synth(struct perf_tool *tool, union perf_event *event) { struct perf_inject *inject = container_of(tool, struct perf_inject, tool); uint32_t size; void *buf = event; size = event->header.size; while (size) { int ret = write(inject->output, buf, size); if (ret < 0) return -errno; size -= ret; buf += ret; inject->bytes_written += ret; } return 0; } static int perf_event__repipe_op2_synth(struct perf_tool *tool, union perf_event *event, struct perf_session *session __maybe_unused) { return perf_event__repipe_synth(tool, event); } static int perf_event__repipe_event_type_synth(struct perf_tool *tool, union perf_event *event) { return perf_event__repipe_synth(tool, event); } static int perf_event__repipe_attr(struct perf_tool *tool, union perf_event *event, struct perf_evlist **pevlist) { int ret; ret = perf_event__process_attr(tool, event, pevlist); if (ret) return ret; return perf_event__repipe_synth(tool, event); } static int perf_event__repipe(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample __maybe_unused, struct machine *machine __maybe_unused) { return perf_event__repipe_synth(tool, event); } typedef int (*inject_handler)(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine); static int perf_event__repipe_sample(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) { if (evsel->handler.func) { inject_handler f = evsel->handler.func; return f(tool, event, sample, evsel, machine); } build_id__mark_dso_hit(tool, event, sample, evsel, machine); return perf_event__repipe_synth(tool, event); } static int perf_event__repipe_mmap(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) { int err; err = perf_event__process_mmap(tool, event, sample, machine); perf_event__repipe(tool, event, sample, machine); return err; } static int perf_event__repipe_fork(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) { int err; err = perf_event__process_fork(tool, event, sample, machine); perf_event__repipe(tool, event, sample, machine); return err; } static int perf_event__repipe_tracing_data(struct perf_tool *tool, union perf_event *event, struct perf_session *session) { int err; perf_event__repipe_synth(tool, event); err = perf_event__process_tracing_data(tool, event, session); return err; } static int dso__read_build_id(struct dso *self) { if (self->has_build_id) return 0; if (filename__read_build_id(self->long_name, self->build_id, sizeof(self->build_id)) > 0) { self->has_build_id = true; return 0; } return -1; } static int dso__inject_build_id(struct dso *self, struct perf_tool *tool, struct machine *machine) { u16 
misc = PERF_RECORD_MISC_USER; int err; if (dso__read_build_id(self) < 0) { pr_debug("no build_id found for %s\n", self->long_name); return -1; } if (self->kernel) misc = PERF_RECORD_MISC_KERNEL; err = perf_event__synthesize_build_id(tool, self, misc, perf_event__repipe, machine); if (err) { pr_err("Can't synthesize build_id event for %s\n", self->long_name); return -1; } return 0; } static int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel __maybe_unused, struct machine *machine) { struct addr_location al; struct thread *thread; u8 cpumode; cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; thread = machine__findnew_thread(machine, event->ip.pid); if (thread == NULL) { pr_err("problem processing %d event, skipping it.\n", event->header.type); goto repipe; } thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION, event->ip.ip, &al); if (al.map != NULL) { if (!al.map->dso->hit) { al.map->dso->hit = 1; if (map__load(al.map, NULL) >= 0) { dso__inject_build_id(al.map->dso, tool, machine); /* * If this fails, too bad, let the other side * account this as unresolved. */ } else { #ifdef LIBELF_SUPPORT pr_warning("no symbols found in %s, maybe " "install a debug package?\n", al.map->dso->long_name); #endif } } } repipe: perf_event__repipe(tool, event, sample, machine); return 0; } static int perf_inject__sched_process_exit(struct perf_tool *tool, union perf_event *event __maybe_unused, struct perf_sample *sample, struct perf_evsel *evsel __maybe_unused, struct machine *machine __maybe_unused) { struct perf_inject *inject = container_of(tool, struct perf_inject, tool); struct event_entry *ent; list_for_each_entry(ent, &inject->samples, node) { if (sample->tid == ent->tid) { list_del_init(&ent->node); free(ent); break; } } return 0; } static int perf_inject__sched_switch(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) { struct perf_inject *inject = container_of(tool, struct perf_inject, tool); struct event_entry *ent; perf_inject__sched_process_exit(tool, event, sample, evsel, machine); ent = malloc(event->header.size + sizeof(struct event_entry)); if (ent == NULL) { color_fprintf(stderr, PERF_COLOR_RED, "Not enough memory to process sched switch event!"); return -1; } ent->tid = sample->tid; memcpy(&ent->event, event, event->header.size); list_add(&ent->node, &inject->samples); return 0; } static int perf_inject__sched_stat(struct perf_tool *tool, union perf_event *event __maybe_unused, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) { struct event_entry *ent; union perf_event *event_sw; struct perf_sample sample_sw; struct perf_inject *inject = container_of(tool, struct perf_inject, tool); u32 pid = perf_evsel__intval(evsel, sample, "pid"); list_for_each_entry(ent, &inject->samples, node) { if (pid == ent->tid) goto found; } return 0; found: event_sw = &ent->event[0]; perf_evsel__parse_sample(evsel, event_sw, &sample_sw); sample_sw.period = sample->period; sample_sw.time = sample->time; perf_event__synthesize_sample(event_sw, evsel->attr.sample_type, &sample_sw, false); build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine); return perf_event__repipe(tool, event_sw, &sample_sw, machine); } extern volatile int session_done; static void sig_handler(int sig __maybe_unused) { session_done = 1; } static int perf_evsel__check_stype(struct perf_evsel *evsel, u64 sample_type, const char 
*sample_msg) { struct perf_event_attr *attr = &evsel->attr; const char *name = perf_evsel__name(evsel); if (!(attr->sample_type & sample_type)) { pr_err("Samples for %s event do not have %s attribute set.", name, sample_msg); return -EINVAL; } return 0; } static int __cmd_inject(struct perf_inject *inject) { struct perf_session *session; int ret = -EINVAL; signal(SIGINT, sig_handler); if (inject->build_ids || inject->sched_stat) { inject->tool.mmap = perf_event__repipe_mmap; inject->tool.fork = perf_event__repipe_fork; inject->tool.tracing_data = perf_event__repipe_tracing_data; } session = perf_session__new(inject->input_name, O_RDONLY, false, true, &inject->tool); if (session == NULL) return -ENOMEM; if (inject->build_ids) { inject->tool.sample = perf_event__inject_buildid; } else if (inject->sched_stat) { struct perf_evsel *evsel; inject->tool.ordered_samples = true; list_for_each_entry(evsel, &session->evlist->entries, node) { const char *name = perf_evsel__name(evsel); if (!strcmp(name, "sched:sched_switch")) { if (perf_evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID")) return -EINVAL; evsel->handler.func = perf_inject__sched_switch; } else if (!strcmp(name, "sched:sched_process_exit")) evsel->handler.func = perf_inject__sched_process_exit; else if (!strncmp(name, "sched:sched_stat_", 17)) evsel->handler.func = perf_inject__sched_stat; } } if (!inject->pipe_output) lseek(inject->output, session->header.data_offset, SEEK_SET); ret = perf_session__process_events(session, &inject->tool); if (!inject->pipe_output) { session->header.data_size = inject->bytes_written; perf_session__write_header(session, session->evlist, inject->output, true); } perf_session__delete(session); return ret; } int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused) { struct perf_inject inject = { .tool = { .sample = perf_event__repipe_sample, .mmap = perf_event__repipe, .comm = perf_event__repipe, .fork = perf_event__repipe, .exit = perf_event__repipe, .lost = perf_event__repipe, .read = perf_event__repipe_sample, .throttle = perf_event__repipe, .unthrottle = perf_event__repipe, .attr = perf_event__repipe_attr, .event_type = perf_event__repipe_event_type_synth, .tracing_data = perf_event__repipe_op2_synth, .build_id = perf_event__repipe_op2_synth, }, .input_name = "-", .samples = LIST_HEAD_INIT(inject.samples), }; const char *output_name = "-"; const struct option options[] = { OPT_BOOLEAN('b', "build-ids", &inject.build_ids, "Inject build-ids into the output stream"), OPT_STRING('i', "input", &inject.input_name, "file", "input file name"), OPT_STRING('o', "output", &output_name, "file", "output file name"), OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat, "Merge sched-stat and sched-switch for getting events " "where and how long tasks slept"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show build ids, etc)"), OPT_END() }; const char * const inject_usage[] = { "perf inject [<options>]", NULL }; argc = parse_options(argc, argv, options, inject_usage, 0); /* * Any (unrecognized) arguments left? */ if (argc) usage_with_options(inject_usage, options); if (!strcmp(output_name, "-")) { inject.pipe_output = 1; inject.output = STDOUT_FILENO; } else { inject.output = open(output_name, O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR); if (inject.output < 0) { perror("failed to create output file"); return -1; } } if (symbol__init() < 0) return -1; return __cmd_inject(&inject); }
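One idiom worth noting in builtin-inject.c is struct event_entry: its zero-length event[0] member lets perf_inject__sched_switch() make a single allocation that carries both the list node and a variable-sized copy of the raw event (malloc(event->header.size + sizeof(struct event_entry)) followed by memcpy()). The same pattern with a C99 flexible array member, as a self-contained sketch:

#include <stdlib.h>
#include <string.h>

struct blob {
	size_t len;
	unsigned char data[];	/* flexible array member */
};

/* one malloc() covers the header and the variable-sized payload */
static struct blob *blob_dup(const void *src, size_t len)
{
	struct blob *b = malloc(sizeof(*b) + len);

	if (!b)
		return NULL;
	b->len = len;
	memcpy(b->data, src, len);
	return b;
}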
gpl-2.0
johnzz/fastsocket
kernel/drivers/infiniband/hw/ehca/ehca_mrmw.c
19
76000
/* * IBM eServer eHCA Infiniband device driver for Linux on POWER * * MR/MW functions * * Authors: Dietmar Decker <ddecker@de.ibm.com> * Christoph Raisch <raisch@de.ibm.com> * Hoang-Nam Nguyen <hnguyen@de.ibm.com> * * Copyright (c) 2005 IBM Corporation * * All rights reserved. * * This source code is distributed under a dual license of GPL v2.0 and OpenIB * BSD. * * OpenIB BSD License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <rdma/ib_umem.h> #include "ehca_iverbs.h" #include "ehca_mrmw.h" #include "hcp_if.h" #include "hipz_hw.h" #define NUM_CHUNKS(length, chunk_size) \ (((length) + (chunk_size - 1)) / (chunk_size)) /* max number of rpages (per hcall register_rpages) */ #define MAX_RPAGES 512 /* DMEM toleration management */ #define EHCA_SECTSHIFT SECTION_SIZE_BITS #define EHCA_SECTSIZE (1UL << EHCA_SECTSHIFT) #define EHCA_HUGEPAGESHIFT 34 #define EHCA_HUGEPAGE_SIZE (1UL << EHCA_HUGEPAGESHIFT) #define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT) #define EHCA_INVAL_ADDR 0xFFFFFFFFFFFFFFFFULL #define EHCA_DIR_INDEX_SHIFT 13 /* 8k Entries in 64k block */ #define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2) #define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT) #define EHCA_TOP_MAP_SIZE (0x10000) /* currently fixed map size */ #define EHCA_DIR_MAP_SIZE (0x10000) #define EHCA_ENT_MAP_SIZE (0x10000) #define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1) static unsigned long ehca_mr_len; /* * Memory map data structures */ struct ehca_dir_bmap { u64 ent[EHCA_MAP_ENTRIES]; }; struct ehca_top_bmap { struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES]; }; struct ehca_bmap { struct ehca_top_bmap *top[EHCA_MAP_ENTRIES]; }; static struct ehca_bmap *ehca_bmap; static struct kmem_cache *mr_cache; static struct kmem_cache *mw_cache; enum ehca_mr_pgsize { EHCA_MR_PGSIZE4K = 0x1000L, EHCA_MR_PGSIZE64K = 0x10000L, EHCA_MR_PGSIZE1M = 0x100000L, EHCA_MR_PGSIZE16M = 0x1000000L }; #define EHCA_MR_PGSHIFT4K 12 #define EHCA_MR_PGSHIFT64K 16 #define EHCA_MR_PGSHIFT1M 20 #define EHCA_MR_PGSHIFT16M 24 static u64 ehca_map_vaddr(void *caddr); static u32 ehca_encode_hwpage_size(u32 pgsize) { int log = ilog2(pgsize); WARN_ON(log < 12 || log > 24 || log & 3); return (log - 12) / 4; } static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca) { return rounddown_pow_of_two(shca->hca_cap_mr_pgsize); } 
static struct ehca_mr *ehca_mr_new(void) { struct ehca_mr *me; me = kmem_cache_zalloc(mr_cache, GFP_KERNEL); if (me) spin_lock_init(&me->mrlock); else ehca_gen_err("alloc failed"); return me; } static void ehca_mr_delete(struct ehca_mr *me) { kmem_cache_free(mr_cache, me); } static struct ehca_mw *ehca_mw_new(void) { struct ehca_mw *me; me = kmem_cache_zalloc(mw_cache, GFP_KERNEL); if (me) spin_lock_init(&me->mwlock); else ehca_gen_err("alloc failed"); return me; } static void ehca_mw_delete(struct ehca_mw *me) { kmem_cache_free(mw_cache, me); } /*----------------------------------------------------------------------*/ struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags) { struct ib_mr *ib_mr; int ret; struct ehca_mr *e_maxmr; struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, ib_device); if (shca->maxmr) { e_maxmr = ehca_mr_new(); if (!e_maxmr) { ehca_err(&shca->ib_device, "out of memory"); ib_mr = ERR_PTR(-ENOMEM); goto get_dma_mr_exit0; } ret = ehca_reg_maxmr(shca, e_maxmr, (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)), mr_access_flags, e_pd, &e_maxmr->ib.ib_mr.lkey, &e_maxmr->ib.ib_mr.rkey); if (ret) { ehca_mr_delete(e_maxmr); ib_mr = ERR_PTR(ret); goto get_dma_mr_exit0; } ib_mr = &e_maxmr->ib.ib_mr; } else { ehca_err(&shca->ib_device, "no internal max-MR exist!"); ib_mr = ERR_PTR(-EINVAL); goto get_dma_mr_exit0; } get_dma_mr_exit0: if (IS_ERR(ib_mr)) ehca_err(&shca->ib_device, "h_ret=%li pd=%p mr_access_flags=%x", PTR_ERR(ib_mr), pd, mr_access_flags); return ib_mr; } /* end ehca_get_dma_mr() */ /*----------------------------------------------------------------------*/ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd, struct ib_phys_buf *phys_buf_array, int num_phys_buf, int mr_access_flags, u64 *iova_start) { struct ib_mr *ib_mr; int ret; struct ehca_mr *e_mr; struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, ib_device); struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); u64 size; if ((num_phys_buf <= 0) || !phys_buf_array) { ehca_err(pd->device, "bad input values: num_phys_buf=%x " "phys_buf_array=%p", num_phys_buf, phys_buf_array); ib_mr = ERR_PTR(-EINVAL); goto reg_phys_mr_exit0; } if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) && !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) || ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) && !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) { /* * Remote Write Access requires Local Write Access * Remote Atomic Access requires Local Write Access */ ehca_err(pd->device, "bad input values: mr_access_flags=%x", mr_access_flags); ib_mr = ERR_PTR(-EINVAL); goto reg_phys_mr_exit0; } /* check physical buffer list and calculate size */ ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf, iova_start, &size); if (ret) { ib_mr = ERR_PTR(ret); goto reg_phys_mr_exit0; } if ((size == 0) || (((u64)iova_start + size) < (u64)iova_start)) { ehca_err(pd->device, "bad input values: size=%llx iova_start=%p", size, iova_start); ib_mr = ERR_PTR(-EINVAL); goto reg_phys_mr_exit0; } e_mr = ehca_mr_new(); if (!e_mr) { ehca_err(pd->device, "out of memory"); ib_mr = ERR_PTR(-ENOMEM); goto reg_phys_mr_exit0; } /* register MR on HCA */ if (ehca_mr_is_maxmr(size, iova_start)) { e_mr->flags |= EHCA_MR_FLAG_MAXMR; ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags, e_pd, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey); if (ret) { ib_mr = ERR_PTR(ret); goto reg_phys_mr_exit1; } } else { struct ehca_mr_pginfo pginfo; u32 
num_kpages; u32 num_hwpages; u64 hw_pgsize; num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size, PAGE_SIZE); /* for kernel space we try most possible pgsize */ hw_pgsize = ehca_get_max_hwpage_size(shca); num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size, hw_pgsize); memset(&pginfo, 0, sizeof(pginfo)); pginfo.type = EHCA_MR_PGI_PHYS; pginfo.num_kpages = num_kpages; pginfo.hwpage_size = hw_pgsize; pginfo.num_hwpages = num_hwpages; pginfo.u.phy.num_phys_buf = num_phys_buf; pginfo.u.phy.phys_buf_array = phys_buf_array; pginfo.next_hwpage = ((u64)iova_start & ~PAGE_MASK) / hw_pgsize; ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags, e_pd, &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey, EHCA_REG_MR); if (ret) { ib_mr = ERR_PTR(ret); goto reg_phys_mr_exit1; } } /* successful registration of all pages */ return &e_mr->ib.ib_mr; reg_phys_mr_exit1: ehca_mr_delete(e_mr); reg_phys_mr_exit0: if (IS_ERR(ib_mr)) ehca_err(pd->device, "h_ret=%li pd=%p phys_buf_array=%p " "num_phys_buf=%x mr_access_flags=%x iova_start=%p", PTR_ERR(ib_mr), pd, phys_buf_array, num_phys_buf, mr_access_flags, iova_start); return ib_mr; } /* end ehca_reg_phys_mr() */ /*----------------------------------------------------------------------*/ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt, int mr_access_flags, struct ib_udata *udata) { struct ib_mr *ib_mr; struct ehca_mr *e_mr; struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, ib_device); struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); struct ehca_mr_pginfo pginfo; int ret, page_shift; u32 num_kpages; u32 num_hwpages; u64 hwpage_size; if (!pd) { ehca_gen_err("bad pd=%p", pd); return ERR_PTR(-EFAULT); } if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) && !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) || ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) && !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) { /* * Remote Write Access requires Local Write Access * Remote Atomic Access requires Local Write Access */ ehca_err(pd->device, "bad input values: mr_access_flags=%x", mr_access_flags); ib_mr = ERR_PTR(-EINVAL); goto reg_user_mr_exit0; } if (length == 0 || virt + length < virt) { ehca_err(pd->device, "bad input values: length=%llx " "virt_base=%llx", length, virt); ib_mr = ERR_PTR(-EINVAL); goto reg_user_mr_exit0; } e_mr = ehca_mr_new(); if (!e_mr) { ehca_err(pd->device, "out of memory"); ib_mr = ERR_PTR(-ENOMEM); goto reg_user_mr_exit0; } e_mr->umem = ib_umem_get(pd->uobject->context, start, length, mr_access_flags, 0); if (IS_ERR(e_mr->umem)) { ib_mr = (void *)e_mr->umem; goto reg_user_mr_exit1; } if (e_mr->umem->page_size != PAGE_SIZE) { ehca_err(pd->device, "page size not supported, " "e_mr->umem->page_size=%x", e_mr->umem->page_size); ib_mr = ERR_PTR(-EINVAL); goto reg_user_mr_exit2; } /* determine number of MR pages */ num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE); /* select proper hw_pgsize */ page_shift = PAGE_SHIFT; if (e_mr->umem->hugetlb) { /* determine page_shift, clamp between 4K and 16M */ page_shift = (fls64(length - 1) + 3) & ~3; page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K), EHCA_MR_PGSHIFT16M); } hwpage_size = 1UL << page_shift; /* now that we have the desired page size, shift until it's * supported, too. 4K is always supported, so this terminates. 
*/ while (!(hwpage_size & shca->hca_cap_mr_pgsize)) hwpage_size >>= 4; reg_user_mr_fallback: num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size); /* register MR on HCA */ memset(&pginfo, 0, sizeof(pginfo)); pginfo.type = EHCA_MR_PGI_USER; pginfo.hwpage_size = hwpage_size; pginfo.num_kpages = num_kpages; pginfo.num_hwpages = num_hwpages; pginfo.u.usr.region = e_mr->umem; pginfo.next_hwpage = e_mr->umem->offset / hwpage_size; pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk, (&e_mr->umem->chunk_list), list); ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags, e_pd, &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey, EHCA_REG_MR); if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) { ehca_warn(pd->device, "failed to register mr " "with hwpage_size=%llx", hwpage_size); ehca_info(pd->device, "try to register mr with " "kpage_size=%lx", PAGE_SIZE); /* * this means kpages are not contiguous for a hw page * try kernel page size as fallback solution */ hwpage_size = PAGE_SIZE; goto reg_user_mr_fallback; } if (ret) { ib_mr = ERR_PTR(ret); goto reg_user_mr_exit2; } /* successful registration of all pages */ return &e_mr->ib.ib_mr; reg_user_mr_exit2: ib_umem_release(e_mr->umem); reg_user_mr_exit1: ehca_mr_delete(e_mr); reg_user_mr_exit0: if (IS_ERR(ib_mr)) ehca_err(pd->device, "rc=%li pd=%p mr_access_flags=%x udata=%p", PTR_ERR(ib_mr), pd, mr_access_flags, udata); return ib_mr; } /* end ehca_reg_user_mr() */ /*----------------------------------------------------------------------*/ int ehca_rereg_phys_mr(struct ib_mr *mr, int mr_rereg_mask, struct ib_pd *pd, struct ib_phys_buf *phys_buf_array, int num_phys_buf, int mr_access_flags, u64 *iova_start) { int ret; struct ehca_shca *shca = container_of(mr->device, struct ehca_shca, ib_device); struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr); u64 new_size; u64 *new_start; u32 new_acl; struct ehca_pd *new_pd; u32 tmp_lkey, tmp_rkey; unsigned long sl_flags; u32 num_kpages = 0; u32 num_hwpages = 0; struct ehca_mr_pginfo pginfo; if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) { /* TODO not supported, because PHYP rereg hCall needs pages */ ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not " "supported yet, mr_rereg_mask=%x", mr_rereg_mask); ret = -EINVAL; goto rereg_phys_mr_exit0; } if (mr_rereg_mask & IB_MR_REREG_PD) { if (!pd) { ehca_err(mr->device, "rereg with bad pd, pd=%p " "mr_rereg_mask=%x", pd, mr_rereg_mask); ret = -EINVAL; goto rereg_phys_mr_exit0; } } if ((mr_rereg_mask & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) || (mr_rereg_mask == 0)) { ret = -EINVAL; goto rereg_phys_mr_exit0; } /* check other parameters */ if (e_mr == shca->maxmr) { /* should be impossible, however reject to be sure */ ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p " "shca->maxmr=%p mr->lkey=%x", mr, shca->maxmr, mr->lkey); ret = -EINVAL; goto rereg_phys_mr_exit0; } if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. 
addr/size */ if (e_mr->flags & EHCA_MR_FLAG_FMR) { ehca_err(mr->device, "not supported for FMR, mr=%p " "flags=%x", mr, e_mr->flags); ret = -EINVAL; goto rereg_phys_mr_exit0; } if (!phys_buf_array || num_phys_buf <= 0) { ehca_err(mr->device, "bad input values mr_rereg_mask=%x" " phys_buf_array=%p num_phys_buf=%x", mr_rereg_mask, phys_buf_array, num_phys_buf); ret = -EINVAL; goto rereg_phys_mr_exit0; } } if ((mr_rereg_mask & IB_MR_REREG_ACCESS) && /* change ACL */ (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) && !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) || ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) && !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) { /* * Remote Write Access requires Local Write Access * Remote Atomic Access requires Local Write Access */ ehca_err(mr->device, "bad input values: mr_rereg_mask=%x " "mr_access_flags=%x", mr_rereg_mask, mr_access_flags); ret = -EINVAL; goto rereg_phys_mr_exit0; } /* set requested values dependent on rereg request */ spin_lock_irqsave(&e_mr->mrlock, sl_flags); new_start = e_mr->start; new_size = e_mr->size; new_acl = e_mr->acl; new_pd = container_of(mr->pd, struct ehca_pd, ib_pd); if (mr_rereg_mask & IB_MR_REREG_TRANS) { u64 hw_pgsize = ehca_get_max_hwpage_size(shca); new_start = iova_start; /* change address */ /* check physical buffer list and calculate size */ ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf, iova_start, &new_size); if (ret) goto rereg_phys_mr_exit1; if ((new_size == 0) || (((u64)iova_start + new_size) < (u64)iova_start)) { ehca_err(mr->device, "bad input values: new_size=%llx " "iova_start=%p", new_size, iova_start); ret = -EINVAL; goto rereg_phys_mr_exit1; } num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) + new_size, PAGE_SIZE); num_hwpages = NUM_CHUNKS(((u64)new_start % hw_pgsize) + new_size, hw_pgsize); memset(&pginfo, 0, sizeof(pginfo)); pginfo.type = EHCA_MR_PGI_PHYS; pginfo.num_kpages = num_kpages; pginfo.hwpage_size = hw_pgsize; pginfo.num_hwpages = num_hwpages; pginfo.u.phy.num_phys_buf = num_phys_buf; pginfo.u.phy.phys_buf_array = phys_buf_array; pginfo.next_hwpage = ((u64)iova_start & ~PAGE_MASK) / hw_pgsize; } if (mr_rereg_mask & IB_MR_REREG_ACCESS) new_acl = mr_access_flags; if (mr_rereg_mask & IB_MR_REREG_PD) new_pd = container_of(pd, struct ehca_pd, ib_pd); ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl, new_pd, &pginfo, &tmp_lkey, &tmp_rkey); if (ret) goto rereg_phys_mr_exit1; /* successful reregistration */ if (mr_rereg_mask & IB_MR_REREG_PD) mr->pd = pd; mr->lkey = tmp_lkey; mr->rkey = tmp_rkey; rereg_phys_mr_exit1: spin_unlock_irqrestore(&e_mr->mrlock, sl_flags); rereg_phys_mr_exit0: if (ret) ehca_err(mr->device, "ret=%i mr=%p mr_rereg_mask=%x pd=%p " "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x " "iova_start=%p", ret, mr, mr_rereg_mask, pd, phys_buf_array, num_phys_buf, mr_access_flags, iova_start); return ret; } /* end ehca_rereg_phys_mr() */ /*----------------------------------------------------------------------*/ int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr) { int ret = 0; u64 h_ret; struct ehca_shca *shca = container_of(mr->device, struct ehca_shca, ib_device); struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr); unsigned long sl_flags; struct ehca_mr_hipzout_parms hipzout; if ((e_mr->flags & EHCA_MR_FLAG_FMR)) { ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p " "e_mr->flags=%x", mr, e_mr, e_mr->flags); ret = -EINVAL; goto query_mr_exit0; } memset(mr_attr, 0, sizeof(struct ib_mr_attr)); 
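/*
 * The per-MR lock is held across the query hCall below so that a
 * concurrent rereg/dereg cannot change the firmware handle mid-call.
 */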
spin_lock_irqsave(&e_mr->mrlock, sl_flags); h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout); if (h_ret != H_SUCCESS) { ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lli mr=%p " "hca_hndl=%llx mr_hndl=%llx lkey=%x", h_ret, mr, shca->ipz_hca_handle.handle, e_mr->ipz_mr_handle.handle, mr->lkey); ret = ehca2ib_return_code(h_ret); goto query_mr_exit1; } mr_attr->pd = mr->pd; mr_attr->device_virt_addr = hipzout.vaddr; mr_attr->size = hipzout.len; mr_attr->lkey = hipzout.lkey; mr_attr->rkey = hipzout.rkey; ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags); query_mr_exit1: spin_unlock_irqrestore(&e_mr->mrlock, sl_flags); query_mr_exit0: if (ret) ehca_err(mr->device, "ret=%i mr=%p mr_attr=%p", ret, mr, mr_attr); return ret; } /* end ehca_query_mr() */ /*----------------------------------------------------------------------*/ int ehca_dereg_mr(struct ib_mr *mr) { int ret = 0; u64 h_ret; struct ehca_shca *shca = container_of(mr->device, struct ehca_shca, ib_device); struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr); if ((e_mr->flags & EHCA_MR_FLAG_FMR)) { ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p " "e_mr->flags=%x", mr, e_mr, e_mr->flags); ret = -EINVAL; goto dereg_mr_exit0; } else if (e_mr == shca->maxmr) { /* should be impossible, however reject to be sure */ ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p " "shca->maxmr=%p mr->lkey=%x", mr, shca->maxmr, mr->lkey); ret = -EINVAL; goto dereg_mr_exit0; } /* TODO: BUSY: MR still has bound window(s) */ h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); if (h_ret != H_SUCCESS) { ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p " "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x", h_ret, shca, e_mr, shca->ipz_hca_handle.handle, e_mr->ipz_mr_handle.handle, mr->lkey); ret = ehca2ib_return_code(h_ret); goto dereg_mr_exit0; } if (e_mr->umem) ib_umem_release(e_mr->umem); /* successful deregistration */ ehca_mr_delete(e_mr); dereg_mr_exit0: if (ret) ehca_err(mr->device, "ret=%i mr=%p", ret, mr); return ret; } /* end ehca_dereg_mr() */ /*----------------------------------------------------------------------*/ struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type) { struct ib_mw *ib_mw; u64 h_ret; struct ehca_mw *e_mw; struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, ib_device); struct ehca_mw_hipzout_parms hipzout; if (type != IB_MW_TYPE_1) return ERR_PTR(-EINVAL); e_mw = ehca_mw_new(); if (!e_mw) { ib_mw = ERR_PTR(-ENOMEM); goto alloc_mw_exit0; } h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw, e_pd->fw_pd, &hipzout); if (h_ret != H_SUCCESS) { ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli " "shca=%p hca_hndl=%llx mw=%p", h_ret, shca, shca->ipz_hca_handle.handle, e_mw); ib_mw = ERR_PTR(ehca2ib_return_code(h_ret)); goto alloc_mw_exit1; } /* successful MW allocation */ e_mw->ipz_mw_handle = hipzout.handle; e_mw->ib_mw.rkey = hipzout.rkey; return &e_mw->ib_mw; alloc_mw_exit1: ehca_mw_delete(e_mw); alloc_mw_exit0: if (IS_ERR(ib_mw)) ehca_err(pd->device, "h_ret=%li pd=%p", PTR_ERR(ib_mw), pd); return ib_mw; } /* end ehca_alloc_mw() */ /*----------------------------------------------------------------------*/ int ehca_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind) { /* TODO: not supported up to now */ ehca_gen_err("bind MW currently not supported by HCAD"); return -EPERM; } /* end ehca_bind_mw() */ 
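/*
 * Window deallocation below only has to release the firmware resource:
 * since binding is rejected above, no per-window state beyond the
 * handle and rkey ever exists on the host side.
 */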
/*----------------------------------------------------------------------*/ int ehca_dealloc_mw(struct ib_mw *mw) { u64 h_ret; struct ehca_shca *shca = container_of(mw->device, struct ehca_shca, ib_device); struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw); h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw); if (h_ret != H_SUCCESS) { ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p " "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx", h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle, e_mw->ipz_mw_handle.handle); return ehca2ib_return_code(h_ret); } /* successful deallocation */ ehca_mw_delete(e_mw); return 0; } /* end ehca_dealloc_mw() */ /*----------------------------------------------------------------------*/ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd, int mr_access_flags, struct ib_fmr_attr *fmr_attr) { struct ib_fmr *ib_fmr; struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, ib_device); struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); struct ehca_mr *e_fmr; int ret; u32 tmp_lkey, tmp_rkey; struct ehca_mr_pginfo pginfo; u64 hw_pgsize; /* check other parameters */ if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) && !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) || ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) && !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) { /* * Remote Write Access requires Local Write Access * Remote Atomic Access requires Local Write Access */ ehca_err(pd->device, "bad input values: mr_access_flags=%x", mr_access_flags); ib_fmr = ERR_PTR(-EINVAL); goto alloc_fmr_exit0; } if (mr_access_flags & IB_ACCESS_MW_BIND) { ehca_err(pd->device, "bad input values: mr_access_flags=%x", mr_access_flags); ib_fmr = ERR_PTR(-EINVAL); goto alloc_fmr_exit0; } if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) { ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x " "fmr_attr->max_maps=%x fmr_attr->page_shift=%x", fmr_attr->max_pages, fmr_attr->max_maps, fmr_attr->page_shift); ib_fmr = ERR_PTR(-EINVAL); goto alloc_fmr_exit0; } hw_pgsize = 1 << fmr_attr->page_shift; if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) { ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x", fmr_attr->page_shift); ib_fmr = ERR_PTR(-EINVAL); goto alloc_fmr_exit0; } e_fmr = ehca_mr_new(); if (!e_fmr) { ib_fmr = ERR_PTR(-ENOMEM); goto alloc_fmr_exit0; } e_fmr->flags |= EHCA_MR_FLAG_FMR; /* register MR on HCA */ memset(&pginfo, 0, sizeof(pginfo)); pginfo.hwpage_size = hw_pgsize; /* * pginfo.num_hwpages==0, ie register_rpages() will not be called * but deferred to map_phys_fmr() */ ret = ehca_reg_mr(shca, e_fmr, NULL, fmr_attr->max_pages * (1 << fmr_attr->page_shift), mr_access_flags, e_pd, &pginfo, &tmp_lkey, &tmp_rkey, EHCA_REG_MR); if (ret) { ib_fmr = ERR_PTR(ret); goto alloc_fmr_exit1; } /* successful */ e_fmr->hwpage_size = hw_pgsize; e_fmr->fmr_page_size = 1 << fmr_attr->page_shift; e_fmr->fmr_max_pages = fmr_attr->max_pages; e_fmr->fmr_max_maps = fmr_attr->max_maps; e_fmr->fmr_map_cnt = 0; return &e_fmr->ib.ib_fmr; alloc_fmr_exit1: ehca_mr_delete(e_fmr); alloc_fmr_exit0: return ib_fmr; } /* end ehca_alloc_fmr() */ /*----------------------------------------------------------------------*/ int ehca_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len, u64 iova) { int ret; struct ehca_shca *shca = container_of(fmr->device, struct ehca_shca, ib_device); struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr); struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd); struct ehca_mr_pginfo pginfo; 
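/* tmp_lkey/tmp_rkey below receive the new keys from the rereg hCall */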
u32 tmp_lkey, tmp_rkey; if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) { ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x", e_fmr, e_fmr->flags); ret = -EINVAL; goto map_phys_fmr_exit0; } ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len); if (ret) goto map_phys_fmr_exit0; if (iova % e_fmr->fmr_page_size) { /* only whole-numbered pages */ ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x", iova, e_fmr->fmr_page_size); ret = -EINVAL; goto map_phys_fmr_exit0; } if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) { /* HCAD does not limit the maps, however trace this anyway */ ehca_info(fmr->device, "map limit exceeded, fmr=%p " "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x", fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps); } memset(&pginfo, 0, sizeof(pginfo)); pginfo.type = EHCA_MR_PGI_FMR; pginfo.num_kpages = list_len; pginfo.hwpage_size = e_fmr->hwpage_size; pginfo.num_hwpages = list_len * e_fmr->fmr_page_size / pginfo.hwpage_size; pginfo.u.fmr.page_list = page_list; pginfo.next_hwpage = (iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size; pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size; ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova, list_len * e_fmr->fmr_page_size, e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey); if (ret) goto map_phys_fmr_exit0; /* successful reregistration */ e_fmr->fmr_map_cnt++; e_fmr->ib.ib_fmr.lkey = tmp_lkey; e_fmr->ib.ib_fmr.rkey = tmp_rkey; return 0; map_phys_fmr_exit0: if (ret) ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x " "iova=%llx", ret, fmr, page_list, list_len, iova); return ret; } /* end ehca_map_phys_fmr() */ /*----------------------------------------------------------------------*/ int ehca_unmap_fmr(struct list_head *fmr_list) { int ret = 0; struct ib_fmr *ib_fmr; struct ehca_shca *shca = NULL; struct ehca_shca *prev_shca; struct ehca_mr *e_fmr; u32 num_fmr = 0; u32 unmap_fmr_cnt = 0; /* check all FMR belong to same SHCA, and check internal flag */ list_for_each_entry(ib_fmr, fmr_list, list) { prev_shca = shca; shca = container_of(ib_fmr->device, struct ehca_shca, ib_device); e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr); if ((shca != prev_shca) && prev_shca) { ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p " "prev_shca=%p e_fmr=%p", shca, prev_shca, e_fmr); ret = -EINVAL; goto unmap_fmr_exit0; } if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) { ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p " "e_fmr->flags=%x", e_fmr, e_fmr->flags); ret = -EINVAL; goto unmap_fmr_exit0; } num_fmr++; } /* loop over all FMRs to unmap */ list_for_each_entry(ib_fmr, fmr_list, list) { unmap_fmr_cnt++; e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr); shca = container_of(ib_fmr->device, struct ehca_shca, ib_device); ret = ehca_unmap_one_fmr(shca, e_fmr); if (ret) { /* unmap failed, stop unmapping of rest of FMRs */ ehca_err(&shca->ib_device, "unmap of one FMR failed, " "stop rest, e_fmr=%p num_fmr=%x " "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr, unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey); goto unmap_fmr_exit0; } } unmap_fmr_exit0: if (ret) ehca_gen_err("ret=%i fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x", ret, fmr_list, num_fmr, unmap_fmr_cnt); return ret; } /* end ehca_unmap_fmr() */ /*----------------------------------------------------------------------*/ int ehca_dealloc_fmr(struct ib_fmr *fmr) { int ret; u64 h_ret; struct ehca_shca *shca = container_of(fmr->device, struct ehca_shca, ib_device); struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr); if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) { ehca_err(fmr->device, "not a 
FMR, e_fmr=%p e_fmr->flags=%x", e_fmr, e_fmr->flags); ret = -EINVAL; goto free_fmr_exit0; } h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr); if (h_ret != H_SUCCESS) { ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p " "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x", h_ret, e_fmr, shca->ipz_hca_handle.handle, e_fmr->ipz_mr_handle.handle, fmr->lkey); ret = ehca2ib_return_code(h_ret); goto free_fmr_exit0; } /* successful deregistration */ ehca_mr_delete(e_fmr); return 0; free_fmr_exit0: if (ret) ehca_err(&shca->ib_device, "ret=%i fmr=%p", ret, fmr); return ret; } /* end ehca_dealloc_fmr() */ /*----------------------------------------------------------------------*/ static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca, struct ehca_mr *e_mr, struct ehca_mr_pginfo *pginfo); int ehca_reg_mr(struct ehca_shca *shca, struct ehca_mr *e_mr, u64 *iova_start, u64 size, int acl, struct ehca_pd *e_pd, struct ehca_mr_pginfo *pginfo, u32 *lkey, /*OUT*/ u32 *rkey, /*OUT*/ enum ehca_reg_type reg_type) { int ret; u64 h_ret; u32 hipz_acl; struct ehca_mr_hipzout_parms hipzout; ehca_mrmw_map_acl(acl, &hipz_acl); ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl); if (ehca_use_hp_mr == 1) hipz_acl |= 0x00000001; h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr, (u64)iova_start, size, hipz_acl, e_pd->fw_pd, &hipzout); if (h_ret != H_SUCCESS) { ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli " "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle); ret = ehca2ib_return_code(h_ret); goto ehca_reg_mr_exit0; } e_mr->ipz_mr_handle = hipzout.handle; if (reg_type == EHCA_REG_BUSMAP_MR) ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo); else if (reg_type == EHCA_REG_MR) ret = ehca_reg_mr_rpages(shca, e_mr, pginfo); else ret = -EINVAL; if (ret) goto ehca_reg_mr_exit1; /* successful registration */ e_mr->num_kpages = pginfo->num_kpages; e_mr->num_hwpages = pginfo->num_hwpages; e_mr->hwpage_size = pginfo->hwpage_size; e_mr->start = iova_start; e_mr->size = size; e_mr->acl = acl; *lkey = hipzout.lkey; *rkey = hipzout.rkey; return 0; ehca_reg_mr_exit1: h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); if (h_ret != H_SUCCESS) { ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p " "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x " "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i", h_ret, shca, e_mr, iova_start, size, acl, e_pd, hipzout.lkey, pginfo, pginfo->num_kpages, pginfo->num_hwpages, ret); ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, " "not recoverable"); } ehca_reg_mr_exit0: if (ret) ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p " "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p " "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo, pginfo->num_kpages, pginfo->num_hwpages); return ret; } /* end ehca_reg_mr() */ /*----------------------------------------------------------------------*/ int ehca_reg_mr_rpages(struct ehca_shca *shca, struct ehca_mr *e_mr, struct ehca_mr_pginfo *pginfo) { int ret = 0; u64 h_ret; u32 rnum; u64 rpage; u32 i; u64 *kpage; if (!pginfo->num_hwpages) /* in case of fmr */ return 0; kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL); if (!kpage) { ehca_err(&shca->ib_device, "kpage alloc failed"); ret = -ENOMEM; goto ehca_reg_mr_rpages_exit0; } /* max MAX_RPAGES ehca mr pages per register call */ for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) { if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) { rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */ 
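/*
 * Worked example with illustrative numbers: num_hwpages = 1024 and
 * MAX_RPAGES = 512 give NUM_CHUNKS() = 2; the modulo above then yields
 * 0, which the check below turns back into a full last chunk of 512.
 */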
if (rnum == 0) rnum = MAX_RPAGES; /* last shot is full */ } else rnum = MAX_RPAGES; ret = ehca_set_pagebuf(pginfo, rnum, kpage); if (ret) { ehca_err(&shca->ib_device, "ehca_set_pagebuf " "bad rc, ret=%i rnum=%x kpage=%p", ret, rnum, kpage); goto ehca_reg_mr_rpages_exit1; } if (rnum > 1) { rpage = virt_to_abs(kpage); if (!rpage) { ehca_err(&shca->ib_device, "kpage=%p i=%x", kpage, i); ret = -EFAULT; goto ehca_reg_mr_rpages_exit1; } } else rpage = *kpage; h_ret = hipz_h_register_rpage_mr( shca->ipz_hca_handle, e_mr, ehca_encode_hwpage_size(pginfo->hwpage_size), 0, rpage, rnum); if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) { /* * check for 'registration complete'==H_SUCCESS * and for 'page registered'==H_PAGE_REGISTERED */ if (h_ret != H_SUCCESS) { ehca_err(&shca->ib_device, "last " "hipz_reg_rpage_mr failed, h_ret=%lli " "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx" " lkey=%x", h_ret, e_mr, i, shca->ipz_hca_handle.handle, e_mr->ipz_mr_handle.handle, e_mr->ib.ib_mr.lkey); ret = ehca2ib_return_code(h_ret); break; } else ret = 0; } else if (h_ret != H_PAGE_REGISTERED) { ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, " "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx " "mr_hndl=%llx", h_ret, e_mr, i, e_mr->ib.ib_mr.lkey, shca->ipz_hca_handle.handle, e_mr->ipz_mr_handle.handle); ret = ehca2ib_return_code(h_ret); break; } else ret = 0; } /* end for(i) */ ehca_reg_mr_rpages_exit1: ehca_free_fw_ctrlblock(kpage); ehca_reg_mr_rpages_exit0: if (ret) ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p " "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr, pginfo, pginfo->num_kpages, pginfo->num_hwpages); return ret; } /* end ehca_reg_mr_rpages() */ /*----------------------------------------------------------------------*/ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca, struct ehca_mr *e_mr, u64 *iova_start, u64 size, u32 acl, struct ehca_pd *e_pd, struct ehca_mr_pginfo *pginfo, u32 *lkey, /*OUT*/ u32 *rkey) /*OUT*/ { int ret; u64 h_ret; u32 hipz_acl; u64 *kpage; u64 rpage; struct ehca_mr_pginfo pginfo_save; struct ehca_mr_hipzout_parms hipzout; ehca_mrmw_map_acl(acl, &hipz_acl); ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl); kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL); if (!kpage) { ehca_err(&shca->ib_device, "kpage alloc failed"); ret = -ENOMEM; goto ehca_rereg_mr_rereg1_exit0; } pginfo_save = *pginfo; ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage); if (ret) { ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p " "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx " "kpage=%p", e_mr, pginfo, pginfo->type, pginfo->num_kpages, pginfo->num_hwpages, kpage); goto ehca_rereg_mr_rereg1_exit1; } rpage = virt_to_abs(kpage); if (!rpage) { ehca_err(&shca->ib_device, "kpage=%p", kpage); ret = -EFAULT; goto ehca_rereg_mr_rereg1_exit1; } h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr, (u64)iova_start, size, hipz_acl, e_pd->fw_pd, rpage, &hipzout); if (h_ret != H_SUCCESS) { /* * reregistration unsuccessful, try it again with the 3 hCalls, * e.g. 
this is required in case H_MR_CONDITION * (MW bound or MR is shared) */ ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed " "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr); *pginfo = pginfo_save; ret = -EAGAIN; } else if ((u64 *)hipzout.vaddr != iova_start) { ehca_err(&shca->ib_device, "PHYP changed iova_start in " "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p " "mr_handle=%llx lkey=%x lkey_out=%x", iova_start, hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle, e_mr->ib.ib_mr.lkey, hipzout.lkey); ret = -EFAULT; } else { /* * successful reregistration * note: start and start_out are identical for eServer HCAs */ e_mr->num_kpages = pginfo->num_kpages; e_mr->num_hwpages = pginfo->num_hwpages; e_mr->hwpage_size = pginfo->hwpage_size; e_mr->start = iova_start; e_mr->size = size; e_mr->acl = acl; *lkey = hipzout.lkey; *rkey = hipzout.rkey; } ehca_rereg_mr_rereg1_exit1: ehca_free_fw_ctrlblock(kpage); ehca_rereg_mr_rereg1_exit0: if ( ret && (ret != -EAGAIN) ) ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x " "pginfo=%p num_kpages=%llx num_hwpages=%llx", ret, *lkey, *rkey, pginfo, pginfo->num_kpages, pginfo->num_hwpages); return ret; } /* end ehca_rereg_mr_rereg1() */ /*----------------------------------------------------------------------*/ int ehca_rereg_mr(struct ehca_shca *shca, struct ehca_mr *e_mr, u64 *iova_start, u64 size, int acl, struct ehca_pd *e_pd, struct ehca_mr_pginfo *pginfo, u32 *lkey, u32 *rkey) { int ret = 0; u64 h_ret; int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */ int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */ /* first determine reregistration hCall(s) */ if ((pginfo->num_hwpages > MAX_RPAGES) || (e_mr->num_hwpages > MAX_RPAGES) || (pginfo->num_hwpages > e_mr->num_hwpages)) { ehca_dbg(&shca->ib_device, "Rereg3 case, " "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x", pginfo->num_hwpages, e_mr->num_hwpages); rereg_1_hcall = 0; rereg_3_hcall = 1; } if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */ rereg_1_hcall = 0; rereg_3_hcall = 1; e_mr->flags &= ~EHCA_MR_FLAG_MAXMR; ehca_err(&shca->ib_device, "Rereg MR for max-MR! 
e_mr=%p", e_mr); } if (rereg_1_hcall) { ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size, acl, e_pd, pginfo, lkey, rkey); if (ret) { if (ret == -EAGAIN) rereg_3_hcall = 1; else goto ehca_rereg_mr_exit0; } } if (rereg_3_hcall) { struct ehca_mr save_mr; /* first deregister old MR */ h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); if (h_ret != H_SUCCESS) { ehca_err(&shca->ib_device, "hipz_free_mr failed, " "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx " "mr->lkey=%x", h_ret, e_mr, shca->ipz_hca_handle.handle, e_mr->ipz_mr_handle.handle, e_mr->ib.ib_mr.lkey); ret = ehca2ib_return_code(h_ret); goto ehca_rereg_mr_exit0; } /* clean ehca_mr_t, without changing struct ib_mr and lock */ save_mr = *e_mr; ehca_mr_deletenew(e_mr); /* set some MR values */ e_mr->flags = save_mr.flags; e_mr->hwpage_size = save_mr.hwpage_size; e_mr->fmr_page_size = save_mr.fmr_page_size; e_mr->fmr_max_pages = save_mr.fmr_max_pages; e_mr->fmr_max_maps = save_mr.fmr_max_maps; e_mr->fmr_map_cnt = save_mr.fmr_map_cnt; ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl, e_pd, pginfo, lkey, rkey, EHCA_REG_MR); if (ret) { u32 offset = (u64)(&e_mr->flags) - (u64)e_mr; memcpy(&e_mr->flags, &(save_mr.flags), sizeof(struct ehca_mr) - offset); goto ehca_rereg_mr_exit0; } } ehca_rereg_mr_exit0: if (ret) ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p " "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p " "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x " "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey, rereg_1_hcall, rereg_3_hcall); return ret; } /* end ehca_rereg_mr() */ /*----------------------------------------------------------------------*/ int ehca_unmap_one_fmr(struct ehca_shca *shca, struct ehca_mr *e_fmr) { int ret = 0; u64 h_ret; struct ehca_pd *e_pd = container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd); struct ehca_mr save_fmr; u32 tmp_lkey, tmp_rkey; struct ehca_mr_pginfo pginfo; struct ehca_mr_hipzout_parms hipzout; struct ehca_mr save_mr; if (e_fmr->fmr_max_pages <= MAX_RPAGES) { /* * note: after using rereg hcall with len=0, * rereg hcall must be used again for registering pages */ h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0, 0, 0, e_pd->fw_pd, 0, &hipzout); if (h_ret == H_SUCCESS) { /* successful reregistration */ e_fmr->start = NULL; e_fmr->size = 0; tmp_lkey = hipzout.lkey; tmp_rkey = hipzout.rkey; return 0; } /* * should not happen, because length checked above, * FMRs are not shared and no MW bound to FMRs */ ehca_err(&shca->ib_device, "hipz_reregister_pmr failed " "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx " "mr_hndl=%llx lkey=%x lkey_out=%x", h_ret, e_fmr, shca->ipz_hca_handle.handle, e_fmr->ipz_mr_handle.handle, e_fmr->ib.ib_fmr.lkey, hipzout.lkey); /* try free and rereg */ } /* first free old FMR */ h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr); if (h_ret != H_SUCCESS) { ehca_err(&shca->ib_device, "hipz_free_mr failed, " "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx " "lkey=%x", h_ret, e_fmr, shca->ipz_hca_handle.handle, e_fmr->ipz_mr_handle.handle, e_fmr->ib.ib_fmr.lkey); ret = ehca2ib_return_code(h_ret); goto ehca_unmap_one_fmr_exit0; } /* clean ehca_mr_t, without changing lock */ save_fmr = *e_fmr; ehca_mr_deletenew(e_fmr); /* set some MR values */ e_fmr->flags = save_fmr.flags; e_fmr->hwpage_size = save_fmr.hwpage_size; e_fmr->fmr_page_size = save_fmr.fmr_page_size; e_fmr->fmr_max_pages = save_fmr.fmr_max_pages; e_fmr->fmr_max_maps = save_fmr.fmr_max_maps; e_fmr->fmr_map_cnt = 
save_fmr.fmr_map_cnt; e_fmr->acl = save_fmr.acl; memset(&pginfo, 0, sizeof(pginfo)); pginfo.type = EHCA_MR_PGI_FMR; ret = ehca_reg_mr(shca, e_fmr, NULL, (e_fmr->fmr_max_pages * e_fmr->fmr_page_size), e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey, EHCA_REG_MR); if (ret) { /* restore the saved FMR state on failure (save_fmr holds the copy taken above) */ u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr; memcpy(&e_fmr->flags, &(save_fmr.flags), sizeof(struct ehca_mr) - offset); } ehca_unmap_one_fmr_exit0: if (ret) ehca_err(&shca->ib_device, "ret=%i tmp_lkey=%x tmp_rkey=%x " "fmr_max_pages=%x", ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages); return ret; } /* end ehca_unmap_one_fmr() */ /*----------------------------------------------------------------------*/ int ehca_reg_smr(struct ehca_shca *shca, struct ehca_mr *e_origmr, struct ehca_mr *e_newmr, u64 *iova_start, int acl, struct ehca_pd *e_pd, u32 *lkey, /*OUT*/ u32 *rkey) /*OUT*/ { int ret = 0; u64 h_ret; u32 hipz_acl; struct ehca_mr_hipzout_parms hipzout; ehca_mrmw_map_acl(acl, &hipz_acl); ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl); h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr, (u64)iova_start, hipz_acl, e_pd->fw_pd, &hipzout); if (h_ret != H_SUCCESS) { ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli " "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x " "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x", h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd, shca->ipz_hca_handle.handle, e_origmr->ipz_mr_handle.handle, e_origmr->ib.ib_mr.lkey); ret = ehca2ib_return_code(h_ret); goto ehca_reg_smr_exit0; } /* successful registration */ e_newmr->num_kpages = e_origmr->num_kpages; e_newmr->num_hwpages = e_origmr->num_hwpages; e_newmr->hwpage_size = e_origmr->hwpage_size; e_newmr->start = iova_start; e_newmr->size = e_origmr->size; e_newmr->acl = acl; e_newmr->ipz_mr_handle = hipzout.handle; *lkey = hipzout.lkey; *rkey = hipzout.rkey; return 0; ehca_reg_smr_exit0: if (ret) ehca_err(&shca->ib_device, "ret=%i shca=%p e_origmr=%p " "e_newmr=%p iova_start=%p acl=%x e_pd=%p", ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd); return ret; } /* end ehca_reg_smr() */ /*----------------------------------------------------------------------*/ static inline void *ehca_calc_sectbase(int top, int dir, int idx) { unsigned long ret = idx; ret |= dir << EHCA_DIR_INDEX_SHIFT; ret |= top << EHCA_TOP_INDEX_SHIFT; return abs_to_virt(ret << SECTION_SIZE_BITS); } #define ehca_bmap_valid(entry) \ ((u64)entry != (u64)EHCA_INVAL_ADDR) static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage, struct ehca_shca *shca, struct ehca_mr *mr, struct ehca_mr_pginfo *pginfo) { u64 h_ret = 0; unsigned long page = 0; u64 rpage = virt_to_abs(kpage); int page_count; void *sectbase = ehca_calc_sectbase(top, dir, idx); if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) { ehca_err(&shca->ib_device, "reg_mr_section will probably fail: " "hwpage_size does not fit to " "section start address"); } page_count = EHCA_SECTSIZE / pginfo->hwpage_size; while (page < page_count) { u64 rnum; for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count); rnum++) { void *pg = sectbase + ((page++) * pginfo->hwpage_size); kpage[rnum] = virt_to_abs(pg); } h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr, ehca_encode_hwpage_size(pginfo->hwpage_size), 0, rpage, rnum); if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) { ehca_err(&shca->ib_device, "register_rpage_mr failed"); return h_ret; } } return h_ret; } static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage, struct ehca_shca *shca, struct
ehca_mr *mr, struct ehca_mr_pginfo *pginfo) { u64 hret = H_SUCCESS; int idx; for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) { if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx])) continue; hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr, pginfo); if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) return hret; } return hret; } static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca, struct ehca_mr *mr, struct ehca_mr_pginfo *pginfo) { u64 hret = H_SUCCESS; int dir; for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) { if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir])) continue; hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo); if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) return hret; } return hret; } /* register internal max-MR to internal SHCA */ int ehca_reg_internal_maxmr( struct ehca_shca *shca, struct ehca_pd *e_pd, struct ehca_mr **e_maxmr) /*OUT*/ { int ret; struct ehca_mr *e_mr; u64 *iova_start; u64 size_maxmr; struct ehca_mr_pginfo pginfo; struct ib_phys_buf ib_pbuf; u32 num_kpages; u32 num_hwpages; u64 hw_pgsize; if (!ehca_bmap) { ret = -EFAULT; goto ehca_reg_internal_maxmr_exit0; } e_mr = ehca_mr_new(); if (!e_mr) { ehca_err(&shca->ib_device, "out of memory"); ret = -ENOMEM; goto ehca_reg_internal_maxmr_exit0; } e_mr->flags |= EHCA_MR_FLAG_MAXMR; /* register internal max-MR on HCA */ size_maxmr = ehca_mr_len; iova_start = (u64 *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)); ib_pbuf.addr = 0; ib_pbuf.size = size_maxmr; num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr, PAGE_SIZE); hw_pgsize = ehca_get_max_hwpage_size(shca); num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr, hw_pgsize); memset(&pginfo, 0, sizeof(pginfo)); pginfo.type = EHCA_MR_PGI_PHYS; pginfo.num_kpages = num_kpages; pginfo.num_hwpages = num_hwpages; pginfo.hwpage_size = hw_pgsize; pginfo.u.phy.num_phys_buf = 1; pginfo.u.phy.phys_buf_array = &ib_pbuf; ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd, &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR); if (ret) { ehca_err(&shca->ib_device, "reg of internal max MR failed, " "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x " "num_hwpages=%x", e_mr, iova_start, size_maxmr, num_kpages, num_hwpages); goto ehca_reg_internal_maxmr_exit1; } /* successful registration of all pages */ e_mr->ib.ib_mr.device = e_pd->ib_pd.device; e_mr->ib.ib_mr.pd = &e_pd->ib_pd; e_mr->ib.ib_mr.uobject = NULL; atomic_inc(&(e_pd->ib_pd.usecnt)); atomic_set(&(e_mr->ib.ib_mr.usecnt), 0); *e_maxmr = e_mr; return 0; ehca_reg_internal_maxmr_exit1: ehca_mr_delete(e_mr); ehca_reg_internal_maxmr_exit0: if (ret) ehca_err(&shca->ib_device, "ret=%i shca=%p e_pd=%p e_maxmr=%p", ret, shca, e_pd, e_maxmr); return ret; } /* end ehca_reg_internal_maxmr() */ /*----------------------------------------------------------------------*/ int ehca_reg_maxmr(struct ehca_shca *shca, struct ehca_mr *e_newmr, u64 *iova_start, int acl, struct ehca_pd *e_pd, u32 *lkey, u32 *rkey) { u64 h_ret; struct ehca_mr *e_origmr = shca->maxmr; u32 hipz_acl; struct ehca_mr_hipzout_parms hipzout; ehca_mrmw_map_acl(acl, &hipz_acl); ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl); h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr, (u64)iova_start, hipz_acl, e_pd->fw_pd, &hipzout); if (h_ret != H_SUCCESS) { ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli " "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x", h_ret, e_origmr, 
shca->ipz_hca_handle.handle, e_origmr->ipz_mr_handle.handle, e_origmr->ib.ib_mr.lkey); return ehca2ib_return_code(h_ret); } /* successful registration */ e_newmr->num_kpages = e_origmr->num_kpages; e_newmr->num_hwpages = e_origmr->num_hwpages; e_newmr->hwpage_size = e_origmr->hwpage_size; e_newmr->start = iova_start; e_newmr->size = e_origmr->size; e_newmr->acl = acl; e_newmr->ipz_mr_handle = hipzout.handle; *lkey = hipzout.lkey; *rkey = hipzout.rkey; return 0; } /* end ehca_reg_maxmr() */ /*----------------------------------------------------------------------*/ int ehca_dereg_internal_maxmr(struct ehca_shca *shca) { int ret; struct ehca_mr *e_maxmr; struct ib_pd *ib_pd; if (!shca->maxmr) { ehca_err(&shca->ib_device, "bad call, shca=%p", shca); ret = -EINVAL; goto ehca_dereg_internal_maxmr_exit0; } e_maxmr = shca->maxmr; ib_pd = e_maxmr->ib.ib_mr.pd; shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */ ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr); if (ret) { ehca_err(&shca->ib_device, "dereg internal max-MR failed, " "ret=%i e_maxmr=%p shca=%p lkey=%x", ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey); shca->maxmr = e_maxmr; goto ehca_dereg_internal_maxmr_exit0; } atomic_dec(&ib_pd->usecnt); ehca_dereg_internal_maxmr_exit0: if (ret) ehca_err(&shca->ib_device, "ret=%i shca=%p shca->maxmr=%p", ret, shca, shca->maxmr); return ret; } /* end ehca_dereg_internal_maxmr() */ /*----------------------------------------------------------------------*/ /* * check physical buffer array of MR verbs for validness and * calculates MR size */ int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array, int num_phys_buf, u64 *iova_start, u64 *size) { struct ib_phys_buf *pbuf = phys_buf_array; u64 size_count = 0; u32 i; if (num_phys_buf == 0) { ehca_gen_err("bad phys buf array len, num_phys_buf=0"); return -EINVAL; } /* check first buffer */ if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) { ehca_gen_err("iova_start/addr mismatch, iova_start=%p " "pbuf->addr=%llx pbuf->size=%llx", iova_start, pbuf->addr, pbuf->size); return -EINVAL; } if (((pbuf->addr + pbuf->size) % PAGE_SIZE) && (num_phys_buf > 1)) { ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%llx " "pbuf->size=%llx", pbuf->addr, pbuf->size); return -EINVAL; } for (i = 0; i < num_phys_buf; i++) { if ((i > 0) && (pbuf->addr % PAGE_SIZE)) { ehca_gen_err("bad address, i=%x pbuf->addr=%llx " "pbuf->size=%llx", i, pbuf->addr, pbuf->size); return -EINVAL; } if (((i > 0) && /* not 1st */ (i < (num_phys_buf - 1)) && /* not last */ (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) { ehca_gen_err("bad size, i=%x pbuf->size=%llx", i, pbuf->size); return -EINVAL; } size_count += pbuf->size; pbuf++; } *size = size_count; return 0; } /* end ehca_mr_chk_buf_and_calc_size() */ /*----------------------------------------------------------------------*/ /* check page list of map FMR verb for validness */ int ehca_fmr_check_page_list(struct ehca_mr *e_fmr, u64 *page_list, int list_len) { u32 i; u64 *page; if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) { ehca_gen_err("bad list_len, list_len=%x " "e_fmr->fmr_max_pages=%x fmr=%p", list_len, e_fmr->fmr_max_pages, e_fmr); return -EINVAL; } /* each page must be aligned */ page = page_list; for (i = 0; i < list_len; i++) { if (*page % e_fmr->fmr_page_size) { ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p " "fmr_page_size=%x", i, *page, page, e_fmr, e_fmr->fmr_page_size); return -EINVAL; } page++; } return 0; } /* end ehca_fmr_check_page_list() */ 
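/*
 * The ehca_set_pagebuf_*() helpers below translate the various MR
 * source descriptions (user memory chunks, phys_buf arrays, FMR page
 * lists) into the flat array of hw page addresses consumed by
 * hipz_h_register_rpage_mr(). Two user-space variants exist: user1
 * for PAGE_SIZE >= hwpage_size, user2 for PAGE_SIZE < hwpage_size.
 */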
/*----------------------------------------------------------------------*/ /* PAGE_SIZE >= pginfo->hwpage_size */ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo, u32 number, u64 *kpage) { int ret = 0; struct ib_umem_chunk *prev_chunk; struct ib_umem_chunk *chunk; u64 pgaddr; u32 i = 0; u32 j = 0; int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size; /* loop over desired chunk entries */ chunk = pginfo->u.usr.next_chunk; prev_chunk = pginfo->u.usr.next_chunk; list_for_each_entry_continue( chunk, (&(pginfo->u.usr.region->chunk_list)), list) { for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) { pgaddr = page_to_pfn(sg_page(&chunk->page_list[i])) << PAGE_SHIFT ; *kpage = phys_to_abs(pgaddr + (pginfo->next_hwpage * pginfo->hwpage_size)); if ( !(*kpage) ) { ehca_gen_err("pgaddr=%llx " "chunk->page_list[i]=%llx " "i=%x next_hwpage=%llx", pgaddr, (u64)sg_dma_address( &chunk->page_list[i]), i, pginfo->next_hwpage); return -EFAULT; } (pginfo->hwpage_cnt)++; (pginfo->next_hwpage)++; kpage++; if (pginfo->next_hwpage % hwpages_per_kpage == 0) { (pginfo->kpage_cnt)++; (pginfo->u.usr.next_nmap)++; pginfo->next_hwpage = 0; i++; } j++; if (j >= number) break; } if ((pginfo->u.usr.next_nmap >= chunk->nmap) && (j >= number)) { pginfo->u.usr.next_nmap = 0; prev_chunk = chunk; break; } else if (pginfo->u.usr.next_nmap >= chunk->nmap) { pginfo->u.usr.next_nmap = 0; prev_chunk = chunk; } else if (j >= number) break; else prev_chunk = chunk; } pginfo->u.usr.next_chunk = list_prepare_entry(prev_chunk, (&(pginfo->u.usr.region->chunk_list)), list); return ret; } /* * check given pages for contiguous layout * last page addr is returned in prev_pgaddr for further check */ static int ehca_check_kpages_per_ate(struct scatterlist *page_list, int start_idx, int end_idx, u64 *prev_pgaddr) { int t; for (t = start_idx; t <= end_idx; t++) { u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT; if (ehca_debug_level >= 3) ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr, *(u64 *)abs_to_virt(phys_to_abs(pgaddr))); if (pgaddr - PAGE_SIZE != *prev_pgaddr) { ehca_gen_err("uncontiguous page found pgaddr=%llx " "prev_pgaddr=%llx page_list_i=%x", pgaddr, *prev_pgaddr, t); return -EINVAL; } *prev_pgaddr = pgaddr; } return 0; } /* PAGE_SIZE < pginfo->hwpage_size */ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo, u32 number, u64 *kpage) { int ret = 0; struct ib_umem_chunk *prev_chunk; struct ib_umem_chunk *chunk; u64 pgaddr, prev_pgaddr; u32 i = 0; u32 j = 0; int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE; int nr_kpages = kpages_per_hwpage; /* loop over desired chunk entries */ chunk = pginfo->u.usr.next_chunk; prev_chunk = pginfo->u.usr.next_chunk; list_for_each_entry_continue( chunk, (&(pginfo->u.usr.region->chunk_list)), list) { for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) { if (nr_kpages == kpages_per_hwpage) { pgaddr = ( page_to_pfn(sg_page(&chunk->page_list[i])) << PAGE_SHIFT ); *kpage = phys_to_abs(pgaddr); if ( !(*kpage) ) { ehca_gen_err("pgaddr=%llx i=%x", pgaddr, i); ret = -EFAULT; return ret; } /* * The first page in a hwpage must be aligned; * the first MR page is exempt from this rule. 
*/ if (pgaddr & (pginfo->hwpage_size - 1)) { if (pginfo->hwpage_cnt) { ehca_gen_err( "invalid alignment " "pgaddr=%llx i=%x " "mr_pgsize=%llx", pgaddr, i, pginfo->hwpage_size); ret = -EFAULT; return ret; } /* first MR page */ pginfo->kpage_cnt = (pgaddr & (pginfo->hwpage_size - 1)) >> PAGE_SHIFT; nr_kpages -= pginfo->kpage_cnt; *kpage = phys_to_abs( pgaddr & ~(pginfo->hwpage_size - 1)); } if (ehca_debug_level >= 3) { u64 val = *(u64 *)abs_to_virt( phys_to_abs(pgaddr)); ehca_gen_dbg("kpage=%llx chunk_page=%llx " "value=%016llx", *kpage, pgaddr, val); } prev_pgaddr = pgaddr; i++; pginfo->kpage_cnt++; pginfo->u.usr.next_nmap++; nr_kpages--; if (!nr_kpages) goto next_kpage; continue; } if (i + nr_kpages > chunk->nmap) { ret = ehca_check_kpages_per_ate( chunk->page_list, i, chunk->nmap - 1, &prev_pgaddr); if (ret) return ret; pginfo->kpage_cnt += chunk->nmap - i; pginfo->u.usr.next_nmap += chunk->nmap - i; nr_kpages -= chunk->nmap - i; break; } ret = ehca_check_kpages_per_ate(chunk->page_list, i, i + nr_kpages - 1, &prev_pgaddr); if (ret) return ret; i += nr_kpages; pginfo->kpage_cnt += nr_kpages; pginfo->u.usr.next_nmap += nr_kpages; next_kpage: nr_kpages = kpages_per_hwpage; (pginfo->hwpage_cnt)++; kpage++; j++; if (j >= number) break; } if ((pginfo->u.usr.next_nmap >= chunk->nmap) && (j >= number)) { pginfo->u.usr.next_nmap = 0; prev_chunk = chunk; break; } else if (pginfo->u.usr.next_nmap >= chunk->nmap) { pginfo->u.usr.next_nmap = 0; prev_chunk = chunk; } else if (j >= number) break; else prev_chunk = chunk; } pginfo->u.usr.next_chunk = list_prepare_entry(prev_chunk, (&(pginfo->u.usr.region->chunk_list)), list); return ret; } static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo, u32 number, u64 *kpage) { int ret = 0; struct ib_phys_buf *pbuf; u64 num_hw, offs_hw; u32 i = 0; /* loop over desired phys_buf_array entries */ while (i < number) { pbuf = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf; num_hw = NUM_CHUNKS((pbuf->addr % pginfo->hwpage_size) + pbuf->size, pginfo->hwpage_size); offs_hw = (pbuf->addr & ~(pginfo->hwpage_size - 1)) / pginfo->hwpage_size; while (pginfo->next_hwpage < offs_hw + num_hw) { /* sanity check */ if ((pginfo->kpage_cnt >= pginfo->num_kpages) || (pginfo->hwpage_cnt >= pginfo->num_hwpages)) { ehca_gen_err("kpage_cnt >= num_kpages, " "kpage_cnt=%llx num_kpages=%llx " "hwpage_cnt=%llx " "num_hwpages=%llx i=%x", pginfo->kpage_cnt, pginfo->num_kpages, pginfo->hwpage_cnt, pginfo->num_hwpages, i); return -EFAULT; } *kpage = phys_to_abs( (pbuf->addr & ~(pginfo->hwpage_size - 1)) + (pginfo->next_hwpage * pginfo->hwpage_size)); if ( !(*kpage) && pbuf->addr ) { ehca_gen_err("pbuf->addr=%llx pbuf->size=%llx " "next_hwpage=%llx", pbuf->addr, pbuf->size, pginfo->next_hwpage); return -EFAULT; } (pginfo->hwpage_cnt)++; (pginfo->next_hwpage)++; if (PAGE_SIZE >= pginfo->hwpage_size) { if (pginfo->next_hwpage % (PAGE_SIZE / pginfo->hwpage_size) == 0) (pginfo->kpage_cnt)++; } else pginfo->kpage_cnt += pginfo->hwpage_size / PAGE_SIZE; kpage++; i++; if (i >= number) break; } if (pginfo->next_hwpage >= offs_hw + num_hw) { (pginfo->u.phy.next_buf)++; pginfo->next_hwpage = 0; } } return ret; } static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo, u32 number, u64 *kpage) { int ret = 0; u64 *fmrlist; u32 i; /* loop over desired page_list entries */ fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem; for (i = 0; i < number; i++) { *kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) + pginfo->next_hwpage * pginfo->hwpage_size); if ( !(*kpage) ) 
{ ehca_gen_err("*fmrlist=%llx fmrlist=%p " "next_listelem=%llx next_hwpage=%llx", *fmrlist, fmrlist, pginfo->u.fmr.next_listelem, pginfo->next_hwpage); return -EFAULT; } (pginfo->hwpage_cnt)++; if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) { if (pginfo->next_hwpage % (pginfo->u.fmr.fmr_pgsize / pginfo->hwpage_size) == 0) { (pginfo->kpage_cnt)++; (pginfo->u.fmr.next_listelem)++; fmrlist++; pginfo->next_hwpage = 0; } else (pginfo->next_hwpage)++; } else { unsigned int cnt_per_hwpage = pginfo->hwpage_size / pginfo->u.fmr.fmr_pgsize; unsigned int j; u64 prev = *kpage; /* check if adrs are contiguous */ for (j = 1; j < cnt_per_hwpage; j++) { u64 p = phys_to_abs(fmrlist[j] & ~(pginfo->hwpage_size - 1)); if (prev + pginfo->u.fmr.fmr_pgsize != p) { ehca_gen_err("uncontiguous fmr pages " "found prev=%llx p=%llx " "idx=%x", prev, p, i + j); return -EINVAL; } prev = p; } pginfo->kpage_cnt += cnt_per_hwpage; pginfo->u.fmr.next_listelem += cnt_per_hwpage; fmrlist += cnt_per_hwpage; } kpage++; } return ret; } /* setup page buffer from page info */ int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo, u32 number, u64 *kpage) { int ret; switch (pginfo->type) { case EHCA_MR_PGI_PHYS: ret = ehca_set_pagebuf_phys(pginfo, number, kpage); break; case EHCA_MR_PGI_USER: ret = PAGE_SIZE >= pginfo->hwpage_size ? ehca_set_pagebuf_user1(pginfo, number, kpage) : ehca_set_pagebuf_user2(pginfo, number, kpage); break; case EHCA_MR_PGI_FMR: ret = ehca_set_pagebuf_fmr(pginfo, number, kpage); break; default: ehca_gen_err("bad pginfo->type=%x", pginfo->type); ret = -EFAULT; break; } return ret; } /* end ehca_set_pagebuf() */ /*----------------------------------------------------------------------*/ /* * check MR if it is a max-MR, i.e. uses whole memory * in case it's a max-MR 1 is returned, else 0 */ int ehca_mr_is_maxmr(u64 size, u64 *iova_start) { /* a MR is treated as max-MR only if it fits following: */ if ((size == ehca_mr_len) && (iova_start == (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)))) { ehca_gen_dbg("this is a max-MR"); return 1; } else return 0; } /* end ehca_mr_is_maxmr() */ /*----------------------------------------------------------------------*/ /* map access control for MR/MW. This routine is used for MR and MW. */ void ehca_mrmw_map_acl(int ib_acl, u32 *hipz_acl) { *hipz_acl = 0; if (ib_acl & IB_ACCESS_REMOTE_READ) *hipz_acl |= HIPZ_ACCESSCTRL_R_READ; if (ib_acl & IB_ACCESS_REMOTE_WRITE) *hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE; if (ib_acl & IB_ACCESS_REMOTE_ATOMIC) *hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC; if (ib_acl & IB_ACCESS_LOCAL_WRITE) *hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE; if (ib_acl & IB_ACCESS_MW_BIND) *hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND; } /* end ehca_mrmw_map_acl() */ /*----------------------------------------------------------------------*/ /* sets page size in hipz access control for MR/MW. */ void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/ { *hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24); } /* end ehca_mrmw_set_pgsize_hipz_acl() */ /*----------------------------------------------------------------------*/ /* * reverse map access control for MR/MW. * This routine is used for MR and MW. 
*/ void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl, int *ib_acl) /*OUT*/ { *ib_acl = 0; if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ) *ib_acl |= IB_ACCESS_REMOTE_READ; if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE) *ib_acl |= IB_ACCESS_REMOTE_WRITE; if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC) *ib_acl |= IB_ACCESS_REMOTE_ATOMIC; if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE) *ib_acl |= IB_ACCESS_LOCAL_WRITE; if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND) *ib_acl |= IB_ACCESS_MW_BIND; } /* end ehca_mrmw_reverse_map_acl() */ /*----------------------------------------------------------------------*/ /* * MR destructor and constructor * used in Reregister MR verb, sets all fields in ehca_mr_t to 0, * except struct ib_mr and spinlock */ void ehca_mr_deletenew(struct ehca_mr *mr) { mr->flags = 0; mr->num_kpages = 0; mr->num_hwpages = 0; mr->acl = 0; mr->start = NULL; mr->fmr_page_size = 0; mr->fmr_max_pages = 0; mr->fmr_max_maps = 0; mr->fmr_map_cnt = 0; memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle)); memset(&mr->galpas, 0, sizeof(mr->galpas)); } /* end ehca_mr_deletenew() */ int ehca_init_mrmw_cache(void) { mr_cache = kmem_cache_create("ehca_cache_mr", sizeof(struct ehca_mr), 0, SLAB_HWCACHE_ALIGN, NULL); if (!mr_cache) return -ENOMEM; mw_cache = kmem_cache_create("ehca_cache_mw", sizeof(struct ehca_mw), 0, SLAB_HWCACHE_ALIGN, NULL); if (!mw_cache) { kmem_cache_destroy(mr_cache); mr_cache = NULL; return -ENOMEM; } return 0; } void ehca_cleanup_mrmw_cache(void) { if (mr_cache) kmem_cache_destroy(mr_cache); if (mw_cache) kmem_cache_destroy(mw_cache); } static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap, int dir) { if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) { ehca_top_bmap->dir[dir] = kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL); if (!ehca_top_bmap->dir[dir]) return -ENOMEM; /* Set map block to 0xFF according to EHCA_INVAL_ADDR */ memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE); } return 0; } static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir) { if (!ehca_bmap_valid(ehca_bmap->top[top])) { ehca_bmap->top[top] = kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL); if (!ehca_bmap->top[top]) return -ENOMEM; /* Set map block to 0xFF according to EHCA_INVAL_ADDR */ memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE); } return ehca_init_top_bmap(ehca_bmap->top[top], dir); } static inline int ehca_calc_index(unsigned long i, unsigned long s) { return (i >> s) & EHCA_INDEX_MASK; } void ehca_destroy_busmap(void) { int top, dir; if (!ehca_bmap) return; for (top = 0; top < EHCA_MAP_ENTRIES; top++) { if (!ehca_bmap_valid(ehca_bmap->top[top])) continue; for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) { if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir])) continue; kfree(ehca_bmap->top[top]->dir[dir]); } kfree(ehca_bmap->top[top]); } kfree(ehca_bmap); ehca_bmap = NULL; } static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages) { unsigned long i, start_section, end_section; int top, dir, idx; if (!nr_pages) return 0; if (!ehca_bmap) { ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL); if (!ehca_bmap) return -ENOMEM; /* Set map block to 0xFF according to EHCA_INVAL_ADDR */ memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE); } start_section = phys_to_abs(pfn * PAGE_SIZE) / EHCA_SECTSIZE; end_section = phys_to_abs((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE; for (i = start_section; i < end_section; i++) { int ret; top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT); dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT); idx = i & EHCA_INDEX_MASK; ret = 
ehca_init_bmap(ehca_bmap, top, dir); if (ret) { ehca_destroy_busmap(); return ret; } ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len; ehca_mr_len += EHCA_SECTSIZE; } return 0; } static int ehca_is_hugepage(unsigned long pfn) { int page_order; if (pfn & EHCA_HUGEPAGE_PFN_MASK) return 0; page_order = compound_order(pfn_to_page(pfn)); if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT) return 0; return 1; } static int ehca_create_busmap_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg) { int ret; unsigned long pfn, start_pfn, end_pfn, nr_pages; if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE) return ehca_update_busmap(initial_pfn, total_nr_pages); /* Given chunk is >= 16GB -> check for hugepages */ start_pfn = initial_pfn; end_pfn = initial_pfn + total_nr_pages; pfn = start_pfn; while (pfn < end_pfn) { if (ehca_is_hugepage(pfn)) { /* Add mem found in front of the hugepage */ nr_pages = pfn - start_pfn; ret = ehca_update_busmap(start_pfn, nr_pages); if (ret) return ret; /* Skip the hugepage */ pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE); start_pfn = pfn; } else pfn += (EHCA_SECTSIZE / PAGE_SIZE); } /* Add mem found behind the hugepage(s) */ nr_pages = pfn - start_pfn; return ehca_update_busmap(start_pfn, nr_pages); } int ehca_create_busmap(void) { int ret; ehca_mr_len = 0; ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL, ehca_create_busmap_callback); return ret; } static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca, struct ehca_mr *e_mr, struct ehca_mr_pginfo *pginfo) { int top; u64 hret, *kpage; kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL); if (!kpage) { ehca_err(&shca->ib_device, "kpage alloc failed"); return -ENOMEM; } for (top = 0; top < EHCA_MAP_ENTRIES; top++) { if (!ehca_bmap_valid(ehca_bmap->top[top])) continue; hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo); if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS)) break; } ehca_free_fw_ctrlblock(kpage); if (hret == H_SUCCESS) return 0; /* Everything is fine */ else { ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, " "h_ret=%lli e_mr=%p top=%x lkey=%x " "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top, e_mr->ib.ib_mr.lkey, shca->ipz_hca_handle.handle, e_mr->ipz_mr_handle.handle); return ehca2ib_return_code(hret); } } static u64 ehca_map_vaddr(void *caddr) { int top, dir, idx; unsigned long abs_addr, offset; u64 entry; if (!ehca_bmap) return EHCA_INVAL_ADDR; abs_addr = virt_to_abs(caddr); top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT); if (!ehca_bmap_valid(ehca_bmap->top[top])) return EHCA_INVAL_ADDR; dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT); if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir])) return EHCA_INVAL_ADDR; idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT); entry = ehca_bmap->top[top]->dir[dir]->ent[idx]; if (ehca_bmap_valid(entry)) { offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1); return entry | offset; } else return EHCA_INVAL_ADDR; } static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr) { return dma_addr == EHCA_INVAL_ADDR; } static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction) { if (cpu_addr) return ehca_map_vaddr(cpu_addr); else return EHCA_INVAL_ADDR; } static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction) { /* This is only a stub; nothing to be done here */ } static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page, unsigned long 
offset, size_t size, enum dma_data_direction direction) { u64 addr; if (offset + size > PAGE_SIZE) return EHCA_INVAL_ADDR; addr = ehca_map_vaddr(page_address(page)); if (!ehca_dma_mapping_error(dev, addr)) addr += offset; return addr; } static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction) { /* This is only a stub; nothing to be done here */ } static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction) { struct scatterlist *sg; int i; for_each_sg(sgl, sg, nents, i) { u64 addr; addr = ehca_map_vaddr(sg_virt(sg)); if (ehca_dma_mapping_error(dev, addr)) return 0; sg->dma_address = addr; sg->dma_length = sg->length; } return nents; } static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) { /* This is only a stub; nothing to be done here */ } static u64 ehca_dma_address(struct ib_device *dev, struct scatterlist *sg) { return sg->dma_address; } static unsigned int ehca_dma_len(struct ib_device *dev, struct scatterlist *sg) { return sg->length; } static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction dir) { dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); } static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction dir) { dma_sync_single_for_device(dev->dma_device, addr, size, dir); } static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size, u64 *dma_handle, gfp_t flag) { struct page *p; void *addr = NULL; u64 dma_addr; p = alloc_pages(flag, get_order(size)); if (p) { addr = page_address(p); dma_addr = ehca_map_vaddr(addr); if (ehca_dma_mapping_error(dev, dma_addr)) { free_pages((unsigned long)addr, get_order(size)); return NULL; } if (dma_handle) *dma_handle = dma_addr; return addr; } return NULL; } static void ehca_dma_free_coherent(struct ib_device *dev, size_t size, void *cpu_addr, u64 dma_handle) { if (cpu_addr && size) free_pages((unsigned long)cpu_addr, get_order(size)); } struct ib_dma_mapping_ops ehca_dma_mapping_ops = { .mapping_error = ehca_dma_mapping_error, .map_single = ehca_dma_map_single, .unmap_single = ehca_dma_unmap_single, .map_page = ehca_dma_map_page, .unmap_page = ehca_dma_unmap_page, .map_sg = ehca_dma_map_sg, .unmap_sg = ehca_dma_unmap_sg, .dma_address = ehca_dma_address, .dma_len = ehca_dma_len, .sync_single_for_cpu = ehca_dma_sync_single_for_cpu, .sync_single_for_device = ehca_dma_sync_single_for_device, .alloc_coherent = ehca_dma_alloc_coherent, .free_coherent = ehca_dma_free_coherent, };
gpl-2.0
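The ehca busmap code above resolves an absolute address through a three-level top/dir/idx table before falling back to a section offset. What follows is a minimal stand-alone sketch of that index split, not part of the driver; the shift and mask constants are assumptions chosen only so the example compiles on its own (the real values come from the ehca headers).

#include <stdio.h>

/* Assumed layout constants, for illustration only */
#define SECTSHIFT        24      /* assume 16 MB sections */
#define INDEX_MASK       0xffULL /* assume 256 entries per level */
#define DIR_INDEX_SHIFT  8
#define TOP_INDEX_SHIFT  16

static int calc_index(unsigned long long i, unsigned int s)
{
    return (int)((i >> s) & INDEX_MASK);
}

int main(void)
{
    unsigned long long abs_addr = 0x123456789abULL;

    /* Same pattern as ehca_map_vaddr(): the low SECTSHIFT bits are the
     * in-section offset, everything above selects idx, dir, then top. */
    int top = calc_index(abs_addr, TOP_INDEX_SHIFT + SECTSHIFT);
    int dir = calc_index(abs_addr, DIR_INDEX_SHIFT + SECTSHIFT);
    int idx = calc_index(abs_addr, SECTSHIFT);
    unsigned long long offset = abs_addr & ((1ULL << SECTSHIFT) - 1);

    printf("top=%d dir=%d idx=%d offset=0x%llx\n", top, dir, idx, offset);
    return 0;
}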
HofiOne/xbmc
xbmc/platform/android/peripherals/PeripheralBusAndroid.cpp
19
12322
/* * Copyright (C) 2016 Team Kodi * http://kodi.tv * * This Program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This Program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this Program; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * */ #include <android/input.h> #include "PeripheralBusAndroid.h" #include "input/joysticks/JoystickTypes.h" #include "peripherals/addons/PeripheralAddonTranslator.h" #include "peripherals/devices/PeripheralJoystick.h" #include "platform/android/activity/XBMCApp.h" #include "androidjni/View.h" #include "threads/SingleLock.h" #include "utils/log.h" #include "utils/StringUtils.h" using namespace KODI; using namespace PERIPHERALS; static const std::string DeviceLocationPrefix = "android/inputdevice/"; CPeripheralBusAndroid::CPeripheralBusAndroid(CPeripherals& manager) : CPeripheralBus("PeripBusAndroid", manager, PERIPHERAL_BUS_ANDROID) { // we don't need polling as we get notified through the IInputDeviceCallbacks interface m_bNeedsPolling = false; // register for input device callbacks CXBMCApp::RegisterInputDeviceCallbacks(this); // register for input device events CXBMCApp::RegisterInputDeviceEventHandler(this); // get all currently connected input devices m_scanResults = GetInputDevices(); } CPeripheralBusAndroid::~CPeripheralBusAndroid() { // unregister from input device events CXBMCApp::UnregisterInputDeviceEventHandler(); // unregister from input device callbacks CXBMCApp::UnregisterInputDeviceCallbacks(); } bool CPeripheralBusAndroid::InitializeProperties(CPeripheral& peripheral) { if (!CPeripheralBus::InitializeProperties(peripheral)) return false; if (peripheral.Type() != PERIPHERAL_JOYSTICK) { CLog::Log(LOGWARNING, "CPeripheralBusAndroid: invalid peripheral type"); return false; } int deviceId; if (!GetDeviceId(peripheral.Location(), deviceId)) { CLog::Log(LOGWARNING, "CPeripheralBusAndroid: failed to initialize properties for peripheral \"%s\"", peripheral.Location().c_str()); return false; } const CJNIViewInputDevice device = CXBMCApp::GetInputDevice(deviceId); if (!device) { CLog::Log(LOGWARNING, "CPeripheralBusAndroid: failed to get input device with ID %d", deviceId); return false; } CPeripheralJoystick& joystick = static_cast<CPeripheralJoystick&>(peripheral); joystick.SetRequestedPort(device.getControllerNumber()); joystick.SetProvider("android"); // prepare the joystick state CAndroidJoystickState state; if (!state.Initialize(device)) { CLog::Log(LOGWARNING, "CPeripheralBusAndroid: failed to initialize the state for input device \"%s\" with ID %d", joystick.DeviceName().c_str(), deviceId); return false; } // fill in the number of buttons, hats and axes joystick.SetButtonCount(state.GetButtonCount()); joystick.SetHatCount(state.GetHatCount()); joystick.SetAxisCount(state.GetAxisCount()); // remember the joystick state m_joystickStates.insert(std::make_pair(deviceId, std::move(state))); CLog::Log(LOGDEBUG, "CPeripheralBusAndroid: input device \"%s\" with ID %d has %u buttons, %u hats and %u axes", joystick.DeviceName().c_str(), deviceId, joystick.ButtonCount(), joystick.HatCount(), 
joystick.AxisCount()); return true; } void CPeripheralBusAndroid::Initialise(void) { CPeripheralBus::Initialise(); TriggerDeviceScan(); } void CPeripheralBusAndroid::ProcessEvents() { std::vector<kodi::addon::PeripheralEvent> events; { CSingleLock lock(m_critSectionStates); for (const auto& joystickState : m_joystickStates) joystickState.second.GetEvents(events); } for (const auto& event : events) { PeripheralPtr device = GetPeripheral(GetDeviceLocation(event.PeripheralIndex())); if (!device || device->Type() != PERIPHERAL_JOYSTICK) continue; CPeripheralJoystick* joystick = static_cast<CPeripheralJoystick*>(device.get()); switch (event.Type()) { case PERIPHERAL_EVENT_TYPE_DRIVER_BUTTON: { const bool bPressed = (event.ButtonState() == JOYSTICK_STATE_BUTTON_PRESSED); joystick->OnButtonMotion(event.DriverIndex(), bPressed); break; } case PERIPHERAL_EVENT_TYPE_DRIVER_HAT: { const JOYSTICK::HAT_STATE state = CPeripheralAddonTranslator::TranslateHatState(event.HatState()); joystick->OnHatMotion(event.DriverIndex(), state); break; } case PERIPHERAL_EVENT_TYPE_DRIVER_AXIS: { joystick->OnAxisMotion(event.DriverIndex(), event.AxisState()); break; } default: break; } } { CSingleLock lock(m_critSectionStates); for (const auto& joystickState : m_joystickStates) { PeripheralPtr device = GetPeripheral(GetDeviceLocation(joystickState.second.GetDeviceId())); if (!device || device->Type() != PERIPHERAL_JOYSTICK) continue; static_cast<CPeripheralJoystick*>(device.get())->ProcessAxisMotions(); } } } void CPeripheralBusAndroid::OnInputDeviceAdded(int deviceId) { const std::string deviceLocation = GetDeviceLocation(deviceId); { CSingleLock lock(m_critSectionResults); // add the device to the cached result list const auto& it = std::find_if(m_scanResults.m_results.cbegin(), m_scanResults.m_results.cend(), [&deviceLocation](const PeripheralScanResult& scanResult) { return scanResult.m_strLocation == deviceLocation; }); if (it != m_scanResults.m_results.cend()) { CLog::Log(LOGINFO, "CPeripheralBusAndroid: ignoring added input device with ID %d because we already know it", deviceId); return; } const CJNIViewInputDevice device = CXBMCApp::GetInputDevice(deviceId); if (!device) { CLog::Log(LOGWARNING, "CPeripheralBusAndroid: failed to add input device with ID %d because it couldn't be found", deviceId); return; } PeripheralScanResult result; if (!ConvertToPeripheralScanResult(device, result)) return; m_scanResults.m_results.push_back(result); } CLog::Log(LOGDEBUG, "CPeripheralBusAndroid: input device with ID %d added", deviceId); OnDeviceAdded(deviceLocation); } void CPeripheralBusAndroid::OnInputDeviceChanged(int deviceId) { bool changed = false; const std::string deviceLocation = GetDeviceLocation(deviceId); { CSingleLock lock(m_critSectionResults); // change the device in the cached result list for (auto result = m_scanResults.m_results.begin(); result != m_scanResults.m_results.end(); ++result) { if (result->m_strLocation == deviceLocation) { const CJNIViewInputDevice device = CXBMCApp::GetInputDevice(deviceId); if (!device) { CLog::Log(LOGWARNING, "CPeripheralBusAndroid: failed to update input device \"%s\" with ID %d because it couldn't be found", result->m_strDeviceName.c_str(), deviceId); return; } if (!ConvertToPeripheralScanResult(device, *result)) return; CLog::Log(LOGINFO, "CPeripheralBusAndroid: input device \"%s\" with ID %d updated", result->m_strDeviceName.c_str(), deviceId); changed = true; break; } } } if (changed) OnDeviceChanged(deviceLocation); else CLog::Log(LOGWARNING, "CPeripheralBusAndroid: 
failed to update input device with ID %d because it couldn't be found", deviceId); } void CPeripheralBusAndroid::OnInputDeviceRemoved(int deviceId) { bool removed = false; const std::string deviceLocation = GetDeviceLocation(deviceId); { CSingleLock lock(m_critSectionResults); // remove the device from the cached result list for (auto result = m_scanResults.m_results.begin(); result != m_scanResults.m_results.end(); ++result) { if (result->m_strLocation == deviceLocation) { CLog::Log(LOGINFO, "CPeripheralBusAndroid: input device \"%s\" with ID %d removed", result->m_strDeviceName.c_str(), deviceId); m_scanResults.m_results.erase(result); removed = true; break; } } } if (removed) { m_joystickStates.erase(deviceId); OnDeviceRemoved(deviceLocation); } else CLog::Log(LOGWARNING, "CPeripheralBusAndroid: failed to remove input device with ID %d because it couldn't be found", deviceId); } bool CPeripheralBusAndroid::OnInputDeviceEvent(const AInputEvent* event) { if (event == nullptr) return false; CSingleLock lock(m_critSectionStates); // get the id of the input device which generated the event int32_t deviceId = AInputEvent_getDeviceId(event); // find the matching joystick state auto joystickState = m_joystickStates.find(deviceId); if (joystickState == m_joystickStates.end()) { CLog::Log(LOGWARNING, "CPeripheralBusAndroid: ignoring input event for unknown input device with ID %d", deviceId); return false; } return joystickState->second.ProcessEvent(event); } bool CPeripheralBusAndroid::PerformDeviceScan(PeripheralScanResults &results) { CSingleLock lock(m_critSectionResults); results = m_scanResults; return true; } PeripheralScanResults CPeripheralBusAndroid::GetInputDevices() { CLog::Log(LOGINFO, "CPeripheralBusAndroid: scanning for input devices..."); PeripheralScanResults results; std::vector<int> deviceIds = CXBMCApp::GetInputDeviceIds(); for (const auto& deviceId : deviceIds) { const CJNIViewInputDevice device = CXBMCApp::GetInputDevice(deviceId); if (!device) { CLog::Log(LOGWARNING, "CPeripheralBusAndroid: no input device with ID %d found", deviceId); continue; } PeripheralScanResult result; if (!ConvertToPeripheralScanResult(device, result)) continue; CLog::Log(LOGINFO, "CPeripheralBusAndroid: input device \"%s\" with ID %d detected", result.m_strDeviceName.c_str(), deviceId); results.m_results.push_back(result); } return results; } std::string CPeripheralBusAndroid::GetDeviceLocation(int deviceId) { return StringUtils::Format("%s%d", DeviceLocationPrefix.c_str(), deviceId); } bool CPeripheralBusAndroid::GetDeviceId(const std::string& deviceLocation, int& deviceId) { if (deviceLocation.empty() || !StringUtils::StartsWith(deviceLocation, DeviceLocationPrefix) || deviceLocation.size() <= DeviceLocationPrefix.size()) return false; std::string strDeviceId = deviceLocation.substr(DeviceLocationPrefix.size()); if (!StringUtils::IsNaturalNumber(strDeviceId)) return false; deviceId = static_cast<int>(strtol(strDeviceId.c_str(), nullptr, 10)); return true; } bool CPeripheralBusAndroid::ConvertToPeripheralScanResult(const CJNIViewInputDevice& inputDevice, PeripheralScanResult& peripheralScanResult) { int deviceId = inputDevice.getId(); std::string deviceName = inputDevice.getName(); if (inputDevice.isVirtual()) { CLog::Log(LOGDEBUG, "CPeripheralBusAndroid: ignoring virtual input device \"%s\" with ID %d", deviceName.c_str(), deviceId); return false; } if (!inputDevice.supportsSource(CJNIViewInputDevice::SOURCE_JOYSTICK) && !inputDevice.supportsSource(CJNIViewInputDevice::SOURCE_GAMEPAD)) { 
CLog::Log(LOGDEBUG, "CPeripheralBusAndroid: ignoring unknown input device \"%s\" with ID %d", deviceName.c_str(), deviceId); return false; } peripheralScanResult.m_type = PERIPHERAL_JOYSTICK; peripheralScanResult.m_strLocation = GetDeviceLocation(deviceId); peripheralScanResult.m_iVendorId = inputDevice.getVendorId(); peripheralScanResult.m_iProductId = inputDevice.getProductId(); peripheralScanResult.m_mappedType = PERIPHERAL_JOYSTICK; peripheralScanResult.m_strDeviceName = deviceName; peripheralScanResult.m_busType = PERIPHERAL_BUS_ANDROID; peripheralScanResult.m_mappedBusType = PERIPHERAL_BUS_ANDROID; peripheralScanResult.m_iSequence = 0; return true; }
gpl-2.0
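The Kodi bus above encodes each peripheral as an "android/inputdevice/<id>" string (GetDeviceLocation/GetDeviceId). Here is a hypothetical stand-alone C sketch of the same build/parse round-trip; the prefix comes from the file, while the helper names and validation details are illustrative (the real code uses StringUtils::Format and IsNaturalNumber).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char prefix[] = "android/inputdevice/";

/* Hypothetical helper: build the location string for a device id */
static void device_location(int device_id, char *buf, size_t len)
{
    snprintf(buf, len, "%s%d", prefix, device_id);
}

/* Hypothetical helper: recover the id; returns 1 on success, 0 otherwise */
static int device_id_from_location(const char *loc, int *device_id)
{
    size_t plen = strlen(prefix);
    char *end;
    long id;

    if (strncmp(loc, prefix, plen) != 0 || loc[plen] == '\0')
        return 0;   /* wrong prefix or no id portion */
    id = strtol(loc + plen, &end, 10);
    if (*end != '\0' || id < 0)
        return 0;   /* trailing junk or negative id */
    *device_id = (int)id;
    return 1;
}

int main(void)
{
    char loc[64];
    int id;

    device_location(5, loc, sizeof(loc));
    if (device_id_from_location(loc, &id))
        printf("%s -> %d\n", loc, id);
    return 0;
}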
GuneetAtwal/kernel_g900p
drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
19
34012
/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/mutex.h>
#include <linux/io.h>
#include <media/v4l2-subdev.h>
#include <linux/ratelimit.h>
#include "msm.h"
#include "msm_isp_util.h"
#include "msm_isp_axi_util.h"
#include "msm_isp_stats_util.h"
#include "msm_camera_io_util.h"

#define MAX_ISP_V4l2_EVENTS 100
static DEFINE_MUTEX(bandwidth_mgr_mutex);
static struct msm_isp_bandwidth_mgr isp_bandwidth_mgr;

/* Parenthesized so the 3/2 scaling survives any surrounding arithmetic */
#define MSM_ISP_MIN_AB (300000000ULL * 3 / 2)
#define MSM_ISP_MIN_IB (450000000ULL * 3 / 2)
#define MSM_ISP_MIN_AB_RECORD 300000000ULL
#define MSM_ISP_MIN_IB_RECORD 450000000ULL

extern int32_t isp_recording_hint;

static struct msm_bus_vectors msm_isp_init_vectors[] = {
    {
        .src = MSM_BUS_MASTER_VFE,
        .dst = MSM_BUS_SLAVE_EBI_CH0,
        .ab = 0,
        .ib = 0,
    },
};

static struct msm_bus_vectors msm_isp_ping_vectors[] = {
    {
        .src = MSM_BUS_MASTER_VFE,
        .dst = MSM_BUS_SLAVE_EBI_CH0,
        .ab = MSM_ISP_MIN_AB,
        .ib = (uint64_t)MSM_ISP_MIN_IB,
    },
};

static struct msm_bus_vectors msm_isp_pong_vectors[] = {
    {
        .src = MSM_BUS_MASTER_VFE,
        .dst = MSM_BUS_SLAVE_EBI_CH0,
        .ab = MSM_ISP_MIN_AB,
        .ib = (uint64_t)MSM_ISP_MIN_IB,
    },
};

static struct msm_bus_paths msm_isp_bus_client_config[] = {
    {
        ARRAY_SIZE(msm_isp_init_vectors),
        msm_isp_init_vectors,
    },
    {
        ARRAY_SIZE(msm_isp_ping_vectors),
        msm_isp_ping_vectors,
    },
    {
        ARRAY_SIZE(msm_isp_pong_vectors),
        msm_isp_pong_vectors,
    },
};

static struct msm_bus_scale_pdata msm_isp_bus_client_pdata = {
    msm_isp_bus_client_config,
    ARRAY_SIZE(msm_isp_bus_client_config),
    .name = "msm_camera_isp",
};

int msm_isp_init_bandwidth_mgr(enum msm_isp_hw_client client)
{
    int rc = 0;

    mutex_lock(&bandwidth_mgr_mutex);
    isp_bandwidth_mgr.client_info[client].active = 1;
    if (isp_bandwidth_mgr.use_count++) {
        mutex_unlock(&bandwidth_mgr_mutex);
        return rc;
    }
    isp_bandwidth_mgr.bus_client =
        msm_bus_scale_register_client(&msm_isp_bus_client_pdata);
    if (!isp_bandwidth_mgr.bus_client) {
        pr_err("%s: client register failed\n", __func__);
        mutex_unlock(&bandwidth_mgr_mutex);
        return -EINVAL;
    }
    isp_bandwidth_mgr.bus_vector_active_idx = 1;
    msm_bus_scale_client_update_request(
        isp_bandwidth_mgr.bus_client,
        isp_bandwidth_mgr.bus_vector_active_idx);
    mutex_unlock(&bandwidth_mgr_mutex);
    return 0;
}

int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
    uint64_t ab, uint64_t ib)
{
    int i;
    struct msm_bus_paths *path;

    mutex_lock(&bandwidth_mgr_mutex);
    if (!isp_bandwidth_mgr.use_count ||
        !isp_bandwidth_mgr.bus_client) {
        pr_err("%s:error bandwidth manager inactive use_cnt:%d bus_clnt:%d\n",
            __func__, isp_bandwidth_mgr.use_count,
            isp_bandwidth_mgr.bus_client);
        /* was missing: unlock before bailing out, or the mutex leaks */
        mutex_unlock(&bandwidth_mgr_mutex);
        return -EINVAL;
    }

    isp_bandwidth_mgr.client_info[client].ab = ab;
    isp_bandwidth_mgr.client_info[client].ib = ib;
    ALT_VECTOR_IDX(isp_bandwidth_mgr.bus_vector_active_idx);
    path = &(msm_isp_bus_client_pdata.usecase[
        isp_bandwidth_mgr.bus_vector_active_idx]);
    if (isp_recording_hint == 1) {
        pr_err("%s: [syscamera] RECORD\n", __func__);
        path->vectors[0].ab = MSM_ISP_MIN_AB_RECORD;
        path->vectors[0].ib = MSM_ISP_MIN_IB_RECORD;
    } else {
        pr_err("%s: [syscamera] CAMERA\n", __func__);
        path->vectors[0].ab = MSM_ISP_MIN_AB;
path->vectors[0].ib = MSM_ISP_MIN_IB; } for (i = 0; i < MAX_ISP_CLIENT; i++) { if (isp_bandwidth_mgr.client_info[i].active) { path->vectors[0].ab += isp_bandwidth_mgr.client_info[i].ab; path->vectors[0].ib += isp_bandwidth_mgr.client_info[i].ib; } } msm_bus_scale_client_update_request(isp_bandwidth_mgr.bus_client, isp_bandwidth_mgr.bus_vector_active_idx); mutex_unlock(&bandwidth_mgr_mutex); return 0; } void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client) { mutex_lock(&bandwidth_mgr_mutex); memset(&isp_bandwidth_mgr.client_info[client], 0, sizeof(struct msm_isp_bandwidth_info)); if (--isp_bandwidth_mgr.use_count) { mutex_unlock(&bandwidth_mgr_mutex); return; } if (!isp_bandwidth_mgr.bus_client) { pr_err("%s:%d error: bus client invalid\n", __func__, __LINE__); mutex_unlock(&bandwidth_mgr_mutex); return; } msm_bus_scale_client_update_request( isp_bandwidth_mgr.bus_client, 0); msm_bus_scale_unregister_client(isp_bandwidth_mgr.bus_client); isp_bandwidth_mgr.bus_client = 0; mutex_unlock(&bandwidth_mgr_mutex); } uint32_t msm_isp_get_framedrop_period( enum msm_vfe_frame_skip_pattern frame_skip_pattern) { switch (frame_skip_pattern) { case NO_SKIP: case EVERY_2FRAME: case EVERY_3FRAME: case EVERY_4FRAME: case EVERY_5FRAME: case EVERY_6FRAME: case EVERY_7FRAME: case EVERY_8FRAME: case EVERY_9FRAME: case EVERY_10FRAME: case EVERY_11FRAME: case EVERY_12FRAME: case EVERY_13FRAME: case EVERY_14FRAME: case EVERY_15FRAME: return frame_skip_pattern + 1; case EVERY_16FRAME: return 16; break; case EVERY_32FRAME: return 32; break; case SKIP_ALL: return 1; default: return 1; } return 1; } static inline void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp) { struct timespec ts; ktime_get_ts(&ts); time_stamp->buf_time.tv_sec = ts.tv_sec; time_stamp->buf_time.tv_usec = ts.tv_nsec / 1000; do_gettimeofday(&(time_stamp->event_time)); } int msm_isp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, struct v4l2_event_subscription *sub) { struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd); int rc = 0; rc = v4l2_event_subscribe(fh, sub, MAX_ISP_V4l2_EVENTS); if (rc == 0) { if (sub->type == V4L2_EVENT_ALL) { int i; vfe_dev->axi_data.event_mask = 0; for (i = 0; i < ISP_EVENT_MAX; i++) vfe_dev->axi_data.event_mask |= (1 << i); } else { int event_idx = sub->type - ISP_EVENT_BASE; vfe_dev->axi_data.event_mask |= (1 << event_idx); } } return rc; } int msm_isp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, struct v4l2_event_subscription *sub) { struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd); int rc = 0; rc = v4l2_event_unsubscribe(fh, sub); if (sub->type == V4L2_EVENT_ALL) { vfe_dev->axi_data.event_mask = 0; } else { int event_idx = sub->type - ISP_EVENT_BASE; vfe_dev->axi_data.event_mask &= ~(1 << event_idx); } return rc; } static int msm_isp_set_clk_rate(struct vfe_device *vfe_dev, long *rate) { int rc = 0; int clk_idx = vfe_dev->hw_info->vfe_clk_idx; long round_rate = clk_round_rate(vfe_dev->vfe_clk[clk_idx], *rate); if (round_rate < 0) { pr_err("%s: Invalid vfe clock rate\n", __func__); return round_rate; } rc = clk_set_rate(vfe_dev->vfe_clk[clk_idx], round_rate); if (rc < 0) { pr_err("%s: Vfe set rate error\n", __func__); return rc; } *rate = round_rate; return 0; } int msm_isp_cfg_pix(struct vfe_device *vfe_dev, struct msm_vfe_input_cfg *input_cfg) { int rc = 0; if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) { pr_err("%s: pixel path is active\n", __func__); return -EINVAL; } vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock = input_cfg->input_pix_clk; 
vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux = input_cfg->d.pix_cfg.input_mux; vfe_dev->axi_data.src_info[VFE_PIX_0].width = input_cfg->d.pix_cfg.camif_cfg.pixels_per_line; rc = msm_isp_set_clk_rate(vfe_dev, &vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock); if (rc < 0) { pr_err("%s: clock set rate failed\n", __func__); return rc; } vfe_dev->hw_info->vfe_ops.core_ops.cfg_camif( vfe_dev, &input_cfg->d.pix_cfg); return rc; } int msm_isp_cfg_rdi(struct vfe_device *vfe_dev, struct msm_vfe_input_cfg *input_cfg) { int rc = 0; if (vfe_dev->axi_data.src_info[input_cfg->input_src].active) { pr_err("%s: RAW%d path is active\n", __func__, input_cfg->input_src - VFE_RAW_0); return -EINVAL; } vfe_dev->axi_data.src_info[input_cfg->input_src].pixel_clock = input_cfg->input_pix_clk; vfe_dev->hw_info->vfe_ops.core_ops.cfg_rdi_reg( vfe_dev, &input_cfg->d.rdi_cfg, input_cfg->input_src); rc = msm_isp_set_clk_rate(vfe_dev, &vfe_dev->axi_data.src_info[input_cfg->input_src].pixel_clock); if (rc < 0) { pr_err("%s: clock set rate failed\n", __func__); return rc; } return rc; } int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg) { int rc = 0; struct msm_vfe_input_cfg *input_cfg = arg; switch (input_cfg->input_src) { case VFE_PIX_0: rc = msm_isp_cfg_pix(vfe_dev, input_cfg); break; case VFE_RAW_0: case VFE_RAW_1: case VFE_RAW_2: rc = msm_isp_cfg_rdi(vfe_dev, input_cfg); break; default: pr_err("%s: Invalid input source\n", __func__); rc = -EINVAL; } return rc; } long msm_isp_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { long rc = 0; struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd); /* Use real time mutex for hard real-time ioctls such as * buffer operations and register updates. * Use core mutex for other ioctls that could take * longer time to complete such as start/stop ISP streams * which blocks until the hardware start/stop streaming */ ISP_DBG("%s cmd: %d\n", __func__, _IOC_TYPE(cmd)); switch (cmd) { case VIDIOC_MSM_VFE_REG_CFG: { mutex_lock(&vfe_dev->realtime_mutex); rc = msm_isp_proc_cmd(vfe_dev, arg); mutex_unlock(&vfe_dev->realtime_mutex); break; } case VIDIOC_MSM_ISP_REQUEST_BUF: case VIDIOC_MSM_ISP_ENQUEUE_BUF: case VIDIOC_MSM_ISP_RELEASE_BUF: { mutex_lock(&vfe_dev->realtime_mutex); rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr, cmd, arg); mutex_unlock(&vfe_dev->realtime_mutex); break; } case VIDIOC_MSM_ISP_REQUEST_STREAM: mutex_lock(&vfe_dev->core_mutex); rc = msm_isp_request_axi_stream(vfe_dev, arg); mutex_unlock(&vfe_dev->core_mutex); break; case VIDIOC_MSM_ISP_RELEASE_STREAM: mutex_lock(&vfe_dev->core_mutex); rc = msm_isp_release_axi_stream(vfe_dev, arg); mutex_unlock(&vfe_dev->core_mutex); break; case VIDIOC_MSM_ISP_CFG_STREAM: mutex_lock(&vfe_dev->core_mutex); rc = msm_isp_cfg_axi_stream(vfe_dev, arg); mutex_unlock(&vfe_dev->core_mutex); break; case VIDIOC_MSM_ISP_AXI_HALT: mutex_lock(&vfe_dev->core_mutex); rc = msm_isp_axi_halt(vfe_dev, arg); mutex_unlock(&vfe_dev->core_mutex); break; case VIDIOC_MSM_ISP_AXI_RESET: mutex_lock(&vfe_dev->core_mutex); rc = msm_isp_axi_reset(vfe_dev, arg); mutex_unlock(&vfe_dev->core_mutex); break; case VIDIOC_MSM_ISP_AXI_RESTART: mutex_lock(&vfe_dev->core_mutex); rc = msm_isp_axi_restart(vfe_dev, arg); mutex_unlock(&vfe_dev->core_mutex); break; case VIDIOC_MSM_ISP_INPUT_CFG: mutex_lock(&vfe_dev->core_mutex); rc = msm_isp_cfg_input(vfe_dev, arg); mutex_unlock(&vfe_dev->core_mutex); break; case VIDIOC_MSM_ISP_SET_SRC_STATE: mutex_lock(&vfe_dev->core_mutex); msm_isp_set_src_state(vfe_dev, arg); mutex_unlock(&vfe_dev->core_mutex); break; case 
VIDIOC_MSM_ISP_REQUEST_STATS_STREAM: mutex_lock(&vfe_dev->core_mutex); rc = msm_isp_request_stats_stream(vfe_dev, arg); mutex_unlock(&vfe_dev->core_mutex); break; case VIDIOC_MSM_ISP_RELEASE_STATS_STREAM: mutex_lock(&vfe_dev->core_mutex); rc = msm_isp_release_stats_stream(vfe_dev, arg); mutex_unlock(&vfe_dev->core_mutex); break; case VIDIOC_MSM_ISP_CFG_STATS_STREAM: mutex_lock(&vfe_dev->core_mutex); rc = msm_isp_cfg_stats_stream(vfe_dev, arg); mutex_unlock(&vfe_dev->core_mutex); break; case VIDIOC_MSM_ISP_UPDATE_STREAM: mutex_lock(&vfe_dev->core_mutex); rc = msm_isp_update_axi_stream(vfe_dev, arg); mutex_unlock(&vfe_dev->core_mutex); break; case MSM_SD_SHUTDOWN: while (vfe_dev->vfe_open_cnt != 0) msm_isp_close_node(sd, NULL); rc = 0; break; default: pr_err("%s: Invalid ISP command\n", __func__); rc = -EINVAL; } return rc; } static int msm_isp_send_hw_cmd(struct vfe_device *vfe_dev, struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd, uint32_t *cfg_data, uint32_t cmd_len) { switch (reg_cfg_cmd->cmd_type) { case VFE_WRITE: { if (resource_size(vfe_dev->vfe_mem) < (reg_cfg_cmd->u.rw_info.reg_offset + reg_cfg_cmd->u.rw_info.len)) { pr_err("%s: Invalid length\n", __func__); return -EINVAL; } msm_camera_io_memcpy(vfe_dev->vfe_base + reg_cfg_cmd->u.rw_info.reg_offset, cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset / 4, reg_cfg_cmd->u.rw_info.len); break; } case VFE_WRITE_MB: { uint32_t *data_ptr = cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset / 4; msm_camera_io_w_mb(*data_ptr, vfe_dev->vfe_base + reg_cfg_cmd->u.rw_info.reg_offset); break; } case VFE_CFG_MASK: { uint32_t temp; temp = msm_camera_io_r(vfe_dev->vfe_base + reg_cfg_cmd->u.mask_info.reg_offset); temp &= ~reg_cfg_cmd->u.mask_info.mask; temp |= reg_cfg_cmd->u.mask_info.val; msm_camera_io_w(temp, vfe_dev->vfe_base + reg_cfg_cmd->u.mask_info.reg_offset); break; } case VFE_WRITE_DMI_16BIT: case VFE_WRITE_DMI_32BIT: case VFE_WRITE_DMI_64BIT: { int i; uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL; uint32_t hi_val, lo_val, lo_val1; if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT) { if (reg_cfg_cmd->u.dmi_info.hi_tbl_offset + reg_cfg_cmd->u.dmi_info.len > cmd_len) { pr_err("Invalid Hi Table out of bounds\n"); return -EINVAL; } hi_tbl_ptr = cfg_data + reg_cfg_cmd->u.dmi_info.hi_tbl_offset / 4; } if (reg_cfg_cmd->u.dmi_info.lo_tbl_offset + reg_cfg_cmd->u.dmi_info.len > cmd_len) { pr_err("Invalid Lo Table out of bounds\n"); return -EINVAL; } lo_tbl_ptr = cfg_data + reg_cfg_cmd->u.dmi_info.lo_tbl_offset / 4; if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT) reg_cfg_cmd->u.dmi_info.len = reg_cfg_cmd->u.dmi_info.len / 2; for (i = 0; i < reg_cfg_cmd->u.dmi_info.len / 4; i++) { lo_val = *lo_tbl_ptr++; if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_16BIT) { lo_val1 = lo_val & 0x0000FFFF; lo_val = (lo_val & 0xFFFF0000) >> 16; msm_camera_io_w(lo_val1, vfe_dev->vfe_base + vfe_dev->hw_info->dmi_reg_offset + 0x4); } else if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT) { lo_tbl_ptr++; hi_val = *hi_tbl_ptr; hi_tbl_ptr = hi_tbl_ptr + 2; msm_camera_io_w(hi_val, vfe_dev->vfe_base + vfe_dev->hw_info->dmi_reg_offset); } msm_camera_io_w(lo_val, vfe_dev->vfe_base + vfe_dev->hw_info->dmi_reg_offset + 0x4); } break; } case VFE_READ_DMI_16BIT: case VFE_READ_DMI_32BIT: case VFE_READ_DMI_64BIT: { int i; uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL; uint32_t hi_val, lo_val, lo_val1; if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) { if (reg_cfg_cmd->u.dmi_info.hi_tbl_offset + reg_cfg_cmd->u.dmi_info.len > cmd_len) { pr_err("Invalid Hi Table out of bounds\n"); return -EINVAL; 
} hi_tbl_ptr = cfg_data + reg_cfg_cmd->u.dmi_info.hi_tbl_offset / 4; } if (reg_cfg_cmd->u.dmi_info.lo_tbl_offset + reg_cfg_cmd->u.dmi_info.len > cmd_len) { pr_err("Invalid Lo Table out of bounds\n"); return -EINVAL; } lo_tbl_ptr = cfg_data + reg_cfg_cmd->u.dmi_info.lo_tbl_offset / 4; for (i = 0; i < reg_cfg_cmd->u.dmi_info.len / 4; i++) { if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) { hi_val = msm_camera_io_r(vfe_dev->vfe_base + vfe_dev->hw_info->dmi_reg_offset); *hi_tbl_ptr++ = hi_val; } lo_val = msm_camera_io_r(vfe_dev->vfe_base + vfe_dev->hw_info->dmi_reg_offset + 0x4); if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_16BIT) { lo_val1 = msm_camera_io_r(vfe_dev->vfe_base + vfe_dev->hw_info->dmi_reg_offset + 0x4); lo_val |= lo_val1 << 16; } *lo_tbl_ptr++ = lo_val; } break; } case VFE_READ: { int i; uint32_t *data_ptr = cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset / 4; for (i = 0; i < reg_cfg_cmd->u.rw_info.len / 4; i++) { *data_ptr++ = msm_camera_io_r(vfe_dev->vfe_base + reg_cfg_cmd->u.rw_info.reg_offset); reg_cfg_cmd->u.rw_info.reg_offset += 4; } break; } } return 0; } int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg) { int rc = 0, i; struct msm_vfe_cfg_cmd2 *proc_cmd = arg; struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd; uint32_t *cfg_data; reg_cfg_cmd = kzalloc(sizeof(struct msm_vfe_reg_cfg_cmd) * proc_cmd->num_cfg, GFP_KERNEL); if (!reg_cfg_cmd) { pr_err("%s: reg_cfg alloc failed\n", __func__); rc = -ENOMEM; goto reg_cfg_failed; } cfg_data = kzalloc(proc_cmd->cmd_len, GFP_KERNEL); if (!cfg_data) { pr_err("%s: cfg_data alloc failed\n", __func__); rc = -ENOMEM; goto cfg_data_failed; } if (copy_from_user(reg_cfg_cmd, (void __user*)(proc_cmd->cfg_cmd), sizeof(struct msm_vfe_reg_cfg_cmd) * proc_cmd->num_cfg)) { rc = -EFAULT; goto copy_cmd_failed; } if (copy_from_user(cfg_data, (void __user*)(proc_cmd->cfg_data), proc_cmd->cmd_len)) { rc = -EFAULT; goto copy_cmd_failed; } for (i = 0; i < proc_cmd->num_cfg; i++) msm_isp_send_hw_cmd(vfe_dev, &reg_cfg_cmd[i], cfg_data, proc_cmd->cmd_len); if (copy_to_user(proc_cmd->cfg_data, cfg_data, proc_cmd->cmd_len)) { rc = -EFAULT; goto copy_cmd_failed; } copy_cmd_failed: kfree(cfg_data); cfg_data_failed: kfree(reg_cfg_cmd); reg_cfg_failed: return rc; } int msm_isp_send_event(struct vfe_device *vfe_dev, uint32_t event_type, struct msm_isp_event_data *event_data) { struct v4l2_event isp_event; memset(&isp_event, 0, sizeof(struct v4l2_event)); isp_event.id = 0; isp_event.type = event_type; memcpy(&isp_event.u.data[0], event_data, sizeof(struct msm_isp_event_data)); v4l2_event_queue(vfe_dev->subdev.sd.devnode, &isp_event); return 0; } #define CAL_WORD(width, M, N) ((width * M + N - 1) / N) int msm_isp_cal_word_per_line(uint32_t output_format, uint32_t pixel_per_line) { int val = -1; switch (output_format) { case V4L2_PIX_FMT_SBGGR8: case V4L2_PIX_FMT_SGBRG8: case V4L2_PIX_FMT_SGRBG8: case V4L2_PIX_FMT_SRGGB8: case V4L2_PIX_FMT_QBGGR8: case V4L2_PIX_FMT_QGBRG8: case V4L2_PIX_FMT_QGRBG8: case V4L2_PIX_FMT_QRGGB8: case V4L2_PIX_FMT_JPEG: case V4L2_PIX_FMT_META: val = CAL_WORD(pixel_per_line, 1, 8); break; case V4L2_PIX_FMT_SBGGR10: case V4L2_PIX_FMT_SGBRG10: case V4L2_PIX_FMT_SGRBG10: case V4L2_PIX_FMT_SRGGB10: val = CAL_WORD(pixel_per_line, 5, 32); break; case V4L2_PIX_FMT_SBGGR12: case V4L2_PIX_FMT_SGBRG12: case V4L2_PIX_FMT_SGRBG12: case V4L2_PIX_FMT_SRGGB12: val = CAL_WORD(pixel_per_line, 3, 16); break; case V4L2_PIX_FMT_QBGGR10: case V4L2_PIX_FMT_QGBRG10: case V4L2_PIX_FMT_QGRBG10: case V4L2_PIX_FMT_QRGGB10: val = CAL_WORD(pixel_per_line, 1, 6); 
break; case V4L2_PIX_FMT_QBGGR12: case V4L2_PIX_FMT_QGBRG12: case V4L2_PIX_FMT_QGRBG12: case V4L2_PIX_FMT_QRGGB12: val = CAL_WORD(pixel_per_line, 1, 5); break; case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV14: case V4L2_PIX_FMT_NV41: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: case V4L2_PIX_FMT_NV46: case V4L2_PIX_FMT_NV64: val = CAL_WORD(pixel_per_line, 1, 8); break; /*TD: Add more image format*/ default: pr_err("%s: Invalid output format\n", __func__); break; } return val; } int msm_isp_get_bit_per_pixel(uint32_t output_format) { switch (output_format) { case V4L2_PIX_FMT_SBGGR8: case V4L2_PIX_FMT_SGBRG8: case V4L2_PIX_FMT_SGRBG8: case V4L2_PIX_FMT_SRGGB8: case V4L2_PIX_FMT_QBGGR8: case V4L2_PIX_FMT_QGBRG8: case V4L2_PIX_FMT_QGRBG8: case V4L2_PIX_FMT_QRGGB8: case V4L2_PIX_FMT_JPEG: case V4L2_PIX_FMT_META: return 8; case V4L2_PIX_FMT_SBGGR10: case V4L2_PIX_FMT_SGBRG10: case V4L2_PIX_FMT_SGRBG10: case V4L2_PIX_FMT_SRGGB10: case V4L2_PIX_FMT_QBGGR10: case V4L2_PIX_FMT_QGBRG10: case V4L2_PIX_FMT_QGRBG10: case V4L2_PIX_FMT_QRGGB10: return 10; case V4L2_PIX_FMT_SBGGR12: case V4L2_PIX_FMT_SGBRG12: case V4L2_PIX_FMT_SGRBG12: case V4L2_PIX_FMT_SRGGB12: case V4L2_PIX_FMT_QBGGR12: case V4L2_PIX_FMT_QGBRG12: case V4L2_PIX_FMT_QGRBG12: case V4L2_PIX_FMT_QRGGB12: return 12; case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV14: case V4L2_PIX_FMT_NV41: case V4L2_PIX_FMT_NV64: case V4L2_PIX_FMT_NV46: return 8; case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: return 16; /*TD: Add more image format*/ default: pr_err("%s: Invalid output format\n", __func__); return 10; } } void msm_isp_update_error_frame_count(struct vfe_device *vfe_dev) { struct msm_vfe_error_info *error_info = &vfe_dev->error_info; error_info->info_dump_frame_count++; if (error_info->info_dump_frame_count == 0) error_info->info_dump_frame_count++; } void msm_isp_process_error_info(struct vfe_device *vfe_dev) { int i; struct msm_vfe_error_info *error_info = &vfe_dev->error_info; static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); static DEFINE_RATELIMIT_STATE(rs_stats, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); // if (error_info->error_count == 1 || // !(error_info->info_dump_frame_count % 100)) { if (1) { vfe_dev->hw_info->vfe_ops.core_ops. process_error_status(vfe_dev); error_info->error_mask0 = 0; error_info->error_mask1 = 0; error_info->camif_status = 0; error_info->violation_status = 0; for (i = 0; i < MAX_NUM_STREAM; i++) { if (error_info->stream_framedrop_count[i] != 0 && __ratelimit(&rs)) { pr_err("%s: No buffers! VFE%d, Frame Stream[%d]: dropped %d frames\n", __func__, vfe_dev->pdev->id, i, error_info->stream_framedrop_count[i]); error_info->stream_framedrop_count[i] = 0; } } for (i = 0; i < MSM_ISP_STATS_MAX; i++) { if (error_info->stats_framedrop_count[i] != 0 && __ratelimit(&rs_stats)) { pr_err("%s: No buffers! 
VFE%d, Stats stream[%d]: dropped %d frames\n", __func__, vfe_dev->pdev->id, i, error_info->stats_framedrop_count[i]); error_info->stats_framedrop_count[i] = 0; } } } } static inline void msm_isp_update_error_info(struct vfe_device *vfe_dev, uint32_t error_mask0, uint32_t error_mask1) { vfe_dev->error_info.error_mask0 |= error_mask0; vfe_dev->error_info.error_mask1 |= error_mask1; vfe_dev->error_info.error_count++; } static void msm_isp_process_overflow_irq( struct vfe_device *vfe_dev, uint32_t *irq_status0, uint32_t *irq_status1) { uint32_t overflow_mask; //, rdi_wm_mask; if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id == 0 && vfe_dev->axi_data.src_info[VFE_RAW_0].frame_id == 0) { pr_err("%s abhishek first frame. Skip \n", __func__); } /*Mask out all other irqs if recovery is started*/ if (atomic_read(&vfe_dev->error_info.overflow_state) != NO_OVERFLOW) { uint32_t halt_restart_mask0, halt_restart_mask1; vfe_dev->hw_info->vfe_ops.core_ops. get_halt_restart_mask(&halt_restart_mask0, &halt_restart_mask1); *irq_status0 &= halt_restart_mask0; *irq_status1 &= halt_restart_mask1; return; } /*Check if any overflow bit is set*/ vfe_dev->hw_info->vfe_ops.core_ops. get_overflow_mask(&overflow_mask); overflow_mask &= *irq_status1; // vfe_dev->hw_info->vfe_ops.core_ops. // get_rdi_wm_mask(vfe_dev, &rdi_wm_mask); /* if (((overflow_mask & 0xFE00) >> 9) & rdi_wm_mask) { } */ if (overflow_mask) { struct msm_isp_event_data error_event; pr_err("%s: abhishek Bus overflow detected: 0x%x\n", __func__, overflow_mask); atomic_set(&vfe_dev->error_info.overflow_state, OVERFLOW_DETECTED); pr_err("%s: abhishek Start bus overflow recovery\n", __func__); /*Store current IRQ mask*/ vfe_dev->hw_info->vfe_ops.core_ops.get_irq_mask(vfe_dev, &vfe_dev->error_info.overflow_recover_irq_mask0, &vfe_dev->error_info.overflow_recover_irq_mask1); /*Halt the hardware & Clear all other IRQ mask*/ vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 0); pr_err("%s abhishek HALT vfe_dev %p \n", __func__, vfe_dev); /*Stop CAMIF Immediately*/ vfe_dev->hw_info->vfe_ops.core_ops. update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY); /*Update overflow state*/ // atomic_set(&vfe_dev->error_info.overflow_state, HALT_REQUESTED); *irq_status0 = 0; *irq_status1 = 0; // overflow_mask &= ~(rdi_wm_mask << 9); pr_err("%s: abhishek Error! RDI overflow detected. Notify ISPIF to reset overflow_mask 0x%x\n", __func__, overflow_mask); /* frame id should be of src_info[overflow RDI WM]. 
For now take RAW_0 since RAW_0 * * will be the first to be allocated anyway */ error_event.frame_id = vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id; // error_event.input_src = VFE_RAW_0; /* Only support single RDI usecase currently */ error_event.u.error_info.error_mask = (1 << ISP_WM_BUS_OVERFLOW); msm_isp_send_event(vfe_dev, ISP_EVENT_ERROR, &error_event); } } #if 1 void msm_isp_reset_burst_count_and_frame_drop( struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info) { struct msm_vfe_axi_stream_request_cmd stream_cfg_cmd; if (stream_info->state != ACTIVE || stream_info->stream_type != BURST_STREAM) { return; } if (stream_info->stream_type == BURST_STREAM && stream_info->num_burst_capture != 0) { stream_cfg_cmd.axi_stream_handle = stream_info->stream_handle; stream_cfg_cmd.burst_count = stream_info->num_burst_capture; stream_cfg_cmd.frame_skip_pattern = stream_info->frame_skip_pattern; stream_cfg_cmd.init_frame_drop = stream_info->init_frame_drop; msm_isp_calculate_framedrop(&vfe_dev->axi_data, &stream_cfg_cmd); msm_isp_reset_framedrop(vfe_dev, stream_info); } } #endif #if 0 static inline void msm_isp_process_overflow_recovery( struct vfe_device *vfe_dev, uint32_t irq_status0, uint32_t irq_status1) { uint32_t halt_restart_mask0, halt_restart_mask1; vfe_dev->hw_info->vfe_ops.core_ops. get_halt_restart_mask(&halt_restart_mask0, &halt_restart_mask1); irq_status0 &= halt_restart_mask0; irq_status1 &= halt_restart_mask1; if (irq_status0 == 0 && irq_status1 == 0) return; switch (atomic_read(&vfe_dev->error_info.overflow_state)) { case HALT_REQUESTED: { pr_err("%s: Halt done, Restart Pending\n", __func__); /*Reset the hardware*/ vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 0); /*Update overflow state*/ atomic_set(&vfe_dev->error_info.overflow_state, RESTART_REQUESTED); } break; case RESTART_REQUESTED: { pr_err("%s: Restart done, Resuming\n", __func__); /*Reset the burst stream frame drop pattern, in the * case where bus overflow happens during the burstshot, * the framedrop pattern might be updated after reg update * to skip all the frames after the burst shot. The burst shot * might not be completed due to the overflow, so the framedrop * pattern need to change back to the original settings in order * to recovr from overflow. */ msm_isp_reset_burst_count(vfe_dev); vfe_dev->hw_info->vfe_ops.axi_ops. reload_wm(vfe_dev, 0xFFFFFFFF); vfe_dev->hw_info->vfe_ops.core_ops.restore_irq_mask(vfe_dev); vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev); memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info)); atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW); vfe_dev->hw_info->vfe_ops.core_ops. update_camif_state(vfe_dev, ENABLE_CAMIF); } break; case NO_OVERFLOW: case OVERFLOW_DETECTED: default: break; } } #endif irqreturn_t msm_isp_process_irq(int irq_num, void *data) { unsigned long flags; struct msm_vfe_tasklet_queue_cmd *queue_cmd; struct vfe_device *vfe_dev = (struct vfe_device *)data; uint32_t irq_status0, irq_status1; uint32_t error_mask0, error_mask1; vfe_dev->hw_info->vfe_ops.irq_ops. read_irq_status(vfe_dev, &irq_status0, &irq_status1); msm_isp_process_overflow_irq(vfe_dev, &irq_status0, &irq_status1); vfe_dev->hw_info->vfe_ops.core_ops. 
get_error_mask(&error_mask0, &error_mask1); error_mask0 &= irq_status0; error_mask1 &= irq_status1; irq_status0 &= ~error_mask0; irq_status1 &= ~error_mask1; if ((error_mask0 != 0) || (error_mask1 != 0)) msm_isp_update_error_info(vfe_dev, error_mask0, error_mask1); if ((irq_status0 == 0) && (irq_status1 == 0) && (!((error_mask0 != 0) || (error_mask1 != 0)) && vfe_dev->error_info.error_count == 1)) { ISP_DBG("%s: irq_status0 & 1 are both 0!\n", __func__); return IRQ_HANDLED; } /* if (atomic_read(&vfe_dev->error_info.overflow_state) != NO_OVERFLOW) { pr_err("%s HW is in overflow state. Don't process IRQ until recovery\n", __func__); atomic_set(&vfe_dev->irq_cnt, 0); return IRQ_HANDLED; } */ spin_lock_irqsave(&vfe_dev->tasklet_lock, flags); queue_cmd = &vfe_dev->tasklet_queue_cmd[vfe_dev->taskletq_idx]; if (queue_cmd->cmd_used) { pr_err_ratelimited("%s: Tasklet queue overflow: %d\n", __func__, vfe_dev->pdev->id); list_del(&queue_cmd->list); } else { atomic_add(1, &vfe_dev->irq_cnt); } queue_cmd->vfeInterruptStatus0 = irq_status0; queue_cmd->vfeInterruptStatus1 = irq_status1; msm_isp_get_timestamp(&queue_cmd->ts); queue_cmd->cmd_used = 1; vfe_dev->taskletq_idx = (vfe_dev->taskletq_idx + 1) % MSM_VFE_TASKLETQ_SIZE; list_add_tail(&queue_cmd->list, &vfe_dev->tasklet_q); spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags); tasklet_schedule(&vfe_dev->vfe_tasklet); return IRQ_HANDLED; } void msm_isp_do_tasklet(unsigned long data) { unsigned long flags; struct vfe_device *vfe_dev = (struct vfe_device *)data; struct msm_vfe_irq_ops *irq_ops = &vfe_dev->hw_info->vfe_ops.irq_ops; struct msm_vfe_tasklet_queue_cmd *queue_cmd; struct msm_isp_timestamp ts; uint32_t irq_status0, irq_status1; while (atomic_read(&vfe_dev->irq_cnt)) { spin_lock_irqsave(&vfe_dev->tasklet_lock, flags); queue_cmd = list_first_entry(&vfe_dev->tasklet_q, struct msm_vfe_tasklet_queue_cmd, list); if (!queue_cmd) { atomic_set(&vfe_dev->irq_cnt, 0); spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags); return; } atomic_sub(1, &vfe_dev->irq_cnt); list_del(&queue_cmd->list); queue_cmd->cmd_used = 0; irq_status0 = queue_cmd->vfeInterruptStatus0; irq_status1 = queue_cmd->vfeInterruptStatus1; ts = queue_cmd->ts; spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags); if (atomic_read(&vfe_dev->error_info.overflow_state) != NO_OVERFLOW) { pr_err("Azam: There is Overflow, Ignore IRQs!!!"); // msm_isp_process_overflow_recovery(vfe_dev, // irq_status0, irq_status1); continue; } irq_ops->process_reset_irq(vfe_dev, irq_status0, irq_status1); irq_ops->process_halt_irq(vfe_dev, irq_status0, irq_status1); irq_ops->process_axi_irq(vfe_dev, irq_status0, irq_status1, &ts); irq_ops->process_stats_irq(vfe_dev, irq_status0, irq_status1, &ts); irq_ops->process_reg_update(vfe_dev, irq_status0, irq_status1, &ts); irq_ops->process_camif_irq(vfe_dev, irq_status0, irq_status1, &ts); msm_isp_process_error_info(vfe_dev); } } void msm_isp_set_src_state(struct vfe_device *vfe_dev, void *arg) { struct msm_vfe_axi_src_state *src_state = arg; vfe_dev->axi_data.src_info[src_state->input_src].active = src_state->src_active; } int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd); long rc; ISP_DBG("%s\n", __func__); mutex_lock(&vfe_dev->realtime_mutex); mutex_lock(&vfe_dev->core_mutex); if (vfe_dev->vfe_open_cnt == 1) { pr_err("VFE already open\n"); mutex_unlock(&vfe_dev->core_mutex); mutex_unlock(&vfe_dev->realtime_mutex); return -ENODEV; } if (vfe_dev->hw_info->vfe_ops.core_ops.init_hw(vfe_dev) < 
0) { pr_err("%s: init hardware failed\n", __func__); mutex_unlock(&vfe_dev->core_mutex); mutex_unlock(&vfe_dev->realtime_mutex); return -EBUSY; } memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info)); atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW); rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 1, 1); if (rc <= 0) { pr_err("%s: reset timeout\n", __func__); mutex_unlock(&vfe_dev->core_mutex); mutex_unlock(&vfe_dev->realtime_mutex); return -EINVAL; } vfe_dev->vfe_hw_version = msm_camera_io_r(vfe_dev->vfe_base); ISP_DBG("%s: HW Version: 0x%x\n", __func__, vfe_dev->vfe_hw_version); vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev); vfe_dev->buf_mgr->ops->buf_mgr_init(vfe_dev->buf_mgr, "msm_isp", 28); memset(&vfe_dev->axi_data, 0, sizeof(struct msm_vfe_axi_shared_data)); memset(&vfe_dev->stats_data, 0, sizeof(struct msm_vfe_stats_shared_data)); vfe_dev->axi_data.hw_info = vfe_dev->hw_info->axi_hw_info; vfe_dev->vfe_open_cnt++; vfe_dev->taskletq_idx = 0; mutex_unlock(&vfe_dev->core_mutex); mutex_unlock(&vfe_dev->realtime_mutex); return 0; } int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { long rc; struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd); ISP_DBG("%s\n", __func__); mutex_lock(&vfe_dev->realtime_mutex); mutex_lock(&vfe_dev->core_mutex); if (vfe_dev->vfe_open_cnt == 0) { pr_err("%s: Invalid close\n", __func__); mutex_unlock(&vfe_dev->core_mutex); mutex_unlock(&vfe_dev->realtime_mutex); return -ENODEV; } rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1); if (rc <= 0) pr_err("%s: halt timeout rc=%ld\n", __func__, rc); vfe_dev->buf_mgr->ops->buf_mgr_deinit(vfe_dev->buf_mgr); vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev); vfe_dev->vfe_open_cnt--; mutex_unlock(&vfe_dev->core_mutex); mutex_unlock(&vfe_dev->realtime_mutex); return 0; }
gpl-2.0
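The CAL_WORD(width, M, N) macro in msm_isp_util.c above is a ceiling division: the number of bus words needed for one line of pixels is ceil(width * M / N). A small stand-alone sketch of that arithmetic for the packed Bayer cases from the switch statement; the (M, N) pairs are taken from the file, and the macro arguments are parenthesized here for hygiene.

#include <stdio.h>
#include <stdint.h>

/* Ceiling division: words = ceil(width * M / N) */
#define CAL_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N))

int main(void)
{
    uint32_t ppl = 1920;  /* pixels per line, arbitrary example */

    /* 8-bit packed: 8 pixels per 64-bit word      -> (M, N) = (1, 8)  */
    printf("8-bit:  %u words\n", CAL_WORD(ppl, 1, 8));
    /* 10-bit packed: 32 pixels = 320 bits = 5 words -> (5, 32) */
    printf("10-bit: %u words\n", CAL_WORD(ppl, 5, 32));
    /* 12-bit packed: 16 pixels = 192 bits = 3 words -> (3, 16) */
    printf("12-bit: %u words\n", CAL_WORD(ppl, 3, 16));
    return 0;
}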
vitasdk/newlib
newlib/libc/string/wcstok.c
19
5681
/* FUNCTION <<wcstok>>---get next token from a string INDEX wcstok SYNOPSIS #include <wchar.h> wchar_t *wcstok(wchar_t *__restrict <[source]>, const wchar_t *__restrict <[delimiters]>, wchar_t **__restrict <[lasts]>); DESCRIPTION The <<wcstok>> function is the wide-character equivalent of the <<strtok_r>> function (which in turn is the same as the <<strtok>> function with an added argument to make it thread-safe). The <<wcstok>> function is used to isolate (one at a time) sequential tokens in a null-terminated wide-character string, <<*<[source]>>>. A token is defined as a substring not containing any wide-characters from <<*<[delimiters]>>>. The first time that <<wcstok>> is called, <<*<[source]>>> should be specified with the wide-character string to be searched, and <<*<[lasts]>>>--but not <<lasts>>, which must be non-NULL--may be random; subsequent calls, wishing to obtain further tokens from the same string, should pass a null pointer for <<*<[source]>>> instead but must supply <<*<[lasts]>>> unchanged from the last call. The separator wide-character string, <<*<[delimiters]>>>, must be supplied each time and may change between calls. A pointer to placeholder <<*<[lasts]>>> must be supplied by the caller, and is set each time as needed to save the state by <<wcstok>>. Every call to <<wcstok>> with <<*<[source]>>> == <<NULL>> must pass the value of <<*<[lasts]>>> as last set by <<wcstok>>. The <<wcstok>> function returns a pointer to the beginning of each subsequent token in the string, after replacing the separator wide-character itself with a null wide-character. When no more tokens remain, a null pointer is returned. RETURNS <<wcstok>> returns a pointer to the first wide character of a token, or <<NULL>> if there is no token. NOTES <<wcstok>> is thread-safe (unlike <<strtok>>, but like <<strtok_r>>). <<wcstok>> writes into the string being searched. PORTABILITY <<wcstok>> is C99 and POSIX.1-2001. <<wcstok>> requires no supporting OS subroutines. QUICKREF strtok ansi pure */ /* wcstok for Newlib created by adapting strtok_r, 2008. */ /* * Copyright (c) 1988 Regents of the University of California. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include <wchar.h> wchar_t * wcstok (register wchar_t *__restrict s, register const wchar_t *__restrict delim, wchar_t **__restrict lasts) { register const wchar_t *spanp; register int c, sc; wchar_t *tok; if (s == NULL && (s = *lasts) == NULL) return (NULL); /* * Skip (span) leading delimiters (s += wcsspn(s, delim), sort of). */ cont: c = *s++; for (spanp = delim; (sc = *spanp++) != L'\0';) { if (c == sc) goto cont; } if (c == L'\0') { /* no non-delimiter characters */ *lasts = NULL; return (NULL); } tok = s - 1; /* * Scan token (scan for delimiters: s += wcscspn(s, delim), sort of). * Note that delim must have one NUL; we stop if we see that, too. */ for (;;) { c = *s++; spanp = delim; do { if ((sc = *spanp++) == c) { if (c == L'\0') s = NULL; else s[-1] = L'\0'; *lasts = s; return (tok); } } while (sc != L'\0'); } /* NOTREACHED */ } /* The remainder of this file can serve as a regression test. Compile * with -D_REGRESSION_TEST. */ #if defined(_REGRESSION_TEST) /* [Test code: example from C99 standard */ #include <stdio.h> #include <wchar.h> /* example from C99 standard with minor additions to be a test */ int main(void) { int errs=0; static wchar_t str1[] = L"?a???b,,,#c"; static wchar_t str2[] = L"\t \t"; wchar_t *t, *ptr1, *ptr2; t = wcstok(str1, L"?", &ptr1); // t points to the token L"a" if(wcscmp(t,L"a")) errs++; t = wcstok(NULL, L",", &ptr1); // t points to the token L"??b" if(wcscmp(t,L"??b")) errs++; t = wcstok(str2, L" \t", &ptr2); // t is a null pointer if(t != NULL) errs++; t = wcstok(NULL, L"#,", &ptr1); // t points to the token L"c" if(wcscmp(t,L"c")) errs++; t = wcstok(NULL, L"?", &ptr1); // t is a null pointer if(t != NULL) errs++; printf("wcstok() test "); if(errs) printf("FAILED %d test cases", errs); else printf("passed"); printf(".\n"); return(errs); } #endif /* defined(_REGRESSION_TEST) ] */
gpl-2.0
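A minimal usage sketch for wcstok() as implemented above: the idiomatic tokenizing loop, which the file's single-call regression test does not show.

#include <stdio.h>
#include <wchar.h>

int main(void)
{
    wchar_t str[] = L"alpha, beta;gamma";
    wchar_t *save, *tok;

    /* First call passes the string; later calls pass NULL plus the
     * unchanged save pointer, exactly as the documentation requires. */
    for (tok = wcstok(str, L",; ", &save); tok != NULL;
         tok = wcstok(NULL, L",; ", &save))
        wprintf(L"token: %ls\n", tok);
    return 0;
}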
jwpi/glibc
libio/wmemstream.c
19
4367
/* Copyright (C) 1995-2015 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #include "libioP.h" #include "strfile.h" #include <stdio.h> #include <stdlib.h> #include <wchar.h> struct _IO_FILE_wmemstream { _IO_strfile _sf; wchar_t **bufloc; _IO_size_t *sizeloc; }; static int _IO_wmem_sync (_IO_FILE* fp) __THROW; static void _IO_wmem_finish (_IO_FILE* fp, int) __THROW; static const struct _IO_jump_t _IO_wmem_jumps = { JUMP_INIT_DUMMY, JUMP_INIT (finish, _IO_wmem_finish), JUMP_INIT (overflow, (_IO_overflow_t) _IO_wstr_overflow), JUMP_INIT (underflow, (_IO_underflow_t) _IO_wstr_underflow), JUMP_INIT (uflow, (_IO_underflow_t) _IO_wdefault_uflow), JUMP_INIT (pbackfail, (_IO_pbackfail_t) _IO_wstr_pbackfail), JUMP_INIT (xsputn, _IO_wdefault_xsputn), JUMP_INIT (xsgetn, _IO_wdefault_xsgetn), JUMP_INIT (seekoff, _IO_wstr_seekoff), JUMP_INIT (seekpos, _IO_default_seekpos), JUMP_INIT (setbuf, _IO_default_setbuf), JUMP_INIT (sync, _IO_wmem_sync), JUMP_INIT (doallocate, _IO_wdefault_doallocate), JUMP_INIT (read, _IO_default_read), JUMP_INIT (write, _IO_default_write), JUMP_INIT (seek, _IO_default_seek), JUMP_INIT (close, _IO_default_close), JUMP_INIT (stat, _IO_default_stat), JUMP_INIT (showmanyc, _IO_default_showmanyc), JUMP_INIT (imbue, _IO_default_imbue) }; /* Open a stream that writes into a malloc'd buffer that is expanded as necessary. *BUFLOC and *SIZELOC are updated with the buffer's location and the number of characters written on fflush or fclose. 
*/ _IO_FILE * open_wmemstream (bufloc, sizeloc) wchar_t **bufloc; _IO_size_t *sizeloc; { struct locked_FILE { struct _IO_FILE_wmemstream fp; #ifdef _IO_MTSAFE_IO _IO_lock_t lock; #endif struct _IO_wide_data wd; } *new_f; wchar_t *buf; new_f = (struct locked_FILE *) malloc (sizeof (struct locked_FILE)); if (new_f == NULL) return NULL; #ifdef _IO_MTSAFE_IO new_f->fp._sf._sbf._f._lock = &new_f->lock; #endif buf = calloc (1, _IO_BUFSIZ); if (buf == NULL) { free (new_f); return NULL; } _IO_no_init (&new_f->fp._sf._sbf._f, 0, 0, &new_f->wd, &_IO_wmem_jumps); _IO_fwide (&new_f->fp._sf._sbf._f, 1); _IO_wstr_init_static (&new_f->fp._sf._sbf._f, buf, _IO_BUFSIZ / sizeof (wchar_t), buf); new_f->fp._sf._sbf._f._flags2 &= ~_IO_FLAGS2_USER_WBUF; new_f->fp._sf._s._allocate_buffer = (_IO_alloc_type) malloc; new_f->fp._sf._s._free_buffer = (_IO_free_type) free; new_f->fp.bufloc = bufloc; new_f->fp.sizeloc = sizeloc; return (_IO_FILE *) &new_f->fp._sf._sbf; } static int _IO_wmem_sync (fp) _IO_FILE* fp; { struct _IO_FILE_wmemstream *mp = (struct _IO_FILE_wmemstream *) fp; if (fp->_wide_data->_IO_write_ptr == fp->_wide_data->_IO_write_end) { _IO_wstr_overflow (fp, '\0'); --fp->_wide_data->_IO_write_ptr; } else *fp->_wide_data->_IO_write_ptr = '\0'; *mp->bufloc = fp->_wide_data->_IO_write_base; *mp->sizeloc = (fp->_wide_data->_IO_write_ptr - fp->_wide_data->_IO_write_base); return 0; } static void _IO_wmem_finish (fp, dummy) _IO_FILE* fp; int dummy; { struct _IO_FILE_wmemstream *mp = (struct _IO_FILE_wmemstream *) fp; *mp->bufloc = (wchar_t *) realloc (fp->_wide_data->_IO_write_base, (fp->_wide_data->_IO_write_ptr - fp->_wide_data->_IO_write_base + 1) * sizeof (wchar_t)); if (*mp->bufloc != NULL) { size_t len = (fp->_wide_data->_IO_write_ptr - fp->_wide_data->_IO_write_base); (*mp->bufloc)[len] = '\0'; *mp->sizeloc = len; fp->_wide_data->_IO_buf_base = NULL; } _IO_wstr_finish (fp, 0); }
gpl-2.0
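A short usage sketch for open_wmemstream() as implemented above: the stream writes into a malloc'd wide-character buffer, *bufloc and *sizeloc are updated on fflush/fclose, and the caller owns the buffer afterwards.

#define _POSIX_C_SOURCE 200809L  /* open_wmemstream is POSIX.1-2008 */
#include <stdio.h>
#include <stdlib.h>
#include <wchar.h>

int main(void)
{
    wchar_t *buf = NULL;
    size_t len = 0;
    FILE *fp = open_wmemstream(&buf, &len);

    if (fp == NULL)
        return 1;
    fwprintf(fp, L"%d wide chars follow\n", 42);
    fclose(fp);               /* flushes and finalizes buf/len */

    wprintf(L"captured %zu characters: %ls", len, buf);
    free(buf);                /* caller owns the buffer */
    return 0;
}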
wdv4758h/glibc
nptl/pthread_mutexattr_setprioceiling.c
19
1863
/* Change priority ceiling setting in pthread_mutexattr_t. Copyright (C) 2006-2015 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Jakub Jelinek <jakub@redhat.com>, 2006. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #include <errno.h> #include <pthreadP.h> #include <atomic.h> int pthread_mutexattr_setprioceiling (attr, prioceiling) pthread_mutexattr_t *attr; int prioceiling; { /* See __init_sched_fifo_prio. */ if (atomic_load_relaxed (&__sched_fifo_min_prio) == -1 || atomic_load_relaxed (&__sched_fifo_max_prio) == -1) __init_sched_fifo_prio (); if (__glibc_unlikely (prioceiling < atomic_load_relaxed (&__sched_fifo_min_prio)) || __glibc_unlikely (prioceiling > atomic_load_relaxed (&__sched_fifo_max_prio)) || __glibc_unlikely ((prioceiling & (PTHREAD_MUTEXATTR_PRIO_CEILING_MASK >> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT)) != prioceiling)) return EINVAL; struct pthread_mutexattr *iattr = (struct pthread_mutexattr *) attr; iattr->mutexkind = ((iattr->mutexkind & ~PTHREAD_MUTEXATTR_PRIO_CEILING_MASK) | (prioceiling << PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT)); return 0; }
gpl-2.0
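The setprioceiling record validates the requested ceiling against the SCHED_FIFO priority band and packs it into the attribute's mutexkind bit field. A hedged usage sketch of the public API (all calls are standard POSIX; the error handling is illustrative):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

int main(void)
{
    pthread_mutexattr_t attr;
    pthread_mutex_t lock;
    int err;

    pthread_mutexattr_init(&attr);

    /* A ceiling is meaningful with the priority-protect protocol; the
       value must fall inside the SCHED_FIFO range that the code above
       checks via __sched_fifo_min_prio/__sched_fifo_max_prio. */
    pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
    err = pthread_mutexattr_setprioceiling(&attr,
                                           sched_get_priority_min(SCHED_FIFO));
    if (err != 0)   /* EINVAL when the ceiling is outside the band */
        fprintf(stderr, "setprioceiling failed: %d\n", err);

    pthread_mutex_init(&lock, &attr);
    /* ... use the mutex ... */
    pthread_mutex_destroy(&lock);
    pthread_mutexattr_destroy(&attr);
    return 0;
}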
hurr1k4ne/TrinityCore
src/server/scripts/Outland/CoilfangReservoir/SerpentShrine/boss_leotheras_the_blind.cpp
19
27514
/* * Copyright (C) 2008-2017 TrinityCore <http://www.trinitycore.org/> * Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ /* ScriptData SDName: Boss_Leotheras_The_Blind SD%Complete: 80 SDComment: Possession Support SDCategory: Coilfang Reservoir, Serpent Shrine Cavern EndScriptData */ #include "ScriptMgr.h" #include "ScriptedCreature.h" #include "serpent_shrine.h" #include "Player.h" enum LeotherasTheBlind { // Spells used by Leotheras The Blind SPELL_WHIRLWIND = 37640, SPELL_CHAOS_BLAST = 37674, SPELL_BERSERK = 26662, SPELL_INSIDIOUS_WHISPER = 37676, SPELL_DUAL_WIELD = 42459, // Spells used in banish phase BANISH_BEAM = 38909, AURA_BANISH = 37833, // Spells used by Greyheart Spellbinders SPELL_EARTHSHOCK = 39076, SPELL_MINDBLAST = 37531, // Spells used by Inner Demons and Creature ID INNER_DEMON_ID = 21857, AURA_DEMONIC_ALIGNMENT = 37713, SPELL_SHADOWBOLT = 39309, SPELL_SOUL_LINK = 38007, SPELL_CONSUMING_MADNESS = 37749, //Misc. MODEL_DEMON = 20125, MODEL_NIGHTELF = 20514, DEMON_FORM = 21875, NPC_SPELLBINDER = 21806, INNER_DEMON_VICTIM = 1, SAY_AGGRO = 0, SAY_SWITCH_TO_DEMON = 1, SAY_INNER_DEMONS = 2, SAY_DEMON_SLAY = 3, SAY_NIGHTELF_SLAY = 4, SAY_FINAL_FORM = 5, SAY_FREE = 6, SAY_DEATH = 7 }; class npc_inner_demon : public CreatureScript { public: npc_inner_demon() : CreatureScript("npc_inner_demon") { } CreatureAI* GetAI(Creature* creature) const override { return new npc_inner_demonAI(creature); } struct npc_inner_demonAI : public ScriptedAI { npc_inner_demonAI(Creature* creature) : ScriptedAI(creature) { Initialize(); } void Initialize() { ShadowBolt_Timer = 10000; Link_Timer = 1000; } uint32 ShadowBolt_Timer; uint32 Link_Timer; ObjectGuid victimGUID; void Reset() override { Initialize(); } void SetGUID(ObjectGuid guid, int32 id/* = 0 */) override { if (id == INNER_DEMON_VICTIM) victimGUID = guid; } ObjectGuid GetGUID(int32 id/* = 0 */) const override { if (id == INNER_DEMON_VICTIM) return victimGUID; return ObjectGuid::Empty; } void JustDied(Unit* /*killer*/) override { Unit* unit = ObjectAccessor::GetUnit(*me, victimGUID); if (unit && unit->HasAura(SPELL_INSIDIOUS_WHISPER)) unit->RemoveAurasDueToSpell(SPELL_INSIDIOUS_WHISPER); } void DamageTaken(Unit* done_by, uint32 &damage) override { if (done_by->GetGUID() != victimGUID && done_by->GetGUID() != me->GetGUID()) { damage = 0; DoModifyThreatPercent(done_by, -100); } } void EnterCombat(Unit* /*who*/) override { if (!victimGUID) return; } void UpdateAI(uint32 diff) override { //Return since we have no target if (!UpdateVictim() || !me->GetVictim()) return; if (me->EnsureVictim()->GetGUID() != victimGUID) { DoModifyThreatPercent(me->GetVictim(), -100); Unit* owner = ObjectAccessor::GetUnit(*me, victimGUID); if (owner && owner->IsAlive()) { me->AddThreat(owner, 999999); AttackStart(owner); } else if (owner && owner->isDead()) { me->DealDamage(me, me->GetHealth(), NULL, DIRECT_DAMAGE,
SPELL_SCHOOL_MASK_NORMAL, NULL, false); return; } } if (Link_Timer <= diff) { DoCastVictim(SPELL_SOUL_LINK, true); Link_Timer = 1000; } else Link_Timer -= diff; if (!me->HasAura(AURA_DEMONIC_ALIGNMENT)) DoCast(me, AURA_DEMONIC_ALIGNMENT, true); if (ShadowBolt_Timer <= diff) { DoCastVictim(SPELL_SHADOWBOLT, false); ShadowBolt_Timer = 10000; } else ShadowBolt_Timer -= diff; DoMeleeAttackIfReady(); } }; }; //Original Leotheras the Blind AI class boss_leotheras_the_blind : public CreatureScript { public: boss_leotheras_the_blind() : CreatureScript("boss_leotheras_the_blind") { } CreatureAI* GetAI(Creature* creature) const override { return GetInstanceAI<boss_leotheras_the_blindAI>(creature); } struct boss_leotheras_the_blindAI : public ScriptedAI { boss_leotheras_the_blindAI(Creature* creature) : ScriptedAI(creature) { Initialize(); creature->GetPosition(x, y, z); instance = creature->GetInstanceScript(); } void Initialize() { BanishTimer = 1000; Whirlwind_Timer = 15000; ChaosBlast_Timer = 1000; SwitchToDemon_Timer = 45000; SwitchToHuman_Timer = 60000; Berserk_Timer = 600000; InnerDemons_Timer = 30000; DealDamage = true; DemonForm = false; IsFinalForm = false; NeedThreatReset = false; EnrageUsed = false; memset(InnderDemon, 0, sizeof(InnderDemon)); InnerDemon_Count = 0; } InstanceScript* instance; uint32 Whirlwind_Timer; uint32 ChaosBlast_Timer; uint32 SwitchToDemon_Timer; uint32 SwitchToHuman_Timer; uint32 Berserk_Timer; uint32 InnerDemons_Timer; uint32 BanishTimer; bool DealDamage; bool NeedThreatReset; bool DemonForm; bool IsFinalForm; bool EnrageUsed; float x, y, z; ObjectGuid InnderDemon[5]; uint32 InnerDemon_Count; ObjectGuid Demon; ObjectGuid SpellBinderGUID[3]; void Reset() override { CheckChannelers(); Initialize(); me->SetCanDualWield(true); me->SetSpeedRate(MOVE_RUN, 2.0f); me->SetDisplayId(MODEL_NIGHTELF); me->SetUInt32Value(UNIT_VIRTUAL_ITEM_SLOT_ID , 0); me->SetUInt32Value(UNIT_VIRTUAL_ITEM_SLOT_ID+1, 0); DoCast(me, SPELL_DUAL_WIELD, true); me->SetCorpseDelay(1000*60*60); instance->SetData(DATA_LEOTHERASTHEBLINDEVENT, NOT_STARTED); } void CheckChannelers(/*bool DoEvade = true*/) { for (uint8 i = 0; i < 3; ++i) { if (Creature* add = ObjectAccessor::GetCreature(*me, SpellBinderGUID[i])) add->DisappearAndDie(); float nx = x; float ny = y; float o = 2.4f; if (i == 0) {nx += 10; ny -= 5; o=2.5f;} if (i == 1) {nx -= 8; ny -= 7; o=0.9f;} if (i == 2) {nx -= 3; ny += 9; o=5.0f;} Creature* binder = me->SummonCreature(NPC_SPELLBINDER, nx, ny, z, o, TEMPSUMMON_DEAD_DESPAWN, 0); if (binder) SpellBinderGUID[i] = binder->GetGUID(); } } void MoveInLineOfSight(Unit* who) override { if (me->HasAura(AURA_BANISH)) return; if (!me->GetVictim() && me->CanCreatureAttack(who)) { if (me->GetDistanceZ(who) > CREATURE_Z_ATTACK_RANGE) return; float attackRadius = me->GetAttackDistance(who); if (me->IsWithinDistInMap(who, attackRadius)) { // Check first that object is in an angle in front of this one before LoS check if (me->HasInArc(float(M_PI) / 2.0f, who) && me->IsWithinLOSInMap(who)) { AttackStart(who); } } } } void StartEvent() { Talk(SAY_AGGRO); instance->SetData(DATA_LEOTHERASTHEBLINDEVENT, IN_PROGRESS); } void CheckBanish() { uint8 AliveChannelers = 0; for (uint8 i = 0; i < 3; ++i) { Unit* add = ObjectAccessor::GetUnit(*me, SpellBinderGUID[i]); if (add && add->IsAlive()) ++AliveChannelers; } // channelers == 0 remove banish aura if (AliveChannelers == 0 && me->HasAura(AURA_BANISH)) { // removing banish aura me->RemoveAurasDueToSpell(AURA_BANISH); // Leotheras is getting immune again 
me->ApplySpellImmune(AURA_BANISH, IMMUNITY_MECHANIC, MECHANIC_BANISH, true); // changing model back to night elf me->SetDisplayId(MODEL_NIGHTELF); // and resetting equipment me->LoadEquipment(); if (instance->GetGuidData(DATA_LEOTHERAS_EVENT_STARTER)) { Unit* victim = NULL; victim = ObjectAccessor::GetUnit(*me, instance->GetGuidData(DATA_LEOTHERAS_EVENT_STARTER)); if (victim) me->getThreatManager().addThreat(victim, 1); StartEvent(); } } else if (AliveChannelers != 0 && !me->HasAura(AURA_BANISH)) { // channelers != 0 apply banish aura // removing Leotheras banish immune to apply AURA_BANISH me->ApplySpellImmune(AURA_BANISH, IMMUNITY_MECHANIC, MECHANIC_BANISH, false); DoCast(me, AURA_BANISH); // changing model me->SetDisplayId(MODEL_DEMON); // and removing weapons me->SetUInt32Value(UNIT_VIRTUAL_ITEM_SLOT_ID , 0); me->SetUInt32Value(UNIT_VIRTUAL_ITEM_SLOT_ID+1, 0); } } //Despawn all summoned Inner Demons void DespawnDemon() { for (uint8 i=0; i<5; ++i) { if (InnderDemon[i]) { //delete creature Creature* creature = ObjectAccessor::GetCreature((*me), InnderDemon[i]); if (creature && creature->IsAlive()) creature->DespawnOrUnsummon(); InnderDemon[i].Clear(); } } InnerDemon_Count = 0; } void CastConsumingMadness() //remove this once SPELL_INSIDIOUS_WHISPER is supported by core { for (uint8 i = 0; i < 5; ++i) { if (InnderDemon[i]) { Creature* unit = ObjectAccessor::GetCreature((*me), InnderDemon[i]); if (unit && unit->IsAlive()) { Unit* unit_target = ObjectAccessor::GetUnit(*unit, unit->AI()->GetGUID(INNER_DEMON_VICTIM)); if (unit_target && unit_target->IsAlive()) { unit->CastSpell(unit_target, SPELL_CONSUMING_MADNESS, true); DoModifyThreatPercent(unit_target, -100); } } } } } void KilledUnit(Unit* victim) override { if (victim->GetTypeId() != TYPEID_PLAYER) return; Talk(DemonForm ?
SAY_DEMON_SLAY : SAY_NIGHTELF_SLAY); } void JustDied(Unit* /*killer*/) override { Talk(SAY_DEATH); //despawn copy if (Demon) { if (Creature* pDemon = ObjectAccessor::GetCreature(*me, Demon)) pDemon->DespawnOrUnsummon(); } instance->SetData(DATA_LEOTHERASTHEBLINDEVENT, DONE); } void EnterCombat(Unit* /*who*/) override { if (me->HasAura(AURA_BANISH)) return; me->LoadEquipment(); } void UpdateAI(uint32 diff) override { //Return since we have no target if (me->HasAura(AURA_BANISH) || !UpdateVictim()) { if (BanishTimer <= diff) { CheckBanish();//no need to check every update tick BanishTimer = 1000; } else BanishTimer -= diff; return; } if (me->HasAura(SPELL_WHIRLWIND)) { if (Whirlwind_Timer <= diff) { Unit* newTarget = SelectTarget(SELECT_TARGET_RANDOM, 0); if (newTarget) { DoResetThreat(); me->GetMotionMaster()->Clear(); me->GetMotionMaster()->MovePoint(0, newTarget->GetPositionX(), newTarget->GetPositionY(), newTarget->GetPositionZ()); } Whirlwind_Timer = 2000; } else Whirlwind_Timer -= diff; } // resetting after changing forms and after ending whirlwind if (NeedThreatReset && !me->HasAura(SPELL_WHIRLWIND)) { // when changing forms, setting timers (or when ending whirlwind - to avoid adding a new variable I use Whirlwind_Timer to count down 2s while whirlwinding) if (DemonForm) InnerDemons_Timer = 30000; else Whirlwind_Timer = 15000; NeedThreatReset = false; DoResetThreat(); me->GetMotionMaster()->Clear(); me->GetMotionMaster()->MoveChase(me->GetVictim()); } //Enrage_Timer (10 min) if (Berserk_Timer < diff && !EnrageUsed) { me->InterruptNonMeleeSpells(false); DoCast(me, SPELL_BERSERK); EnrageUsed = true; } else Berserk_Timer -= diff; if (!DemonForm) { //Whirlwind Timer if (!me->HasAura(SPELL_WHIRLWIND)) { if (Whirlwind_Timer <= diff) { DoCast(me, SPELL_WHIRLWIND); // while whirlwinding this variable is used to count down the target change Whirlwind_Timer = 2000; NeedThreatReset = true; } else Whirlwind_Timer -= diff; } //Switch_Timer if (!IsFinalForm) { if (SwitchToDemon_Timer <= diff) { //switch to demon form me->RemoveAurasDueToSpell(SPELL_WHIRLWIND); me->SetDisplayId(MODEL_DEMON); Talk(SAY_SWITCH_TO_DEMON); me->SetUInt32Value(UNIT_VIRTUAL_ITEM_SLOT_ID , 0); me->SetUInt32Value(UNIT_VIRTUAL_ITEM_SLOT_ID+1, 0); DemonForm = true; NeedThreatReset = true; SwitchToDemon_Timer = 45000; } else SwitchToDemon_Timer -= diff; } DoMeleeAttackIfReady(); } else { //ChaosBlast_Timer if (!me->GetVictim()) return; if (me->IsWithinDist(me->GetVictim(), 30)) me->StopMoving(); if (ChaosBlast_Timer <= diff) { // will cast only when in range of spell if (me->IsWithinDist(me->GetVictim(), 30)) { //DoCastVictim(SPELL_CHAOS_BLAST, true); int damage = 100; me->CastCustomSpell(me->GetVictim(), SPELL_CHAOS_BLAST, &damage, NULL, NULL, false, NULL, NULL, me->GetGUID()); } ChaosBlast_Timer = 3000; } else ChaosBlast_Timer -= diff; //Summon Inner Demon if (InnerDemons_Timer <= diff) { ThreatContainer::StorageType const & ThreatList = me->getThreatManager().getThreatList(); std::vector<Unit*> TargetList; for (ThreatContainer::StorageType::const_iterator itr = ThreatList.begin(); itr != ThreatList.end(); ++itr) { Unit* tempTarget = ObjectAccessor::GetUnit(*me, (*itr)->getUnitGuid()); if (tempTarget && tempTarget->GetTypeId() == TYPEID_PLAYER && tempTarget->GetGUID() != me->EnsureVictim()->GetGUID() && TargetList.size()<5) TargetList.push_back(tempTarget); } //SpellInfo* spell = GET_SPELL(SPELL_INSIDIOUS_WHISPER); for (std::vector<Unit*>::const_iterator itr = TargetList.begin(); itr != TargetList.end(); ++itr) { if ((*itr) &&
(*itr)->IsAlive()) { Creature* demon = me->SummonCreature(INNER_DEMON_ID, (*itr)->GetPositionX()+10, (*itr)->GetPositionY()+10, (*itr)->GetPositionZ(), 0, TEMPSUMMON_TIMED_DESPAWN_OUT_OF_COMBAT, 5000); if (demon) { demon->AI()->AttackStart((*itr)); demon->AI()->SetGUID((*itr)->GetGUID(), INNER_DEMON_VICTIM); (*itr)->AddAura(SPELL_INSIDIOUS_WHISPER, *itr); if (InnerDemon_Count > 4) InnerDemon_Count = 0; //Safe storing of creatures InnderDemon[InnerDemon_Count] = demon->GetGUID(); //Update demon count ++InnerDemon_Count; } } } Talk(SAY_INNER_DEMONS); InnerDemons_Timer = 999999; } else InnerDemons_Timer -= diff; //Switch_Timer if (SwitchToHuman_Timer <= diff) { //switch to nightelf form me->SetDisplayId(MODEL_NIGHTELF); me->LoadEquipment(); CastConsumingMadness(); DespawnDemon(); DemonForm = false; NeedThreatReset = true; SwitchToHuman_Timer = 60000; } else SwitchToHuman_Timer -= diff; } if (!IsFinalForm && HealthBelowPct(15)) { //at this point he divides himself in two parts CastConsumingMadness(); DespawnDemon(); Creature* Copy = NULL; Copy = DoSpawnCreature(DEMON_FORM, 0, 0, 0, 0, TEMPSUMMON_TIMED_DESPAWN_OUT_OF_COMBAT, 6000); if (Copy) { Demon = Copy->GetGUID(); if (me->GetVictim()) Copy->AI()->AttackStart(me->GetVictim()); } //set nightelf final form IsFinalForm = true; DemonForm = false; Talk(SAY_FINAL_FORM); me->SetDisplayId(MODEL_NIGHTELF); me->LoadEquipment(); } } }; }; //Leotheras the Blind Demon Form AI class boss_leotheras_the_blind_demonform : public CreatureScript { public: boss_leotheras_the_blind_demonform() : CreatureScript("boss_leotheras_the_blind_demonform") { } CreatureAI* GetAI(Creature* creature) const override { return new boss_leotheras_the_blind_demonformAI(creature); } struct boss_leotheras_the_blind_demonformAI : public ScriptedAI { boss_leotheras_the_blind_demonformAI(Creature* creature) : ScriptedAI(creature) { Initialize(); } void Initialize() { ChaosBlast_Timer = 1000; DealDamage = true; } uint32 ChaosBlast_Timer; bool DealDamage; void Reset() override { Initialize(); } void StartEvent() { Talk(SAY_FREE); } void KilledUnit(Unit* victim) override { if (victim->GetTypeId() != TYPEID_PLAYER) return; Talk(SAY_DEMON_SLAY); } void JustDied(Unit* /*killer*/) override { //invisibility (blizzlike, at the end of the fight he doesn't die, he disappears) DoCast(me, 8149, true); } void EnterCombat(Unit* /*who*/) override { StartEvent(); } void UpdateAI(uint32 diff) override { //Return since we have no target if (!UpdateVictim()) return; //ChaosBlast_Timer if (me->IsWithinDist(me->GetVictim(), 30)) me->StopMoving(); if (ChaosBlast_Timer <= diff) { // will cast only when in range of spell if (me->IsWithinDist(me->GetVictim(), 30)) { //DoCastVictim(SPELL_CHAOS_BLAST, true); int damage = 100; me->CastCustomSpell(me->GetVictim(), SPELL_CHAOS_BLAST, &damage, NULL, NULL, false, NULL, NULL, me->GetGUID()); ChaosBlast_Timer = 3000; } } else ChaosBlast_Timer -= diff; //Do NOT deal any melee damage to the target.
} }; }; class npc_greyheart_spellbinder : public CreatureScript { public: npc_greyheart_spellbinder() : CreatureScript("npc_greyheart_spellbinder") { } CreatureAI* GetAI(Creature* creature) const override { return GetInstanceAI<npc_greyheart_spellbinderAI>(creature); } struct npc_greyheart_spellbinderAI : public ScriptedAI { npc_greyheart_spellbinderAI(Creature* creature) : ScriptedAI(creature) { Initialize(); instance = creature->GetInstanceScript(); AddedBanish = false; } void Initialize() { Mindblast_Timer = urand(3000, 8000); Earthshock_Timer = urand(5000, 10000); } InstanceScript* instance; ObjectGuid leotherasGUID; uint32 Mindblast_Timer; uint32 Earthshock_Timer; bool AddedBanish; void Reset() override { Initialize(); instance->SetGuidData(DATA_LEOTHERAS_EVENT_STARTER, ObjectGuid::Empty); Creature* leotheras = ObjectAccessor::GetCreature(*me, leotherasGUID); if (leotheras && leotheras->IsAlive()) ENSURE_AI(boss_leotheras_the_blind::boss_leotheras_the_blindAI, leotheras->AI())->CheckChannelers(/*false*/); } void EnterCombat(Unit* who) override { me->InterruptNonMeleeSpells(false); instance->SetGuidData(DATA_LEOTHERAS_EVENT_STARTER, who->GetGUID()); } void JustRespawned() override { AddedBanish = false; Reset(); } void CastChanneling() { if (!me->IsInCombat() && !me->GetCurrentSpell(CURRENT_CHANNELED_SPELL)) { if (leotherasGUID) { Creature* leotheras = ObjectAccessor::GetCreature(*me, leotherasGUID); if (leotheras && leotheras->IsAlive()) DoCast(leotheras, BANISH_BEAM); } } } void UpdateAI(uint32 diff) override { if (!leotherasGUID) leotherasGUID = instance->GetGuidData(DATA_LEOTHERAS); if (!me->IsInCombat() && instance->GetGuidData(DATA_LEOTHERAS_EVENT_STARTER)) { Unit* victim = NULL; victim = ObjectAccessor::GetUnit(*me, instance->GetGuidData(DATA_LEOTHERAS_EVENT_STARTER)); if (victim) AttackStart(victim); } if (!UpdateVictim()) { CastChanneling(); return; } if (!instance->GetGuidData(DATA_LEOTHERAS_EVENT_STARTER)) { EnterEvadeMode(); return; } if (Mindblast_Timer <= diff) { Unit* target = NULL; target = SelectTarget(SELECT_TARGET_RANDOM, 0); if (target)DoCast(target, SPELL_MINDBLAST); Mindblast_Timer = urand(10000, 15000); } else Mindblast_Timer -= diff; if (Earthshock_Timer <= diff) { Map::PlayerList const &PlayerList = me->GetMap()->GetPlayers(); for (Map::PlayerList::const_iterator itr = PlayerList.begin(); itr != PlayerList.end(); ++itr) { if (Player* i_pl = itr->GetSource()) { bool isCasting = false; for (uint8 i = 0; i < CURRENT_MAX_SPELL; ++i) if (i_pl->GetCurrentSpell(i)) isCasting = true; if (isCasting) { DoCast(i_pl, SPELL_EARTHSHOCK); break; } } } Earthshock_Timer = urand(8000, 15000); } else Earthshock_Timer -= diff; DoMeleeAttackIfReady(); } void JustDied(Unit* /*killer*/) override { } }; }; void AddSC_boss_leotheras_the_blind() { new boss_leotheras_the_blind(); new boss_leotheras_the_blind_demonform(); new npc_greyheart_spellbinder(); new npc_inner_demon(); }
gpl-2.0
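Nearly every ability in the boss script above is driven by the same countdown idiom: a per-ability timer is compared against the elapsed diff each UpdateAI tick, fired and re-armed when it expires, and decremented otherwise. A self-contained sketch of that pattern (CastChaosBlast is a hypothetical stand-in for the script's spell casts; the frame times are made up):

#include <stdio.h>

typedef unsigned int uint32;

static void CastChaosBlast(void) { puts("Chaos Blast"); }

int main(void)
{
    uint32 chaosBlastTimer = 1000;                 /* ms until next cast */
    uint32 frames[] = { 400, 400, 400, 2600, 1000 };

    for (unsigned i = 0; i < sizeof frames / sizeof frames[0]; ++i) {
        uint32 diff = frames[i];                   /* ms since last tick */
        if (chaosBlastTimer <= diff) {
            CastChaosBlast();
            chaosBlastTimer = 3000;                /* re-arm, as the script does */
        } else
            chaosBlastTimer -= diff;
    }
    return 0;
}

The unsigned comparison before the subtraction is what keeps the timer from wrapping below zero on a long frame.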
daavery/audacity
src/RingBuffer.cpp
19
2863
/**********************************************************************

  Audacity: A Digital Audio Editor

  RingBuffer.cpp

  Dominic Mazzoni

*******************************************************************//*!

\class RingBuffer
\brief Holds streamed audio samples.

  This class is thread-safe, assuming that there is only one thread
  writing, and one thread reading.  If two threads both need to
  read, or both need to write, they need to lock this class from
  outside using their own mutex.

  AvailForPut and AvailForGet may underestimate but will never
  overestimate.

*//*******************************************************************/

#include "RingBuffer.h"

RingBuffer::RingBuffer(sampleFormat format, int size)
{
   mFormat = format;
   mBufferSize = (size > 64? size: 64);
   mStart = 0;
   mEnd = 0;
   mBuffer = NewSamples(mBufferSize, mFormat);
}

RingBuffer::~RingBuffer()
{
   DeleteSamples(mBuffer);
}

int RingBuffer::Len()
{
   return (mEnd + mBufferSize - mStart) % mBufferSize;
}

//
// For the writer only:
//

int RingBuffer::AvailForPut()
{
   return (mBufferSize-4) - Len();
}

int RingBuffer::Put(samplePtr buffer, sampleFormat format,
                    int samplesToCopy)
{
   samplePtr src;
   int block;
   int copied;
   int pos;
   int len = Len();

   if (samplesToCopy > (mBufferSize-4) - len)
      samplesToCopy = (mBufferSize-4) - len;

   src = buffer;
   copied = 0;
   pos = mEnd;
   while(samplesToCopy) {
      block = samplesToCopy;
      if (block > mBufferSize - pos)
         block = mBufferSize - pos;

      CopySamples(src, format,
                  mBuffer + pos * SAMPLE_SIZE(mFormat), mFormat,
                  block);

      src += block * SAMPLE_SIZE(format);
      pos = (pos + block) % mBufferSize;
      samplesToCopy -= block;
      copied += block;
   }

   mEnd = pos;

   return copied;
}

//
// For the reader only:
//

int RingBuffer::AvailForGet()
{
   return Len();
}

int RingBuffer::Get(samplePtr buffer, sampleFormat format,
                    int samplesToCopy)
{
   samplePtr dest;
   int block;
   int copied;
   int len = Len();

   if (samplesToCopy > len)
      samplesToCopy = len;

   dest = buffer;
   copied = 0;
   while(samplesToCopy) {
      block = samplesToCopy;
      if (block > mBufferSize - mStart)
         block = mBufferSize - mStart;

      CopySamples(mBuffer + mStart * SAMPLE_SIZE(mFormat), mFormat,
                  dest, format,
                  block);

      dest += block * SAMPLE_SIZE(format);
      mStart = (mStart + block) % mBufferSize;
      samplesToCopy -= block;
      copied += block;
   }

   return copied;
}

int RingBuffer::Discard(int samplesToDiscard)
{
   int len = Len();

   if (samplesToDiscard > len)
      samplesToDiscard = len;

   mStart = (mStart + samplesToDiscard) % mBufferSize;

   return samplesToDiscard;
}
gpl-2.0
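RingBuffer keeps only two indices, mStart and mEnd, and derives the occupancy with modular arithmetic; AvailForPut() deliberately reports (mBufferSize-4) - Len() so the writer can never advance mEnd onto mStart. A toy sketch of that index math under the same convention (the size and slot values here are made up for illustration):

#include <assert.h>

#define BUF_SIZE 8   /* toy capacity; the class itself clamps to >= 64 */

/* Same occupancy formula as RingBuffer::Len(). */
static int ring_len(int start, int end)
{
    return (end + BUF_SIZE - start) % BUF_SIZE;
}

int main(void)
{
    /* Wrapped case: data occupies slots 6, 7, 0, 1. */
    assert(ring_len(6, 2) == 4);

    /* start == end must mean "empty"; if the writer could fill every
       slot, a full buffer would also collapse to start == end, so a
       few slots are always kept free to disambiguate. */
    assert(ring_len(5, 5) == 0);
    return 0;
}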
carlos-wong/l430_kernel
drivers/infiniband/hw/ipath/ipath_iba6120.c
19
49945
/* * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* * This file contains all of the code that is specific to the * InfiniPath PCIe chip. */ #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/delay.h> #include "ipath_kernel.h" #include "ipath_registers.h" static void ipath_setup_pe_setextled(struct ipath_devdata *, u64, u64); /* * This file contains all the chip-specific register information and * access functions for the QLogic InfiniPath PCI-Express chip. * * This lists the InfiniPath registers, in the actual chip layout. * This structure should never be directly accessed. 
*/ struct _infinipath_do_not_use_kernel_regs { unsigned long long Revision; unsigned long long Control; unsigned long long PageAlign; unsigned long long PortCnt; unsigned long long DebugPortSelect; unsigned long long Reserved0; unsigned long long SendRegBase; unsigned long long UserRegBase; unsigned long long CounterRegBase; unsigned long long Scratch; unsigned long long Reserved1; unsigned long long Reserved2; unsigned long long IntBlocked; unsigned long long IntMask; unsigned long long IntStatus; unsigned long long IntClear; unsigned long long ErrorMask; unsigned long long ErrorStatus; unsigned long long ErrorClear; unsigned long long HwErrMask; unsigned long long HwErrStatus; unsigned long long HwErrClear; unsigned long long HwDiagCtrl; unsigned long long MDIO; unsigned long long IBCStatus; unsigned long long IBCCtrl; unsigned long long ExtStatus; unsigned long long ExtCtrl; unsigned long long GPIOOut; unsigned long long GPIOMask; unsigned long long GPIOStatus; unsigned long long GPIOClear; unsigned long long RcvCtrl; unsigned long long RcvBTHQP; unsigned long long RcvHdrSize; unsigned long long RcvHdrCnt; unsigned long long RcvHdrEntSize; unsigned long long RcvTIDBase; unsigned long long RcvTIDCnt; unsigned long long RcvEgrBase; unsigned long long RcvEgrCnt; unsigned long long RcvBufBase; unsigned long long RcvBufSize; unsigned long long RxIntMemBase; unsigned long long RxIntMemSize; unsigned long long RcvPartitionKey; unsigned long long Reserved3; unsigned long long RcvPktLEDCnt; unsigned long long Reserved4[8]; unsigned long long SendCtrl; unsigned long long SendPIOBufBase; unsigned long long SendPIOSize; unsigned long long SendPIOBufCnt; unsigned long long SendPIOAvailAddr; unsigned long long TxIntMemBase; unsigned long long TxIntMemSize; unsigned long long Reserved5; unsigned long long PCIeRBufTestReg0; unsigned long long PCIeRBufTestReg1; unsigned long long Reserved51[6]; unsigned long long SendBufferError; unsigned long long SendBufferErrorCONT1; unsigned long long Reserved6SBE[6]; unsigned long long RcvHdrAddr0; unsigned long long RcvHdrAddr1; unsigned long long RcvHdrAddr2; unsigned long long RcvHdrAddr3; unsigned long long RcvHdrAddr4; unsigned long long Reserved7RHA[11]; unsigned long long RcvHdrTailAddr0; unsigned long long RcvHdrTailAddr1; unsigned long long RcvHdrTailAddr2; unsigned long long RcvHdrTailAddr3; unsigned long long RcvHdrTailAddr4; unsigned long long Reserved8RHTA[11]; unsigned long long Reserved9SW[8]; unsigned long long SerdesConfig0; unsigned long long SerdesConfig1; unsigned long long SerdesStatus; unsigned long long XGXSConfig; unsigned long long IBPLLCfg; unsigned long long Reserved10SW2[3]; unsigned long long PCIEQ0SerdesConfig0; unsigned long long PCIEQ0SerdesConfig1; unsigned long long PCIEQ0SerdesStatus; unsigned long long Reserved11; unsigned long long PCIEQ1SerdesConfig0; unsigned long long PCIEQ1SerdesConfig1; unsigned long long PCIEQ1SerdesStatus; unsigned long long Reserved12; }; #define IPATH_KREG_OFFSET(field) (offsetof(struct \ _infinipath_do_not_use_kernel_regs, field) / sizeof(u64)) #define IPATH_CREG_OFFSET(field) (offsetof( \ struct infinipath_counters, field) / sizeof(u64)) static const struct ipath_kregs ipath_pe_kregs = { .kr_control = IPATH_KREG_OFFSET(Control), .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase), .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect), .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear), .kr_errormask = IPATH_KREG_OFFSET(ErrorMask), .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus), .kr_extctrl = 
IPATH_KREG_OFFSET(ExtCtrl), .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus), .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear), .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask), .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut), .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus), .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl), .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear), .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask), .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus), .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl), .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus), .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked), .kr_intclear = IPATH_KREG_OFFSET(IntClear), .kr_intmask = IPATH_KREG_OFFSET(IntMask), .kr_intstatus = IPATH_KREG_OFFSET(IntStatus), .kr_mdio = IPATH_KREG_OFFSET(MDIO), .kr_pagealign = IPATH_KREG_OFFSET(PageAlign), .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey), .kr_portcnt = IPATH_KREG_OFFSET(PortCnt), .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP), .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase), .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize), .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl), .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase), .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt), .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt), .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize), .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize), .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase), .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize), .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase), .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt), .kr_revision = IPATH_KREG_OFFSET(Revision), .kr_scratch = IPATH_KREG_OFFSET(Scratch), .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError), .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl), .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr), .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase), .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt), .kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize), .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase), .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase), .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize), .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase), .kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0), .kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1), .kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus), .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig), .kr_ibpllcfg = IPATH_KREG_OFFSET(IBPLLCfg), /* * These should not be used directly via ipath_write_kreg64(), * use them with ipath_write_kreg64_port(), */ .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0), .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0), /* The rcvpktled register controls one of the debug port signals, so * a packet activity LED can be connected to it. 
*/ .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt), .kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0), .kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1), .kr_pcieq0serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig0), .kr_pcieq0serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig1), .kr_pcieq0serdesstatus = IPATH_KREG_OFFSET(PCIEQ0SerdesStatus), .kr_pcieq1serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig0), .kr_pcieq1serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig1), .kr_pcieq1serdesstatus = IPATH_KREG_OFFSET(PCIEQ1SerdesStatus) }; static const struct ipath_cregs ipath_pe_cregs = { .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt), .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt), .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt), .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt), .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt), .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt), .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt), .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt), .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt), .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt), .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt), .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt), .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt), .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt), .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt), .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt), .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt), .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt), .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt), .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt), .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt), .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt), .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt), .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt), .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt), .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt), .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt), .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt), .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt), .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt), .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt), .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt), .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt) }; /* kr_intstatus, kr_intclear, kr_intmask bits */ #define INFINIPATH_I_RCVURG_MASK ((1U<<5)-1) #define INFINIPATH_I_RCVAVAIL_MASK ((1U<<5)-1) /* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ #define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL #define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0 #define INFINIPATH_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL #define INFINIPATH_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL #define INFINIPATH_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL #define INFINIPATH_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL #define INFINIPATH_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL #define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL #define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL #define INFINIPATH_HWE_PCIE1PLLFAILED 0x0400000000000000ULL #define INFINIPATH_HWE_PCIE0PLLFAILED 0x0800000000000000ULL #define INFINIPATH_HWE_SERDESPLLFAILED 0x1000000000000000ULL /* kr_extstatus bits */ #define INFINIPATH_EXTS_FREQSEL 0x2 #define INFINIPATH_EXTS_SERDESSEL 0x4 #define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000 #define INFINIPATH_EXTS_MEMBIST_FOUND 0x0000000000008000 #define _IPATH_GPIO_SDA_NUM 1 
#define _IPATH_GPIO_SCL_NUM 0 #define IPATH_GPIO_SDA (1ULL << \ (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT)) #define IPATH_GPIO_SCL (1ULL << \ (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT)) /* 6120 specific hardware errors... */ static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = { INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"), INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"), /* * In practice, it's unlikely that we'll see PCIe PLL, or bus * parity or memory parity error failures, because most likely we * won't be able to talk to the core of the chip. Nonetheless, we * might see them, if they are in parts of the PCIe core that aren't * essential. */ INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"), INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"), INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"), INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"), INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"), INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"), INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"), }; #define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \ INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \ << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) static int ipath_pe_txe_recover(struct ipath_devdata *); static void ipath_pe_put_tid_2(struct ipath_devdata *, u64 __iomem *, u32, unsigned long); /** * ipath_pe_handle_hwerrors - display hardware errors. * @dd: the infinipath device * @msg: the output buffer * @msgl: the size of the output buffer * * Use same msg buffer as regular errors to avoid excessive stack * use. Most hardware errors are catastrophic, but for right now, * we'll print them and continue. We reuse the same message buffer as * ipath_handle_errors() to avoid excessive stack usage. */ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg, size_t msgl) { ipath_err_t hwerrs; u32 bits, ctrl; int isfatal = 0; char bitsmsg[64]; int log_idx; hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus); if (!hwerrs) { /* * better than printing confusing messages * This seems to be related to clearing the crc error, or * the pll error during init. */ ipath_cdbg(VERBOSE, "Called but no hardware errors set\n"); return; } else if (hwerrs == ~0ULL) { ipath_dev_err(dd, "Read of hardware error status failed " "(all bits set); ignoring\n"); return; } ipath_stats.sps_hwerrs++; /* Always clear the error status register, except MEMBISTFAIL, * regardless of whether we continue or stop using the chip. * We want that set so we know it failed, even across driver reload. * We'll still ignore it in the hwerrmask. We do this partly for * diagnostics, but also for support */ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, hwerrs&~INFINIPATH_HWE_MEMBISTFAILED); hwerrs &= dd->ipath_hwerrmask; /* We log some errors to EEPROM, check if we have any of those.
*/ for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx) if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log) ipath_inc_eeprom_err(dd, log_idx, 1); /* * make sure we get this much out, unless told to be quiet, * or it's occurred within the last 5 seconds */ if ((hwerrs & ~(dd->ipath_lasthwerror | ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) || (ipath_debug & __IPATH_VERBDBG)) dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx " "(cleared)\n", (unsigned long long) hwerrs); dd->ipath_lasthwerror |= hwerrs; if (hwerrs & ~dd->ipath_hwe_bitsextant) ipath_dev_err(dd, "hwerror interrupt with unknown errors " "%llx set\n", (unsigned long long) (hwerrs & ~dd->ipath_hwe_bitsextant)); ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control); if (ctrl & INFINIPATH_C_FREEZEMODE) { /* * parity errors in send memory are recoverable, * just cancel the send (if indicated in * sendbuffererror), * count the occurrence, unfreeze (if no other handled * hardware error bits are set), and continue. They can * occur if a processor speculative read is done to the PIO * buffer while we are sending a packet, for example. */ if ((hwerrs & TXE_PIO_PARITY) && ipath_pe_txe_recover(dd)) hwerrs &= ~TXE_PIO_PARITY; if (hwerrs) { /* * if any set that we aren't ignoring only make the * complaint once, in case it's stuck or recurring, * and we get here multiple times * Force link down, so switch knows, and * LEDs are turned off */ if (dd->ipath_flags & IPATH_INITTED) { ipath_set_linkstate(dd, IPATH_IB_LINKDOWN); ipath_setup_pe_setextled(dd, INFINIPATH_IBCS_L_STATE_DOWN, INFINIPATH_IBCS_LT_STATE_DISABLED); ipath_dev_err(dd, "Fatal Hardware Error (freeze " "mode), no longer usable, SN %.16s\n", dd->ipath_serial); isfatal = 1; } /* * Mark as having had an error for driver, and also * for /sys and status word mapped to user programs. 
* This marks unit as not usable, until reset */ *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY; *dd->ipath_statusp |= IPATH_STATUS_HWERROR; dd->ipath_flags &= ~IPATH_INITTED; } else { static u32 freeze_cnt; freeze_cnt++; ipath_dbg("Clearing freezemode on ignored or recovered " "hardware error (%u)\n", freeze_cnt); ipath_clear_freeze(dd); } } *msg = '\0'; if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) { strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]", msgl); /* ignore from now on, so disable until driver reloaded */ *dd->ipath_statusp |= IPATH_STATUS_HWERROR; dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED; ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, dd->ipath_hwerrmask); } ipath_format_hwerrors(hwerrs, ipath_6120_hwerror_msgs, sizeof(ipath_6120_hwerror_msgs)/ sizeof(ipath_6120_hwerror_msgs[0]), msg, msgl); if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) { bits = (u32) ((hwerrs >> INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) & INFINIPATH_HWE_PCIEMEMPARITYERR_MASK); snprintf(bitsmsg, sizeof bitsmsg, "[PCIe Mem Parity Errs %x] ", bits); strlcat(msg, bitsmsg, msgl); } #define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \ INFINIPATH_HWE_COREPLL_RFSLIP ) if (hwerrs & _IPATH_PLL_FAIL) { snprintf(bitsmsg, sizeof bitsmsg, "[PLL failed (%llx), InfiniPath hardware unusable]", (unsigned long long) hwerrs & _IPATH_PLL_FAIL); strlcat(msg, bitsmsg, msgl); /* ignore from now on, so disable until driver reloaded */ dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL); ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, dd->ipath_hwerrmask); } if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) { /* * If it occurs, it is left masked since the external * interface is unused */ dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED; ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, dd->ipath_hwerrmask); } if (*msg) ipath_dev_err(dd, "%s hardware error\n", msg); if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) { /* * for /sys status file ; if no trailing } is copied, we'll * know it was truncated.
*/ snprintf(dd->ipath_freezemsg, dd->ipath_freezelen, "{%s}", msg); } } /** * ipath_pe_boardname - fill in the board name * @dd: the infinipath device * @name: the output buffer * @namelen: the size of the output buffer * * info is based on the board revision register */ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name, size_t namelen) { char *n = NULL; u8 boardrev = dd->ipath_boardrev; int ret; switch (boardrev) { case 0: n = "InfiniPath_Emulation"; break; case 1: n = "InfiniPath_QLE7140-Bringup"; break; case 2: n = "InfiniPath_QLE7140"; break; case 3: n = "InfiniPath_QMI7140"; break; case 4: n = "InfiniPath_QEM7140"; break; case 5: n = "InfiniPath_QMH7140"; break; case 6: n = "InfiniPath_QLE7142"; break; default: ipath_dev_err(dd, "Don't yet know about board with ID %u\n", boardrev); snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u", boardrev); break; } if (n) snprintf(name, namelen, "%s", n); if (dd->ipath_majrev != 4 || !dd->ipath_minrev || dd->ipath_minrev>2) { ipath_dev_err(dd, "Unsupported InfiniPath hardware revision %u.%u!\n", dd->ipath_majrev, dd->ipath_minrev); ret = 1; } else { ret = 0; if (dd->ipath_minrev >= 2) dd->ipath_f_put_tid = ipath_pe_put_tid_2; } return ret; } /** * ipath_pe_init_hwerrors - enable hardware errors * @dd: the infinipath device * * now that we have finished initializing everything that might reasonably * cause a hardware error, and cleared those errors bits as they occur, * we can enable hardware errors in the mask (potentially enabling * freeze mode), and enable hardware errors as errors (along with * everything else) in errormask */ static void ipath_pe_init_hwerrors(struct ipath_devdata *dd) { ipath_err_t val; u64 extsval; extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus); if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST)) ipath_dev_err(dd, "MemBIST did not complete!\n"); if (extsval & INFINIPATH_EXTS_MEMBIST_FOUND) ipath_dbg("MemBIST corrected\n"); val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */ if (!dd->ipath_boardrev) // no PLL for Emulator val &= ~INFINIPATH_HWE_SERDESPLLFAILED; if (dd->ipath_minrev < 2) { /* workaround bug 9460 in internal interface bus parity * checking. Fixed (HW bug 9490) in Rev2. */ val &= ~INFINIPATH_HWE_PCIEBUSPARITYRADM; } dd->ipath_hwerrmask = val; } /** * ipath_pe_bringup_serdes - bring up the serdes * @dd: the infinipath device */ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) { u64 val, config1, prev_val; int ret = 0; ipath_dbg("Trying to bringup serdes\n"); if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) & INFINIPATH_HWE_SERDESPLLFAILED) { ipath_dbg("At start, serdes PLL failed bit set " "in hwerrstatus, clearing and continuing\n"); ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, INFINIPATH_HWE_SERDESPLLFAILED); } val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1); ipath_cdbg(VERBOSE, "SerDes status config0=%llx config1=%llx, " "xgxsconfig %llx\n", (unsigned long long) val, (unsigned long long) config1, (unsigned long long) ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig)); /* * Force reset on, also set rxdetect enable. 
Must do before reading * serdesstatus at least for simulation, or some of the bits in * serdes status will come back as undefined and cause simulation * failures */ val |= INFINIPATH_SERDC0_RESET_PLL | INFINIPATH_SERDC0_RXDETECT_EN | INFINIPATH_SERDC0_L1PWR_DN; ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); /* be sure chip saw it */ ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); udelay(5); /* need pll reset set at least for a bit */ /* * after PLL is reset, set the per-lane Resets and TxIdle and * clear the PLL reset and rxdetect (to get falling edge). * Leave L1PWR bits set (permanently) */ val &= ~(INFINIPATH_SERDC0_RXDETECT_EN | INFINIPATH_SERDC0_RESET_PLL | INFINIPATH_SERDC0_L1PWR_DN); val |= INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE; ipath_cdbg(VERBOSE, "Clearing pll reset and setting lane resets " "and txidle (%llx)\n", (unsigned long long) val); ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); /* be sure chip saw it */ ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); /* need PLL reset clear for at least 11 usec before lane * resets cleared; give it a few more to be sure */ udelay(15); val &= ~(INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE); ipath_cdbg(VERBOSE, "Clearing lane resets and txidle " "(writing %llx)\n", (unsigned long long) val); ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); /* be sure chip saw it */ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); prev_val = val; if (((val >> INFINIPATH_XGXS_MDIOADDR_SHIFT) & INFINIPATH_XGXS_MDIOADDR_MASK) != 3) { val &= ~(INFINIPATH_XGXS_MDIOADDR_MASK << INFINIPATH_XGXS_MDIOADDR_SHIFT); /* MDIO address 3 */ val |= 3ULL << INFINIPATH_XGXS_MDIOADDR_SHIFT; } if (val & INFINIPATH_XGXS_RESET) { val &= ~INFINIPATH_XGXS_RESET; } if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) & INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) { /* need to compensate for Tx inversion in partner */ val &= ~(INFINIPATH_XGXS_RX_POL_MASK << INFINIPATH_XGXS_RX_POL_SHIFT); val |= dd->ipath_rx_pol_inv << INFINIPATH_XGXS_RX_POL_SHIFT; } if (val != prev_val) ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); /* clear current and de-emphasis bits */ config1 &= ~0x0ffffffff00ULL; /* set current to 20ma */ config1 |= 0x00000000000ULL; /* set de-emphasis to -5.68dB */ config1 |= 0x0cccc000000ULL; ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1); ipath_cdbg(VERBOSE, "done: SerDes status config0=%llx " "config1=%llx, sstatus=%llx xgxs=%llx\n", (unsigned long long) val, (unsigned long long) config1, (unsigned long long) ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus), (unsigned long long) ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig)); if (!ipath_waitfor_mdio_cmdready(dd)) { ipath_write_kreg( dd, dd->ipath_kregs->kr_mdio, ipath_mdio_req(IPATH_MDIO_CMD_READ, 31, IPATH_MDIO_CTRL_XGXS_REG_8, 0)); if (ipath_waitfor_complete(dd, dd->ipath_kregs->kr_mdio, IPATH_MDIO_DATAVALID, &val)) ipath_dbg("Never got MDIO data for XGXS " "status read\n"); else ipath_cdbg(VERBOSE, "MDIO Read reg8, " "'bank' 31 %x\n", (u32) val); } else ipath_dbg("Never got MDIO cmdready for XGXS status read\n"); return ret; } /** * ipath_pe_quiet_serdes - set serdes to txidle * @dd: the infinipath device * Called when driver is being unloaded */ static void ipath_pe_quiet_serdes(struct ipath_devdata *dd) { u64 val = ipath_read_kreg64(dd, 
dd->ipath_kregs->kr_serdesconfig0); val |= INFINIPATH_SERDC0_TXIDLE; ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n", (unsigned long long) val); ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); } static int ipath_pe_intconfig(struct ipath_devdata *dd) { u32 chiprev; /* * If the chip supports added error indication via GPIO pins, * enable interrupts on those bits so the interrupt routine * can count the events. Also set flag so interrupt routine * can know they are expected. */ chiprev = dd->ipath_revision >> INFINIPATH_R_CHIPREVMINOR_SHIFT; if ((chiprev & INFINIPATH_R_CHIPREVMINOR_MASK) > 1) { /* Rev2+ reports extra errors via internal GPIO pins */ dd->ipath_flags |= IPATH_GPIO_ERRINTRS; dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK; ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, dd->ipath_gpio_mask); } return 0; } /** * ipath_setup_pe_setextled - set the state of the two external LEDs * @dd: the infinipath device * @lst: the L state * @ltst: the LT state * These LEDs indicate the physical and logical state of IB link. * For this chip (at least with recommended board pinouts), LED1 * is Yellow (logical state) and LED2 is Green (physical state), * * Note: We try to match the Mellanox HCA LED behavior as best * we can. Green indicates physical link state is OK (something is * plugged in, and we can train). * Amber indicates the link is logically up (ACTIVE). * Mellanox further blinks the amber LED to indicate data packet * activity, but we have no hardware support for that, so it would * require waking up every 10-20 msecs and checking the counters * on the chip, and then turning the LED off if appropriate. That's * visible overhead, so not something we will do. * */ static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst, u64 ltst) { u64 extctl; unsigned long flags = 0; /* the diags use the LED to indicate diag info, so we leave * the external LED alone when the diags are running */ if (ipath_diag_inuse) return; /* Allow override of LED display for, e.g. Locating system in rack */ if (dd->ipath_led_override) { ltst = (dd->ipath_led_override & IPATH_LED_PHYS) ? INFINIPATH_IBCS_LT_STATE_LINKUP : INFINIPATH_IBCS_LT_STATE_DISABLED; lst = (dd->ipath_led_override & IPATH_LED_LOG) ? INFINIPATH_IBCS_L_STATE_ACTIVE : INFINIPATH_IBCS_L_STATE_DOWN; } spin_lock_irqsave(&dd->ipath_gpio_lock, flags); extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON | INFINIPATH_EXTC_LED2PRIPORT_ON); if (ltst & INFINIPATH_IBCS_LT_STATE_LINKUP) extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON; if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE) extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON; dd->ipath_extctrl = extctl; ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl); spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags); } /** * ipath_setup_pe_cleanup - clean up any per-chip chip-specific stuff * @dd: the infinipath device * * This is called during driver unload. * We do the pci_disable_msi here, not in generic code, because it * isn't used for the HT chips. If we do end up needing pci_enable_msi * at some point in the future for HT, we'll move the call back * into the main init_one code. 
*/ static void ipath_setup_pe_cleanup(struct ipath_devdata *dd) { dd->ipath_msi_lo = 0; /* just in case unload fails */ pci_disable_msi(dd->pcidev); } /** * ipath_setup_pe_config - setup PCIe config related stuff * @dd: the infinipath device * @pdev: the PCI device * * The pci_enable_msi() call will fail on systems with MSI quirks * such as those with AMD8131, even if the device of interest is not * attached to that device, (in the 2.6.13 - 2.6.15 kernels, at least, fixed * late in 2.6.16). * All that can be done is to edit the kernel source to remove the quirk * check until that is fixed. * We do not need to call enable_msi() for our HyperTransport chip, * even though it uses MSI, and we want to avoid the quirk warning, * so we call enable_msi only for PCIe. If we do end up needing * pci_enable_msi at some point in the future for HT, we'll move the * call back into the main init_one code. * We save the msi lo and hi values, so we can restore them after * chip reset (the kernel PCI infrastructure doesn't yet handle that * correctly). */ static int ipath_setup_pe_config(struct ipath_devdata *dd, struct pci_dev *pdev) { int pos, ret; dd->ipath_msi_lo = 0; /* used as a flag during reset processing */ ret = pci_enable_msi(dd->pcidev); if (ret) ipath_dev_err(dd, "pci_enable_msi failed: %d, " "interrupts may not work\n", ret); /* continue even if it fails, we may still be OK... */ dd->ipath_irq = pdev->irq; if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) { u16 control; pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO, &dd->ipath_msi_lo); pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI, &dd->ipath_msi_hi); pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control); /* now save the data (vector) info */ pci_read_config_word(dd->pcidev, pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8), &dd->ipath_msi_data); ipath_cdbg(VERBOSE, "Read msi data 0x%x from config offset " "0x%x, control=0x%x\n", dd->ipath_msi_data, pos + ((control & PCI_MSI_FLAGS_64BIT) ?
12 : 8), control); /* we save the cachelinesize also, although it doesn't * really matter */ pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, &dd->ipath_pci_cacheline); } else ipath_dev_err(dd, "Can't find MSI capability, " "can't save MSI settings for reset\n"); if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP))) { u16 linkstat; pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA, &linkstat); linkstat >>= 4; linkstat &= 0x1f; if (linkstat != 8) ipath_dev_err(dd, "PCIe width %u, " "performance reduced\n", linkstat); } else ipath_dev_err(dd, "Can't find PCI Express " "capability!\n"); return 0; } static void ipath_init_pe_variables(struct ipath_devdata *dd) { /* * bits for selecting i2c direction and values, * used for I2C serial flash */ dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM; dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM; dd->ipath_gpio_sda = IPATH_GPIO_SDA; dd->ipath_gpio_scl = IPATH_GPIO_SCL; /* variables for sanity checking interrupt and errors */ dd->ipath_hwe_bitsextant = (INFINIPATH_HWE_RXEMEMPARITYERR_MASK << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) | (INFINIPATH_HWE_TXEMEMPARITYERR_MASK << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) | (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) | INFINIPATH_HWE_PCIE1PLLFAILED | INFINIPATH_HWE_PCIE0PLLFAILED | INFINIPATH_HWE_PCIEPOISONEDTLP | INFINIPATH_HWE_PCIECPLTIMEOUT | INFINIPATH_HWE_PCIEBUSPARITYXTLH | INFINIPATH_HWE_PCIEBUSPARITYXADM | INFINIPATH_HWE_PCIEBUSPARITYRADM | INFINIPATH_HWE_MEMBISTFAILED | INFINIPATH_HWE_COREPLL_FBSLIP | INFINIPATH_HWE_COREPLL_RFSLIP | INFINIPATH_HWE_SERDESPLLFAILED | INFINIPATH_HWE_IBCBUSTOSPCPARITYERR | INFINIPATH_HWE_IBCBUSFRSPCPARITYERR; dd->ipath_i_bitsextant = (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) | (INFINIPATH_I_RCVAVAIL_MASK << INFINIPATH_I_RCVAVAIL_SHIFT) | INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT | INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO; dd->ipath_e_bitsextant = INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC | INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN | INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN | INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR | INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP | INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION | INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN | INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK | INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN | INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED | INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET | INFINIPATH_E_HARDWARE; dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK; dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK; /* * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity. * 2 is Some Misc, 3 is reserved for future. */ dd->ipath_eep_st_masks[0].hwerrs_to_log = INFINIPATH_HWE_TXEMEMPARITYERR_MASK << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT; /* Ignore errors in PIO/PBC on systems with unordered write-combining */ if (ipath_unordered_wc()) dd->ipath_eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY; dd->ipath_eep_st_masks[1].hwerrs_to_log = INFINIPATH_HWE_RXEMEMPARITYERR_MASK << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT; dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET; } /* setup the MSI stuff again after a reset. 
I'd like to just call * pci_enable_msi() and request_irq() again, but when I do that, * the MSI enable bit doesn't get set in the command word, and * we switch to a different interrupt vector, which is confusing, * so I instead just do it all inline. Perhaps somehow can tie this * into the PCIe hotplug support at some point * Note, because I'm doing it all here, I don't call pci_disable_msi() * or free_irq() at the start of ipath_setup_pe_reset(). */ static int ipath_reinit_msi(struct ipath_devdata *dd) { int pos; u16 control; int ret; if (!dd->ipath_msi_lo) { dev_info(&dd->pcidev->dev, "Can't restore MSI config, " "initial setup failed?\n"); ret = 0; goto bail; } if (!(pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) { ipath_dev_err(dd, "Can't find MSI capability, " "can't restore MSI settings\n"); ret = 0; goto bail; } ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n", dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO); pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO, dd->ipath_msi_lo); ipath_cdbg(VERBOSE, "Writing msi_hi 0x%x to config offset 0x%x\n", dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI); pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI, dd->ipath_msi_hi); pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control); if (!(control & PCI_MSI_FLAGS_ENABLE)) { ipath_cdbg(VERBOSE, "MSI control at off %x was %x, " "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS, control, control | PCI_MSI_FLAGS_ENABLE); control |= PCI_MSI_FLAGS_ENABLE; pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, control); } /* now rewrite the data (vector) info */ pci_write_config_word(dd->pcidev, pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8), dd->ipath_msi_data); /* we restore the cachelinesize also, although it doesn't really * matter */ pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, dd->ipath_pci_cacheline); /* and now set the pci master bit again */ pci_set_master(dd->pcidev); ret = 1; bail: return ret; } /* This routine sleeps, so it can only be called from user context, not * from interrupt context. If we need interrupt context, we can split * it into two routines. */ static int ipath_setup_pe_reset(struct ipath_devdata *dd) { u64 val; int i; int ret; /* Use ERROR so it shows up in logs, etc. */ ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit); /* keep chip from being accessed in a few places */ dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT); val = dd->ipath_control | INFINIPATH_C_RESET; ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val); mb(); for (i = 1; i <= 5; i++) { int r; /* allow MBIST, etc. to complete; longer on each retry. * We sometimes get machine checks from bus timeout if no * response, so for now, make it *really* long.
		 */
		msleep(1000 + (1 + i) * 2000);
		if ((r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
						dd->ipath_pcibar0)))
			ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
		if ((r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
						dd->ipath_pcibar1)))
			ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
		/* now re-enable memory access */
		if ((r = pci_enable_device(dd->pcidev)))
			ipath_dev_err(dd, "pci_enable_device failed after "
				      "reset: %d\n", r);
		/* whether it worked or not, mark as present, again */
		dd->ipath_flags |= IPATH_PRESENT;
		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
		if (val == dd->ipath_revision) {
			ipath_cdbg(VERBOSE, "Got matching revision "
				   "register %llx on try %d\n",
				   (unsigned long long) val, i);
			ret = ipath_reinit_msi(dd);
			goto bail;
		}
		/* Probably getting -1 back */
		ipath_dbg("Didn't get expected revision register, "
			  "got %llx, try %d\n", (unsigned long long) val,
			  i + 1);
	}
	ret = 0; /* failed */

bail:
	return ret;
}

/**
 * ipath_pe_put_tid - write a TID in chip
 * @dd: the infinipath device
 * @tidptr: pointer to the expected TID (in chip) to update
 * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
 *
 * This exists as a separate routine to allow for special locking etc.
 * It's used for both the full cleanup on exit, as well as the normal
 * setup and teardown.
 */
static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
			     u32 type, unsigned long pa)
{
	u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
	unsigned long flags = 0; /* keep gcc quiet */

	if (pa != dd->ipath_tidinvalid) {
		if (pa & ((1U << 11) - 1)) {
			dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
				 "not 2KB aligned!\n", pa);
			return;
		}
		pa >>= 11;
		/* paranoia check */
		if (pa & (7<<29))
			ipath_dev_err(dd,
				      "BUG: Physical page address 0x%lx "
				      "has bits set in 31-29\n", pa);

		if (type == RCVHQ_RCV_TYPE_EAGER)
			pa |= dd->ipath_tidtemplate;
		else /* for now, always full 4KB page */
			pa |= 2 << 29;
	}

	/*
	 * Workaround chip bug 9437 by writing the scratch register
	 * before and after the TID, and with an io write barrier.
	 * We use a spinlock around the writes, so they can't intermix
	 * with other TID (eager or expected) writes (the chip bug
	 * is triggered by back to back TID writes). Unfortunately, this
	 * call can be done from interrupt level for the port 0 eager TIDs,
	 * so we have to use irqsave locks.
	 */
	spin_lock_irqsave(&dd->ipath_tid_lock, flags);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeeddeaf);
	if (dd->ipath_kregbase)
		writel(pa, tidp32);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xdeadbeef);
	mmiowb();
	spin_unlock_irqrestore(&dd->ipath_tid_lock, flags);
}

/**
 * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher
 * @dd: the infinipath device
 * @tidptr: pointer to the expected TID (in chip) to update
 * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
 *
 * This exists as a separate routine to allow for selection of the
 * appropriate "flavor". The static calls in cleanup just use the
 * revision-agnostic form, as they are not performance critical.
 */
static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr,
			       u32 type, unsigned long pa)
{
	u32 __iomem *tidp32 = (u32 __iomem *)tidptr;

	if (pa != dd->ipath_tidinvalid) {
		if (pa & ((1U << 11) - 1)) {
			dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
				 "not 2KB aligned!\n", pa);
			return;
		}
		pa >>= 11;
		/* paranoia check */
		if (pa & (7<<29))
			ipath_dev_err(dd,
				      "BUG: Physical page address 0x%lx "
				      "has bits set in 31-29\n", pa);

		if (type == RCVHQ_RCV_TYPE_EAGER)
			pa |= dd->ipath_tidtemplate;
		else /* for now, always full 4KB page */
			pa |= 2 << 29;
	}
	if (dd->ipath_kregbase)
		writel(pa, tidp32);
	mmiowb();
}

/**
 * ipath_pe_clear_tids - clear all TID entries for a port, expected and eager
 * @dd: the infinipath device
 * @port: the port
 *
 * clear all TID entries for a port, expected and eager.
 * Used from ipath_close(). On this chip, TIDs are only 32 bits,
 * not 64, but they are still on 64 bit boundaries, so tidbase
 * is declared as u64 * for the pointer math, even though we write 32 bits
 */
static void ipath_pe_clear_tids(struct ipath_devdata *dd, unsigned port)
{
	u64 __iomem *tidbase;
	unsigned long tidinv;
	int i;

	if (!dd->ipath_kregbase)
		return;

	ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);

	tidinv = dd->ipath_tidinvalid;
	tidbase = (u64 __iomem *)
		((char __iomem *)(dd->ipath_kregbase) +
		 dd->ipath_rcvtidbase +
		 port * dd->ipath_rcvtidcnt * sizeof(*tidbase));

	for (i = 0; i < dd->ipath_rcvtidcnt; i++)
		dd->ipath_f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
				    tidinv);

	tidbase = (u64 __iomem *)
		((char __iomem *)(dd->ipath_kregbase) +
		 dd->ipath_rcvegrbase +
		 port * dd->ipath_rcvegrcnt * sizeof(*tidbase));

	for (i = 0; i < dd->ipath_rcvegrcnt; i++)
		dd->ipath_f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
				    tidinv);
}

/**
 * ipath_pe_tidtemplate - setup constants for TID updates
 * @dd: the infinipath device
 *
 * We setup stuff that we use a lot, to avoid calculating each time
 */
static void ipath_pe_tidtemplate(struct ipath_devdata *dd)
{
	u32 egrsize = dd->ipath_rcvegrbufsize;

	/* For now, we always allocate 4KB buffers (at init) so we can
	 * receive max size packets. We may want a module parameter to
	 * specify 2KB or 4KB and/or make it per port instead of per device
	 * for those who want to reduce memory footprint. Note that the
	 * ipath_rcvhdrentsize size must be large enough to hold the largest
	 * IB header (currently 96 bytes) that we expect to handle (plus of
	 * course the 2 dwords of RHF).
	 */
	if (egrsize == 2048)
		dd->ipath_tidtemplate = 1U << 29;
	else if (egrsize == 4096)
		dd->ipath_tidtemplate = 2U << 29;
	else {
		egrsize = 4096;
		dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize "
			 "%u, using %u\n", dd->ipath_rcvegrbufsize,
			 egrsize);
		dd->ipath_tidtemplate = 2U << 29;
	}
	dd->ipath_tidinvalid = 0;
}

static int ipath_pe_early_init(struct ipath_devdata *dd)
{
	dd->ipath_flags |= IPATH_4BYTE_TID;
	if (ipath_unordered_wc())
		dd->ipath_flags |= IPATH_PIO_FLUSH_WC;

	/*
	 * For openfabrics, we need to be able to handle an IB header of
	 * 24 dwords. HT chip has arbitrary sized receive buffers, so we
	 * made them the same size as the PIO buffers. This chip does not
	 * handle arbitrary size buffers, so we need the header large enough
	 * to handle largest IB header, but still have room for a 2KB MTU
	 * standard IB packet.
	 */
	dd->ipath_rcvhdrentsize = 24;
	dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;

	/*
	 * To truly support a 4KB MTU (for usermode), we need to
	 * bump this to a larger value. For now, we use them for
	 * the kernel only.
	 */
	dd->ipath_rcvegrbufsize = 2048;
	/*
	 * the min() check here is currently a nop, but it may not always
	 * be, depending on just how we do ipath_rcvegrbufsize
	 */
	dd->ipath_ibmaxlen = min(dd->ipath_piosize2k,
				 dd->ipath_rcvegrbufsize +
				 (dd->ipath_rcvhdrentsize << 2));
	dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;

	/*
	 * We can request a receive interrupt for 1 or
	 * more packets from current offset. For now, we set this
	 * up for a single packet.
	 */
	dd->ipath_rhdrhead_intr_off = 1ULL<<32;

	ipath_get_eeprom_info(dd);

	return 0;
}

int __attribute__((weak)) ipath_unordered_wc(void)
{
	return 0;
}

/**
 * ipath_pe_get_base_info - set chip-specific flags for user code
 * @pd: the infinipath port
 * @kbase: ipath_base_info pointer
 *
 * We set the PCIE flag because the lower bandwidth on PCIe vs
 * HyperTransport can affect some user packet algorithms.
 */
static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase)
{
	struct ipath_base_info *kinfo = kbase;
	struct ipath_devdata *dd;

	if (ipath_unordered_wc()) {
		kinfo->spi_runtime_flags |= IPATH_RUNTIME_FORCE_WC_ORDER;
		ipath_cdbg(PROC, "Intel processor, forcing WC order\n");
	} else
		ipath_cdbg(PROC, "Not Intel processor, WC ordered\n");

	if (pd == NULL)
		goto done;

	dd = pd->port_dd;

done:
	kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE |
		IPATH_RUNTIME_FORCE_PIOAVAIL | IPATH_RUNTIME_PIO_REGSWAPPED;
	return 0;
}

static void ipath_pe_free_irq(struct ipath_devdata *dd)
{
	free_irq(dd->ipath_irq, dd);
	dd->ipath_irq = 0;
}

/*
 * On platforms using this chip, and not having ordered WC stores, we
 * can get TXE parity errors due to speculative reads to the PIO buffers,
 * and this, due to a chip bug can result in (many) false parity error
 * reports. So it's a debug print on those, and an info print on systems
 * where the speculative reads don't occur.
 * Because we can get lots of false errors, we have no upper limit
 * on recovery attempts on those platforms.
 */
static int ipath_pe_txe_recover(struct ipath_devdata *dd)
{
	if (ipath_unordered_wc())
		ipath_dbg("Recovering from TXE PIO parity error\n");
	else {
		int cnt = ++ipath_stats.sps_txeparity;

		if (cnt >= IPATH_MAX_PARITY_ATTEMPTS) {
			if (cnt == IPATH_MAX_PARITY_ATTEMPTS)
				ipath_dev_err(dd,
					      "Too many attempts to recover from "
					      "TXE parity, giving up\n");
			return 0;
		}
		dev_info(&dd->pcidev->dev,
			 "Recovering from TXE PIO parity error\n");
	}
	return 1;
}

/**
 * ipath_init_iba6120_funcs - set up the chip-specific function pointers
 * @dd: the infinipath device
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 */
void ipath_init_iba6120_funcs(struct ipath_devdata *dd)
{
	dd->ipath_f_intrsetup = ipath_pe_intconfig;
	dd->ipath_f_bus = ipath_setup_pe_config;
	dd->ipath_f_reset = ipath_setup_pe_reset;
	dd->ipath_f_get_boardname = ipath_pe_boardname;
	dd->ipath_f_init_hwerrors = ipath_pe_init_hwerrors;
	dd->ipath_f_early_init = ipath_pe_early_init;
	dd->ipath_f_handle_hwerrors = ipath_pe_handle_hwerrors;
	dd->ipath_f_quiet_serdes = ipath_pe_quiet_serdes;
	dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes;
	dd->ipath_f_clear_tids = ipath_pe_clear_tids;
	/*
	 * this may get changed after we read the chip revision,
	 * but we start with the safe version for all revs
	 */
	dd->ipath_f_put_tid = ipath_pe_put_tid;
	dd->ipath_f_cleanup = ipath_setup_pe_cleanup;
	dd->ipath_f_setextled = ipath_setup_pe_setextled;
	dd->ipath_f_get_base_info = ipath_pe_get_base_info;
	dd->ipath_f_free_irq = ipath_pe_free_irq;

	/* initialize chip-specific variables */
	dd->ipath_f_tidtemplate = ipath_pe_tidtemplate;

	/*
	 * setup the register offsets, since they are different for each
	 * chip
	 */
	dd->ipath_kregs = &ipath_pe_kregs;
	dd->ipath_cregs = &ipath_pe_cregs;

	ipath_init_pe_variables(dd);
}
gpl-2.0
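ipath_reinit_msi() above restores MSI state by poking the MSI capability directly through PCI config space instead of calling pci_enable_msi() again. The stand-alone sketch below models that sequence against a fake 256-byte config-space image; the image, helper names, and main() are illustrative assumptions, while the offsets (capability pointer at 0x34, MSI flags at +2, address at +4/+8, data at +8 or +12 depending on 64-bit support) follow the standard PCI MSI layout.

/*
 * Toy model (not kernel code) of the "find capability, rewrite saved
 * MSI address/data, force the enable bit" pattern from ipath_reinit_msi().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CFG_SIZE	  256
#define CAP_PTR_OFF	  0x34	/* standard capabilities pointer */
#define CAP_ID_MSI	  0x05
#define MSI_FLAGS_OFF	  2	/* message control, relative to capability */
#define MSI_ADDR_LO_OFF	  4
#define MSI_FLAGS_ENABLE  0x0001
#define MSI_FLAGS_64BIT	  0x0080

static uint8_t cfg[CFG_SIZE];	/* stand-in for device config space */

static uint16_t rd16(int off) { uint16_t v; memcpy(&v, cfg + off, 2); return v; }
static void wr16(int off, uint16_t v) { memcpy(cfg + off, &v, 2); }
static void wr32(int off, uint32_t v) { memcpy(cfg + off, &v, 4); }

/* linear walk of the capability chain, like pci_find_capability() */
static int find_cap(uint8_t id)
{
	int pos = cfg[CAP_PTR_OFF];

	while (pos) {
		if (cfg[pos] == id)
			return pos;
		pos = cfg[pos + 1];	/* "next capability" pointer */
	}
	return 0;
}

/* restore a previously saved MSI address/data pair after a reset */
static int reinit_msi(uint32_t saved_lo, uint32_t saved_hi, uint16_t saved_data)
{
	int pos = find_cap(CAP_ID_MSI);
	uint16_t control;

	if (!pos)
		return -1;
	/* the driver above also writes the hi dword unconditionally */
	wr32(pos + MSI_ADDR_LO_OFF, saved_lo);
	wr32(pos + MSI_ADDR_LO_OFF + 4, saved_hi);
	control = rd16(pos + MSI_FLAGS_OFF);
	if (!(control & MSI_FLAGS_ENABLE)) {
		control |= MSI_FLAGS_ENABLE;
		wr16(pos + MSI_FLAGS_OFF, control);
	}
	/* data (vector) register is at +12 for 64-bit capable parts, +8 else */
	wr16(pos + ((control & MSI_FLAGS_64BIT) ? 12 : 8), saved_data);
	return 0;
}

int main(void)
{
	cfg[CAP_PTR_OFF] = 0x50;	/* one MSI capability at 0x50 */
	cfg[0x50] = CAP_ID_MSI;
	cfg[0x51] = 0;			/* end of chain */

	if (reinit_msi(0xfee00000u, 0, 0x4041) == 0)
		printf("MSI restored, control=0x%04x\n",
		       rd16(0x50 + MSI_FLAGS_OFF));
	return 0;
}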
ptmr3/GalaxyNote2_Kernel
drivers/ata/ahci.c
531
40861
/* * ahci.c - AHCI SATA support * * Maintained by: Jeff Garzik <jgarzik@pobox.com> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * * Copyright 2004-2005 Red Hat, Inc. * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * * libata documentation is available via 'make {ps|pdf}docs', * as Documentation/DocBook/libata.* * * AHCI hardware documentation: * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/device.h> #include <linux/dmi.h> #include <linux/gfp.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <linux/libata.h> #include "ahci.h" #define DRV_NAME "ahci" #define DRV_VERSION "3.0" enum { AHCI_PCI_BAR = 5, }; enum board_ids { /* board IDs by feature in alphabetical order */ board_ahci, board_ahci_ign_iferr, board_ahci_nosntf, board_ahci_yes_fbs, /* board IDs for specific chipsets in alphabetical order */ board_ahci_mcp65, board_ahci_mcp77, board_ahci_mcp89, board_ahci_mv, board_ahci_sb600, board_ahci_sb700, /* for SB700 and SB800 */ board_ahci_vt8251, /* aliases */ board_ahci_mcp_linux = board_ahci_mcp65, board_ahci_mcp67 = board_ahci_mcp65, board_ahci_mcp73 = board_ahci_mcp65, board_ahci_mcp79 = board_ahci_mcp77, }; static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class, unsigned long deadline); static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); #ifdef CONFIG_PM static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); static int ahci_pci_device_resume(struct pci_dev *pdev); #endif static struct scsi_host_template ahci_sht = { AHCI_SHT("ahci"), }; static struct ata_port_operations ahci_vt8251_ops = { .inherits = &ahci_ops, .hardreset = ahci_vt8251_hardreset, }; static struct ata_port_operations ahci_p5wdh_ops = { .inherits = &ahci_ops, .hardreset = ahci_p5wdh_hardreset, }; static struct ata_port_operations ahci_sb600_ops = { .inherits = &ahci_ops, .softreset = ahci_sb600_softreset, .pmp_softreset = ahci_sb600_softreset, }; #define AHCI_HFLAGS(flags) .private_data = (void *)(flags) static const struct ata_port_info ahci_port_info[] = { /* by features */ [board_ahci] = { .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, [board_ahci_ign_iferr] = { AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR), .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, [board_ahci_nosntf] = { AHCI_HFLAGS 
(AHCI_HFLAG_NO_SNTF), .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, [board_ahci_yes_fbs] = { AHCI_HFLAGS (AHCI_HFLAG_YES_FBS), .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, /* by chipsets */ [board_ahci_mcp65] = { AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ), .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, [board_ahci_mcp77] = { AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP), .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, [board_ahci_mcp89] = { AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA), .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, [board_ahci_mv] = { AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI | AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP), .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, [board_ahci_sb600] = { AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 | AHCI_HFLAG_32BIT_ONLY), .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_sb600_ops, }, [board_ahci_sb700] = /* for SB700 and SB800 */ { AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL), .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_sb600_ops, }, [board_ahci_vt8251] = { AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP), .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_vt8251_ops, }, }; static const struct pci_device_id ahci_pci_tbl[] = { /* Intel */ { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */ { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */ { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */ { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */ { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */ { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */ { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */ { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */ { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */ { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */ { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */ { PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8 */ { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */ { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */ { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */ { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */ { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */ { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */ { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */ { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */ { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */ { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */ { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */ { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */ { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */ { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */ { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */ { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */ { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */ { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */ { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */ { PCI_VDEVICE(INTEL, 0x3a25), 
board_ahci }, /* ICH10 */ { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */ { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */ { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */ { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */ { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */ { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */ { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */ { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */ { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI */ { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */ { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */ { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */ { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */ { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr }, /* ATI */ { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */ { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */ { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */ { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */ { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */ { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */ { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */ /* AMD */ { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */ { PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */ /* AMD is using RAID class only for ahci controllers */ { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci }, /* VIA */ { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */ { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */ /* NVIDIA */ { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */ { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */ { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */ { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */ { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */ { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */ { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */ { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */ { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_mcp67 }, /* MCP67 */ { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_mcp67 }, /* MCP67 */ { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_mcp67 }, /* MCP67 */ { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_mcp67 }, /* MCP67 */ { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_mcp67 
}, /* MCP67 */ { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_mcp67 }, /* MCP67 */ { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_mcp67 }, /* MCP67 */ { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_mcp67 }, /* MCP67 */ { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_mcp67 }, /* MCP67 */ { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_mcp67 }, /* MCP67 */ { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_mcp67 }, /* MCP67 */ { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_mcp67 }, /* MCP67 */ { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_mcp_linux }, /* Linux ID */ { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_mcp_linux }, /* Linux ID */ { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_mcp_linux }, /* Linux ID */ { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_mcp_linux }, /* Linux ID */ { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_mcp_linux }, /* Linux ID */ { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_mcp_linux }, /* Linux ID */ { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_mcp_linux }, /* Linux ID */ { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_mcp_linux }, /* Linux ID */ { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_mcp_linux }, /* Linux ID */ { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_mcp_linux }, /* Linux ID */ { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_mcp_linux }, /* Linux ID */ { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_mcp_linux }, /* Linux ID */ { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_mcp_linux }, /* Linux ID */ { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_mcp_linux }, /* Linux ID */ { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_mcp_linux }, /* Linux ID */ { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_mcp_linux }, /* Linux ID */ { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_mcp73 }, /* MCP73 */ { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_mcp73 }, /* MCP73 */ { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_mcp73 }, /* MCP73 */ { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_mcp73 }, /* MCP73 */ { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_mcp73 }, /* MCP73 */ { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_mcp73 }, /* MCP73 */ { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_mcp73 }, /* MCP73 */ { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_mcp73 }, /* MCP73 */ { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_mcp73 }, /* MCP73 */ { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_mcp73 }, /* MCP73 */ { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_mcp73 }, /* MCP73 */ { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_mcp73 }, /* MCP73 */ { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci_mcp77 }, /* MCP77 */ { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci_mcp77 }, /* MCP77 */ { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci_mcp77 }, /* MCP77 */ { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci_mcp77 }, /* MCP77 */ { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci_mcp77 }, /* MCP77 */ { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci_mcp77 }, /* MCP77 */ { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci_mcp77 }, /* MCP77 */ { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci_mcp77 }, /* MCP77 */ { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci_mcp77 }, /* MCP77 */ { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci_mcp77 }, /* MCP77 */ { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci_mcp77 }, /* MCP77 */ { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci_mcp77 }, /* MCP77 */ { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci_mcp79 }, /* MCP79 */ { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci_mcp79 }, /* MCP79 */ { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci_mcp79 }, /* MCP79 */ { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci_mcp79 }, /* MCP79 */ { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci_mcp79 }, /* MCP79 */ { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci_mcp79 }, /* MCP79 */ { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci_mcp79 }, /* MCP79 */ { PCI_VDEVICE(NVIDIA, 
0x0abb), board_ahci_mcp79 }, /* MCP79 */ { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci_mcp79 }, /* MCP79 */ { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci_mcp79 }, /* MCP79 */ { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci_mcp79 }, /* MCP79 */ { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci_mcp79 }, /* MCP79 */ { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci_mcp89 }, /* MCP89 */ { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci_mcp89 }, /* MCP89 */ { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci_mcp89 }, /* MCP89 */ { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci_mcp89 }, /* MCP89 */ { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci_mcp89 }, /* MCP89 */ { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci_mcp89 }, /* MCP89 */ { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci_mcp89 }, /* MCP89 */ { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci_mcp89 }, /* MCP89 */ { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci_mcp89 }, /* MCP89 */ { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci_mcp89 }, /* MCP89 */ { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci_mcp89 }, /* MCP89 */ { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci_mcp89 }, /* MCP89 */ /* SiS */ { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */ { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */ { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */ /* Marvell */ { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */ { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */ { PCI_DEVICE(0x1b4b, 0x9123), .class = PCI_CLASS_STORAGE_SATA_AHCI, .class_mask = 0xffffff, .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ { PCI_DEVICE(0x1b4b, 0x9125), .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ { PCI_DEVICE(0x1b4b, 0x917a), .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ { PCI_DEVICE(0x1b4b, 0x9192), .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */ { PCI_DEVICE(0x1b4b, 0x91a3), .driver_data = board_ahci_yes_fbs }, /* Promise */ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ /* Asmedia */ { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */ { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */ { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */ { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ /* Generic, PCI class code for AHCI */ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, { } /* terminate list */ }; static struct pci_driver ahci_pci_driver = { .name = DRV_NAME, .id_table = ahci_pci_tbl, .probe = ahci_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ahci_pci_device_suspend, .resume = ahci_pci_device_resume, #endif }; #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE) static int marvell_enable; #else static int marvell_enable = 1; #endif module_param(marvell_enable, int, 0644); MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)"); static void ahci_pci_save_initial_config(struct pci_dev *pdev, struct ahci_host_priv *hpriv) { unsigned int force_port_map = 0; unsigned int mask_port_map = 0; if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) { dev_info(&pdev->dev, "JMB361 has only one port\n"); force_port_map = 1; } /* * Temporary Marvell 6145 hack: PATA port presence * is asserted through the standard AHCI port * presence register, as bit 4 (counting from 0) */ if (hpriv->flags & AHCI_HFLAG_MV_PATA) { if (pdev->device == 0x6121) mask_port_map = 0x3; else mask_port_map = 0xf; dev_info(&pdev->dev, "Disabling your PATA port. 
Use the boot option 'ahci.marvell_enable=0' to avoid this.\n"); } ahci_save_initial_config(&pdev->dev, hpriv, force_port_map, mask_port_map); } static int ahci_pci_reset_controller(struct ata_host *host) { struct pci_dev *pdev = to_pci_dev(host->dev); ahci_reset_controller(host); if (pdev->vendor == PCI_VENDOR_ID_INTEL) { struct ahci_host_priv *hpriv = host->private_data; u16 tmp16; /* configure PCS */ pci_read_config_word(pdev, 0x92, &tmp16); if ((tmp16 & hpriv->port_map) != hpriv->port_map) { tmp16 |= hpriv->port_map; pci_write_config_word(pdev, 0x92, tmp16); } } return 0; } static void ahci_pci_init_controller(struct ata_host *host) { struct ahci_host_priv *hpriv = host->private_data; struct pci_dev *pdev = to_pci_dev(host->dev); void __iomem *port_mmio; u32 tmp; int mv; if (hpriv->flags & AHCI_HFLAG_MV_PATA) { if (pdev->device == 0x6121) mv = 2; else mv = 4; port_mmio = __ahci_port_base(host, mv); writel(0, port_mmio + PORT_IRQ_MASK); /* clear port IRQ */ tmp = readl(port_mmio + PORT_IRQ_STAT); VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp); if (tmp) writel(tmp, port_mmio + PORT_IRQ_STAT); } ahci_init_controller(host); } static int ahci_sb600_check_ready(struct ata_link *link) { void __iomem *port_mmio = ahci_port_base(link->ap); u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF; u32 irq_status = readl(port_mmio + PORT_IRQ_STAT); /* * There is no need to check TFDATA if BAD PMP is found due to HW bug, * which can save timeout delay. */ if (irq_status & PORT_IRQ_BAD_PMP) return -EIO; return ata_check_ready(status); } static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class, unsigned long deadline) { struct ata_port *ap = link->ap; void __iomem *port_mmio = ahci_port_base(ap); int pmp = sata_srst_pmp(link); int rc; u32 irq_sts; DPRINTK("ENTER\n"); rc = ahci_do_softreset(link, class, pmp, deadline, ahci_sb600_check_ready); /* * Soft reset fails on some ATI chips with IPMS set when PMP * is enabled but SATA HDD/ODD is connected to SATA port, * do soft reset again to port 0. */ if (rc == -EIO) { irq_sts = readl(port_mmio + PORT_IRQ_STAT); if (irq_sts & PORT_IRQ_BAD_PMP) { ata_link_printk(link, KERN_WARNING, "applying SB600 PMP SRST workaround " "and retrying\n"); rc = ahci_do_softreset(link, class, 0, deadline, ahci_check_ready); } } return rc; } static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline) { struct ata_port *ap = link->ap; bool online; int rc; DPRINTK("ENTER\n"); ahci_stop_engine(ap); rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), deadline, &online, NULL); ahci_start_engine(ap); DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); /* vt8251 doesn't clear BSY on signature FIS reception, * request follow-up softreset. */ return online ? 
-EAGAIN : rc; } static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline) { struct ata_port *ap = link->ap; struct ahci_port_priv *pp = ap->private_data; u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; struct ata_taskfile tf; bool online; int rc; ahci_stop_engine(ap); /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); tf.command = 0x80; ata_tf_to_fis(&tf, 0, 0, d2h_fis); rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), deadline, &online, NULL); ahci_start_engine(ap); /* The pseudo configuration device on SIMG4726 attached to * ASUS P5W-DH Deluxe doesn't send signature FIS after * hardreset if no device is attached to the first downstream * port && the pseudo device locks up on SRST w/ PMP==0. To * work around this, wait for !BSY only briefly. If BSY isn't * cleared, perform CLO and proceed to IDENTIFY (achieved by * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA). * * Wait for two seconds. Devices attached to downstream port * which can't process the following IDENTIFY after this will * have to be reset again. For most cases, this should * suffice while making probing snappish enough. */ if (online) { rc = ata_wait_after_reset(link, jiffies + 2 * HZ, ahci_check_ready); if (rc) ahci_kick_engine(ap); } return rc; } #ifdef CONFIG_PM static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) { struct ata_host *host = dev_get_drvdata(&pdev->dev); struct ahci_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->mmio; u32 ctl; if (mesg.event & PM_EVENT_SUSPEND && hpriv->flags & AHCI_HFLAG_NO_SUSPEND) { dev_printk(KERN_ERR, &pdev->dev, "BIOS update required for suspend/resume\n"); return -EIO; } if (mesg.event & PM_EVENT_SLEEP) { /* AHCI spec rev1.1 section 8.3.3: * Software must disable interrupts prior to requesting a * transition of the HBA to D3 state. 
*/ ctl = readl(mmio + HOST_CTL); ctl &= ~HOST_IRQ_EN; writel(ctl, mmio + HOST_CTL); readl(mmio + HOST_CTL); /* flush */ } return ata_pci_device_suspend(pdev, mesg); } static int ahci_pci_device_resume(struct pci_dev *pdev) { struct ata_host *host = dev_get_drvdata(&pdev->dev); int rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { rc = ahci_pci_reset_controller(host); if (rc) return rc; ahci_pci_init_controller(host); } ata_host_resume(host); return 0; } #endif static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac) { int rc; if (using_dac && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (rc) { rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_printk(KERN_ERR, &pdev->dev, "64-bit DMA enable failed\n"); return rc; } } } else { rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_printk(KERN_ERR, &pdev->dev, "32-bit DMA enable failed\n"); return rc; } rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_printk(KERN_ERR, &pdev->dev, "32-bit consistent DMA enable failed\n"); return rc; } } return 0; } static void ahci_pci_print_info(struct ata_host *host) { struct pci_dev *pdev = to_pci_dev(host->dev); u16 cc; const char *scc_s; pci_read_config_word(pdev, 0x0a, &cc); if (cc == PCI_CLASS_STORAGE_IDE) scc_s = "IDE"; else if (cc == PCI_CLASS_STORAGE_SATA) scc_s = "SATA"; else if (cc == PCI_CLASS_STORAGE_RAID) scc_s = "RAID"; else scc_s = "unknown"; ahci_print_info(host, scc_s); } /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't * support PMP and the 4726 either directly exports the device * attached to the first downstream port or acts as a hardware storage * controller and emulate a single ATA device (can be RAID 0/1 or some * other configuration). * * When there's no device attached to the first downstream port of the * 4726, "Config Disk" appears, which is a pseudo ATA device to * configure the 4726. However, ATA emulation of the device is very * lame. It doesn't send signature D2H Reg FIS after the initial * hardreset, pukes on SRST w/ PMP==0 and has bunch of other issues. * * The following function works around the problem by always using * hardreset on the port and not depending on receiving signature FIS * afterward. If signature FIS isn't received soon, ATA class is * assumed without follow-up softreset. */ static void ahci_p5wdh_workaround(struct ata_host *host) { static struct dmi_system_id sysids[] = { { .ident = "P5W DH Deluxe", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTEK COMPUTER INC"), DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"), }, }, { } }; struct pci_dev *pdev = to_pci_dev(host->dev); if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) && dmi_check_system(sysids)) { struct ata_port *ap = host->ports[1]; dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH " "Deluxe on-board SIMG4726 workaround\n"); ap->ops = &ahci_p5wdh_ops; ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA; } } /* only some SB600 ahci controllers can do 64bit DMA */ static bool ahci_sb600_enable_64bit(struct pci_dev *pdev) { static const struct dmi_system_id sysids[] = { /* * The oldest version known to be broken is 0901 and * working is 1501 which was released on 2007-10-26. * Enable 64bit DMA on 1501 and anything newer. * * Please read bko#9412 for more info. 
*/ { .ident = "ASUS M2A-VM", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"), }, .driver_data = "20071026", /* yyyymmdd */ }, /* * All BIOS versions for the MSI K9A2 Platinum (MS-7376) * support 64bit DMA. * * BIOS versions earlier than 1.5 had the Manufacturer DMI * fields as "MICRO-STAR INTERANTIONAL CO.,LTD". * This spelling mistake was fixed in BIOS version 1.5, so * 1.5 and later have the Manufacturer as * "MICRO-STAR INTERNATIONAL CO.,LTD". * So try to match on DMI_BOARD_VENDOR of "MICRO-STAR INTER". * * BIOS versions earlier than 1.9 had a Board Product Name * DMI field of "MS-7376". This was changed to be * "K9A2 Platinum (MS-7376)" in version 1.9, but we can still * match on DMI_BOARD_NAME of "MS-7376". */ { .ident = "MSI K9A2 Platinum", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTER"), DMI_MATCH(DMI_BOARD_NAME, "MS-7376"), }, }, /* * All BIOS versions for the Asus M3A support 64bit DMA. * (all release versions from 0301 to 1206 were tested) */ { .ident = "ASUS M3A", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "M3A"), }, }, { } }; const struct dmi_system_id *match; int year, month, date; char buf[9]; match = dmi_first_match(sysids); if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) || !match) return false; if (!match->driver_data) goto enable_64bit; dmi_get_date(DMI_BIOS_DATE, &year, &month, &date); snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date); if (strcmp(buf, match->driver_data) >= 0) goto enable_64bit; else { dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, " "forcing 32bit DMA, update BIOS\n", match->ident); return false; } enable_64bit: dev_printk(KERN_WARNING, &pdev->dev, "%s: enabling 64bit DMA\n", match->ident); return true; } static bool ahci_broken_system_poweroff(struct pci_dev *pdev) { static const struct dmi_system_id broken_systems[] = { { .ident = "HP Compaq nx6310", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"), }, /* PCI slot number of the controller */ .driver_data = (void *)0x1FUL, }, { .ident = "HP Compaq 6720s", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"), }, /* PCI slot number of the controller */ .driver_data = (void *)0x1FUL, }, { } /* terminate list */ }; const struct dmi_system_id *dmi = dmi_first_match(broken_systems); if (dmi) { unsigned long slot = (unsigned long)dmi->driver_data; /* apply the quirk only to on-board controllers */ return slot == PCI_SLOT(pdev->devfn); } return false; } static bool ahci_broken_suspend(struct pci_dev *pdev) { static const struct dmi_system_id sysids[] = { /* * On HP dv[4-6] and HDX18 with earlier BIOSen, link * to the harddisk doesn't become online after * resuming from STR. Warn and fail suspend. * * http://bugzilla.kernel.org/show_bug.cgi?id=12276 * * Use dates instead of versions to match as HP is * apparently recycling both product and version * strings. 
* * http://bugzilla.kernel.org/show_bug.cgi?id=15462 */ { .ident = "dv4", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"), }, .driver_data = "20090105", /* F.30 */ }, { .ident = "dv5", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv5 Notebook PC"), }, .driver_data = "20090506", /* F.16 */ }, { .ident = "dv6", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"), }, .driver_data = "20090423", /* F.21 */ }, { .ident = "HDX18", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP HDX18 Notebook PC"), }, .driver_data = "20090430", /* F.23 */ }, /* * Acer eMachines G725 has the same problem. BIOS * V1.03 is known to be broken. V3.04 is known to * work. Between, there are V1.06, V2.06 and V3.03 * that we don't have much idea about. For now, * blacklist anything older than V3.04. * * http://bugzilla.kernel.org/show_bug.cgi?id=15104 */ { .ident = "G725", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "eMachines"), DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"), }, .driver_data = "20091216", /* V3.04 */ }, { } /* terminate list */ }; const struct dmi_system_id *dmi = dmi_first_match(sysids); int year, month, date; char buf[9]; if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2)) return false; dmi_get_date(DMI_BIOS_DATE, &year, &month, &date); snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date); return strcmp(buf, dmi->driver_data) < 0; } static bool ahci_broken_online(struct pci_dev *pdev) { #define ENCODE_BUSDEVFN(bus, slot, func) \ (void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func))) static const struct dmi_system_id sysids[] = { /* * There are several gigabyte boards which use * SIMG5723s configured as hardware RAID. Certain * 5723 firmware revisions shipped there keep the link * online but fail to answer properly to SRST or * IDENTIFY when no device is attached downstream * causing libata to retry quite a few times leading * to excessive detection delay. * * As these firmwares respond to the second reset try * with invalid device signature, considering unknown * sig as offline works around the problem acceptably. */ { .ident = "EP45-DQ6", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"), }, .driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0), }, { .ident = "EP45-DS5", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"), }, .driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0), }, { } /* terminate list */ }; #undef ENCODE_BUSDEVFN const struct dmi_system_id *dmi = dmi_first_match(sysids); unsigned int val; if (!dmi) return false; val = (unsigned long)dmi->driver_data; return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff); } #ifdef CONFIG_ATA_ACPI static void ahci_gtf_filter_workaround(struct ata_host *host) { static const struct dmi_system_id sysids[] = { /* * Aspire 3810T issues a bunch of SATA enable commands * via _GTF including an invalid one and one which is * rejected by the device. Among the successful ones * is FPDMA non-zero offset enable which when enabled * only on the drive side leads to NCQ command * failures. Filter it out. 
*/ { .ident = "Aspire 3810T", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3810T"), }, .driver_data = (void *)ATA_ACPI_FILTER_FPDMA_OFFSET, }, { } }; const struct dmi_system_id *dmi = dmi_first_match(sysids); unsigned int filter; int i; if (!dmi) return; filter = (unsigned long)dmi->driver_data; dev_printk(KERN_INFO, host->dev, "applying extra ACPI _GTF filter 0x%x for %s\n", filter, dmi->ident); for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; struct ata_link *link; struct ata_device *dev; ata_for_each_link(link, ap, EDGE) ata_for_each_dev(dev, link, ALL) dev->gtf_filter |= filter; } } #else static inline void ahci_gtf_filter_workaround(struct ata_host *host) {} #endif static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int printed_version; unsigned int board_id = ent->driver_data; struct ata_port_info pi = ahci_port_info[board_id]; const struct ata_port_info *ppi[] = { &pi, NULL }; struct device *dev = &pdev->dev; struct ahci_host_priv *hpriv; struct ata_host *host; int n_ports, i, rc; VPRINTK("ENTER\n"); WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS); if (!printed_version++) dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); /* The AHCI driver can only drive the SATA ports, the PATA driver can drive them all so if both drivers are selected make sure AHCI stays out of the way */ if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable) return -ENODEV; /* * For some reason, MCP89 on MacBook 7,1 doesn't work with * ahci, use ata_generic instead. */ if (pdev->vendor == PCI_VENDOR_ID_NVIDIA && pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA && pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE && pdev->subsystem_device == 0xcb89) return -ENODEV; /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode. * At the moment, we can only use the AHCI mode. Let the users know * that for SAS drives they're out of luck. */ if (pdev->vendor == PCI_VENDOR_ID_PROMISE) dev_printk(KERN_INFO, &pdev->dev, "PDC42819 " "can only drive SATA devices with this driver\n"); /* acquire resources */ rc = pcim_enable_device(pdev); if (rc) return rc; /* AHCI controllers often implement SFF compatible interface. * Grab all PCI BARs just in case. */ rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME); if (rc == -EBUSY) pcim_pin_device(pdev); if (rc) return rc; if (pdev->vendor == PCI_VENDOR_ID_INTEL && (pdev->device == 0x2652 || pdev->device == 0x2653)) { u8 map; /* ICH6s share the same PCI ID for both piix and ahci * modes. Enabling ahci mode while MAP indicates * combined mode is a bad idea. Yield to ata_piix. 
*/ pci_read_config_byte(pdev, ICH_MAP, &map); if (map & 0x3) { dev_printk(KERN_INFO, &pdev->dev, "controller is in " "combined mode, can't enable AHCI mode\n"); return -ENODEV; } } hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); if (!hpriv) return -ENOMEM; hpriv->flags |= (unsigned long)pi.private_data; /* MCP65 revision A1 and A2 can't do MSI */ if (board_id == board_ahci_mcp65 && (pdev->revision == 0xa1 || pdev->revision == 0xa2)) hpriv->flags |= AHCI_HFLAG_NO_MSI; /* SB800 does NOT need the workaround to ignore SERR_INTERNAL */ if (board_id == board_ahci_sb700 && pdev->revision >= 0x40) hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL; /* only some SB600s can do 64bit DMA */ if (ahci_sb600_enable_64bit(pdev)) hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY; if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev)) pci_intx(pdev, 1); hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; /* save initial config */ ahci_pci_save_initial_config(pdev, hpriv); /* prepare host */ if (hpriv->cap & HOST_CAP_NCQ) { pi.flags |= ATA_FLAG_NCQ; /* * Auto-activate optimization is supposed to be * supported on all AHCI controllers indicating NCQ * capability, but it seems to be broken on some * chipsets including NVIDIAs. */ if (!(hpriv->flags & AHCI_HFLAG_NO_FPDMA_AA)) pi.flags |= ATA_FLAG_FPDMA_AA; } if (hpriv->cap & HOST_CAP_PMP) pi.flags |= ATA_FLAG_PMP; ahci_set_em_messages(hpriv, &pi); if (ahci_broken_system_poweroff(pdev)) { pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN; dev_info(&pdev->dev, "quirky BIOS, skipping spindown on poweroff\n"); } if (ahci_broken_suspend(pdev)) { hpriv->flags |= AHCI_HFLAG_NO_SUSPEND; dev_printk(KERN_WARNING, &pdev->dev, "BIOS update required for suspend/resume\n"); } if (ahci_broken_online(pdev)) { hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE; dev_info(&pdev->dev, "online status unreliable, applying workaround\n"); } /* CAP.NP sometimes indicate the index of the last enabled * port, at other times, that of the last possible port, so * determining the maximum port number requires looking at * both CAP.NP and port_map. 
*/ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map)); host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); if (!host) return -ENOMEM; host->private_data = hpriv; if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) host->flags |= ATA_HOST_PARALLEL_SCAN; else printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n"); if (pi.flags & ATA_FLAG_EM) ahci_reset_em(host); for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar"); ata_port_pbar_desc(ap, AHCI_PCI_BAR, 0x100 + ap->port_no * 0x80, "port"); /* set enclosure management message type */ if (ap->flags & ATA_FLAG_EM) ap->em_message_type = hpriv->em_msg_type; /* disabled/not-implemented port */ if (!(hpriv->port_map & (1 << i))) ap->ops = &ata_dummy_port_ops; } /* apply workaround for ASUS P5W DH Deluxe mainboard */ ahci_p5wdh_workaround(host); /* apply gtf filter quirk */ ahci_gtf_filter_workaround(host); /* initialize adapter */ rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64); if (rc) return rc; rc = ahci_pci_reset_controller(host); if (rc) return rc; ahci_pci_init_controller(host); ahci_pci_print_info(host); pci_set_master(pdev); return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED, &ahci_sht); } static int __init ahci_init(void) { return pci_register_driver(&ahci_pci_driver); } static void __exit ahci_exit(void) { pci_unregister_driver(&ahci_pci_driver); } MODULE_AUTHOR("Jeff Garzik"); MODULE_DESCRIPTION("AHCI SATA low-level driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, ahci_pci_tbl); MODULE_VERSION(DRV_VERSION); module_init(ahci_init); module_exit(ahci_exit);
gpl-2.0
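Several quirk predicates in ahci.c above (ahci_sb600_enable_64bit(), ahci_broken_suspend()) compare BIOS release dates by formatting them as zero-padded "yyyymmdd" strings and calling strcmp(), so lexicographic order coincides with chronological order. A minimal stand-alone sketch of that idiom follows; bios_at_least() is a hypothetical helper, not part of the driver.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool bios_at_least(int year, int month, int day, const char *cutoff)
{
	char buf[9];

	/* zero padding keeps string order identical to date order */
	snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, day);
	return strcmp(buf, cutoff) >= 0;
}

int main(void)
{
	/* "20071026" is the ASUS M2A-VM cutoff used in the driver above */
	printf("%d\n", bios_at_least(2008, 3, 1, "20071026"));	/* 1: new enough */
	printf("%d\n", bios_at_least(2007, 9, 30, "20071026"));	/* 0: too old */
	return 0;
}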
nxnfufunezn/linux
drivers/net/wireless/ti/wl12xx/scan.c
1043
13424
/* * This file is part of wl12xx * * Copyright (C) 2012 Texas Instruments. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/ieee80211.h> #include "scan.h" #include "../wlcore/debug.h" #include "../wlcore/tx.h" static int wl1271_get_scan_channels(struct wl1271 *wl, struct cfg80211_scan_request *req, struct basic_scan_channel_params *channels, enum ieee80211_band band, bool passive) { struct conf_scan_settings *c = &wl->conf.scan; int i, j; u32 flags; for (i = 0, j = 0; i < req->n_channels && j < WL1271_SCAN_MAX_CHANNELS; i++) { flags = req->channels[i]->flags; if (!test_bit(i, wl->scan.scanned_ch) && !(flags & IEEE80211_CHAN_DISABLED) && (req->channels[i]->band == band) && /* * In passive scans, we scan all remaining * channels, even if not marked as such. * In active scans, we only scan channels not * marked as passive. */ (passive || !(flags & IEEE80211_CHAN_NO_IR))) { wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ", req->channels[i]->band, req->channels[i]->center_freq); wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X", req->channels[i]->hw_value, req->channels[i]->flags); wl1271_debug(DEBUG_SCAN, "max_antenna_gain %d, max_power %d", req->channels[i]->max_antenna_gain, req->channels[i]->max_power); wl1271_debug(DEBUG_SCAN, "beacon_found %d", req->channels[i]->beacon_found); if (!passive) { channels[j].min_duration = cpu_to_le32(c->min_dwell_time_active); channels[j].max_duration = cpu_to_le32(c->max_dwell_time_active); } else { channels[j].min_duration = cpu_to_le32(c->dwell_time_passive); channels[j].max_duration = cpu_to_le32(c->dwell_time_passive); } channels[j].early_termination = 0; channels[j].tx_power_att = req->channels[i]->max_power; channels[j].channel = req->channels[i]->hw_value; memset(&channels[j].bssid_lsb, 0xff, 4); memset(&channels[j].bssid_msb, 0xff, 2); /* Mark the channels we already used */ set_bit(i, wl->scan.scanned_ch); j++; } } return j; } #define WL1271_NOTHING_TO_SCAN 1 static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif, enum ieee80211_band band, bool passive, u32 basic_rate) { struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); struct wl1271_cmd_scan *cmd; struct wl1271_cmd_trigger_scan_to *trigger; int ret; u16 scan_options = 0; /* skip active scans if we don't have SSIDs */ if (!passive && wl->scan.req->n_ssids == 0) return WL1271_NOTHING_TO_SCAN; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); trigger = kzalloc(sizeof(*trigger), GFP_KERNEL); if (!cmd || !trigger) { ret = -ENOMEM; goto out; } if (wl->conf.scan.split_scan_timeout) scan_options |= WL1271_SCAN_OPT_SPLIT_SCAN; if (passive) scan_options |= WL1271_SCAN_OPT_PASSIVE; cmd->params.role_id = wlvif->role_id; if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) { ret = -EINVAL; goto out; } cmd->params.scan_options = cpu_to_le16(scan_options); cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req, cmd->channels, band, passive); if (cmd->params.n_ch == 
0) { ret = WL1271_NOTHING_TO_SCAN; goto out; } cmd->params.tx_rate = cpu_to_le32(basic_rate); cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs; cmd->params.tid_trigger = CONF_TX_AC_ANY_TID; cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG; if (band == IEEE80211_BAND_2GHZ) cmd->params.band = WL1271_SCAN_BAND_2_4_GHZ; else cmd->params.band = WL1271_SCAN_BAND_5_GHZ; if (wl->scan.ssid_len && wl->scan.ssid) { cmd->params.ssid_len = wl->scan.ssid_len; memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len); } memcpy(cmd->addr, vif->addr, ETH_ALEN); ret = wl12xx_cmd_build_probe_req(wl, wlvif, cmd->params.role_id, band, wl->scan.ssid, wl->scan.ssid_len, wl->scan.req->ie, wl->scan.req->ie_len, NULL, 0, false); if (ret < 0) { wl1271_error("PROBE request template failed"); goto out; } trigger->timeout = cpu_to_le32(wl->conf.scan.split_scan_timeout); ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger, sizeof(*trigger), 0); if (ret < 0) { wl1271_error("trigger scan to failed for hw scan"); goto out; } wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd)); ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0); if (ret < 0) { wl1271_error("SCAN failed"); goto out; } out: kfree(cmd); kfree(trigger); return ret; } int wl12xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct wl1271_cmd_header *cmd = NULL; int ret = 0; if (WARN_ON(wl->scan.state == WL1271_SCAN_STATE_IDLE)) return -EINVAL; wl1271_debug(DEBUG_CMD, "cmd scan stop"); cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) { ret = -ENOMEM; goto out; } ret = wl1271_cmd_send(wl, CMD_STOP_SCAN, cmd, sizeof(*cmd), 0); if (ret < 0) { wl1271_error("cmd stop_scan failed"); goto out; } out: kfree(cmd); return ret; } void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int ret = 0; enum ieee80211_band band; u32 rate, mask; switch (wl->scan.state) { case WL1271_SCAN_STATE_IDLE: break; case WL1271_SCAN_STATE_2GHZ_ACTIVE: band = IEEE80211_BAND_2GHZ; mask = wlvif->bitrate_masks[band]; if (wl->scan.req->no_cck) { mask &= ~CONF_TX_CCK_RATES; if (!mask) mask = CONF_TX_RATE_MASK_BASIC_P2P; } rate = wl1271_tx_min_rate_get(wl, mask); ret = wl1271_scan_send(wl, wlvif, band, false, rate); if (ret == WL1271_NOTHING_TO_SCAN) { wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE; wl1271_scan_stm(wl, wlvif); } break; case WL1271_SCAN_STATE_2GHZ_PASSIVE: band = IEEE80211_BAND_2GHZ; mask = wlvif->bitrate_masks[band]; if (wl->scan.req->no_cck) { mask &= ~CONF_TX_CCK_RATES; if (!mask) mask = CONF_TX_RATE_MASK_BASIC_P2P; } rate = wl1271_tx_min_rate_get(wl, mask); ret = wl1271_scan_send(wl, wlvif, band, true, rate); if (ret == WL1271_NOTHING_TO_SCAN) { if (wl->enable_11a) wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE; else wl->scan.state = WL1271_SCAN_STATE_DONE; wl1271_scan_stm(wl, wlvif); } break; case WL1271_SCAN_STATE_5GHZ_ACTIVE: band = IEEE80211_BAND_5GHZ; rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]); ret = wl1271_scan_send(wl, wlvif, band, false, rate); if (ret == WL1271_NOTHING_TO_SCAN) { wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE; wl1271_scan_stm(wl, wlvif); } break; case WL1271_SCAN_STATE_5GHZ_PASSIVE: band = IEEE80211_BAND_5GHZ; rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]); ret = wl1271_scan_send(wl, wlvif, band, true, rate); if (ret == WL1271_NOTHING_TO_SCAN) { wl->scan.state = WL1271_SCAN_STATE_DONE; wl1271_scan_stm(wl, wlvif); } break; case WL1271_SCAN_STATE_DONE: wl->scan.failed = false; cancel_delayed_work(&wl->scan_complete_work); ieee80211_queue_delayed_work(wl->hw, 
&wl->scan_complete_work, msecs_to_jiffies(0)); break; default: wl1271_error("invalid scan state"); break; } if (ret < 0) { cancel_delayed_work(&wl->scan_complete_work); ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work, msecs_to_jiffies(0)); } } static void wl12xx_adjust_channels(struct wl1271_cmd_sched_scan_config *cmd, struct wlcore_scan_channels *cmd_channels) { memcpy(cmd->passive, cmd_channels->passive, sizeof(cmd->passive)); memcpy(cmd->active, cmd_channels->active, sizeof(cmd->active)); cmd->dfs = cmd_channels->dfs; cmd->n_pactive_ch = cmd_channels->passive_active; memcpy(cmd->channels_2, cmd_channels->channels_2, sizeof(cmd->channels_2)); memcpy(cmd->channels_5, cmd_channels->channels_5, sizeof(cmd->channels_5)); /* channels_4 are not supported, so no need to copy them */ } int wl1271_scan_sched_scan_config(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies) { struct wl1271_cmd_sched_scan_config *cfg = NULL; struct wlcore_scan_channels *cfg_channels = NULL; struct conf_sched_scan_settings *c = &wl->conf.sched_scan; int i, ret; bool force_passive = !req->n_ssids; wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config"); cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); if (!cfg) return -ENOMEM; cfg->role_id = wlvif->role_id; cfg->rssi_threshold = c->rssi_threshold; cfg->snr_threshold = c->snr_threshold; cfg->n_probe_reqs = c->num_probe_reqs; /* cycles set to 0 it means infinite (until manually stopped) */ cfg->cycles = 0; /* report APs when at least 1 is found */ cfg->report_after = 1; /* don't stop scanning automatically when something is found */ cfg->terminate = 0; cfg->tag = WL1271_SCAN_DEFAULT_TAG; /* don't filter on BSS type */ cfg->bss_type = SCAN_BSS_TYPE_ANY; /* currently NL80211 supports only a single interval */ for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++) cfg->intervals[i] = cpu_to_le32(req->interval); cfg->ssid_len = 0; ret = wlcore_scan_sched_scan_ssid_list(wl, wlvif, req); if (ret < 0) goto out; cfg->filter_type = ret; wl1271_debug(DEBUG_SCAN, "filter_type = %d", cfg->filter_type); cfg_channels = kzalloc(sizeof(*cfg_channels), GFP_KERNEL); if (!cfg_channels) { ret = -ENOMEM; goto out; } if (!wlcore_set_scan_chan_params(wl, cfg_channels, req->channels, req->n_channels, req->n_ssids, SCAN_TYPE_PERIODIC)) { wl1271_error("scan channel list is empty"); ret = -EINVAL; goto out; } wl12xx_adjust_channels(cfg, cfg_channels); if (!force_passive && cfg->active[0]) { u8 band = IEEE80211_BAND_2GHZ; ret = wl12xx_cmd_build_probe_req(wl, wlvif, wlvif->role_id, band, req->ssids[0].ssid, req->ssids[0].ssid_len, ies->ies[band], ies->len[band], ies->common_ies, ies->common_ie_len, true); if (ret < 0) { wl1271_error("2.4GHz PROBE request template failed"); goto out; } } if (!force_passive && cfg->active[1]) { u8 band = IEEE80211_BAND_5GHZ; ret = wl12xx_cmd_build_probe_req(wl, wlvif, wlvif->role_id, band, req->ssids[0].ssid, req->ssids[0].ssid_len, ies->ies[band], ies->len[band], ies->common_ies, ies->common_ie_len, true); if (ret < 0) { wl1271_error("5GHz PROBE request template failed"); goto out; } } wl1271_dump(DEBUG_SCAN, "SCAN_CFG: ", cfg, sizeof(*cfg)); ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_CFG, cfg, sizeof(*cfg), 0); if (ret < 0) { wl1271_error("SCAN configuration failed"); goto out; } out: kfree(cfg_channels); kfree(cfg); return ret; } int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct wl1271_cmd_sched_scan_start *start; int ret = 0; wl1271_debug(DEBUG_CMD, "cmd periodic 
scan start"); if (wlvif->bss_type != BSS_TYPE_STA_BSS) return -EOPNOTSUPP; if ((wl->quirks & WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN) && test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) return -EBUSY; start = kzalloc(sizeof(*start), GFP_KERNEL); if (!start) return -ENOMEM; start->role_id = wlvif->role_id; start->tag = WL1271_SCAN_DEFAULT_TAG; ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start, sizeof(*start), 0); if (ret < 0) { wl1271_error("failed to send scan start command"); goto out_free; } out_free: kfree(start); return ret; } int wl12xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies) { int ret; ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies); if (ret < 0) return ret; return wl1271_scan_sched_scan_start(wl, wlvif); } void wl12xx_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct wl1271_cmd_sched_scan_stop *stop; int ret = 0; wl1271_debug(DEBUG_CMD, "cmd periodic scan stop"); /* FIXME: what to do if alloc'ing to stop fails? */ stop = kzalloc(sizeof(*stop), GFP_KERNEL); if (!stop) { wl1271_error("failed to alloc memory to send sched scan stop"); return; } stop->role_id = wlvif->role_id; stop->tag = WL1271_SCAN_DEFAULT_TAG; ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop, sizeof(*stop), 0); if (ret < 0) { wl1271_error("failed to send sched scan stop command"); goto out_free; } out_free: kfree(stop); } int wl12xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct cfg80211_scan_request *req) { wl1271_scan_stm(wl, wlvif); return 0; } void wl12xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif) { wl1271_scan_stm(wl, wlvif); }
gpl-2.0
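wl1271_scan_stm() above steps through a fixed band sequence (2.4 GHz active, 2.4 GHz passive, 5 GHz active, 5 GHz passive, done) and re-enters itself whenever a send reports WL1271_NOTHING_TO_SCAN, so empty stages are skipped without waiting for a completion event. The self-contained toy model below mirrors only those transitions; the state names, scan_send(), and main() are illustrative stand-ins, and this scan_send() always reports nothing-to-scan so the machine falls straight through to the done state.

#include <stdbool.h>
#include <stdio.h>

enum scan_state {
	SCAN_IDLE,
	SCAN_2GHZ_ACTIVE,
	SCAN_2GHZ_PASSIVE,
	SCAN_5GHZ_ACTIVE,
	SCAN_5GHZ_PASSIVE,
	SCAN_DONE,
};

#define NOTHING_TO_SCAN 1

/* pretend every request has no channels/SSIDs, like an empty scan */
static int scan_send(enum scan_state s, bool passive)
{
	printf("send: state %d, %s\n", s, passive ? "passive" : "active");
	return NOTHING_TO_SCAN;
}

static void scan_stm(enum scan_state *state, bool enable_11a)
{
	switch (*state) {
	case SCAN_2GHZ_ACTIVE:
		if (scan_send(*state, false) == NOTHING_TO_SCAN) {
			*state = SCAN_2GHZ_PASSIVE;
			scan_stm(state, enable_11a);	/* advance immediately */
		}
		break;
	case SCAN_2GHZ_PASSIVE:
		if (scan_send(*state, true) == NOTHING_TO_SCAN) {
			*state = enable_11a ? SCAN_5GHZ_ACTIVE : SCAN_DONE;
			scan_stm(state, enable_11a);
		}
		break;
	case SCAN_5GHZ_ACTIVE:
		if (scan_send(*state, false) == NOTHING_TO_SCAN) {
			*state = SCAN_5GHZ_PASSIVE;
			scan_stm(state, enable_11a);
		}
		break;
	case SCAN_5GHZ_PASSIVE:
		if (scan_send(*state, true) == NOTHING_TO_SCAN) {
			*state = SCAN_DONE;
			scan_stm(state, enable_11a);
		}
		break;
	default:
		break;		/* SCAN_IDLE / SCAN_DONE: nothing to do */
	}
}

int main(void)
{
	enum scan_state state = SCAN_2GHZ_ACTIVE;

	scan_stm(&state, true);
	printf("final state: %d (done=%d)\n", state, state == SCAN_DONE);
	return 0;
}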