repo_name
string
path
string
copies
string
size
string
content
string
license
string
OSLL/greenmesh
drivers/media/video/gspca/stv0680.c
162
10489
/*
 * STV0680 USB Camera Driver
 *
 * Copyright (C) 2009 Hans de Goede <hdegoede@redhat.com>
 *
 * This module is adapted from the in kernel v4l1 stv680 driver:
 *
 * STV0680 USB Camera Driver, by Kevin Sisson (kjsisson@bellsouth.net)
 *
 * Thanks to STMicroelectronics for information on the usb commands, and
 * to Steve Miller at STM for his help and encouragement while I was
 * writing this driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define MODULE_NAME "stv0680"

#include "gspca.h"

MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("STV0680 USB Camera Driver");
MODULE_LICENSE("GPL");

/* specific webcam descriptor */
struct sd {
	struct gspca_dev gspca_dev;	/* !! must be the first item */
	struct v4l2_pix_format mode;	/* the single frame format reported to V4L2,
					   filled in from the camera in sd_config() */
	u8 orig_mode;			/* camera video mode found at probe time;
					   restored in sd_stop0() */
	u8 video_mode;			/* mode used for streaming: 0x00 = CIF,
					   0x03 = QVGA (chosen in sd_config()) */
	u8 current_mode;		/* mode the camera is currently in; cached so
					   stv0680_set_video_mode() can skip no-ops */
};

/* V4L2 controls supported by the driver (none) */
static const struct ctrl sd_ctrls[] = {
};

/*
 * Issue a vendor control transfer to the camera.
 *
 * @set selects direction/recipient of the request (see the cases below for
 * the resulting bmRequestType byte), @req is the request id, @val the wValue,
 * and @size the number of bytes transferred via gspca_dev->usb_buf.
 *
 * Returns the usb_control_msg() result: number of bytes transferred on
 * success, negative errno on failure.  Errors are logged except for
 * request 0x0a.
 */
static int stv_sndctrl(struct gspca_dev *gspca_dev, int set, u8 req, u16 val,
		       int size)
{
	int ret = -1;
	u8 req_type = 0;
	unsigned int pipe = 0;

	switch (set) {
	case 0: /* 0xc1 */
		req_type = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT;
		pipe = usb_rcvctrlpipe(gspca_dev->dev, 0);
		break;
	case 1: /* 0x41 */
		req_type = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT;
		pipe = usb_sndctrlpipe(gspca_dev->dev, 0);
		break;
	case 2: /* 0x80 */
		req_type = USB_DIR_IN | USB_RECIP_DEVICE;
		pipe = usb_rcvctrlpipe(gspca_dev->dev, 0);
		break;
	case 3: /* 0x40 */
		req_type = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
		pipe = usb_sndctrlpipe(gspca_dev->dev, 0);
		break;
	}

	/* wIndex is always 0; 500 ms timeout */
	ret = usb_control_msg(gspca_dev->dev, pipe,
			      req, req_type,
			      val, 0, gspca_dev->usb_buf, size, 500);

	if ((ret < 0) && (req != 0x0a))
		pr_err("usb_control_msg error %i, request = 0x%x, error = %i\n",
		       set, req, ret);

	return ret;
}

/*
 * Query the camera's "last error" register (request 0x80) and log it,
 * then pass @ret straight through so callers can write
 * "return stv0680_handle_error(gspca_dev, -EIO);".
 */
static int stv0680_handle_error(struct gspca_dev *gspca_dev, int ret)
{
	stv_sndctrl(gspca_dev, 0, 0x80, 0, 0x02); /* Get Last Error */
	PDEBUG(D_ERR, "last error: %i, command = 0x%x",
	       gspca_dev->usb_buf[0], gspca_dev->usb_buf[1]);
	return ret;
}

/*
 * Read the camera's current video mode (request 0x87).
 * Returns the mode byte on success (01 = VGA, 03 = QVGA, 00 = CIF),
 * or a negative errno on failure.
 */
static int stv0680_get_video_mode(struct gspca_dev *gspca_dev)
{
	/* Note not sure if this init of usb_buf is really necessary */
	memset(gspca_dev->usb_buf, 0, 8);
	gspca_dev->usb_buf[0] = 0x0f;

	if (stv_sndctrl(gspca_dev, 0, 0x87, 0, 0x08) != 0x08) {
		PDEBUG(D_ERR, "Get_Camera_Mode failed");
		return stv0680_handle_error(gspca_dev, -EIO);
	}

	return gspca_dev->usb_buf[0]; /* 01 = VGA, 03 = QVGA, 00 = CIF */
}

/*
 * Switch the camera to @mode (request 0x07) and read the mode back to
 * verify the switch took effect.  A no-op when the cached current_mode
 * already matches.  Returns 0 on success, negative errno on failure.
 */
static int stv0680_set_video_mode(struct gspca_dev *gspca_dev, u8 mode)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (sd->current_mode == mode)
		return 0;

	memset(gspca_dev->usb_buf, 0, 8);
	gspca_dev->usb_buf[0] = mode;

	if (stv_sndctrl(gspca_dev, 3, 0x07, 0x0100, 0x08) != 0x08) {
		PDEBUG(D_ERR, "Set_Camera_Mode failed");
		return stv0680_handle_error(gspca_dev, -EIO);
	}

	/* Verify we got what we've asked for */
	if (stv0680_get_video_mode(gspca_dev) != mode) {
		PDEBUG(D_ERR, "Error setting camera video mode!");
		return -EIO;
	}

	sd->current_mode = mode;

	return 0;
}

/*
 * this function is called at probe time
 *
 * Pings the camera, reads its descriptors and capability bits, picks the
 * streaming mode (CIF when supported, QVGA otherwise), queries that mode's
 * frame geometry to fill in sd->mode and the gspca cam struct, then puts
 * the camera back in the mode it was found in.
 */
static int sd_config(struct gspca_dev *gspca_dev,
		     const struct usb_device_id *id)
{
	int ret;
	struct sd *sd = (struct sd *) gspca_dev;
	struct cam *cam = &gspca_dev->cam;

	/* Give the camera some time to settle, otherwise initalization will
	   fail on hotplug, and yes it really needs a full second. */
	msleep(1000);

	/* ping camera to be sure STV0680 is present */
	if (stv_sndctrl(gspca_dev, 0, 0x88, 0x5678, 0x02) != 0x02 ||
	    gspca_dev->usb_buf[0] != 0x56 || gspca_dev->usb_buf[1] != 0x78) {
		PDEBUG(D_ERR, "STV(e): camera ping failed!!");
		return stv0680_handle_error(gspca_dev, -ENODEV);
	}

	/* get camera descriptor */
	if (stv_sndctrl(gspca_dev, 2, 0x06, 0x0200, 0x09) != 0x09)
		return stv0680_handle_error(gspca_dev, -ENODEV);
	if (stv_sndctrl(gspca_dev, 2, 0x06, 0x0200, 0x22) != 0x22 ||
	    gspca_dev->usb_buf[7] != 0xa0 || gspca_dev->usb_buf[8] != 0x23) {
		PDEBUG(D_ERR, "Could not get descriptor 0200.");
		return stv0680_handle_error(gspca_dev, -ENODEV);
	}
	if (stv_sndctrl(gspca_dev, 0, 0x8a, 0, 0x02) != 0x02)
		return stv0680_handle_error(gspca_dev, -ENODEV);
	if (stv_sndctrl(gspca_dev, 0, 0x8b, 0, 0x24) != 0x24)
		return stv0680_handle_error(gspca_dev, -ENODEV);
	if (stv_sndctrl(gspca_dev, 0, 0x85, 0, 0x10) != 0x10)
		return stv0680_handle_error(gspca_dev, -ENODEV);

	/* usb_buf[7] holds the supported-modes bitmask:
	   0x01 CIF, 0x02 VGA, 0x04 QCIF, 0x08 QVGA */
	if (!(gspca_dev->usb_buf[7] & 0x09)) {
		PDEBUG(D_ERR, "Camera supports neither CIF nor QVGA mode");
		return -ENODEV;
	}
	if (gspca_dev->usb_buf[7] & 0x01)
		PDEBUG(D_PROBE, "Camera supports CIF mode");
	if (gspca_dev->usb_buf[7] & 0x02)
		PDEBUG(D_PROBE, "Camera supports VGA mode");
	if (gspca_dev->usb_buf[7] & 0x04)
		PDEBUG(D_PROBE, "Camera supports QCIF mode");
	if (gspca_dev->usb_buf[7] & 0x08)
		PDEBUG(D_PROBE, "Camera supports QVGA mode");

	/* prefer CIF when the camera has it, otherwise fall back to QVGA */
	if (gspca_dev->usb_buf[7] & 0x01)
		sd->video_mode = 0x00; /* CIF */
	else
		sd->video_mode = 0x03; /* QVGA */

	/* FW rev, ASIC rev, sensor ID */
	PDEBUG(D_PROBE, "Firmware rev is %i.%i",
	       gspca_dev->usb_buf[0], gspca_dev->usb_buf[1]);
	PDEBUG(D_PROBE, "ASIC rev is %i.%i",
	       gspca_dev->usb_buf[2], gspca_dev->usb_buf[3]);
	PDEBUG(D_PROBE, "Sensor ID is %i",
	       (gspca_dev->usb_buf[4]*16) + (gspca_dev->usb_buf[5]>>4));

	/* remember the mode the camera was in so sd_stop0() can restore it */
	ret = stv0680_get_video_mode(gspca_dev);
	if (ret < 0)
		return ret;
	sd->current_mode = sd->orig_mode = ret;

	ret = stv0680_set_video_mode(gspca_dev, sd->video_mode);
	if (ret < 0)
		return ret;

	/* Get mode details */
	if (stv_sndctrl(gspca_dev, 0, 0x8f, 0, 0x10) != 0x10)
		return stv0680_handle_error(gspca_dev, -EIO);

	cam->bulk = 1;
	cam->bulk_nurbs = 1; /* The cam cannot handle more */
	cam->bulk_size = (gspca_dev->usb_buf[0] << 24) |
			 (gspca_dev->usb_buf[1] << 16) |
			 (gspca_dev->usb_buf[2] << 8) |
			 (gspca_dev->usb_buf[3]);
	sd->mode.width = (gspca_dev->usb_buf[4] << 8) |
			 (gspca_dev->usb_buf[5]);  /* 322, 356, 644 */
	sd->mode.height = (gspca_dev->usb_buf[6] << 8) |
			  (gspca_dev->usb_buf[7]); /* 242, 292, 484 */
	sd->mode.pixelformat = V4L2_PIX_FMT_STV0680;
	sd->mode.field = V4L2_FIELD_NONE;
	sd->mode.bytesperline = sd->mode.width;
	sd->mode.sizeimage = cam->bulk_size;
	sd->mode.colorspace = V4L2_COLORSPACE_SRGB;

	/* origGain = gspca_dev->usb_buf[12]; */

	cam->cam_mode = &sd->mode;
	cam->nmodes = 1;

	/* leave the camera in the mode it was found in until streaming starts */
	ret = stv0680_set_video_mode(gspca_dev, sd->orig_mode);
	if (ret < 0)
		return ret;

	if (stv_sndctrl(gspca_dev, 2, 0x06, 0x0100, 0x12) != 0x12 ||
	    gspca_dev->usb_buf[8] != 0x53 || gspca_dev->usb_buf[9] != 0x05) {
		pr_err("Could not get descriptor 0100\n");
		return stv0680_handle_error(gspca_dev, -EIO);
	}

	return 0;
}

/* this function is called at probe and resume time; nothing to do here,
   all setup happens in sd_config() / sd_start() */
static int sd_init(struct gspca_dev *gspca_dev)
{
	return 0;
}

/* -- start the camera -- */
static int sd_start(struct gspca_dev *gspca_dev)
{
	int ret;
	struct sd *sd = (struct sd *) gspca_dev;

	ret = stv0680_set_video_mode(gspca_dev, sd->video_mode);
	if (ret < 0)
		return ret;

	if (stv_sndctrl(gspca_dev, 0, 0x85, 0, 0x10) != 0x10)
		return stv0680_handle_error(gspca_dev, -EIO);

	/* Start stream at:
	   0x0000 = CIF (352x288)
	   0x0100 = VGA (640x480)
	   0x0300 = QVGA (320x240) */
	if (stv_sndctrl(gspca_dev, 1, 0x09, sd->video_mode << 8, 0x0) != 0x0)
		return stv0680_handle_error(gspca_dev, -EIO);

	return 0;
}

static void sd_stopN(struct gspca_dev *gspca_dev)
{
	/* This is a high priority command; it stops all lower order cmds */
	if (stv_sndctrl(gspca_dev, 1, 0x04, 0x0000, 0x0) != 0x0)
		stv0680_handle_error(gspca_dev, -EIO);
}

/* called after streaming stops; restore the camera's original mode,
   but only if the device is still plugged in */
static void sd_stop0(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (!sd->gspca_dev.present)
		return;

	stv0680_set_video_mode(gspca_dev, sd->orig_mode);
}

/*
 * Bulk packet handler.  Each good bulk transfer carries exactly one whole
 * frame (sizeimage bytes); anything else is junk and invalidates the
 * frame received just before it.
 */
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
			u8 *data,			/* isoc packet */
			int len)			/* iso packet length */
{
	struct sd *sd = (struct sd *) gspca_dev;

	/* Every now and then the camera sends a 16 byte packet, no idea
	   what it contains, but it is not image data, when this
	   happens the frame received before this packet is corrupt,
	   so discard it. */
	if (len != sd->mode.sizeimage) {
		gspca_dev->last_packet_type = DISCARD_PACKET;
		return;
	}

	/* Finish the previous frame, we do this upon reception of the next
	   packet, even though it is already complete so that the strange 16
	   byte packets send after a corrupt frame can discard it. */
	gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);

	/* Store the just received frame */
	gspca_frame_add(gspca_dev, FIRST_PACKET, data, len);
}

/* sub-driver description */
static const struct sd_desc sd_desc = {
	.name = MODULE_NAME,
	.ctrls = sd_ctrls,
	.nctrls = ARRAY_SIZE(sd_ctrls),
	.config = sd_config,
	.init = sd_init,
	.start = sd_start,
	.stopN = sd_stopN,
	.stop0 = sd_stop0,
	.pkt_scan = sd_pkt_scan,
};

/* -- module initialisation -- */
static const struct usb_device_id device_table[] = {
	{USB_DEVICE(0x0553, 0x0202)},
	{USB_DEVICE(0x041e, 0x4007)},
	{}
};
MODULE_DEVICE_TABLE(usb, device_table);

/* -- device connect -- */
static int sd_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
			       THIS_MODULE);
}

static struct usb_driver sd_driver = {
	.name = MODULE_NAME,
	.id_table = device_table,
	.probe = sd_probe,
	.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
	.suspend = gspca_suspend,
	.resume = gspca_resume,
#endif
};

/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
	return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
	usb_deregister(&sd_driver);
}

module_init(sd_mod_init);
module_exit(sd_mod_exit);
gpl-2.0
xaxaxa/linux-3.13-socfpga-vserver-aufs
drivers/gpu/drm/radeon/btc_dpm.c
162
87364
/* * Copyright 2011 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Alex Deucher */ #include "drmP.h" #include "radeon.h" #include "btcd.h" #include "r600_dpm.h" #include "cypress_dpm.h" #include "btc_dpm.h" #include "atom.h" #define MC_CG_ARB_FREQ_F0 0x0a #define MC_CG_ARB_FREQ_F1 0x0b #define MC_CG_ARB_FREQ_F2 0x0c #define MC_CG_ARB_FREQ_F3 0x0d #define MC_CG_SEQ_DRAMCONF_S0 0x05 #define MC_CG_SEQ_DRAMCONF_S1 0x06 #define MC_CG_SEQ_YCLK_SUSPEND 0x04 #define MC_CG_SEQ_YCLK_RESUME 0x0a #define SMC_RAM_END 0x8000 #ifndef BTC_MGCG_SEQUENCE #define BTC_MGCG_SEQUENCE 300 struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps); struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev); struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev); //********* BARTS **************// static const u32 barts_cgcg_cgls_default[] = { /* Register, Value, Mask bits */ 0x000008f8, 0x00000010, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000011, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000012, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000013, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000014, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000015, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000016, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000017, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000018, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000019, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000001a, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000001b, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000020, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000021, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000022, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000023, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 
0x00000024, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000025, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000026, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000027, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000028, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000029, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000002a, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000002b, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff }; #define BARTS_CGCG_CGLS_DEFAULT_LENGTH sizeof(barts_cgcg_cgls_default) / (3 * sizeof(u32)) static const u32 barts_cgcg_cgls_disable[] = { 0x000008f8, 0x00000010, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000011, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000012, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000013, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000014, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000015, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000016, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000017, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000018, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000019, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x0000001a, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x0000001b, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000020, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000021, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000022, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000023, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000024, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000025, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 
0x000008f8, 0x00000026, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000027, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000028, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000029, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000002a, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000002b, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x00000644, 0x000f7912, 0x001f4180, 0x00000644, 0x000f3812, 0x001f4180 }; #define BARTS_CGCG_CGLS_DISABLE_LENGTH sizeof(barts_cgcg_cgls_disable) / (3 * sizeof(u32)) static const u32 barts_cgcg_cgls_enable[] = { /* 0x0000c124, 0x84180000, 0x00180000, */ 0x00000644, 0x000f7892, 0x001f4080, 0x000008f8, 0x00000010, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000011, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000012, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000013, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000014, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000015, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000016, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000017, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000018, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000019, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000001a, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000001b, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000020, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000021, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000022, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000023, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000024, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000025, 0xffffffff, 0x000008fc, 
0xffffffff, 0xffffffff, 0x000008f8, 0x00000026, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000027, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000028, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000029, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x0000002a, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x0000002b, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff }; #define BARTS_CGCG_CGLS_ENABLE_LENGTH sizeof(barts_cgcg_cgls_enable) / (3 * sizeof(u32)) static const u32 barts_mgcg_default[] = { 0x0000802c, 0xc0000000, 0xffffffff, 0x00005448, 0x00000100, 0xffffffff, 0x000055e4, 0x00600100, 0xffffffff, 0x0000160c, 0x00000100, 0xffffffff, 0x0000c164, 0x00000100, 0xffffffff, 0x00008a18, 0x00000100, 0xffffffff, 0x0000897c, 0x06000100, 0xffffffff, 0x00008b28, 0x00000100, 0xffffffff, 0x00009144, 0x00000100, 0xffffffff, 0x00009a60, 0x00000100, 0xffffffff, 0x00009868, 0x00000100, 0xffffffff, 0x00008d58, 0x00000100, 0xffffffff, 0x00009510, 0x00000100, 0xffffffff, 0x0000949c, 0x00000100, 0xffffffff, 0x00009654, 0x00000100, 0xffffffff, 0x00009030, 0x00000100, 0xffffffff, 0x00009034, 0x00000100, 0xffffffff, 0x00009038, 0x00000100, 0xffffffff, 0x0000903c, 0x00000100, 0xffffffff, 0x00009040, 0x00000100, 0xffffffff, 0x0000a200, 0x00000100, 0xffffffff, 0x0000a204, 0x00000100, 0xffffffff, 0x0000a208, 0x00000100, 0xffffffff, 0x0000a20c, 0x00000100, 0xffffffff, 0x0000977c, 0x00000100, 0xffffffff, 0x00003f80, 0x00000100, 0xffffffff, 0x0000a210, 0x00000100, 0xffffffff, 0x0000a214, 0x00000100, 0xffffffff, 0x000004d8, 0x00000100, 0xffffffff, 0x00009784, 0x00000100, 0xffffffff, 0x00009698, 0x00000100, 0xffffffff, 0x000004d4, 0x00000200, 0xffffffff, 0x000004d0, 0x00000000, 0xffffffff, 0x000030cc, 0x00000100, 0xffffffff, 0x0000d0c0, 0xff000100, 0xffffffff, 0x0000802c, 0x40000000, 0xffffffff, 0x0000915c, 0x00010000, 0xffffffff, 0x00009160, 0x00030002, 0xffffffff, 0x00009164, 0x00050004, 0xffffffff, 
0x00009168, 0x00070006, 0xffffffff, 0x00009178, 0x00070000, 0xffffffff, 0x0000917c, 0x00030002, 0xffffffff, 0x00009180, 0x00050004, 0xffffffff, 0x0000918c, 0x00010006, 0xffffffff, 0x00009190, 0x00090008, 0xffffffff, 0x00009194, 0x00070000, 0xffffffff, 0x00009198, 0x00030002, 0xffffffff, 0x0000919c, 0x00050004, 0xffffffff, 0x000091a8, 0x00010006, 0xffffffff, 0x000091ac, 0x00090008, 0xffffffff, 0x000091b0, 0x00070000, 0xffffffff, 0x000091b4, 0x00030002, 0xffffffff, 0x000091b8, 0x00050004, 0xffffffff, 0x000091c4, 0x00010006, 0xffffffff, 0x000091c8, 0x00090008, 0xffffffff, 0x000091cc, 0x00070000, 0xffffffff, 0x000091d0, 0x00030002, 0xffffffff, 0x000091d4, 0x00050004, 0xffffffff, 0x000091e0, 0x00010006, 0xffffffff, 0x000091e4, 0x00090008, 0xffffffff, 0x000091e8, 0x00000000, 0xffffffff, 0x000091ec, 0x00070000, 0xffffffff, 0x000091f0, 0x00030002, 0xffffffff, 0x000091f4, 0x00050004, 0xffffffff, 0x00009200, 0x00010006, 0xffffffff, 0x00009204, 0x00090008, 0xffffffff, 0x00009208, 0x00070000, 0xffffffff, 0x0000920c, 0x00030002, 0xffffffff, 0x00009210, 0x00050004, 0xffffffff, 0x0000921c, 0x00010006, 0xffffffff, 0x00009220, 0x00090008, 0xffffffff, 0x00009224, 0x00070000, 0xffffffff, 0x00009228, 0x00030002, 0xffffffff, 0x0000922c, 0x00050004, 0xffffffff, 0x00009238, 0x00010006, 0xffffffff, 0x0000923c, 0x00090008, 0xffffffff, 0x00009294, 0x00000000, 0xffffffff, 0x0000802c, 0x40010000, 0xffffffff, 0x0000915c, 0x00010000, 0xffffffff, 0x00009160, 0x00030002, 0xffffffff, 0x00009164, 0x00050004, 0xffffffff, 0x00009168, 0x00070006, 0xffffffff, 0x00009178, 0x00070000, 0xffffffff, 0x0000917c, 0x00030002, 0xffffffff, 0x00009180, 0x00050004, 0xffffffff, 0x0000918c, 0x00010006, 0xffffffff, 0x00009190, 0x00090008, 0xffffffff, 0x00009194, 0x00070000, 0xffffffff, 0x00009198, 0x00030002, 0xffffffff, 0x0000919c, 0x00050004, 0xffffffff, 0x000091a8, 0x00010006, 0xffffffff, 0x000091ac, 0x00090008, 0xffffffff, 0x000091b0, 0x00070000, 0xffffffff, 0x000091b4, 0x00030002, 0xffffffff, 0x000091b8, 
0x00050004, 0xffffffff, 0x000091c4, 0x00010006, 0xffffffff, 0x000091c8, 0x00090008, 0xffffffff, 0x000091cc, 0x00070000, 0xffffffff, 0x000091d0, 0x00030002, 0xffffffff, 0x000091d4, 0x00050004, 0xffffffff, 0x000091e0, 0x00010006, 0xffffffff, 0x000091e4, 0x00090008, 0xffffffff, 0x000091e8, 0x00000000, 0xffffffff, 0x000091ec, 0x00070000, 0xffffffff, 0x000091f0, 0x00030002, 0xffffffff, 0x000091f4, 0x00050004, 0xffffffff, 0x00009200, 0x00010006, 0xffffffff, 0x00009204, 0x00090008, 0xffffffff, 0x00009208, 0x00070000, 0xffffffff, 0x0000920c, 0x00030002, 0xffffffff, 0x00009210, 0x00050004, 0xffffffff, 0x0000921c, 0x00010006, 0xffffffff, 0x00009220, 0x00090008, 0xffffffff, 0x00009224, 0x00070000, 0xffffffff, 0x00009228, 0x00030002, 0xffffffff, 0x0000922c, 0x00050004, 0xffffffff, 0x00009238, 0x00010006, 0xffffffff, 0x0000923c, 0x00090008, 0xffffffff, 0x00009294, 0x00000000, 0xffffffff, 0x0000802c, 0xc0000000, 0xffffffff, 0x000008f8, 0x00000010, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000011, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000012, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000013, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000014, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000015, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000016, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000017, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000018, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000019, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000001a, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000001b, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff }; #define BARTS_MGCG_DEFAULT_LENGTH sizeof(barts_mgcg_default) / (3 * sizeof(u32)) static const u32 barts_mgcg_disable[] = { 0x0000802c, 0xc0000000, 0xffffffff, 0x000008f8, 0x00000000, 0xffffffff, 0x000008fc, 
0xffffffff, 0xffffffff, 0x000008f8, 0x00000001, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000002, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000003, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x00009150, 0x00600000, 0xffffffff }; #define BARTS_MGCG_DISABLE_LENGTH sizeof(barts_mgcg_disable) / (3 * sizeof(u32)) static const u32 barts_mgcg_enable[] = { 0x0000802c, 0xc0000000, 0xffffffff, 0x000008f8, 0x00000000, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000001, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000002, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000003, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x00009150, 0x81944000, 0xffffffff }; #define BARTS_MGCG_ENABLE_LENGTH sizeof(barts_mgcg_enable) / (3 * sizeof(u32)) //********* CAICOS **************// static const u32 caicos_cgcg_cgls_default[] = { 0x000008f8, 0x00000010, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000011, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000012, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000013, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000014, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000015, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000016, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000017, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000018, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000019, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000001a, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000001b, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000020, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000021, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000022, 0xffffffff, 0x000008fc, 0x00000000, 
0xffffffff, 0x000008f8, 0x00000023, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000024, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000025, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000026, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000027, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000028, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000029, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000002a, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000002b, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff }; #define CAICOS_CGCG_CGLS_DEFAULT_LENGTH sizeof(caicos_cgcg_cgls_default) / (3 * sizeof(u32)) static const u32 caicos_cgcg_cgls_disable[] = { 0x000008f8, 0x00000010, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000011, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000012, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000013, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000014, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000015, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000016, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000017, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000018, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000019, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x0000001a, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x0000001b, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000020, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000021, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000022, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000023, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000024, 0xffffffff, 
0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000025, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000026, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000027, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000028, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000029, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000002a, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000002b, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x00000644, 0x000f7912, 0x001f4180, 0x00000644, 0x000f3812, 0x001f4180 }; #define CAICOS_CGCG_CGLS_DISABLE_LENGTH sizeof(caicos_cgcg_cgls_disable) / (3 * sizeof(u32)) static const u32 caicos_cgcg_cgls_enable[] = { /* 0x0000c124, 0x84180000, 0x00180000, */ 0x00000644, 0x000f7892, 0x001f4080, 0x000008f8, 0x00000010, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000011, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000012, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000013, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000014, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000015, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000016, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000017, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000018, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000019, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000001a, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000001b, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000020, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000021, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000022, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000023, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 
0x00000024, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000025, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000026, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000027, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000028, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000029, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x0000002a, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x0000002b, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff }; #define CAICOS_CGCG_CGLS_ENABLE_LENGTH sizeof(caicos_cgcg_cgls_enable) / (3 * sizeof(u32)) static const u32 caicos_mgcg_default[] = { 0x0000802c, 0xc0000000, 0xffffffff, 0x00005448, 0x00000100, 0xffffffff, 0x000055e4, 0x00600100, 0xffffffff, 0x0000160c, 0x00000100, 0xffffffff, 0x0000c164, 0x00000100, 0xffffffff, 0x00008a18, 0x00000100, 0xffffffff, 0x0000897c, 0x06000100, 0xffffffff, 0x00008b28, 0x00000100, 0xffffffff, 0x00009144, 0x00000100, 0xffffffff, 0x00009a60, 0x00000100, 0xffffffff, 0x00009868, 0x00000100, 0xffffffff, 0x00008d58, 0x00000100, 0xffffffff, 0x00009510, 0x00000100, 0xffffffff, 0x0000949c, 0x00000100, 0xffffffff, 0x00009654, 0x00000100, 0xffffffff, 0x00009030, 0x00000100, 0xffffffff, 0x00009034, 0x00000100, 0xffffffff, 0x00009038, 0x00000100, 0xffffffff, 0x0000903c, 0x00000100, 0xffffffff, 0x00009040, 0x00000100, 0xffffffff, 0x0000a200, 0x00000100, 0xffffffff, 0x0000a204, 0x00000100, 0xffffffff, 0x0000a208, 0x00000100, 0xffffffff, 0x0000a20c, 0x00000100, 0xffffffff, 0x0000977c, 0x00000100, 0xffffffff, 0x00003f80, 0x00000100, 0xffffffff, 0x0000a210, 0x00000100, 0xffffffff, 0x0000a214, 0x00000100, 0xffffffff, 0x000004d8, 0x00000100, 0xffffffff, 0x00009784, 0x00000100, 0xffffffff, 0x00009698, 0x00000100, 0xffffffff, 0x000004d4, 0x00000200, 0xffffffff, 0x000004d0, 0x00000000, 0xffffffff, 0x000030cc, 0x00000100, 0xffffffff, 0x0000d0c0, 0xff000100, 0xffffffff, 0x0000915c, 0x00010000, 0xffffffff, 
0x00009160, 0x00030002, 0xffffffff, 0x00009164, 0x00050004, 0xffffffff, 0x00009168, 0x00070006, 0xffffffff, 0x00009178, 0x00070000, 0xffffffff, 0x0000917c, 0x00030002, 0xffffffff, 0x00009180, 0x00050004, 0xffffffff, 0x0000918c, 0x00010006, 0xffffffff, 0x00009190, 0x00090008, 0xffffffff, 0x00009194, 0x00070000, 0xffffffff, 0x00009198, 0x00030002, 0xffffffff, 0x0000919c, 0x00050004, 0xffffffff, 0x000091a8, 0x00010006, 0xffffffff, 0x000091ac, 0x00090008, 0xffffffff, 0x000091e8, 0x00000000, 0xffffffff, 0x00009294, 0x00000000, 0xffffffff, 0x000008f8, 0x00000010, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000011, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000012, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000013, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000014, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000015, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000016, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000017, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000018, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000019, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000001a, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000001b, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff }; #define CAICOS_MGCG_DEFAULT_LENGTH sizeof(caicos_mgcg_default) / (3 * sizeof(u32)) static const u32 caicos_mgcg_disable[] = { 0x0000802c, 0xc0000000, 0xffffffff, 0x000008f8, 0x00000000, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000001, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000002, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000003, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x00009150, 0x00600000, 0xffffffff }; #define CAICOS_MGCG_DISABLE_LENGTH sizeof(caicos_mgcg_disable) / (3 * sizeof(u32)) static const u32 
caicos_mgcg_enable[] = { 0x0000802c, 0xc0000000, 0xffffffff, 0x000008f8, 0x00000000, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000001, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000002, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000003, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x00009150, 0x46944040, 0xffffffff }; #define CAICOS_MGCG_ENABLE_LENGTH sizeof(caicos_mgcg_enable) / (3 * sizeof(u32)) //********* TURKS **************// static const u32 turks_cgcg_cgls_default[] = { 0x000008f8, 0x00000010, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000011, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000012, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000013, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000014, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000015, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000016, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000017, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000018, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000019, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000001a, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000001b, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000020, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000021, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000022, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000023, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000024, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000025, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000026, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000027, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 
0x000008f8, 0x00000028, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000029, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000002a, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000002b, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff }; #define TURKS_CGCG_CGLS_DEFAULT_LENGTH sizeof(turks_cgcg_cgls_default) / (3 * sizeof(u32)) static const u32 turks_cgcg_cgls_disable[] = { 0x000008f8, 0x00000010, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000011, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000012, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000013, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000014, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000015, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000016, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000017, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000018, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000019, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x0000001a, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x0000001b, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000020, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000021, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000022, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000023, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000024, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000025, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000026, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000027, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000028, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000029, 0xffffffff, 0x000008fc, 0x00000000, 
0xffffffff, 0x000008f8, 0x0000002a, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000002b, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x00000644, 0x000f7912, 0x001f4180, 0x00000644, 0x000f3812, 0x001f4180 }; #define TURKS_CGCG_CGLS_DISABLE_LENGTH sizeof(turks_cgcg_cgls_disable) / (3 * sizeof(u32)) static const u32 turks_cgcg_cgls_enable[] = { /* 0x0000c124, 0x84180000, 0x00180000, */ 0x00000644, 0x000f7892, 0x001f4080, 0x000008f8, 0x00000010, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000011, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000012, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000013, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000014, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000015, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000016, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000017, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000018, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000019, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000001a, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000001b, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000020, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000021, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000022, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000023, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000024, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000025, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000026, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000027, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000028, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000029, 0xffffffff, 
0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x0000002a, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x0000002b, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff }; #define TURKS_CGCG_CGLS_ENABLE_LENGTH sizeof(turks_cgcg_cgls_enable) / (3 * sizeof(u32)) // These are the sequences for turks_mgcg_shls static const u32 turks_mgcg_default[] = { 0x0000802c, 0xc0000000, 0xffffffff, 0x00005448, 0x00000100, 0xffffffff, 0x000055e4, 0x00600100, 0xffffffff, 0x0000160c, 0x00000100, 0xffffffff, 0x0000c164, 0x00000100, 0xffffffff, 0x00008a18, 0x00000100, 0xffffffff, 0x0000897c, 0x06000100, 0xffffffff, 0x00008b28, 0x00000100, 0xffffffff, 0x00009144, 0x00000100, 0xffffffff, 0x00009a60, 0x00000100, 0xffffffff, 0x00009868, 0x00000100, 0xffffffff, 0x00008d58, 0x00000100, 0xffffffff, 0x00009510, 0x00000100, 0xffffffff, 0x0000949c, 0x00000100, 0xffffffff, 0x00009654, 0x00000100, 0xffffffff, 0x00009030, 0x00000100, 0xffffffff, 0x00009034, 0x00000100, 0xffffffff, 0x00009038, 0x00000100, 0xffffffff, 0x0000903c, 0x00000100, 0xffffffff, 0x00009040, 0x00000100, 0xffffffff, 0x0000a200, 0x00000100, 0xffffffff, 0x0000a204, 0x00000100, 0xffffffff, 0x0000a208, 0x00000100, 0xffffffff, 0x0000a20c, 0x00000100, 0xffffffff, 0x0000977c, 0x00000100, 0xffffffff, 0x00003f80, 0x00000100, 0xffffffff, 0x0000a210, 0x00000100, 0xffffffff, 0x0000a214, 0x00000100, 0xffffffff, 0x000004d8, 0x00000100, 0xffffffff, 0x00009784, 0x00000100, 0xffffffff, 0x00009698, 0x00000100, 0xffffffff, 0x000004d4, 0x00000200, 0xffffffff, 0x000004d0, 0x00000000, 0xffffffff, 0x000030cc, 0x00000100, 0xffffffff, 0x0000d0c0, 0x00000100, 0xffffffff, 0x0000915c, 0x00010000, 0xffffffff, 0x00009160, 0x00030002, 0xffffffff, 0x00009164, 0x00050004, 0xffffffff, 0x00009168, 0x00070006, 0xffffffff, 0x00009178, 0x00070000, 0xffffffff, 0x0000917c, 0x00030002, 0xffffffff, 0x00009180, 0x00050004, 0xffffffff, 0x0000918c, 0x00010006, 0xffffffff, 0x00009190, 0x00090008, 0xffffffff, 0x00009194, 0x00070000, 0xffffffff, 0x00009198, 
0x00030002, 0xffffffff, 0x0000919c, 0x00050004, 0xffffffff, 0x000091a8, 0x00010006, 0xffffffff, 0x000091ac, 0x00090008, 0xffffffff, 0x000091b0, 0x00070000, 0xffffffff, 0x000091b4, 0x00030002, 0xffffffff, 0x000091b8, 0x00050004, 0xffffffff, 0x000091c4, 0x00010006, 0xffffffff, 0x000091c8, 0x00090008, 0xffffffff, 0x000091cc, 0x00070000, 0xffffffff, 0x000091d0, 0x00030002, 0xffffffff, 0x000091d4, 0x00050004, 0xffffffff, 0x000091e0, 0x00010006, 0xffffffff, 0x000091e4, 0x00090008, 0xffffffff, 0x000091e8, 0x00000000, 0xffffffff, 0x000091ec, 0x00070000, 0xffffffff, 0x000091f0, 0x00030002, 0xffffffff, 0x000091f4, 0x00050004, 0xffffffff, 0x00009200, 0x00010006, 0xffffffff, 0x00009204, 0x00090008, 0xffffffff, 0x00009208, 0x00070000, 0xffffffff, 0x0000920c, 0x00030002, 0xffffffff, 0x00009210, 0x00050004, 0xffffffff, 0x0000921c, 0x00010006, 0xffffffff, 0x00009220, 0x00090008, 0xffffffff, 0x00009294, 0x00000000, 0xffffffff, 0x000008f8, 0x00000010, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000011, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000012, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000013, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000014, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000015, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000016, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000017, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000018, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000019, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000001a, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x0000001b, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff }; #define TURKS_MGCG_DEFAULT_LENGTH sizeof(turks_mgcg_default) / (3 * sizeof(u32)) static const u32 turks_mgcg_disable[] = { 0x0000802c, 0xc0000000, 0xffffffff, 0x000008f8, 0x00000000, 0xffffffff, 0x000008fc, 
0xffffffff, 0xffffffff, 0x000008f8, 0x00000001, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000002, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000003, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x00009150, 0x00600000, 0xffffffff }; #define TURKS_MGCG_DISABLE_LENGTH sizeof(turks_mgcg_disable) / (3 * sizeof(u32)) static const u32 turks_mgcg_enable[] = { 0x0000802c, 0xc0000000, 0xffffffff, 0x000008f8, 0x00000000, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000001, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000002, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000003, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x00009150, 0x6e944000, 0xffffffff }; #define TURKS_MGCG_ENABLE_LENGTH sizeof(turks_mgcg_enable) / (3 * sizeof(u32)) #endif #ifndef BTC_SYSLS_SEQUENCE #define BTC_SYSLS_SEQUENCE 100 //********* BARTS **************// static const u32 barts_sysls_default[] = { /* Register, Value, Mask bits */ 0x000055e8, 0x00000000, 0xffffffff, 0x0000d0bc, 0x00000000, 0xffffffff, 0x000015c0, 0x000c1401, 0xffffffff, 0x0000264c, 0x000c0400, 0xffffffff, 0x00002648, 0x000c0400, 0xffffffff, 0x00002650, 0x000c0400, 0xffffffff, 0x000020b8, 0x000c0400, 0xffffffff, 0x000020bc, 0x000c0400, 0xffffffff, 0x000020c0, 0x000c0c80, 0xffffffff, 0x0000f4a0, 0x000000c0, 0xffffffff, 0x0000f4a4, 0x00680fff, 0xffffffff, 0x000004c8, 0x00000001, 0xffffffff, 0x000064ec, 0x00000000, 0xffffffff, 0x00000c7c, 0x00000000, 0xffffffff, 0x00006dfc, 0x00000000, 0xffffffff }; #define BARTS_SYSLS_DEFAULT_LENGTH sizeof(barts_sysls_default) / (3 * sizeof(u32)) static const u32 barts_sysls_disable[] = { 0x000055e8, 0x00000000, 0xffffffff, 0x0000d0bc, 0x00000000, 0xffffffff, 0x000015c0, 0x00041401, 0xffffffff, 0x0000264c, 0x00040400, 0xffffffff, 0x00002648, 0x00040400, 0xffffffff, 0x00002650, 0x00040400, 0xffffffff, 0x000020b8, 0x00040400, 0xffffffff, 0x000020bc, 0x00040400, 0xffffffff, 0x000020c0, 0x00040c80, 
0xffffffff, 0x0000f4a0, 0x000000c0, 0xffffffff, 0x0000f4a4, 0x00680000, 0xffffffff, 0x000004c8, 0x00000001, 0xffffffff, 0x000064ec, 0x00007ffd, 0xffffffff, 0x00000c7c, 0x0000ff00, 0xffffffff, 0x00006dfc, 0x0000007f, 0xffffffff }; #define BARTS_SYSLS_DISABLE_LENGTH sizeof(barts_sysls_disable) / (3 * sizeof(u32)) static const u32 barts_sysls_enable[] = { 0x000055e8, 0x00000001, 0xffffffff, 0x0000d0bc, 0x00000100, 0xffffffff, 0x000015c0, 0x000c1401, 0xffffffff, 0x0000264c, 0x000c0400, 0xffffffff, 0x00002648, 0x000c0400, 0xffffffff, 0x00002650, 0x000c0400, 0xffffffff, 0x000020b8, 0x000c0400, 0xffffffff, 0x000020bc, 0x000c0400, 0xffffffff, 0x000020c0, 0x000c0c80, 0xffffffff, 0x0000f4a0, 0x000000c0, 0xffffffff, 0x0000f4a4, 0x00680fff, 0xffffffff, 0x000004c8, 0x00000000, 0xffffffff, 0x000064ec, 0x00000000, 0xffffffff, 0x00000c7c, 0x00000000, 0xffffffff, 0x00006dfc, 0x00000000, 0xffffffff }; #define BARTS_SYSLS_ENABLE_LENGTH sizeof(barts_sysls_enable) / (3 * sizeof(u32)) //********* CAICOS **************// static const u32 caicos_sysls_default[] = { 0x000055e8, 0x00000000, 0xffffffff, 0x0000d0bc, 0x00000000, 0xffffffff, 0x000015c0, 0x000c1401, 0xffffffff, 0x0000264c, 0x000c0400, 0xffffffff, 0x00002648, 0x000c0400, 0xffffffff, 0x00002650, 0x000c0400, 0xffffffff, 0x000020b8, 0x000c0400, 0xffffffff, 0x000020bc, 0x000c0400, 0xffffffff, 0x0000f4a0, 0x000000c0, 0xffffffff, 0x0000f4a4, 0x00680fff, 0xffffffff, 0x000004c8, 0x00000001, 0xffffffff, 0x000064ec, 0x00000000, 0xffffffff, 0x00000c7c, 0x00000000, 0xffffffff, 0x00006dfc, 0x00000000, 0xffffffff }; #define CAICOS_SYSLS_DEFAULT_LENGTH sizeof(caicos_sysls_default) / (3 * sizeof(u32)) static const u32 caicos_sysls_disable[] = { 0x000055e8, 0x00000000, 0xffffffff, 0x0000d0bc, 0x00000000, 0xffffffff, 0x000015c0, 0x00041401, 0xffffffff, 0x0000264c, 0x00040400, 0xffffffff, 0x00002648, 0x00040400, 0xffffffff, 0x00002650, 0x00040400, 0xffffffff, 0x000020b8, 0x00040400, 0xffffffff, 0x000020bc, 0x00040400, 0xffffffff, 0x0000f4a0, 
0x000000c0, 0xffffffff, 0x0000f4a4, 0x00680000, 0xffffffff, 0x000004c8, 0x00000001, 0xffffffff, 0x000064ec, 0x00007ffd, 0xffffffff, 0x00000c7c, 0x0000ff00, 0xffffffff, 0x00006dfc, 0x0000007f, 0xffffffff }; #define CAICOS_SYSLS_DISABLE_LENGTH sizeof(caicos_sysls_disable) / (3 * sizeof(u32)) static const u32 caicos_sysls_enable[] = { 0x000055e8, 0x00000001, 0xffffffff, 0x0000d0bc, 0x00000100, 0xffffffff, 0x000015c0, 0x000c1401, 0xffffffff, 0x0000264c, 0x000c0400, 0xffffffff, 0x00002648, 0x000c0400, 0xffffffff, 0x00002650, 0x000c0400, 0xffffffff, 0x000020b8, 0x000c0400, 0xffffffff, 0x000020bc, 0x000c0400, 0xffffffff, 0x0000f4a0, 0x000000c0, 0xffffffff, 0x0000f4a4, 0x00680fff, 0xffffffff, 0x000064ec, 0x00000000, 0xffffffff, 0x00000c7c, 0x00000000, 0xffffffff, 0x00006dfc, 0x00000000, 0xffffffff, 0x000004c8, 0x00000000, 0xffffffff }; #define CAICOS_SYSLS_ENABLE_LENGTH sizeof(caicos_sysls_enable) / (3 * sizeof(u32)) //********* TURKS **************// static const u32 turks_sysls_default[] = { 0x000055e8, 0x00000000, 0xffffffff, 0x0000d0bc, 0x00000000, 0xffffffff, 0x000015c0, 0x000c1401, 0xffffffff, 0x0000264c, 0x000c0400, 0xffffffff, 0x00002648, 0x000c0400, 0xffffffff, 0x00002650, 0x000c0400, 0xffffffff, 0x000020b8, 0x000c0400, 0xffffffff, 0x000020bc, 0x000c0400, 0xffffffff, 0x000020c0, 0x000c0c80, 0xffffffff, 0x0000f4a0, 0x000000c0, 0xffffffff, 0x0000f4a4, 0x00680fff, 0xffffffff, 0x000004c8, 0x00000001, 0xffffffff, 0x000064ec, 0x00000000, 0xffffffff, 0x00000c7c, 0x00000000, 0xffffffff, 0x00006dfc, 0x00000000, 0xffffffff }; #define TURKS_SYSLS_DEFAULT_LENGTH sizeof(turks_sysls_default) / (3 * sizeof(u32)) static const u32 turks_sysls_disable[] = { 0x000055e8, 0x00000000, 0xffffffff, 0x0000d0bc, 0x00000000, 0xffffffff, 0x000015c0, 0x00041401, 0xffffffff, 0x0000264c, 0x00040400, 0xffffffff, 0x00002648, 0x00040400, 0xffffffff, 0x00002650, 0x00040400, 0xffffffff, 0x000020b8, 0x00040400, 0xffffffff, 0x000020bc, 0x00040400, 0xffffffff, 0x000020c0, 0x00040c80, 0xffffffff, 
0x0000f4a0, 0x000000c0, 0xffffffff, 0x0000f4a4, 0x00680000, 0xffffffff, 0x000004c8, 0x00000001, 0xffffffff, 0x000064ec, 0x00007ffd, 0xffffffff, 0x00000c7c, 0x0000ff00, 0xffffffff, 0x00006dfc, 0x0000007f, 0xffffffff }; #define TURKS_SYSLS_DISABLE_LENGTH sizeof(turks_sysls_disable) / (3 * sizeof(u32)) static const u32 turks_sysls_enable[] = { 0x000055e8, 0x00000001, 0xffffffff, 0x0000d0bc, 0x00000100, 0xffffffff, 0x000015c0, 0x000c1401, 0xffffffff, 0x0000264c, 0x000c0400, 0xffffffff, 0x00002648, 0x000c0400, 0xffffffff, 0x00002650, 0x000c0400, 0xffffffff, 0x000020b8, 0x000c0400, 0xffffffff, 0x000020bc, 0x000c0400, 0xffffffff, 0x000020c0, 0x000c0c80, 0xffffffff, 0x0000f4a0, 0x000000c0, 0xffffffff, 0x0000f4a4, 0x00680fff, 0xffffffff, 0x000004c8, 0x00000000, 0xffffffff, 0x000064ec, 0x00000000, 0xffffffff, 0x00000c7c, 0x00000000, 0xffffffff, 0x00006dfc, 0x00000000, 0xffffffff }; #define TURKS_SYSLS_ENABLE_LENGTH sizeof(turks_sysls_enable) / (3 * sizeof(u32)) #endif u32 btc_valid_sclk[40] = { 5000, 10000, 15000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 55000, 60000, 65000, 70000, 75000, 80000, 85000, 90000, 95000, 100000, 105000, 110000, 11500, 120000, 125000, 130000, 135000, 140000, 145000, 150000, 155000, 160000, 165000, 170000, 175000, 180000, 185000, 190000, 195000, 200000 }; static const struct radeon_blacklist_clocks btc_blacklist_clocks[] = { { 10000, 30000, RADEON_SCLK_UP }, { 15000, 30000, RADEON_SCLK_UP }, { 20000, 30000, RADEON_SCLK_UP }, { 25000, 30000, RADEON_SCLK_UP } }; void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table, u32 *max_clock) { u32 i, clock = 0; if ((table == NULL) || (table->count == 0)) { *max_clock = clock; return; } for (i = 0; i < table->count; i++) { if (clock < table->entries[i].clk) clock = table->entries[i].clk; } *max_clock = clock; } void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, u32 clock, u16 max_voltage, u16 *voltage) { u32 i; 
if ((table == NULL) || (table->count == 0)) return; for (i= 0; i < table->count; i++) { if (clock <= table->entries[i].clk) { if (*voltage < table->entries[i].v) *voltage = (u16)((table->entries[i].v < max_voltage) ? table->entries[i].v : max_voltage); return; } } *voltage = (*voltage > max_voltage) ? *voltage : max_voltage; } static u32 btc_find_valid_clock(struct radeon_clock_array *clocks, u32 max_clock, u32 requested_clock) { unsigned int i; if ((clocks == NULL) || (clocks->count == 0)) return (requested_clock < max_clock) ? requested_clock : max_clock; for (i = 0; i < clocks->count; i++) { if (clocks->values[i] >= requested_clock) return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock; } return (clocks->values[clocks->count - 1] < max_clock) ? clocks->values[clocks->count - 1] : max_clock; } static u32 btc_get_valid_mclk(struct radeon_device *rdev, u32 max_mclk, u32 requested_mclk) { return btc_find_valid_clock(&rdev->pm.dpm.dyn_state.valid_mclk_values, max_mclk, requested_mclk); } static u32 btc_get_valid_sclk(struct radeon_device *rdev, u32 max_sclk, u32 requested_sclk) { return btc_find_valid_clock(&rdev->pm.dpm.dyn_state.valid_sclk_values, max_sclk, requested_sclk); } void btc_skip_blacklist_clocks(struct radeon_device *rdev, const u32 max_sclk, const u32 max_mclk, u32 *sclk, u32 *mclk) { int i, num_blacklist_clocks; if ((sclk == NULL) || (mclk == NULL)) return; num_blacklist_clocks = ARRAY_SIZE(btc_blacklist_clocks); for (i = 0; i < num_blacklist_clocks; i++) { if ((btc_blacklist_clocks[i].sclk == *sclk) && (btc_blacklist_clocks[i].mclk == *mclk)) break; } if (i < num_blacklist_clocks) { if (btc_blacklist_clocks[i].action == RADEON_SCLK_UP) { *sclk = btc_get_valid_sclk(rdev, max_sclk, *sclk + 1); if (*sclk < max_sclk) btc_skip_blacklist_clocks(rdev, max_sclk, max_mclk, sclk, mclk); } } } void btc_adjust_clock_combinations(struct radeon_device *rdev, const struct radeon_clock_and_voltage_limits *max_limits, struct rv7xx_pl *pl) { if 
((pl->mclk == 0) || (pl->sclk == 0))
		return;

	if (pl->mclk == pl->sclk)
		return;

	if (pl->mclk > pl->sclk) {
		/* mclk/sclk ratio too high: raise sclk (ceiling division). */
		if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) >
		    rdev->pm.dpm.dyn_state.mclk_sclk_ratio)
			pl->sclk = btc_get_valid_sclk(rdev,
						      max_limits->sclk,
						      (pl->mclk +
						       (rdev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /
						      rdev->pm.dpm.dyn_state.mclk_sclk_ratio);
	} else {
		/* sclk-mclk gap too large: raise mclk toward sclk. */
		if ((pl->sclk - pl->mclk) >
		    rdev->pm.dpm.dyn_state.sclk_mclk_delta)
			pl->mclk = btc_get_valid_mclk(rdev,
						      max_limits->mclk,
						      pl->sclk -
						      rdev->pm.dpm.dyn_state.sclk_mclk_delta);
	}
}

/*
 * Return the smallest table voltage >= @voltage; if the request is
 * above all entries, return the last (largest) one.
 * NOTE(review): assumes the voltage table is sorted ascending — confirm.
 */
static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
{
	unsigned int i;

	for (i = 0; i < table->count; i++) {
		if (voltage <= table->entries[i].value)
			return table->entries[i].value;
	}

	return table->entries[table->count - 1].value;
}

/*
 * Keep vddc and vddci within vddc_vddci_delta of each other by
 * raising the lower rail, clamped to its respective maximum.
 * No-op if either rail is 0 (unprogrammed).
 */
void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
				   u16 max_vddc, u16 max_vddci,
				   u16 *vddc, u16 *vddci)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	u16 new_voltage;

	if ((0 == *vddc) || (0 == *vddci))
		return;

	if (*vddc > *vddci) {
		if ((*vddc - *vddci) >
		    rdev->pm.dpm.dyn_state.vddc_vddci_delta) {
			new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table,
						       (*vddc - rdev->pm.dpm.dyn_state.vddc_vddci_delta));
			*vddci = (new_voltage < max_vddci) ?
				new_voltage : max_vddci;
		}
	} else {
		if ((*vddci - *vddc) >
		    rdev->pm.dpm.dyn_state.vddc_vddci_delta) {
			new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table,
						       (*vddci - rdev->pm.dpm.dyn_state.vddc_vddci_delta));
			*vddc = (new_voltage < max_vddc) ?
new_voltage : max_vddc;
		}
	}
}

/*
 * Configure the BIF for dynamic PCIe gen2 speed switching.  Only acts
 * when the link partner has indicated gen2 capability and the board
 * did not boot in gen2 mode; register write order (including the
 * failed-speed-change counter clear around a 10us delay) is deliberate.
 */
static void btc_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
					     bool enable)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 tmp, bif;

	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if (enable) {
		if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
		    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
			if (!pi->boot_in_gen2) {
				bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
				bif |= CG_CLIENT_REQ(0xd);
				WREG32(CG_BIF_REQ_AND_RSP, bif);

				tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
				tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
				tmp |= LC_GEN2_EN_STRAP;

				tmp |= LC_CLR_FAILED_SPD_CHANGE_CNT;
				WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
				udelay(10);
				tmp &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
				WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
			}
		}
	} else {
		/* Disable path: note the || (vs && above) is intentional. */
		if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
		    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
			if (!pi->boot_in_gen2) {
				bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
				bif |= CG_CLIENT_REQ(0xd);
				WREG32(CG_BIF_REQ_AND_RSP, bif);

				tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
				tmp &= ~LC_GEN2_EN_STRAP;
			}
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
		}
	}
}

/* Toggle dynamic PCIe gen2: BIF setup plus the PWRMGT enable bit. */
static void btc_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
					 bool enable)
{
	btc_enable_bif_dynamic_pcie_gen2(rdev, enable);

	if (enable)
		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
}

/*
 * Ask the SMC to leave ULV (if supported).
 * Returns -EINVAL when the firmware rejects the message.
 */
static int btc_disable_ulv(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	if (eg_pi->ulv.supported) {
		if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) != PPSMC_Result_OK)
			return -EINVAL;
	}
	return 0;
}

/*
 * Fill the SMC state table's ULV entry from the stored ULV power level.
 * Returns -EINVAL when no ULV vddc is configured or conversion fails.
 */
static int btc_populate_ulv_state(struct radeon_device *rdev,
				  RV770_SMC_STATETABLE *table)
{
	int ret = -EINVAL;
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct rv7xx_pl *ulv_pl = eg_pi->ulv.pl;

	if (ulv_pl->vddc) {
		ret = cypress_convert_power_level_to_smc(rdev,
							 ulv_pl,
							 &table->ULVState.levels[0],
							 PPSMC_DISPLAY_WATERMARK_LOW);
		if (ret == 0) {
			table->ULVState.levels[0].arbValue =
MC_CG_ARB_FREQ_F0;
			table->ULVState.levels[0].ACIndex = 1;

			/* Replicate level 0 across all three ULV levels. */
			table->ULVState.levels[1] = table->ULVState.levels[0];
			table->ULVState.levels[2] = table->ULVState.levels[0];

			table->ULVState.flags |= PPSMC_SWSTATE_FLAG_DC;

			WREG32(CG_ULV_CONTROL, BTC_CGULVCONTROL_DFLT);
			WREG32(CG_ULV_PARAMETER, BTC_CGULVPARAMETER_DFLT);
		}
	}

	return ret;
}

/*
 * Populate the SMC ACPI state, then force every level's AC index to 0.
 * Propagates the cypress helper's error code.
 */
static int btc_populate_smc_acpi_state(struct radeon_device *rdev,
				       RV770_SMC_STATETABLE *table)
{
	int ret = cypress_populate_smc_acpi_state(rdev, table);

	if (ret == 0) {
		table->ACPIState.levels[0].ACIndex = 0;
		table->ACPIState.levels[1].ACIndex = 0;
		table->ACPIState.levels[2].ACIndex = 0;
	}

	return ret;
}

/*
 * Apply a sequence of @count (register, value, mask) triples:
 * read-modify-write each register, changing only the masked bits.
 */
void btc_program_mgcg_hw_sequence(struct radeon_device *rdev,
				  const u32 *sequence, u32 count)
{
	u32 i, length = count * 3;
	u32 tmp;

	for (i = 0; i < length; i += 3) {
		tmp = RREG32(sequence[i]);
		tmp &= ~sequence[i + 2];
		tmp |= sequence[i + 1] & sequence[i + 2];
		WREG32(sequence[i], tmp);
	}
}

/* Program the per-family CGCG/CGLS default register sequence. */
static void btc_cg_clock_gating_default(struct radeon_device *rdev)
{
	u32 count;
	const u32 *p = NULL;

	if (rdev->family == CHIP_BARTS) {
		p = (const u32 *)&barts_cgcg_cgls_default;
		count = BARTS_CGCG_CGLS_DEFAULT_LENGTH;
	} else if (rdev->family == CHIP_TURKS) {
		p = (const u32 *)&turks_cgcg_cgls_default;
		count = TURKS_CGCG_CGLS_DEFAULT_LENGTH;
	} else if (rdev->family == CHIP_CAICOS) {
		p = (const u32 *)&caicos_cgcg_cgls_default;
		count = CAICOS_CGCG_CGLS_DEFAULT_LENGTH;
	} else
		return;	/* unknown family: nothing to program */

	btc_program_mgcg_hw_sequence(rdev, p, count);
}

/* Select and program the family's CGCG/CGLS enable or disable sequence. */
static void btc_cg_clock_gating_enable(struct radeon_device *rdev,
				       bool enable)
{
	u32 count;
	const u32 *p = NULL;

	if (enable) {
		if (rdev->family == CHIP_BARTS) {
			p = (const u32 *)&barts_cgcg_cgls_enable;
			count = BARTS_CGCG_CGLS_ENABLE_LENGTH;
		} else if (rdev->family == CHIP_TURKS) {
			p = (const u32 *)&turks_cgcg_cgls_enable;
			count = TURKS_CGCG_CGLS_ENABLE_LENGTH;
		} else if (rdev->family == CHIP_CAICOS) {
			p = (const u32 *)&caicos_cgcg_cgls_enable;
			count = CAICOS_CGCG_CGLS_ENABLE_LENGTH;
		} else
			return;
	} else {
		if (rdev->family == CHIP_BARTS)
{
			p = (const u32 *)&barts_cgcg_cgls_disable;
			count = BARTS_CGCG_CGLS_DISABLE_LENGTH;
		} else if (rdev->family == CHIP_TURKS) {
			p = (const u32 *)&turks_cgcg_cgls_disable;
			count = TURKS_CGCG_CGLS_DISABLE_LENGTH;
		} else if (rdev->family == CHIP_CAICOS) {
			p = (const u32 *)&caicos_cgcg_cgls_disable;
			count = CAICOS_CGCG_CGLS_DISABLE_LENGTH;
		} else
			return;
	}

	btc_program_mgcg_hw_sequence(rdev, p, count);
}

/* Program the per-family MGCG default register sequence. */
static void btc_mg_clock_gating_default(struct radeon_device *rdev)
{
	u32 count;
	const u32 *p = NULL;

	if (rdev->family == CHIP_BARTS) {
		p = (const u32 *)&barts_mgcg_default;
		count = BARTS_MGCG_DEFAULT_LENGTH;
	} else if (rdev->family == CHIP_TURKS) {
		p = (const u32 *)&turks_mgcg_default;
		count = TURKS_MGCG_DEFAULT_LENGTH;
	} else if (rdev->family == CHIP_CAICOS) {
		p = (const u32 *)&caicos_mgcg_default;
		count = CAICOS_MGCG_DEFAULT_LENGTH;
	} else
		return;	/* unknown family: nothing to program */

	btc_program_mgcg_hw_sequence(rdev, p, count);
}

/* Select and program the family's MGCG enable or disable sequence. */
static void btc_mg_clock_gating_enable(struct radeon_device *rdev,
				       bool enable)
{
	u32 count;
	const u32 *p = NULL;

	if (enable) {
		if (rdev->family == CHIP_BARTS) {
			p = (const u32 *)&barts_mgcg_enable;
			count = BARTS_MGCG_ENABLE_LENGTH;
		} else if (rdev->family == CHIP_TURKS) {
			p = (const u32 *)&turks_mgcg_enable;
			count = TURKS_MGCG_ENABLE_LENGTH;
		} else if (rdev->family == CHIP_CAICOS) {
			p = (const u32 *)&caicos_mgcg_enable;
			count = CAICOS_MGCG_ENABLE_LENGTH;
		} else
			return;
	} else {
		if (rdev->family == CHIP_BARTS) {
			p = (const u32 *)&barts_mgcg_disable[0];
			count = BARTS_MGCG_DISABLE_LENGTH;
		} else if (rdev->family == CHIP_TURKS) {
			p = (const u32 *)&turks_mgcg_disable[0];
			count = TURKS_MGCG_DISABLE_LENGTH;
		} else if (rdev->family == CHIP_CAICOS) {
			p = (const u32 *)&caicos_mgcg_disable[0];
			count = CAICOS_MGCG_DISABLE_LENGTH;
		} else
			return;
	}

	btc_program_mgcg_hw_sequence(rdev, p, count);
}

/* Program the per-family system LS (sysls) default register sequence. */
static void btc_ls_clock_gating_default(struct radeon_device *rdev)
{
	u32 count;
	const u32 *p = NULL;

	if (rdev->family == CHIP_BARTS) {
		p = (const u32 *)&barts_sysls_default;
		count =
BARTS_SYSLS_DEFAULT_LENGTH; } else if (rdev->family == CHIP_TURKS) { p = (const u32 *)&turks_sysls_default; count = TURKS_SYSLS_DEFAULT_LENGTH; } else if (rdev->family == CHIP_CAICOS) { p = (const u32 *)&caicos_sysls_default; count = CAICOS_SYSLS_DEFAULT_LENGTH; } else return; btc_program_mgcg_hw_sequence(rdev, p, count); } static void btc_ls_clock_gating_enable(struct radeon_device *rdev, bool enable) { u32 count; const u32 *p = NULL; if (enable) { if (rdev->family == CHIP_BARTS) { p = (const u32 *)&barts_sysls_enable; count = BARTS_SYSLS_ENABLE_LENGTH; } else if (rdev->family == CHIP_TURKS) { p = (const u32 *)&turks_sysls_enable; count = TURKS_SYSLS_ENABLE_LENGTH; } else if (rdev->family == CHIP_CAICOS) { p = (const u32 *)&caicos_sysls_enable; count = CAICOS_SYSLS_ENABLE_LENGTH; } else return; } else { if (rdev->family == CHIP_BARTS) { p = (const u32 *)&barts_sysls_disable; count = BARTS_SYSLS_DISABLE_LENGTH; } else if (rdev->family == CHIP_TURKS) { p = (const u32 *)&turks_sysls_disable; count = TURKS_SYSLS_DISABLE_LENGTH; } else if (rdev->family == CHIP_CAICOS) { p = (const u32 *)&caicos_sysls_disable; count = CAICOS_SYSLS_DISABLE_LENGTH; } else return; } btc_program_mgcg_hw_sequence(rdev, p, count); } bool btc_dpm_enabled(struct radeon_device *rdev) { if (rv770_is_smc_running(rdev)) return true; else return false; } static int btc_init_smc_table(struct radeon_device *rdev, struct radeon_ps *radeon_boot_state) { struct rv7xx_power_info *pi = rv770_get_pi(rdev); struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); RV770_SMC_STATETABLE *table = &pi->smc_statetable; int ret; memset(table, 0, sizeof(RV770_SMC_STATETABLE)); cypress_populate_smc_voltage_tables(rdev, table); switch (rdev->pm.int_thermal_type) { case THERMAL_TYPE_EVERGREEN: case THERMAL_TYPE_EMC2103_WITH_INTERNAL: table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL; break; case THERMAL_TYPE_NONE: table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE; break; default: 
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
		break;
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (pi->mem_gddr5)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	ret = cypress_populate_smc_initial_state(rdev, radeon_boot_state, table);
	if (ret)
		return ret;

	if (eg_pi->sclk_deep_sleep)
		WREG32_P(SCLK_PSKIP_CNTL, PSKIP_ON_ALLOW_STOP_HI(32),
			 ~PSKIP_ON_ALLOW_STOP_HI_MASK);

	ret = btc_populate_smc_acpi_state(rdev, table);
	if (ret)
		return ret;

	if (eg_pi->ulv.supported) {
		/* Best effort: drop ULV support if its state can't be built. */
		ret = btc_populate_ulv_state(rdev, table);
		if (ret)
			eg_pi->ulv.supported = false;
	}

	table->driverState = table->initialState;

	return rv770_copy_bytes_to_smc(rdev,
				       pi->state_table_start,
				       (u8 *)table,
				       sizeof(RV770_SMC_STATETABLE),
				       pi->sram_end);
}

/*
 * Choose activity thresholds (rlp/rmp/lhp/lmp) for the new state:
 * UVD states without SMC UVD handling get a fixed biased set,
 * everything else uses the per-state-type tuning table.
 */
static void btc_set_at_for_uvd(struct radeon_device *rdev,
			       struct radeon_ps *radeon_new_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	int idx = 0;

	if (r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2))
		idx = 1;

	if ((idx == 1) && !eg_pi->smu_uvd_hs) {
		pi->rlp = 10;
		pi->rmp = 100;
		pi->lhp = 100;
		pi->lmp = 10;
	} else {
		pi->rlp = eg_pi->ats[idx].rlp;
		pi->rmp = eg_pi->ats[idx].rmp;
		pi->lhp = eg_pi->ats[idx].lhp;
		pi->lmp = eg_pi->ats[idx].lmp;
	}
}

/* Tell the SMC (via soft register) whether the new state is a UVD state. */
void btc_notify_uvd_to_smc(struct radeon_device *rdev,
			   struct radeon_ps *radeon_new_state)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	if (r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2)) {
		rv770_write_smc_soft_register(rdev,
					      RV770_SMC_SOFT_REGISTER_uvd_enabled, 1);
		eg_pi->uvd_enabled = true;
	} else {
		rv770_write_smc_soft_register(rdev,
					      RV770_SMC_SOFT_REGISTER_uvd_enabled, 0);
eg_pi->uvd_enabled = false;
	}
}

/* Ask the SMC to restore its power-on defaults; -EINVAL on refusal. */
int btc_reset_to_default(struct radeon_device *rdev)
{
	if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}

/*
 * Stop the SMC: wait (bounded by usec_timeout) for the LB sync reset
 * select to leave state 1, settle for 100us, then halt the controller.
 */
static void btc_stop_smc(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (((RREG32(LB_SYNC_RESET_SEL) & LB_SYNC_RESET_SEL_MASK) >> LB_SYNC_RESET_SEL_SHIFT) != 1)
			break;
		udelay(1);
	}
	udelay(100);

	r7xx_stop_smc(rdev);
}

/* Snapshot the bootup MC arbiter registers for later restore. */
void btc_read_arb_registers(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct evergreen_arb_registers *arb_registers =
		&eg_pi->bootup_arb_registers;

	arb_registers->mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
	arb_registers->mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
	arb_registers->mc_arb_rfsh_rate = RREG32(MC_ARB_RFSH_RATE);
	arb_registers->mc_arb_burst_time = RREG32(MC_ARB_BURST_TIME);
}

/* Write the saved arbiter timing into the ARB slot-0 fields. */
static void btc_set_arb0_registers(struct radeon_device *rdev,
				   struct evergreen_arb_registers *arb_registers)
{
	u32 val;

	WREG32(MC_ARB_DRAM_TIMING, arb_registers->mc_arb_dram_timing);
	WREG32(MC_ARB_DRAM_TIMING2, arb_registers->mc_arb_dram_timing2);

	/* Only the POWERMODE0 field of the refresh rate is restored. */
	val = (arb_registers->mc_arb_rfsh_rate & POWERMODE0_MASK) >>
		POWERMODE0_SHIFT;
	WREG32_P(MC_ARB_RFSH_RATE, POWERMODE0(val), ~POWERMODE0_MASK);

	/* Likewise only STATE0 of the burst time. */
	val = (arb_registers->mc_arb_burst_time & STATE0_MASK) >>
		STATE0_SHIFT;
	WREG32_P(MC_ARB_BURST_TIME, STATE0(val), ~STATE0_MASK);
}

/* Restore the bootup arbiter timing when ULV is in use. */
static void btc_set_boot_state_timing(struct radeon_device *rdev)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	if (eg_pi->ulv.supported)
		btc_set_arb0_registers(rdev, &eg_pi->bootup_arb_registers);
}

/*
 * A state may enter ULV only when its low level matches the stored
 * ULV level's mclk and vddci.
 */
static bool btc_is_state_ulv_compatible(struct radeon_device *rdev,
					struct radeon_ps *radeon_state)
{
	struct rv7xx_ps *state = rv770_get_ps(radeon_state);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct rv7xx_pl *ulv_pl = eg_pi->ulv.pl;

	if (state->low.mclk != ulv_pl->mclk)
		return false;

	if (state->low.vddci != ulv_pl->vddci)
		return false;

	/*
XXX check minclocks, etc. */ return true; } static int btc_set_ulv_dram_timing(struct radeon_device *rdev) { u32 val; struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); struct rv7xx_pl *ulv_pl = eg_pi->ulv.pl; radeon_atom_set_engine_dram_timings(rdev, ulv_pl->sclk, ulv_pl->mclk); val = rv770_calculate_memory_refresh_rate(rdev, ulv_pl->sclk); WREG32_P(MC_ARB_RFSH_RATE, POWERMODE0(val), ~POWERMODE0_MASK); val = cypress_calculate_burst_time(rdev, ulv_pl->sclk, ulv_pl->mclk); WREG32_P(MC_ARB_BURST_TIME, STATE0(val), ~STATE0_MASK); return 0; } static int btc_enable_ulv(struct radeon_device *rdev) { if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) != PPSMC_Result_OK) return -EINVAL; return 0; } static int btc_set_power_state_conditionally_enable_ulv(struct radeon_device *rdev, struct radeon_ps *radeon_new_state) { int ret = 0; struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); if (eg_pi->ulv.supported) { if (btc_is_state_ulv_compatible(rdev, radeon_new_state)) { // Set ARB[0] to reflect the DRAM timing needed for ULV. 
ret = btc_set_ulv_dram_timing(rdev); if (ret == 0) ret = btc_enable_ulv(rdev); } } return ret; } static bool btc_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg) { bool result = true; switch (in_reg) { case MC_SEQ_RAS_TIMING >> 2: *out_reg = MC_SEQ_RAS_TIMING_LP >> 2; break; case MC_SEQ_CAS_TIMING >> 2: *out_reg = MC_SEQ_CAS_TIMING_LP >> 2; break; case MC_SEQ_MISC_TIMING >> 2: *out_reg = MC_SEQ_MISC_TIMING_LP >> 2; break; case MC_SEQ_MISC_TIMING2 >> 2: *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2; break; case MC_SEQ_RD_CTL_D0 >> 2: *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2; break; case MC_SEQ_RD_CTL_D1 >> 2: *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2; break; case MC_SEQ_WR_CTL_D0 >> 2: *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2; break; case MC_SEQ_WR_CTL_D1 >> 2: *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2; break; case MC_PMG_CMD_EMRS >> 2: *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2; break; case MC_PMG_CMD_MRS >> 2: *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2; break; case MC_PMG_CMD_MRS1 >> 2: *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2; break; default: result = false; break; } return result; } static void btc_set_valid_flag(struct evergreen_mc_reg_table *table) { u8 i, j; for (i = 0; i < table->last; i++) { for (j = 1; j < table->num_entries; j++) { if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) { table->valid_flag |= (1 << i); break; } } } } static int btc_set_mc_special_registers(struct radeon_device *rdev, struct evergreen_mc_reg_table *table) { struct rv7xx_power_info *pi = rv770_get_pi(rdev); u8 i, j, k; u32 tmp; for (i = 0, j = table->last; i < table->last; i++) { switch (table->mc_reg_address[i].s1) { case MC_SEQ_MISC1 >> 2: tmp = RREG32(MC_PMG_CMD_EMRS); table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2; table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2; for (k = 0; k < table->num_entries; k++) { table->mc_reg_table_entry[k].mc_data[j] = ((tmp & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); } j++; if (j >= 
SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE) return -EINVAL; tmp = RREG32(MC_PMG_CMD_MRS); table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2; table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2; for (k = 0; k < table->num_entries; k++) { table->mc_reg_table_entry[k].mc_data[j] = (tmp & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); if (!pi->mem_gddr5) table->mc_reg_table_entry[k].mc_data[j] |= 0x100; } j++; if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE) return -EINVAL; break; case MC_SEQ_RESERVE_M >> 2: tmp = RREG32(MC_PMG_CMD_MRS1); table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2; table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2; for (k = 0; k < table->num_entries; k++) { table->mc_reg_table_entry[k].mc_data[j] = (tmp & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); } j++; if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE) return -EINVAL; break; default: break; } } table->last = j; return 0; } static void btc_set_s0_mc_reg_index(struct evergreen_mc_reg_table *table) { u32 i; u16 address; for (i = 0; i < table->last; i++) { table->mc_reg_address[i].s0 = btc_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ? 
address : table->mc_reg_address[i].s1; } } static int btc_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table, struct evergreen_mc_reg_table *eg_table) { u8 i, j; if (table->last > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE) return -EINVAL; if (table->num_entries > MAX_AC_TIMING_ENTRIES) return -EINVAL; for (i = 0; i < table->last; i++) eg_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; eg_table->last = table->last; for (i = 0; i < table->num_entries; i++) { eg_table->mc_reg_table_entry[i].mclk_max = table->mc_reg_table_entry[i].mclk_max; for(j = 0; j < table->last; j++) eg_table->mc_reg_table_entry[i].mc_data[j] = table->mc_reg_table_entry[i].mc_data[j]; } eg_table->num_entries = table->num_entries; return 0; } static int btc_initialize_mc_reg_table(struct radeon_device *rdev) { int ret; struct atom_mc_reg_table *table; struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); struct evergreen_mc_reg_table *eg_table = &eg_pi->mc_reg_table; u8 module_index = rv770_get_memory_module_index(rdev); table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL); if (!table) return -ENOMEM; /* Program additional LP registers that are no longer programmed by VBIOS */ WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING)); WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING)); WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING)); WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2)); WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0)); WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1)); WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0)); WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1)); WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS)); WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS)); WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1)); ret = radeon_atom_init_mc_reg_table(rdev, module_index, table); if (ret) goto init_mc_done; ret = btc_copy_vbios_mc_reg_table(table, eg_table); if (ret) goto init_mc_done; 
btc_set_s0_mc_reg_index(eg_table); ret = btc_set_mc_special_registers(rdev, eg_table); if (ret) goto init_mc_done; btc_set_valid_flag(eg_table); init_mc_done: kfree(table); return ret; } static void btc_init_stutter_mode(struct radeon_device *rdev) { struct rv7xx_power_info *pi = rv770_get_pi(rdev); u32 tmp; if (pi->mclk_stutter_mode_threshold) { if (pi->mem_gddr5) { tmp = RREG32(MC_PMG_AUTO_CFG); if ((0x200 & tmp) == 0) { tmp = (tmp & 0xfffffc0b) | 0x204; WREG32(MC_PMG_AUTO_CFG, tmp); } } } } bool btc_dpm_vblank_too_short(struct radeon_device *rdev) { struct rv7xx_power_info *pi = rv770_get_pi(rdev); u32 vblank_time = r600_dpm_get_vblank_time(rdev); u32 switch_limit = pi->mem_gddr5 ? 450 : 100; if (vblank_time < switch_limit) return true; else return false; } static void btc_apply_state_adjust_rules(struct radeon_device *rdev, struct radeon_ps *rps) { struct rv7xx_ps *ps = rv770_get_ps(rps); struct radeon_clock_and_voltage_limits *max_limits; bool disable_mclk_switching; u32 mclk, sclk; u16 vddc, vddci; u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; if ((rdev->pm.dpm.new_active_crtc_count > 1) || btc_dpm_vblank_too_short(rdev)) disable_mclk_switching = true; else disable_mclk_switching = false; if (rdev->pm.dpm.ac_power) max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; else max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; if (rdev->pm.dpm.ac_power == false) { if (ps->high.mclk > max_limits->mclk) ps->high.mclk = max_limits->mclk; if (ps->high.sclk > max_limits->sclk) ps->high.sclk = max_limits->sclk; if (ps->high.vddc > max_limits->vddc) ps->high.vddc = max_limits->vddc; if (ps->high.vddci > max_limits->vddci) ps->high.vddci = max_limits->vddci; if (ps->medium.mclk > max_limits->mclk) ps->medium.mclk = max_limits->mclk; if (ps->medium.sclk > max_limits->sclk) ps->medium.sclk = max_limits->sclk; if (ps->medium.vddc > max_limits->vddc) ps->medium.vddc = max_limits->vddc; if (ps->medium.vddci > max_limits->vddci) ps->medium.vddci = 
max_limits->vddci; if (ps->low.mclk > max_limits->mclk) ps->low.mclk = max_limits->mclk; if (ps->low.sclk > max_limits->sclk) ps->low.sclk = max_limits->sclk; if (ps->low.vddc > max_limits->vddc) ps->low.vddc = max_limits->vddc; if (ps->low.vddci > max_limits->vddci) ps->low.vddci = max_limits->vddci; } /* limit clocks to max supported clocks based on voltage dependency tables */ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, &max_sclk_vddc); btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, &max_mclk_vddci); btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, &max_mclk_vddc); if (max_sclk_vddc) { if (ps->low.sclk > max_sclk_vddc) ps->low.sclk = max_sclk_vddc; if (ps->medium.sclk > max_sclk_vddc) ps->medium.sclk = max_sclk_vddc; if (ps->high.sclk > max_sclk_vddc) ps->high.sclk = max_sclk_vddc; } if (max_mclk_vddci) { if (ps->low.mclk > max_mclk_vddci) ps->low.mclk = max_mclk_vddci; if (ps->medium.mclk > max_mclk_vddci) ps->medium.mclk = max_mclk_vddci; if (ps->high.mclk > max_mclk_vddci) ps->high.mclk = max_mclk_vddci; } if (max_mclk_vddc) { if (ps->low.mclk > max_mclk_vddc) ps->low.mclk = max_mclk_vddc; if (ps->medium.mclk > max_mclk_vddc) ps->medium.mclk = max_mclk_vddc; if (ps->high.mclk > max_mclk_vddc) ps->high.mclk = max_mclk_vddc; } /* XXX validate the min clocks required for display */ if (disable_mclk_switching) { sclk = ps->low.sclk; mclk = ps->high.mclk; vddc = ps->low.vddc; vddci = ps->high.vddci; } else { sclk = ps->low.sclk; mclk = ps->low.mclk; vddc = ps->low.vddc; vddci = ps->low.vddci; } /* adjusted low state */ ps->low.sclk = sclk; ps->low.mclk = mclk; ps->low.vddc = vddc; ps->low.vddci = vddci; btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk, &ps->low.sclk, &ps->low.mclk); /* adjusted medium, high states */ if (ps->medium.sclk < ps->low.sclk) ps->medium.sclk = ps->low.sclk; if 
(ps->medium.vddc < ps->low.vddc) ps->medium.vddc = ps->low.vddc; if (ps->high.sclk < ps->medium.sclk) ps->high.sclk = ps->medium.sclk; if (ps->high.vddc < ps->medium.vddc) ps->high.vddc = ps->medium.vddc; if (disable_mclk_switching) { mclk = ps->low.mclk; if (mclk < ps->medium.mclk) mclk = ps->medium.mclk; if (mclk < ps->high.mclk) mclk = ps->high.mclk; ps->low.mclk = mclk; ps->low.vddci = vddci; ps->medium.mclk = mclk; ps->medium.vddci = vddci; ps->high.mclk = mclk; ps->high.vddci = vddci; } else { if (ps->medium.mclk < ps->low.mclk) ps->medium.mclk = ps->low.mclk; if (ps->medium.vddci < ps->low.vddci) ps->medium.vddci = ps->low.vddci; if (ps->high.mclk < ps->medium.mclk) ps->high.mclk = ps->medium.mclk; if (ps->high.vddci < ps->medium.vddci) ps->high.vddci = ps->medium.vddci; } btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk, &ps->medium.sclk, &ps->medium.mclk); btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk, &ps->high.sclk, &ps->high.mclk); btc_adjust_clock_combinations(rdev, max_limits, &ps->low); btc_adjust_clock_combinations(rdev, max_limits, &ps->medium); btc_adjust_clock_combinations(rdev, max_limits, &ps->high); btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, ps->low.sclk, max_limits->vddc, &ps->low.vddc); btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, ps->low.mclk, max_limits->vddci, &ps->low.vddci); btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, ps->low.mclk, max_limits->vddc, &ps->low.vddc); btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk, rdev->clock.current_dispclk, max_limits->vddc, &ps->low.vddc); btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, ps->medium.sclk, max_limits->vddc, &ps->medium.vddc); btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, ps->medium.mclk, max_limits->vddci, 
&ps->medium.vddci); btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, ps->medium.mclk, max_limits->vddc, &ps->medium.vddc); btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk, rdev->clock.current_dispclk, max_limits->vddc, &ps->medium.vddc); btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, ps->high.sclk, max_limits->vddc, &ps->high.vddc); btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, ps->high.mclk, max_limits->vddci, &ps->high.vddci); btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, ps->high.mclk, max_limits->vddc, &ps->high.vddc); btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk, rdev->clock.current_dispclk, max_limits->vddc, &ps->high.vddc); btc_apply_voltage_delta_rules(rdev, max_limits->vddc, max_limits->vddci, &ps->low.vddc, &ps->low.vddci); btc_apply_voltage_delta_rules(rdev, max_limits->vddc, max_limits->vddci, &ps->medium.vddc, &ps->medium.vddci); btc_apply_voltage_delta_rules(rdev, max_limits->vddc, max_limits->vddci, &ps->high.vddc, &ps->high.vddci); if ((ps->high.vddc <= rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc) && (ps->medium.vddc <= rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc) && (ps->low.vddc <= rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)) ps->dc_compatible = true; else ps->dc_compatible = false; if (ps->low.vddc < rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2) ps->low.flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2; if (ps->medium.vddc < rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2) ps->medium.flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2; if (ps->high.vddc < rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2) ps->high.flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2; } static void btc_update_current_ps(struct radeon_device *rdev, struct radeon_ps *rps) { struct rv7xx_ps *new_ps = rv770_get_ps(rps); struct evergreen_power_info *eg_pi = 
evergreen_get_pi(rdev); eg_pi->current_rps = *rps; eg_pi->current_ps = *new_ps; eg_pi->current_rps.ps_priv = &eg_pi->current_ps; } static void btc_update_requested_ps(struct radeon_device *rdev, struct radeon_ps *rps) { struct rv7xx_ps *new_ps = rv770_get_ps(rps); struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); eg_pi->requested_rps = *rps; eg_pi->requested_ps = *new_ps; eg_pi->requested_rps.ps_priv = &eg_pi->requested_ps; } void btc_dpm_reset_asic(struct radeon_device *rdev) { rv770_restrict_performance_levels_before_switch(rdev); btc_disable_ulv(rdev); btc_set_boot_state_timing(rdev); rv770_set_boot_state(rdev); } int btc_dpm_pre_set_power_state(struct radeon_device *rdev) { struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps; struct radeon_ps *new_ps = &requested_ps; btc_update_requested_ps(rdev, new_ps); btc_apply_state_adjust_rules(rdev, &eg_pi->requested_rps); return 0; } int btc_dpm_set_power_state(struct radeon_device *rdev) { struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); struct radeon_ps *new_ps = &eg_pi->requested_rps; struct radeon_ps *old_ps = &eg_pi->current_rps; int ret; ret = btc_disable_ulv(rdev); btc_set_boot_state_timing(rdev); ret = rv770_restrict_performance_levels_before_switch(rdev); if (ret) { DRM_ERROR("rv770_restrict_performance_levels_before_switch failed\n"); return ret; } if (eg_pi->pcie_performance_request) cypress_notify_link_speed_change_before_state_change(rdev, new_ps, old_ps); rv770_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); ret = rv770_halt_smc(rdev); if (ret) { DRM_ERROR("rv770_halt_smc failed\n"); return ret; } btc_set_at_for_uvd(rdev, new_ps); if (eg_pi->smu_uvd_hs) btc_notify_uvd_to_smc(rdev, new_ps); ret = cypress_upload_sw_state(rdev, new_ps); if (ret) { DRM_ERROR("cypress_upload_sw_state failed\n"); return ret; } if (eg_pi->dynamic_ac_timing) { ret = cypress_upload_mc_reg_table(rdev, new_ps); if (ret) { 
DRM_ERROR("cypress_upload_mc_reg_table failed\n"); return ret; } } cypress_program_memory_timing_parameters(rdev, new_ps); ret = rv770_resume_smc(rdev); if (ret) { DRM_ERROR("rv770_resume_smc failed\n"); return ret; } ret = rv770_set_sw_state(rdev); if (ret) { DRM_ERROR("rv770_set_sw_state failed\n"); return ret; } rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); if (eg_pi->pcie_performance_request) cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps); ret = btc_set_power_state_conditionally_enable_ulv(rdev, new_ps); if (ret) { DRM_ERROR("btc_set_power_state_conditionally_enable_ulv failed\n"); return ret; } return 0; } void btc_dpm_post_set_power_state(struct radeon_device *rdev) { struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); struct radeon_ps *new_ps = &eg_pi->requested_rps; btc_update_current_ps(rdev, new_ps); } int btc_dpm_enable(struct radeon_device *rdev) { struct rv7xx_power_info *pi = rv770_get_pi(rdev); struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; int ret; if (pi->gfx_clock_gating) btc_cg_clock_gating_default(rdev); if (btc_dpm_enabled(rdev)) return -EINVAL; if (pi->mg_clock_gating) btc_mg_clock_gating_default(rdev); if (eg_pi->ls_clock_gating) btc_ls_clock_gating_default(rdev); if (pi->voltage_control) { rv770_enable_voltage_control(rdev, true); ret = cypress_construct_voltage_tables(rdev); if (ret) { DRM_ERROR("cypress_construct_voltage_tables failed\n"); return ret; } } if (pi->mvdd_control) { ret = cypress_get_mvdd_configuration(rdev); if (ret) { DRM_ERROR("cypress_get_mvdd_configuration failed\n"); return ret; } } if (eg_pi->dynamic_ac_timing) { ret = btc_initialize_mc_reg_table(rdev); if (ret) eg_pi->dynamic_ac_timing = false; } if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS) rv770_enable_backbias(rdev, true); if (pi->dynamic_ss) cypress_enable_spread_spectrum(rdev, true); if (pi->thermal_protection) 
rv770_enable_thermal_protection(rdev, true); rv770_setup_bsp(rdev); rv770_program_git(rdev); rv770_program_tp(rdev); rv770_program_tpp(rdev); rv770_program_sstp(rdev); rv770_program_engine_speed_parameters(rdev); cypress_enable_display_gap(rdev); rv770_program_vc(rdev); if (pi->dynamic_pcie_gen2) btc_enable_dynamic_pcie_gen2(rdev, true); ret = rv770_upload_firmware(rdev); if (ret) { DRM_ERROR("rv770_upload_firmware failed\n"); return ret; } ret = cypress_get_table_locations(rdev); if (ret) { DRM_ERROR("cypress_get_table_locations failed\n"); return ret; } ret = btc_init_smc_table(rdev, boot_ps); if (ret) return ret; if (eg_pi->dynamic_ac_timing) { ret = cypress_populate_mc_reg_table(rdev, boot_ps); if (ret) { DRM_ERROR("cypress_populate_mc_reg_table failed\n"); return ret; } } cypress_program_response_times(rdev); r7xx_start_smc(rdev); ret = cypress_notify_smc_display_change(rdev, false); if (ret) { DRM_ERROR("cypress_notify_smc_display_change failed\n"); return ret; } cypress_enable_sclk_control(rdev, true); if (eg_pi->memory_transition) cypress_enable_mclk_control(rdev, true); cypress_start_dpm(rdev); if (pi->gfx_clock_gating) btc_cg_clock_gating_enable(rdev, true); if (pi->mg_clock_gating) btc_mg_clock_gating_enable(rdev, true); if (eg_pi->ls_clock_gating) btc_ls_clock_gating_enable(rdev, true); if (rdev->irq.installed && r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { PPSMC_Result result; ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); if (ret) return ret; rdev->irq.dpm_thermal = true; radeon_irq_set(rdev); result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt); if (result != PPSMC_Result_OK) DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); } rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); btc_init_stutter_mode(rdev); btc_update_current_ps(rdev, rdev->pm.dpm.boot_ps); return 0; }; void btc_dpm_disable(struct radeon_device *rdev) { struct 
rv7xx_power_info *pi = rv770_get_pi(rdev); struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); if (!btc_dpm_enabled(rdev)) return; rv770_clear_vc(rdev); if (pi->thermal_protection) rv770_enable_thermal_protection(rdev, false); if (pi->dynamic_pcie_gen2) btc_enable_dynamic_pcie_gen2(rdev, false); if (rdev->irq.installed && r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { rdev->irq.dpm_thermal = false; radeon_irq_set(rdev); } if (pi->gfx_clock_gating) btc_cg_clock_gating_enable(rdev, false); if (pi->mg_clock_gating) btc_mg_clock_gating_enable(rdev, false); if (eg_pi->ls_clock_gating) btc_ls_clock_gating_enable(rdev, false); rv770_stop_dpm(rdev); btc_reset_to_default(rdev); btc_stop_smc(rdev); cypress_enable_spread_spectrum(rdev, false); btc_update_current_ps(rdev, rdev->pm.dpm.boot_ps); } void btc_dpm_setup_asic(struct radeon_device *rdev) { struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); rv770_get_memory_type(rdev); rv740_read_clock_registers(rdev); btc_read_arb_registers(rdev); rv770_read_voltage_smio_registers(rdev); if (eg_pi->pcie_performance_request) cypress_advertise_gen2_capability(rdev); rv770_get_pcie_gen2_status(rdev); rv770_enable_acpi_pm(rdev); } int btc_dpm_init(struct radeon_device *rdev) { struct rv7xx_power_info *pi; struct evergreen_power_info *eg_pi; struct atom_clock_dividers dividers; int ret; eg_pi = kzalloc(sizeof(struct evergreen_power_info), GFP_KERNEL); if (eg_pi == NULL) return -ENOMEM; rdev->pm.dpm.priv = eg_pi; pi = &eg_pi->rv7xx; rv770_get_max_vddc(rdev); eg_pi->ulv.supported = false; pi->acpi_vddc = 0; eg_pi->acpi_vddci = 0; pi->min_vddc_in_table = 0; pi->max_vddc_in_table = 0; ret = rv7xx_parse_power_table(rdev); if (ret) return ret; ret = r600_parse_extended_power_table(rdev); if (ret) return ret; rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL); if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { 
r600_free_extended_power_table(rdev); return -ENOMEM; } rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4; rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0; rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0; rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000; rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 800; rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000; rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 800; rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000; rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 800; if (rdev->pm.dpm.voltage_response_time == 0) rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT; if (rdev->pm.dpm.backbias_response_time == 0) rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT; ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, 0, false, &dividers); if (ret) pi->ref_div = dividers.ref_div + 1; else pi->ref_div = R600_REFERENCEDIVIDER_DFLT; pi->mclk_strobe_mode_threshold = 40000; pi->mclk_edc_enable_threshold = 40000; eg_pi->mclk_edc_wr_enable_threshold = 40000; pi->rlp = RV770_RLP_DFLT; pi->rmp = RV770_RMP_DFLT; pi->lhp = RV770_LHP_DFLT; pi->lmp = RV770_LMP_DFLT; eg_pi->ats[0].rlp = RV770_RLP_DFLT; eg_pi->ats[0].rmp = RV770_RMP_DFLT; eg_pi->ats[0].lhp = RV770_LHP_DFLT; eg_pi->ats[0].lmp = RV770_LMP_DFLT; eg_pi->ats[1].rlp = BTC_RLP_UVD_DFLT; eg_pi->ats[1].rmp = BTC_RMP_UVD_DFLT; eg_pi->ats[1].lhp = BTC_LHP_UVD_DFLT; eg_pi->ats[1].lmp = BTC_LMP_UVD_DFLT; eg_pi->smu_uvd_hs = true; pi->voltage_control = radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0); pi->mvdd_control = radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0); eg_pi->vddci_control = radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); rv770_get_engine_memory_ss(rdev); pi->asi = RV770_ASI_DFLT; pi->pasi = 
CYPRESS_HASI_DFLT; pi->vrc = CYPRESS_VRC_DFLT; pi->power_gating = false; pi->gfx_clock_gating = true; pi->mg_clock_gating = true; pi->mgcgtssm = true; eg_pi->ls_clock_gating = false; eg_pi->sclk_deep_sleep = false; pi->dynamic_pcie_gen2 = true; if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) pi->thermal_protection = true; else pi->thermal_protection = false; pi->display_gap = true; if (rdev->flags & RADEON_IS_MOBILITY) pi->dcodt = true; else pi->dcodt = false; pi->ulps = true; eg_pi->dynamic_ac_timing = true; eg_pi->abm = true; eg_pi->mcls = true; eg_pi->light_sleep = true; eg_pi->memory_transition = true; #if defined(CONFIG_ACPI) eg_pi->pcie_performance_request = radeon_acpi_is_pcie_performance_request_supported(rdev); #else eg_pi->pcie_performance_request = false; #endif if (rdev->family == CHIP_BARTS) eg_pi->dll_default_on = true; else eg_pi->dll_default_on = false; eg_pi->sclk_deep_sleep = false; if (ASIC_IS_LOMBOK(rdev)) pi->mclk_stutter_mode_threshold = 30000; else pi->mclk_stutter_mode_threshold = 0; pi->sram_end = SMC_RAM_END; rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4; rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200; rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2 = 900; rdev->pm.dpm.dyn_state.valid_sclk_values.count = ARRAY_SIZE(btc_valid_sclk); rdev->pm.dpm.dyn_state.valid_sclk_values.values = btc_valid_sclk; rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0; rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL; if (rdev->family == CHIP_TURKS) rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000; else rdev->pm.dpm.dyn_state.sclk_mclk_delta = 10000; /* make sure dc limits are valid */ if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) || (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0)) rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; return 0; } void btc_dpm_fini(struct radeon_device *rdev) { int i; for (i = 0; i < rdev->pm.dpm.num_ps; i++) { kfree(rdev->pm.dpm.ps[i].ps_priv); } 
kfree(rdev->pm.dpm.ps); kfree(rdev->pm.dpm.priv); kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries); r600_free_extended_power_table(rdev); } u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low) { struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); struct rv7xx_ps *requested_state = rv770_get_ps(&eg_pi->requested_rps); if (low) return requested_state->low.sclk; else return requested_state->high.sclk; } u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low) { struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); struct rv7xx_ps *requested_state = rv770_get_ps(&eg_pi->requested_rps); if (low) return requested_state->low.mclk; else return requested_state->high.mclk; }
gpl-2.0
jcadduono/android_kernel_lge_msm8996
drivers/gpu/drm/radeon/radeon_uvd.c
162
24295
/* * Copyright 2011 Advanced Micro Devices, Inc. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. 
* */ /* * Authors: * Christian König <deathsimple@vodafone.de> */ #include <linux/firmware.h> #include <linux/module.h> #include <drm/drmP.h> #include <drm/drm.h> #include "radeon.h" #include "r600d.h" /* 1 second timeout */ #define UVD_IDLE_TIMEOUT_MS 1000 /* Firmware Names */ #define FIRMWARE_R600 "radeon/R600_uvd.bin" #define FIRMWARE_RS780 "radeon/RS780_uvd.bin" #define FIRMWARE_RV770 "radeon/RV770_uvd.bin" #define FIRMWARE_RV710 "radeon/RV710_uvd.bin" #define FIRMWARE_CYPRESS "radeon/CYPRESS_uvd.bin" #define FIRMWARE_SUMO "radeon/SUMO_uvd.bin" #define FIRMWARE_TAHITI "radeon/TAHITI_uvd.bin" #define FIRMWARE_BONAIRE "radeon/BONAIRE_uvd.bin" MODULE_FIRMWARE(FIRMWARE_R600); MODULE_FIRMWARE(FIRMWARE_RS780); MODULE_FIRMWARE(FIRMWARE_RV770); MODULE_FIRMWARE(FIRMWARE_RV710); MODULE_FIRMWARE(FIRMWARE_CYPRESS); MODULE_FIRMWARE(FIRMWARE_SUMO); MODULE_FIRMWARE(FIRMWARE_TAHITI); MODULE_FIRMWARE(FIRMWARE_BONAIRE); static void radeon_uvd_idle_work_handler(struct work_struct *work); int radeon_uvd_init(struct radeon_device *rdev) { unsigned long bo_size; const char *fw_name; int i, r; INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler); switch (rdev->family) { case CHIP_RV610: case CHIP_RV630: case CHIP_RV670: case CHIP_RV620: case CHIP_RV635: fw_name = FIRMWARE_R600; break; case CHIP_RS780: case CHIP_RS880: fw_name = FIRMWARE_RS780; break; case CHIP_RV770: fw_name = FIRMWARE_RV770; break; case CHIP_RV710: case CHIP_RV730: case CHIP_RV740: fw_name = FIRMWARE_RV710; break; case CHIP_CYPRESS: case CHIP_HEMLOCK: case CHIP_JUNIPER: case CHIP_REDWOOD: case CHIP_CEDAR: fw_name = FIRMWARE_CYPRESS; break; case CHIP_SUMO: case CHIP_SUMO2: case CHIP_PALM: case CHIP_CAYMAN: case CHIP_BARTS: case CHIP_TURKS: case CHIP_CAICOS: fw_name = FIRMWARE_SUMO; break; case CHIP_TAHITI: case CHIP_VERDE: case CHIP_PITCAIRN: case CHIP_ARUBA: case CHIP_OLAND: fw_name = FIRMWARE_TAHITI; break; case CHIP_BONAIRE: case CHIP_KABINI: case CHIP_KAVERI: case CHIP_HAWAII: case CHIP_MULLINS: 
fw_name = FIRMWARE_BONAIRE; break; default: return -EINVAL; } r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev); if (r) { dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n", fw_name); return r; } bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) + RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE + RADEON_GPU_PAGE_SIZE; r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, &rdev->uvd.vcpu_bo); if (r) { dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r); return r; } r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); if (r) { radeon_bo_unref(&rdev->uvd.vcpu_bo); dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r); return r; } r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, &rdev->uvd.gpu_addr); if (r) { radeon_bo_unreserve(rdev->uvd.vcpu_bo); radeon_bo_unref(&rdev->uvd.vcpu_bo); dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r); return r; } r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr); if (r) { dev_err(rdev->dev, "(%d) UVD map failed\n", r); return r; } radeon_bo_unreserve(rdev->uvd.vcpu_bo); for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { atomic_set(&rdev->uvd.handles[i], 0); rdev->uvd.filp[i] = NULL; rdev->uvd.img_size[i] = 0; } return 0; } void radeon_uvd_fini(struct radeon_device *rdev) { int r; if (rdev->uvd.vcpu_bo == NULL) return; r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); if (!r) { radeon_bo_kunmap(rdev->uvd.vcpu_bo); radeon_bo_unpin(rdev->uvd.vcpu_bo); radeon_bo_unreserve(rdev->uvd.vcpu_bo); } radeon_bo_unref(&rdev->uvd.vcpu_bo); radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]); release_firmware(rdev->uvd_fw); } int radeon_uvd_suspend(struct radeon_device *rdev) { unsigned size; void *ptr; int i; if (rdev->uvd.vcpu_bo == NULL) return 0; for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) if (atomic_read(&rdev->uvd.handles[i])) break; if (i == RADEON_MAX_UVD_HANDLES) return 0; size = radeon_bo_size(rdev->uvd.vcpu_bo); size -= rdev->uvd_fw->size; ptr = 
rdev->uvd.cpu_addr; ptr += rdev->uvd_fw->size; rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); memcpy(rdev->uvd.saved_bo, ptr, size); return 0; } int radeon_uvd_resume(struct radeon_device *rdev) { unsigned size; void *ptr; if (rdev->uvd.vcpu_bo == NULL) return -EINVAL; memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size); size = radeon_bo_size(rdev->uvd.vcpu_bo); size -= rdev->uvd_fw->size; ptr = rdev->uvd.cpu_addr; ptr += rdev->uvd_fw->size; if (rdev->uvd.saved_bo != NULL) { memcpy(ptr, rdev->uvd.saved_bo, size); kfree(rdev->uvd.saved_bo); rdev->uvd.saved_bo = NULL; } else memset(ptr, 0, size); return 0; } void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo, uint32_t allowed_domains) { int i; for (i = 0; i < rbo->placement.num_placement; ++i) { rbo->placements[i].fpfn = 0 >> PAGE_SHIFT; rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT; } /* If it must be in VRAM it must be in the first segment as well */ if (allowed_domains == RADEON_GEM_DOMAIN_VRAM) return; /* abort if we already have more than one placement */ if (rbo->placement.num_placement > 1) return; /* add another 256MB segment */ rbo->placements[1] = rbo->placements[0]; rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT; rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT; rbo->placement.num_placement++; rbo->placement.num_busy_placement++; } void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp) { int i, r; for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { uint32_t handle = atomic_read(&rdev->uvd.handles[i]); if (handle != 0 && rdev->uvd.filp[i] == filp) { struct radeon_fence *fence; radeon_uvd_note_usage(rdev); r = radeon_uvd_get_destroy_msg(rdev, R600_RING_TYPE_UVD_INDEX, handle, &fence); if (r) { DRM_ERROR("Error destroying UVD (%d)!\n", r); continue; } radeon_fence_wait(fence, false); radeon_fence_unref(&fence); rdev->uvd.filp[i] = NULL; atomic_set(&rdev->uvd.handles[i], 0); } } } static int radeon_uvd_cs_msg_decode(uint32_t 
*msg, unsigned buf_sizes[]) { unsigned stream_type = msg[4]; unsigned width = msg[6]; unsigned height = msg[7]; unsigned dpb_size = msg[9]; unsigned pitch = msg[28]; unsigned width_in_mb = width / 16; unsigned height_in_mb = ALIGN(height / 16, 2); unsigned image_size, tmp, min_dpb_size; image_size = width * height; image_size += image_size / 2; image_size = ALIGN(image_size, 1024); switch (stream_type) { case 0: /* H264 */ /* reference picture buffer */ min_dpb_size = image_size * 17; /* macroblock context buffer */ min_dpb_size += width_in_mb * height_in_mb * 17 * 192; /* IT surface buffer */ min_dpb_size += width_in_mb * height_in_mb * 32; break; case 1: /* VC1 */ /* reference picture buffer */ min_dpb_size = image_size * 3; /* CONTEXT_BUFFER */ min_dpb_size += width_in_mb * height_in_mb * 128; /* IT surface buffer */ min_dpb_size += width_in_mb * 64; /* DB surface buffer */ min_dpb_size += width_in_mb * 128; /* BP */ tmp = max(width_in_mb, height_in_mb); min_dpb_size += ALIGN(tmp * 7 * 16, 64); break; case 3: /* MPEG2 */ /* reference picture buffer */ min_dpb_size = image_size * 3; break; case 4: /* MPEG4 */ /* reference picture buffer */ min_dpb_size = image_size * 3; /* CM */ min_dpb_size += width_in_mb * height_in_mb * 64; /* IT surface buffer */ min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64); break; default: DRM_ERROR("UVD codec not handled %d!\n", stream_type); return -EINVAL; } if (width > pitch) { DRM_ERROR("Invalid UVD decoding target pitch!\n"); return -EINVAL; } if (dpb_size < min_dpb_size) { DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n", dpb_size, min_dpb_size); return -EINVAL; } buf_sizes[0x1] = dpb_size; buf_sizes[0x2] = image_size; return 0; } static int radeon_uvd_validate_codec(struct radeon_cs_parser *p, unsigned stream_type) { switch (stream_type) { case 0: /* H264 */ case 1: /* VC1 */ /* always supported */ return 0; case 3: /* MPEG2 */ case 4: /* MPEG4 */ /* only since UVD 3 */ if (p->rdev->family >= CHIP_PALM) return 
0; /* fall through */ default: DRM_ERROR("UVD codec not supported by hardware %d!\n", stream_type); return -EINVAL; } } static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, unsigned offset, unsigned buf_sizes[]) { int32_t *msg, msg_type, handle; unsigned img_size = 0; struct fence *f; void *ptr; int i, r; if (offset & 0x3F) { DRM_ERROR("UVD messages must be 64 byte aligned!\n"); return -EINVAL; } f = reservation_object_get_excl(bo->tbo.resv); if (f) { r = radeon_fence_wait((struct radeon_fence *)f, false); if (r) { DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); return r; } } r = radeon_bo_kmap(bo, &ptr); if (r) { DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); return r; } msg = ptr + offset; msg_type = msg[1]; handle = msg[2]; if (handle == 0) { DRM_ERROR("Invalid UVD handle!\n"); return -EINVAL; } switch (msg_type) { case 0: /* it's a create msg, calc image size (width * height) */ img_size = msg[7] * msg[8]; r = radeon_uvd_validate_codec(p, msg[4]); radeon_bo_kunmap(bo); if (r) return r; /* try to alloc a new handle */ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { if (atomic_read(&p->rdev->uvd.handles[i]) == handle) { DRM_ERROR("Handle 0x%x already in use!\n", handle); return -EINVAL; } if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) { p->rdev->uvd.filp[i] = p->filp; p->rdev->uvd.img_size[i] = img_size; return 0; } } DRM_ERROR("No more free UVD handles!\n"); return -EINVAL; case 1: /* it's a decode msg, validate codec and calc buffer sizes */ r = radeon_uvd_validate_codec(p, msg[4]); if (!r) r = radeon_uvd_cs_msg_decode(msg, buf_sizes); radeon_bo_kunmap(bo); if (r) return r; /* validate the handle */ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { if (atomic_read(&p->rdev->uvd.handles[i]) == handle) { if (p->rdev->uvd.filp[i] != p->filp) { DRM_ERROR("UVD handle collision detected!\n"); return -EINVAL; } return 0; } } DRM_ERROR("Invalid UVD handle 0x%x!\n", handle); return -ENOENT; case 2: /* it's a destroy msg, 
free the handle */ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0); radeon_bo_kunmap(bo); return 0; default: DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); return -EINVAL; } BUG(); return -EINVAL; } static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, int data0, int data1, unsigned buf_sizes[], bool *has_msg_cmd) { struct radeon_cs_chunk *relocs_chunk; struct radeon_cs_reloc *reloc; unsigned idx, cmd, offset; uint64_t start, end; int r; relocs_chunk = &p->chunks[p->chunk_relocs_idx]; offset = radeon_get_ib_value(p, data0); idx = radeon_get_ib_value(p, data1); if (idx >= relocs_chunk->length_dw) { DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", idx, relocs_chunk->length_dw); return -EINVAL; } reloc = p->relocs_ptr[(idx / 4)]; start = reloc->gpu_offset; end = start + radeon_bo_size(reloc->robj); start += offset; p->ib.ptr[data0] = start & 0xFFFFFFFF; p->ib.ptr[data1] = start >> 32; cmd = radeon_get_ib_value(p, p->idx) >> 1; if (cmd < 0x4) { if (end <= start) { DRM_ERROR("invalid reloc offset %X!\n", offset); return -EINVAL; } if ((end - start) < buf_sizes[cmd]) { DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, (unsigned)(end - start), buf_sizes[cmd]); return -EINVAL; } } else if (cmd != 0x100) { DRM_ERROR("invalid UVD command %X!\n", cmd); return -EINVAL; } if ((start >> 28) != ((end - 1) >> 28)) { DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n", start, end); return -EINVAL; } /* TODO: is this still necessary on NI+ ? 
*/ if ((cmd == 0 || cmd == 0x3) && (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", start, end); return -EINVAL; } if (cmd == 0) { if (*has_msg_cmd) { DRM_ERROR("More than one message in a UVD-IB!\n"); return -EINVAL; } *has_msg_cmd = true; r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes); if (r) return r; } else if (!*has_msg_cmd) { DRM_ERROR("Message needed before other commands are send!\n"); return -EINVAL; } return 0; } static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, int *data0, int *data1, unsigned buf_sizes[], bool *has_msg_cmd) { int i, r; p->idx++; for (i = 0; i <= pkt->count; ++i) { switch (pkt->reg + i*4) { case UVD_GPCOM_VCPU_DATA0: *data0 = p->idx; break; case UVD_GPCOM_VCPU_DATA1: *data1 = p->idx; break; case UVD_GPCOM_VCPU_CMD: r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes, has_msg_cmd); if (r) return r; break; case UVD_ENGINE_CNTL: break; default: DRM_ERROR("Invalid reg 0x%X!\n", pkt->reg + i*4); return -EINVAL; } p->idx++; } return 0; } int radeon_uvd_cs_parse(struct radeon_cs_parser *p) { struct radeon_cs_packet pkt; int r, data0 = 0, data1 = 0; /* does the IB has a msg command */ bool has_msg_cmd = false; /* minimum buffer sizes */ unsigned buf_sizes[] = { [0x00000000] = 2048, [0x00000001] = 32 * 1024 * 1024, [0x00000002] = 2048 * 1152 * 3, [0x00000003] = 2048, }; if (p->chunks[p->chunk_ib_idx].length_dw % 16) { DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n", p->chunks[p->chunk_ib_idx].length_dw); return -EINVAL; } if (p->chunk_relocs_idx == -1) { DRM_ERROR("No relocation chunk !\n"); return -EINVAL; } do { r = radeon_cs_packet_parse(p, &pkt, p->idx); if (r) return r; switch (pkt.type) { case RADEON_PACKET_TYPE0: r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1, buf_sizes, &has_msg_cmd); if (r) return r; break; case RADEON_PACKET_TYPE2: p->idx += pkt.count + 2; break; default: DRM_ERROR("Unknown packet type %d !\n", pkt.type); 
return -EINVAL; } } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); if (!has_msg_cmd) { DRM_ERROR("UVD-IBs need a msg command!\n"); return -EINVAL; } return 0; } static int radeon_uvd_send_msg(struct radeon_device *rdev, int ring, uint64_t addr, struct radeon_fence **fence) { struct radeon_ib ib; int i, r; r = radeon_ib_get(rdev, ring, &ib, NULL, 64); if (r) return r; ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0); ib.ptr[1] = addr; ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0); ib.ptr[3] = addr >> 32; ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0); ib.ptr[5] = 0; for (i = 6; i < 16; ++i) ib.ptr[i] = PACKET2(0); ib.length_dw = 16; r = radeon_ib_schedule(rdev, &ib, NULL, false); if (fence) *fence = radeon_fence_ref(ib.fence); radeon_ib_free(rdev, &ib); return r; } /* multiple fence commands without any stream commands in between can crash the vcpu so just try to emmit a dummy create/destroy msg to avoid this */ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring, uint32_t handle, struct radeon_fence **fence) { /* we use the last page of the vcpu bo for the UVD message */ uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) - RADEON_GPU_PAGE_SIZE; uint32_t *msg = rdev->uvd.cpu_addr + offs; uint64_t addr = rdev->uvd.gpu_addr + offs; int r, i; r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true); if (r) return r; /* stitch together an UVD create msg */ msg[0] = cpu_to_le32(0x00000de4); msg[1] = cpu_to_le32(0x00000000); msg[2] = cpu_to_le32(handle); msg[3] = cpu_to_le32(0x00000000); msg[4] = cpu_to_le32(0x00000000); msg[5] = cpu_to_le32(0x00000000); msg[6] = cpu_to_le32(0x00000000); msg[7] = cpu_to_le32(0x00000780); msg[8] = cpu_to_le32(0x00000440); msg[9] = cpu_to_le32(0x00000000); msg[10] = cpu_to_le32(0x01b37000); for (i = 11; i < 1024; ++i) msg[i] = cpu_to_le32(0x0); r = radeon_uvd_send_msg(rdev, ring, addr, fence); radeon_bo_unreserve(rdev->uvd.vcpu_bo); return r; } int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring, uint32_t handle, struct 
radeon_fence **fence) { /* we use the last page of the vcpu bo for the UVD message */ uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) - RADEON_GPU_PAGE_SIZE; uint32_t *msg = rdev->uvd.cpu_addr + offs; uint64_t addr = rdev->uvd.gpu_addr + offs; int r, i; r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true); if (r) return r; /* stitch together an UVD destroy msg */ msg[0] = cpu_to_le32(0x00000de4); msg[1] = cpu_to_le32(0x00000002); msg[2] = cpu_to_le32(handle); msg[3] = cpu_to_le32(0x00000000); for (i = 4; i < 1024; ++i) msg[i] = cpu_to_le32(0x0); r = radeon_uvd_send_msg(rdev, ring, addr, fence); radeon_bo_unreserve(rdev->uvd.vcpu_bo); return r; } /** * radeon_uvd_count_handles - count number of open streams * * @rdev: radeon_device pointer * @sd: number of SD streams * @hd: number of HD streams * * Count the number of open SD/HD streams as a hint for power mangement */ static void radeon_uvd_count_handles(struct radeon_device *rdev, unsigned *sd, unsigned *hd) { unsigned i; *sd = 0; *hd = 0; for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { if (!atomic_read(&rdev->uvd.handles[i])) continue; if (rdev->uvd.img_size[i] >= 720*576) ++(*hd); else ++(*sd); } } static void radeon_uvd_idle_work_handler(struct work_struct *work) { struct radeon_device *rdev = container_of(work, struct radeon_device, uvd.idle_work.work); if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) { if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { radeon_uvd_count_handles(rdev, &rdev->pm.dpm.sd, &rdev->pm.dpm.hd); radeon_dpm_enable_uvd(rdev, false); } else { radeon_set_uvd_clocks(rdev, 0, 0); } } else { schedule_delayed_work(&rdev->uvd.idle_work, msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS)); } } void radeon_uvd_note_usage(struct radeon_device *rdev) { bool streams_changed = false; bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work); set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work, msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS)); if ((rdev->pm.pm_method == 
PM_METHOD_DPM) && rdev->pm.dpm_enabled) { unsigned hd = 0, sd = 0; radeon_uvd_count_handles(rdev, &sd, &hd); if ((rdev->pm.dpm.sd != sd) || (rdev->pm.dpm.hd != hd)) { rdev->pm.dpm.sd = sd; rdev->pm.dpm.hd = hd; /* disable this for now */ /*streams_changed = true;*/ } } if (set_clocks || streams_changed) { if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { radeon_dpm_enable_uvd(rdev, true); } else { radeon_set_uvd_clocks(rdev, 53300, 40000); } } } static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq, unsigned target_freq, unsigned pd_min, unsigned pd_even) { unsigned post_div = vco_freq / target_freq; /* adjust to post divider minimum value */ if (post_div < pd_min) post_div = pd_min; /* we alway need a frequency less than or equal the target */ if ((vco_freq / post_div) > target_freq) post_div += 1; /* post dividers above a certain value must be even */ if (post_div > pd_even && post_div % 2) post_div += 1; return post_div; } /** * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers * * @rdev: radeon_device pointer * @vclk: wanted VCLK * @dclk: wanted DCLK * @vco_min: minimum VCO frequency * @vco_max: maximum VCO frequency * @fb_factor: factor to multiply vco freq with * @fb_mask: limit and bitmask for feedback divider * @pd_min: post divider minimum * @pd_max: post divider maximum * @pd_even: post divider must be even above this value * @optimal_fb_div: resulting feedback divider * @optimal_vclk_div: resulting vclk post divider * @optimal_dclk_div: resulting dclk post divider * * Calculate dividers for UVDs UPLL (R6xx-SI, except APUs). * Returns zero on success -EINVAL on error. 
*/ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev, unsigned vclk, unsigned dclk, unsigned vco_min, unsigned vco_max, unsigned fb_factor, unsigned fb_mask, unsigned pd_min, unsigned pd_max, unsigned pd_even, unsigned *optimal_fb_div, unsigned *optimal_vclk_div, unsigned *optimal_dclk_div) { unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq; /* start off with something large */ unsigned optimal_score = ~0; /* loop through vco from low to high */ vco_min = max(max(vco_min, vclk), dclk); for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) { uint64_t fb_div = (uint64_t)vco_freq * fb_factor; unsigned vclk_div, dclk_div, score; do_div(fb_div, ref_freq); /* fb div out of range ? */ if (fb_div > fb_mask) break; /* it can oly get worse */ fb_div &= fb_mask; /* calc vclk divider with current vco freq */ vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk, pd_min, pd_even); if (vclk_div > pd_max) break; /* vco is too big, it has to stop */ /* calc dclk divider with current vco freq */ dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk, pd_min, pd_even); if (vclk_div > pd_max) break; /* vco is too big, it has to stop */ /* calc score with current vco freq */ score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div); /* determine if this vco setting is better than current optimal settings */ if (score < optimal_score) { *optimal_fb_div = fb_div; *optimal_vclk_div = vclk_div; *optimal_dclk_div = dclk_div; optimal_score = score; if (optimal_score == 0) break; /* it can't get better than this */ } } /* did we found a valid setup ? 
*/ if (optimal_score == ~0) return -EINVAL; return 0; } int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev, unsigned cg_upll_func_cntl) { unsigned i; /* make sure UPLL_CTLREQ is deasserted */ WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK); mdelay(10); /* assert UPLL_CTLREQ */ WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK); /* wait for CTLACK and CTLACK2 to get asserted */ for (i = 0; i < 100; ++i) { uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK; if ((RREG32(cg_upll_func_cntl) & mask) == mask) break; mdelay(10); } /* deassert UPLL_CTLREQ */ WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK); if (i == 100) { DRM_ERROR("Timeout setting UVD clocks!\n"); return -ETIMEDOUT; } return 0; }
gpl-2.0
gromikakao/e980-zeKrnl
drivers/video/msm/mipi_truly_tft540960_1_e_cmd_qhd_pt.c
418
2850
/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * Panel description for the Truly TFT540960-1-E qHD (540x960) panel in
 * MIPI DSI command mode.  This file only fills in msm_panel_info and
 * registers the panel device; the on/off command sequences live in the
 * companion mipi_truly_tft540960_1_e driver.
 */

#include "msm_fb.h"
#include "mipi_dsi.h"
#include "mipi_truly_tft540960_1_e.h"

static struct msm_panel_info pinfo;

/*
 * DSI PHY register settings for this SoC/panel combination.
 * NOTE(review): these are hardware calibration values -- do not change
 * without the PHY programming guide for this platform.
 */
static struct mipi_dsi_phy_ctrl dsi_cmd_mode_phy_db = {
	/* DSI Bit Clock at 500 MHz, 2 lane, RGB888 */
	/* regulator */
	{0x03, 0x01, 0x01, 0x00},
	/* timing */
	{0xb9, 0x8e, 0x1f, 0x00, 0x98, 0x9c, 0x22, 0x90,
	 0x18, 0x03, 0x04},
	/* phy ctrl */
	{0x7f, 0x00, 0x00, 0x00},
	/* strength */
	{0xbb, 0x02, 0x06, 0x00},
	/* pll control */
	{0x01, 0xec, 0x31, 0xd2, 0x00, 0x40, 0x37, 0x62,
	 0x01, 0x0f, 0x07, 0x05, 0x14, 0x03, 0x0, 0x0, 0x0,
	 0x20, 0x0, 0x02, 0x0},
};

/*
 * mipi_cmd_truly_qhd_pt_init() - describe and register the qHD command
 * mode panel.
 *
 * Returns 0 when another framebuffer client has already been detected
 * (nothing to do) or on successful registration; otherwise the error
 * from the device registration helper is returned.
 */
static int mipi_cmd_truly_qhd_pt_init(void)
{
	int ret;

	/* another panel was already detected for this target -- bail out */
	if (msm_fb_detect_client("mipi_cmd_truly_qhd"))
		return 0;

	/* qHD portrait resolution, 24bpp, primary display */
	pinfo.xres = 540;
	pinfo.yres = 960;
	pinfo.type = MIPI_CMD_PANEL;
	pinfo.pdest = DISPLAY_1;
	pinfo.wait_cycle = 0;
	pinfo.bpp = 24;

	/* porch and sync pulse timings */
	pinfo.lcdc.h_back_porch = 100;
	pinfo.lcdc.h_front_porch = 100;
	pinfo.lcdc.h_pulse_width = 8;
	pinfo.lcdc.v_back_porch = 20;
	pinfo.lcdc.v_front_porch = 20;
	pinfo.lcdc.v_pulse_width = 1;
	pinfo.lcdc.border_clr = 0;	/* blk */
	pinfo.lcdc.underflow_clr = 0xff;	/* blue */
	pinfo.lcdc.hsync_skew = 0;

	/* backlight range exposed to the framebuffer layer */
	pinfo.bl_max = 255;
	pinfo.bl_min = 1;
	pinfo.fb_num = 2;
	pinfo.clk_rate = 499000000;

	pinfo.lcd.vsync_enable = TRUE;
	pinfo.lcd.hw_vsync_mode = TRUE;
	pinfo.lcd.refx100 = 6100; /* adjust refx100 to prevent tearing */

	/* DSI link: command mode, RGB888, two data lanes */
	pinfo.mipi.mode = DSI_CMD_MODE;
	pinfo.mipi.dst_format = DSI_CMD_DST_FORMAT_RGB888;
	pinfo.mipi.vc = 0;
	pinfo.mipi.rgb_swap = DSI_RGB_SWAP_RGB;
	pinfo.mipi.data_lane0 = TRUE;
	pinfo.mipi.data_lane1 = TRUE;
	pinfo.mipi.t_clk_post = 0x20;
	pinfo.mipi.t_clk_pre = 0x2F;
	pinfo.mipi.stream = 0;	/* dma_p */
	pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_SW_TE;
	pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW;
	pinfo.mipi.te_sel = 1; /* TE from vsync gpio */
	pinfo.mipi.interleave_max = 1;
	pinfo.mipi.insert_dcs_cmd = TRUE;
	pinfo.mipi.wr_mem_continue = 0x3c;
	pinfo.mipi.wr_mem_start = 0x2c;
	pinfo.mipi.dsi_phy_db = &dsi_cmd_mode_phy_db;
	pinfo.mipi.tx_eot_append = 0x01;
	pinfo.mipi.rx_eot_ignore = 0x0;
	pinfo.mipi.dlane_swap = 0x01;

	/*
	 * NOTE(review): the panel is qHD (540x960) yet is registered as
	 * MIPI_DSI_PANEL_WVGA_PT -- confirm against the panel enum in
	 * mipi_dsi.h whether MIPI_DSI_PANEL_QHD_PT was intended.
	 */
	ret = mipi_truly_tft540960_1_e_device_register(&pinfo, MIPI_DSI_PRIM,
						MIPI_DSI_PANEL_WVGA_PT);
	if (ret)
		pr_err("%s: failed to register device!\n", __func__);

	return ret;
}

module_init(mipi_cmd_truly_qhd_pt_init);
gpl-2.0
nics21212/android_kernel_samsung_msm8660-common
arch/arm/mach-omap2/common-board-devices.c
418
3738
/*
 * common-board-devices.c
 *
 * Copyright (C) 2011 CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

/*
 * Helpers shared by OMAP2+ board files: registering an ADS7846
 * touchscreen on an SPI bus, and locating/registering on-board NAND
 * behind the GPMC.
 */

#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>

#include <plat/mcspi.h>
#include <plat/nand.h>

#include "common-board-devices.h"

#if defined(CONFIG_TOUCHSCREEN_ADS7846) || \
	defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
/* McSPI controller settings used for the touchscreen chip select */
static struct omap2_mcspi_device_config ads7846_mcspi_config = {
	.turbo_mode	= 0,
	.single_channel	= 1,	/* 0: slave, 1: master */
};

/*
 * Default ADS7846 platform data; gpio_pendown is filled in by
 * omap_ads7846_init(), and a board may replace the whole structure.
 */
static struct ads7846_platform_data ads7846_config = {
	.x_max			= 0x0fff,
	.y_max			= 0x0fff,
	.x_plate_ohms		= 180,
	.pressure_max		= 255,
	.debounce_max		= 10,
	.debounce_tol		= 3,
	.debounce_rep		= 1,
	.gpio_pendown		= -EINVAL,	/* set at init time */
	.keep_vref_on		= 1,
};

/* bus_num and irq are filled in by omap_ads7846_init() */
static struct spi_board_info ads7846_spi_board_info __initdata = {
	.modalias		= "ads7846",
	.bus_num		= -EINVAL,
	.chip_select		= 0,
	.max_speed_hz		= 1500000,
	.controller_data	= &ads7846_mcspi_config,
	.irq			= -EINVAL,
	.platform_data		= &ads7846_config,
};

/*
 * omap_ads7846_init - register an ADS7846 touchscreen on an SPI bus
 * @bus_num:	SPI bus number the touchscreen sits on
 * @gpio_pendown: GPIO used as the pen-down line
 * @gpio_debounce: debounce value for @gpio_pendown; 0 leaves it unset
 * @board_pdata: optional board-specific platform data; when non-NULL it
 *		replaces the default ads7846_config
 *
 * If the board supplies its own get_pendown_state() hook the pendown
 * GPIO is requested and exported here; otherwise the ads7846 driver
 * requests it itself via the gpio_pendown field.  On a failed GPIO
 * request the function logs an error and registers nothing.
 */
void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
			      struct ads7846_platform_data *board_pdata)
{
	struct spi_board_info *spi_bi = &ads7846_spi_board_info;
	int err;

	if (board_pdata && board_pdata->get_pendown_state) {
		err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown");
		if (err) {
			pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err);
			return;
		}
		/* expose the pendown GPIO in sysfs (read-only) */
		gpio_export(gpio_pendown, 0);

		if (gpio_debounce)
			gpio_set_debounce(gpio_pendown, gpio_debounce);
	}

	ads7846_config.gpio_pendown = gpio_pendown;

	spi_bi->bus_num	= bus_num;
	spi_bi->irq	= OMAP_GPIO_IRQ(gpio_pendown);

	if (board_pdata)
		spi_bi->platform_data = board_pdata;

	spi_register_board_info(&ads7846_spi_board_info, 1);
}
#else
/* stub used when the ADS7846 driver is not configured */
void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
			      struct ads7846_platform_data *board_pdata)
{
}
#endif

#if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE)
static struct omap_nand_platform_data nand_data = {
	.dma_channel = -1,	/* disable DMA in OMAP NAND driver */
};

/*
 * omap_nand_flash_init - find NAND on the GPMC and register it
 * @options:	NAND option flags handed to the OMAP NAND driver
 * @parts:	MTD partition table for the device
 * @nr_parts:	number of entries in @parts
 *
 * Scans every GPMC chip select and uses the first one whose CONFIG1
 * register matches the NAND device pattern; logs and returns when none
 * is found.
 */
void __init omap_nand_flash_init(int options, struct mtd_partition *parts,
				 int nr_parts)
{
	u8 cs = 0;
	u8 nandcs = GPMC_CS_NUM + 1;	/* "not found" sentinel */

	/* find out the chip-select on which NAND exists */
	while (cs < GPMC_CS_NUM) {
		u32 ret = 0;
		ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);

		if ((ret & 0xC00) == 0x800) {
			printk(KERN_INFO "Found NAND on CS%d\n", cs);
			if (nandcs > GPMC_CS_NUM)
				nandcs = cs;	/* keep only the first hit */
		}
		cs++;
	}

	if (nandcs > GPMC_CS_NUM) {
		/* NOTE(review): stray trailing space inside the literal
		 * after "\n" -- left untouched, comments-only change */
		printk(KERN_INFO "NAND: Unable to find configuration "
				 "in GPMC\n ");
		return;
	}

	/* NOTE(review): always true here -- nandcs was assigned from a
	 * cs < GPMC_CS_NUM, so this guard is redundant but harmless */
	if (nandcs < GPMC_CS_NUM) {
		nand_data.cs = nandcs;
		nand_data.parts = parts;
		nand_data.nr_parts = nr_parts;
		nand_data.options = options;

		printk(KERN_INFO "Registering NAND on CS%d\n", nandcs);
		if (gpmc_nand_init(&nand_data) < 0)
			printk(KERN_ERR "Unable to register NAND device\n");
	}
}
#else
/* stub used when the OMAP2 NAND driver is not configured */
void __init omap_nand_flash_init(int options, struct mtd_partition *parts,
				 int nr_parts)
{
}
#endif
gpl-2.0
DreamCore/DreamCore
dep/g3dlite/source/GImage_bmp.cpp
674
20514
/** @file GImage_bmp.cpp @author Morgan McGuire, http://graphics.cs.williams.edu @created 2002-05-27 @edited 2006-05-10 */ #include "G3D/platform.h" #include "G3D/GImage.h" #include "G3D/BinaryInput.h" #include "G3D/BinaryOutput.h" #include "G3D/Log.h" namespace G3D { #ifndef G3D_WIN32 /** This is used by the Windows bitmap I/O. */ static const int BI_RGB = 0; #endif void GImage::encodeBMP( BinaryOutput& out) const { debugAssert(m_channels == 1 || m_channels == 3); out.setEndian(G3D_LITTLE_ENDIAN); uint8 red; uint8 green; uint8 blue; int pixelBufferSize = m_width * m_height * 3; int fileHeaderSize = 14; int infoHeaderSize = 40; int BMScanWidth; int BMPadding; // First write the BITMAPFILEHEADER // // WORD bfType; // DWORD bfSize; // WORD bfReserved1; // WORD bfReserved2; // DWORD bfOffBits; // Type out.writeUInt8('B'); out.writeUInt8('M'); // File size out.writeUInt32(fileHeaderSize + infoHeaderSize + pixelBufferSize); // Two reserved fields set to zero out.writeUInt16(0); out.writeUInt16(0); // The offset, in bytes, from the BITMAPFILEHEADER structure // to the bitmap bits. 
out.writeUInt32(infoHeaderSize + fileHeaderSize); // Now the BITMAPINFOHEADER // // DWORD biSize; // LONG biWidth; // LONG biHeight; // WORD biPlanes; // WORD biBitCount // DWORD biCompression; // DWORD biSizeImage; // LONG biXPelsPerMeter; // LONG biYPelsPerMeter; // DWORD biClrUsed; // DWORD biClrImportant; // Size of the info header out.writeUInt32(infoHeaderSize); // Width and height of the image out.writeUInt32(m_width); out.writeUInt32(m_height); // Planes ("must be set to 1") out.writeUInt16(1); // BitCount and CompressionType out.writeUInt16(24); out.writeUInt32(BI_RGB); // Image size ("may be zero for BI_RGB bitmaps") out.writeUInt32(0); // biXPelsPerMeter out.writeUInt32(0); // biYPelsPerMeter out.writeUInt32(0); // biClrUsed out.writeUInt32(0); // biClrImportant out.writeUInt32(0); BMScanWidth = m_width * 3; if (BMScanWidth & 3) { BMPadding = 4 - (BMScanWidth & 3); } else { BMPadding = 0; } int hStart = m_height - 1; int hEnd = -1; int hDir = -1; int dest; // Write the pixel data for (int h = hStart; h != hEnd; h += hDir) { dest = m_channels * h * m_width; for (int w = 0; w < m_width; ++w) { if (m_channels == 3) { red = m_byte[dest]; green = m_byte[dest + 1]; blue = m_byte[dest + 2]; } else { red = m_byte[dest]; green = m_byte[dest]; blue = m_byte[dest]; } out.writeUInt8(blue); out.writeUInt8(green); out.writeUInt8(red); dest += m_channels; } if (BMPadding > 0) { out.skip(BMPadding); } } } void GImage::decodeBMP( BinaryInput& input) { // The BMP decoding uses these flags. 
static const uint16 PICTURE_NONE = 0x0000; static const uint16 PICTURE_BITMAP = 0x1000; // Compression Flags static const uint16 PICTURE_UNCOMPRESSED = 0x0100; static const uint16 PICTURE_MONOCHROME = 0x0001; static const uint16 PICTURE_4BIT = 0x0002; static const uint16 PICTURE_8BIT = 0x0004; static const uint16 PICTURE_16BIT = 0x0008; static const uint16 PICTURE_24BIT = 0x0010; static const uint16 PICTURE_32BIT = 0x0020; (void)PICTURE_16BIT; (void)PICTURE_32BIT; // This is a simple BMP loader that can handle uncompressed BMP files. // Verify this is a BMP file by looking for the BM tag. input.reset(); std::string tag = input.readString(2); if (tag != "BM") { throw Error("Not a BMP file", input.getFilename()); } m_channels = 3; // Skip to the BITMAPINFOHEADER's width and height input.skip(16); m_width = input.readUInt32(); m_height = input.readUInt32(); // Skip to the bit count and compression type input.skip(2); uint16 bitCount = input.readUInt16(); uint32 compressionType = input.readUInt32(); uint8 red; uint8 green; uint8 blue; uint8 blank; // Only uncompressed bitmaps are supported by this code if ((int32)compressionType != BI_RGB) { throw Error("BMP images must be uncompressed", input.getFilename()); } uint8* palette = NULL; // Create the palette if needed if (bitCount <= 8) { // Skip to the palette color count in the header input.skip(12); int numColors = input.readUInt32(); palette = (uint8*)System::malloc(numColors * 3); debugAssert(palette); // Skip past the end of the header to the palette info input.skip(4); int c; for(c = 0; c < numColors * 3; c += 3) { // Palette information in bitmaps is stored in BGR_ format. // That means it's blue-green-red-blank, for each entry. 
blue = input.readUInt8(); green = input.readUInt8(); red = input.readUInt8(); blank = input.readUInt8(); palette[c] = red; palette[c + 1] = green; palette[c + 2] = blue; } } int hStart = 0; int hEnd = 0; int hDir = 0; if (m_height < 0) { m_height = -m_height; hStart = 0; hEnd = m_height; hDir = 1; } else { //height = height; hStart = m_height - 1; hEnd = -1; hDir = -1; } m_byte = (uint8*)m_memMan->alloc(m_width * m_height * 3); debugAssert(m_byte); int BMScanWidth; int BMPadding; uint8 BMGroup; uint8 BMPixel8; int currPixel; int dest; int flags = PICTURE_NONE; if (bitCount == 1) { // Note that this file is not necessarily grayscale, since it's possible // the palette is blue-and-white, or whatever. But of course most image // programs only write 1-bit images if they're black-and-white. flags = PICTURE_BITMAP | PICTURE_UNCOMPRESSED | PICTURE_MONOCHROME; // For bitmaps, each scanline is dword-aligned. BMScanWidth = (m_width + 7) >> 3; if (BMScanWidth & 3) { BMScanWidth += 4 - (BMScanWidth & 3); } // Powers of 2 int pow2[8] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80}; for (int h = hStart; h != hEnd; h += hDir) { currPixel = 0; dest = 3 * h * m_width; for (int w = 0; w < BMScanWidth; ++w) { BMGroup = input.readUInt8(); // Now we read the pixels. Usually there are eight pixels per byte, // since each pixel is represented by one bit, but if the width // is not a multiple of eight, the last byte will have some bits // set, with the others just being extra. Plus there's the // dword-alignment padding. So we keep checking to see if we've // already read "width" number of pixels. for (int i = 7; i >= 0; --i) { if (currPixel < m_width) { int src = 3 * ((BMGroup & pow2[i]) >> i); m_byte[dest] = palette[src]; m_byte[dest + 1] = palette[src + 1]; m_byte[dest + 2] = palette[src + 2]; ++currPixel; dest += 3; } } } } } else if (bitCount == 4) { flags = PICTURE_BITMAP | PICTURE_UNCOMPRESSED | PICTURE_4BIT; // For bitmaps, each scanline is dword-aligned. 
int BMScanWidth = (m_width + 1) >> 1; if (BMScanWidth & 3) { BMScanWidth += 4 - (BMScanWidth & 3); } for (int h = hStart; h != hEnd; h += hDir) { currPixel = 0; dest = 3 * h * m_width; for (int w = 0; w < BMScanWidth; w++) { BMGroup = input.readUInt8(); int src[2]; src[0] = 3 * ((BMGroup & 0xF0) >> 4); src[1] = 3 * (BMGroup & 0x0F); // Now we read the pixels. Usually there are two pixels per byte, // since each pixel is represented by four bits, but if the width // is not a multiple of two, the last byte will have only four bits // set, with the others just being extra. Plus there's the // dword-alignment padding. So we keep checking to see if we've // already read "Width" number of pixels. for (int i = 0; i < 2; ++i) { if (currPixel < m_width) { int tsrc = src[i]; m_byte[dest] = palette[tsrc]; m_byte[dest + 1] = palette[tsrc + 1]; m_byte[dest + 2] = palette[tsrc + 2]; ++currPixel; dest += 3; } } } } } else if (bitCount == 8) { flags = PICTURE_BITMAP | PICTURE_UNCOMPRESSED | PICTURE_8BIT; // For bitmaps, each scanline is dword-aligned. BMScanWidth = m_width; if (BMScanWidth & 3) { BMScanWidth += 4 - (BMScanWidth & 3); } for (int h = hStart; h != hEnd; h += hDir) { currPixel = 0; for (int w = 0; w < BMScanWidth; ++w) { BMPixel8 = input.readUInt8(); if (currPixel < m_width) { dest = 3 * ((h * m_width) + currPixel); int src = 3 * BMPixel8; m_byte[dest] = palette[src]; m_byte[dest + 1] = palette[src + 1]; m_byte[dest + 2] = palette[src + 2]; ++currPixel; } } } } else if (bitCount == 16) { m_memMan->free(m_byte); m_byte = NULL; System::free(palette); palette = NULL; throw Error("16-bit bitmaps not supported", input.getFilename()); } else if (bitCount == 24) { input.skip(20); flags = PICTURE_BITMAP | PICTURE_UNCOMPRESSED | PICTURE_24BIT; // For bitmaps, each scanline is dword-aligned. 
BMScanWidth = m_width * 3; if (BMScanWidth & 3) { BMPadding = 4 - (BMScanWidth & 3); } else { BMPadding = 0; } for (int h = hStart; h != hEnd; h += hDir) { dest = 3 * h * m_width; for (int w = 0; w < m_width; ++w) { blue = input.readUInt8(); green = input.readUInt8(); red = input.readUInt8(); m_byte[dest] = red; m_byte[dest + 1] = green; m_byte[dest + 2] = blue; dest += 3; } if (BMPadding) { input.skip(2); } } } else if (bitCount == 32) { m_memMan->free(m_byte); m_byte = NULL; System::free(palette); palette = NULL; throw Error("32 bit bitmaps not supported", input.getFilename()); } else { // We support all possible bit depths, so if the // code gets here, it's not even a real bitmap. m_memMan->free(m_byte); m_byte = NULL; throw Error("Not a bitmap!", input.getFilename()); } System::free(palette); palette = NULL; } void GImage::decodeICO( BinaryInput& input) { // Header uint16 r = input.readUInt16(); debugAssert(r == 0); r = input.readUInt16(); debugAssert(r == 1); // Read the number of icons, although we'll only load the // first one. 
int count = input.readUInt16(); m_channels = 4; debugAssert(count > 0); const uint8* headerBuffer = input.getCArray() + input.getPosition(); int maxWidth = 0, maxHeight = 0; int maxHeaderNum = 0; for (int currentHeader = 0; currentHeader < count; ++currentHeader) { const uint8* curHeaderBuffer = headerBuffer + (currentHeader * 16); int tmpWidth = curHeaderBuffer[0]; int tmpHeight = curHeaderBuffer[1]; // Just in case there is a non-square icon, checking area if ((tmpWidth * tmpHeight) > (maxWidth * maxHeight)) { maxWidth = tmpWidth; maxHeight = tmpHeight; maxHeaderNum = currentHeader; } } input.skip(maxHeaderNum * 16); m_width = input.readUInt8(); m_height = input.readUInt8(); int numColors = input.readUInt8(); m_byte = (uint8*)m_memMan->alloc(m_width * m_height * m_channels); debugAssert(m_byte); // Bit mask for packed bits int mask = 0; int bitsPerPixel = 8; switch (numColors) { case 2: mask = 0x01; bitsPerPixel = 1; break; case 16: mask = 0x0F; bitsPerPixel = 4; break; case 0: numColors = 256; mask = 0xFF; bitsPerPixel = 8; break; default: throw Error("Unsupported ICO color count.", input.getFilename()); } input.skip(5); // Skip 'size' unused input.skip(4); int offset = input.readUInt32(); // Skip over any other icon descriptions input.setPosition(offset); // Skip over bitmap header; it is redundant input.skip(40); Array<Color4uint8> palette; palette.resize(numColors, true); for (int c = 0; c < numColors; ++c) { palette[c].b = input.readUInt8(); palette[c].g = input.readUInt8(); palette[c].r = input.readUInt8(); palette[c].a = input.readUInt8(); } // The actual image and mask follow // The XOR Bitmap is stored as 1-bit, 4-bit or 8-bit uncompressed Bitmap // using the same encoding as BMP files. The AND Bitmap is stored in as // 1-bit uncompressed Bitmap. // // Pixels are stored bottom-up, left-to-right. Pixel lines are padded // with zeros to end on a 32bit (4byte) boundary. Every line will have the // same number of bytes. 
Color indices are zero based, meaning a pixel color // of 0 represents the first color table entry, a pixel color of 255 (if there // are that many) represents the 256th entry. /* int bitsPerRow = width * bitsPerPixel; int bytesPerRow = iCeil((double)bitsPerRow / 8); // Rows are padded to 32-bit boundaries bytesPerRow += bytesPerRow % 4; // Read the XOR values into the color channel for (int y = height - 1; y >= 0; --y) { int x = 0; // Read the row for (int i = 0; i < bytesPerRow; ++i) { uint8 byte = input.readUInt8(); for (int j = 0; (j < 8) && (x < width); ++x, j += bitsPerPixel) { int bit = ((byte << j) >> (8 - bitsPerPixel)) & mask; pixel4(x, y) = colorTable[bit]; } } } */ int hStart = 0; int hEnd = 0; int hDir = 0; if (m_height < 0) { m_height = -m_height; hStart = 0; hEnd = m_height; hDir = 1; } else { //height = height; hStart = m_height - 1; hEnd = -1; hDir = -1; } int BMScanWidth; uint8 BMGroup; uint8 BMPixel8; int currPixel; int dest; if (bitsPerPixel == 1) { // Note that this file is not necessarily grayscale, since it's possible // the palette is blue-and-white, or whatever. But of course most image // programs only write 1-bit images if they're black-and-white. // For bitmaps, each scanline is dword-aligned. BMScanWidth = (m_width + 7) >> 3; if (BMScanWidth & 3) { BMScanWidth += 4 - (BMScanWidth & 3); } // Powers of 2 int pow2[8] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80}; for (int h = hStart; h != hEnd; h += hDir) { currPixel = 0; dest = 3 * h * m_width; for (int w = 0; w < BMScanWidth; ++w) { BMGroup = input.readUInt8(); // Now we read the pixels. Usually there are eight pixels per byte, // since each pixel is represented by one bit, but if the width // is not a multiple of eight, the last byte will have some bits // set, with the others just being extra. Plus there's the // dword-alignment padding. So we keep checking to see if we've // already read "width" number of pixels. 
for (int i = 7; i >= 0; --i) { if (currPixel < m_width) { int src = ((BMGroup & pow2[i]) >> i); m_byte[dest] = palette[src].r; m_byte[dest + 1] = palette[src].g; m_byte[dest + 2] = palette[src].b; ++currPixel; dest += 4; } } } } } else if (bitsPerPixel == 4) { // For bitmaps, each scanline is dword-aligned. int BMScanWidth = (m_width + 1) >> 1; if (BMScanWidth & 3) { BMScanWidth += 4 - (BMScanWidth & 3); } for (int h = hStart; h != hEnd; h += hDir) { currPixel = 0; dest = 4 * h * m_width; for (int w = 0; w < BMScanWidth; w++) { BMGroup = input.readUInt8(); int src[2]; src[0] = ((BMGroup & 0xF0) >> 4); src[1] = (BMGroup & 0x0F); // Now we read the pixels. Usually there are two pixels per byte, // since each pixel is represented by four bits, but if the width // is not a multiple of two, the last byte will have only four bits // set, with the others just being extra. Plus there's the // dword-alignment padding. So we keep checking to see if we've // already read "Width" number of pixels. for (int i = 0; i < 2; ++i) { if (currPixel < m_width) { int tsrc = src[i]; m_byte[dest] = palette[tsrc].r; m_byte[dest + 1] = palette[tsrc].g; m_byte[dest + 2] = palette[tsrc].b; ++currPixel; dest += 4; } } } } } else if (bitsPerPixel == 8) { // For bitmaps, each scanline is dword-aligned. BMScanWidth = m_width; if (BMScanWidth & 3) { BMScanWidth += 4 - (BMScanWidth & 3); } for (int h = hStart; h != hEnd; h += hDir) { currPixel = 0; for (int w = 0; w < BMScanWidth; ++w) { BMPixel8 = input.readUInt8(); if (currPixel < m_width) { dest = 4 * ((h * m_width) + currPixel); int src = BMPixel8; m_byte[dest] = palette[src].r; m_byte[dest + 1] = palette[src].g; m_byte[dest + 2] = palette[src].b; ++currPixel; } } } } // Read the mask into the alpha channel int bitsPerRow = m_width; int bytesPerRow = iCeil((double)bitsPerRow / 8); // For bitmaps, each scanline is dword-aligned. 
//BMScanWidth = (width + 1) >> 1; if (bytesPerRow & 3) { bytesPerRow += 4 - (bytesPerRow & 3); } for (int y = m_height - 1; y >= 0; --y) { int x = 0; // Read the row for (int i = 0; i < bytesPerRow; ++i) { uint8 byte = input.readUInt8(); for (int j = 0; (j < 8) && (x < m_width); ++x, ++j) { int bit = (byte >> (7 - j)) & 0x01; pixel4(x, y).a = (1 - bit) * 0xFF; } } } } }
gpl-2.0
liusen09003110-163-com/linux
net/netfilter/nft_ct.c
674
10268
/* * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Development of this code funded by Astaro AG (http://www.astaro.com/) */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/netlink.h> #include <linux/netfilter.h> #include <linux/netfilter/nf_tables.h> #include <net/netfilter/nf_tables.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_tuple.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_ecache.h> #include <net/netfilter/nf_conntrack_labels.h> struct nft_ct { enum nft_ct_keys key:8; enum ip_conntrack_dir dir:8; union { enum nft_registers dreg:8; enum nft_registers sreg:8; }; }; static void nft_ct_get_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_ct *priv = nft_expr_priv(expr); u32 *dest = &regs->data[priv->dreg]; enum ip_conntrack_info ctinfo; const struct nf_conn *ct; const struct nf_conn_help *help; const struct nf_conntrack_tuple *tuple; const struct nf_conntrack_helper *helper; long diff; unsigned int state; ct = nf_ct_get(pkt->skb, &ctinfo); switch (priv->key) { case NFT_CT_STATE: if (ct == NULL) state = NF_CT_STATE_INVALID_BIT; else if (nf_ct_is_untracked(ct)) state = NF_CT_STATE_UNTRACKED_BIT; else state = NF_CT_STATE_BIT(ctinfo); *dest = state; return; default: break; } if (ct == NULL) goto err; switch (priv->key) { case NFT_CT_DIRECTION: *dest = CTINFO2DIR(ctinfo); return; case NFT_CT_STATUS: *dest = ct->status; return; #ifdef CONFIG_NF_CONNTRACK_MARK case NFT_CT_MARK: *dest = ct->mark; return; #endif #ifdef CONFIG_NF_CONNTRACK_SECMARK case NFT_CT_SECMARK: *dest = ct->secmark; return; #endif case NFT_CT_EXPIRATION: diff = (long)jiffies - (long)ct->timeout.expires; if (diff < 0) diff = 0; 
*dest = jiffies_to_msecs(diff); return; case NFT_CT_HELPER: if (ct->master == NULL) goto err; help = nfct_help(ct->master); if (help == NULL) goto err; helper = rcu_dereference(help->helper); if (helper == NULL) goto err; strncpy((char *)dest, helper->name, NF_CT_HELPER_NAME_LEN); return; #ifdef CONFIG_NF_CONNTRACK_LABELS case NFT_CT_LABELS: { struct nf_conn_labels *labels = nf_ct_labels_find(ct); unsigned int size; if (!labels) { memset(dest, 0, NF_CT_LABELS_MAX_SIZE); return; } size = labels->words * sizeof(long); memcpy(dest, labels->bits, size); if (size < NF_CT_LABELS_MAX_SIZE) memset(((char *) dest) + size, 0, NF_CT_LABELS_MAX_SIZE - size); return; } #endif default: break; } tuple = &ct->tuplehash[priv->dir].tuple; switch (priv->key) { case NFT_CT_L3PROTOCOL: *dest = nf_ct_l3num(ct); return; case NFT_CT_SRC: memcpy(dest, tuple->src.u3.all, nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16); return; case NFT_CT_DST: memcpy(dest, tuple->dst.u3.all, nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16); return; case NFT_CT_PROTOCOL: *dest = nf_ct_protonum(ct); return; case NFT_CT_PROTO_SRC: *dest = (__force __u16)tuple->src.u.all; return; case NFT_CT_PROTO_DST: *dest = (__force __u16)tuple->dst.u.all; return; default: break; } return; err: regs->verdict.code = NFT_BREAK; } static void nft_ct_set_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_ct *priv = nft_expr_priv(expr); struct sk_buff *skb = pkt->skb; #ifdef CONFIG_NF_CONNTRACK_MARK u32 value = regs->data[priv->sreg]; #endif enum ip_conntrack_info ctinfo; struct nf_conn *ct; ct = nf_ct_get(skb, &ctinfo); if (ct == NULL) return; switch (priv->key) { #ifdef CONFIG_NF_CONNTRACK_MARK case NFT_CT_MARK: if (ct->mark != value) { ct->mark = value; nf_conntrack_event_cache(IPCT_MARK, ct); } break; #endif default: break; } } static const struct nla_policy nft_ct_policy[NFTA_CT_MAX + 1] = { [NFTA_CT_DREG] = { .type = NLA_U32 }, [NFTA_CT_KEY] = { .type = NLA_U32 }, 
[NFTA_CT_DIRECTION] = { .type = NLA_U8 }, [NFTA_CT_SREG] = { .type = NLA_U32 }, }; static int nft_ct_l3proto_try_module_get(uint8_t family) { int err; if (family == NFPROTO_INET) { err = nf_ct_l3proto_try_module_get(NFPROTO_IPV4); if (err < 0) goto err1; err = nf_ct_l3proto_try_module_get(NFPROTO_IPV6); if (err < 0) goto err2; } else { err = nf_ct_l3proto_try_module_get(family); if (err < 0) goto err1; } return 0; err2: nf_ct_l3proto_module_put(NFPROTO_IPV4); err1: return err; } static void nft_ct_l3proto_module_put(uint8_t family) { if (family == NFPROTO_INET) { nf_ct_l3proto_module_put(NFPROTO_IPV4); nf_ct_l3proto_module_put(NFPROTO_IPV6); } else nf_ct_l3proto_module_put(family); } static int nft_ct_get_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_ct *priv = nft_expr_priv(expr); unsigned int len; int err; priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY])); switch (priv->key) { case NFT_CT_DIRECTION: if (tb[NFTA_CT_DIRECTION] != NULL) return -EINVAL; len = sizeof(u8); break; case NFT_CT_STATE: case NFT_CT_STATUS: #ifdef CONFIG_NF_CONNTRACK_MARK case NFT_CT_MARK: #endif #ifdef CONFIG_NF_CONNTRACK_SECMARK case NFT_CT_SECMARK: #endif case NFT_CT_EXPIRATION: if (tb[NFTA_CT_DIRECTION] != NULL) return -EINVAL; len = sizeof(u32); break; #ifdef CONFIG_NF_CONNTRACK_LABELS case NFT_CT_LABELS: if (tb[NFTA_CT_DIRECTION] != NULL) return -EINVAL; len = NF_CT_LABELS_MAX_SIZE; break; #endif case NFT_CT_HELPER: if (tb[NFTA_CT_DIRECTION] != NULL) return -EINVAL; len = NF_CT_HELPER_NAME_LEN; break; case NFT_CT_L3PROTOCOL: case NFT_CT_PROTOCOL: if (tb[NFTA_CT_DIRECTION] == NULL) return -EINVAL; len = sizeof(u8); break; case NFT_CT_SRC: case NFT_CT_DST: if (tb[NFTA_CT_DIRECTION] == NULL) return -EINVAL; switch (ctx->afi->family) { case NFPROTO_IPV4: len = FIELD_SIZEOF(struct nf_conntrack_tuple, src.u3.ip); break; case NFPROTO_IPV6: case NFPROTO_INET: len = FIELD_SIZEOF(struct nf_conntrack_tuple, src.u3.ip6); break; default: 
return -EAFNOSUPPORT; } break; case NFT_CT_PROTO_SRC: case NFT_CT_PROTO_DST: if (tb[NFTA_CT_DIRECTION] == NULL) return -EINVAL; len = FIELD_SIZEOF(struct nf_conntrack_tuple, src.u.all); break; default: return -EOPNOTSUPP; } if (tb[NFTA_CT_DIRECTION] != NULL) { priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]); switch (priv->dir) { case IP_CT_DIR_ORIGINAL: case IP_CT_DIR_REPLY: break; default: return -EINVAL; } } priv->dreg = nft_parse_register(tb[NFTA_CT_DREG]); err = nft_validate_register_store(ctx, priv->dreg, NULL, NFT_DATA_VALUE, len); if (err < 0) return err; err = nft_ct_l3proto_try_module_get(ctx->afi->family); if (err < 0) return err; return 0; } static int nft_ct_set_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_ct *priv = nft_expr_priv(expr); unsigned int len; int err; priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY])); switch (priv->key) { #ifdef CONFIG_NF_CONNTRACK_MARK case NFT_CT_MARK: len = FIELD_SIZEOF(struct nf_conn, mark); break; #endif default: return -EOPNOTSUPP; } priv->sreg = nft_parse_register(tb[NFTA_CT_SREG]); err = nft_validate_register_load(priv->sreg, len); if (err < 0) return err; err = nft_ct_l3proto_try_module_get(ctx->afi->family); if (err < 0) return err; return 0; } static void nft_ct_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { nft_ct_l3proto_module_put(ctx->afi->family); } static int nft_ct_get_dump(struct sk_buff *skb, const struct nft_expr *expr) { const struct nft_ct *priv = nft_expr_priv(expr); if (nft_dump_register(skb, NFTA_CT_DREG, priv->dreg)) goto nla_put_failure; if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key))) goto nla_put_failure; switch (priv->key) { case NFT_CT_PROTOCOL: case NFT_CT_SRC: case NFT_CT_DST: case NFT_CT_PROTO_SRC: case NFT_CT_PROTO_DST: if (nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir)) goto nla_put_failure; default: break; } return 0; nla_put_failure: return -1; } static int nft_ct_set_dump(struct sk_buff *skb, const 
struct nft_expr *expr) { const struct nft_ct *priv = nft_expr_priv(expr); if (nft_dump_register(skb, NFTA_CT_SREG, priv->sreg)) goto nla_put_failure; if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key))) goto nla_put_failure; return 0; nla_put_failure: return -1; } static struct nft_expr_type nft_ct_type; static const struct nft_expr_ops nft_ct_get_ops = { .type = &nft_ct_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_ct)), .eval = nft_ct_get_eval, .init = nft_ct_get_init, .destroy = nft_ct_destroy, .dump = nft_ct_get_dump, }; static const struct nft_expr_ops nft_ct_set_ops = { .type = &nft_ct_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_ct)), .eval = nft_ct_set_eval, .init = nft_ct_set_init, .destroy = nft_ct_destroy, .dump = nft_ct_set_dump, }; static const struct nft_expr_ops * nft_ct_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { if (tb[NFTA_CT_KEY] == NULL) return ERR_PTR(-EINVAL); if (tb[NFTA_CT_DREG] && tb[NFTA_CT_SREG]) return ERR_PTR(-EINVAL); if (tb[NFTA_CT_DREG]) return &nft_ct_get_ops; if (tb[NFTA_CT_SREG]) return &nft_ct_set_ops; return ERR_PTR(-EINVAL); } static struct nft_expr_type nft_ct_type __read_mostly = { .name = "ct", .select_ops = &nft_ct_select_ops, .policy = nft_ct_policy, .maxattr = NFTA_CT_MAX, .owner = THIS_MODULE, }; static int __init nft_ct_module_init(void) { return nft_register_expr(&nft_ct_type); } static void __exit nft_ct_module_exit(void) { nft_unregister_expr(&nft_ct_type); } module_init(nft_ct_module_init); module_exit(nft_ct_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); MODULE_ALIAS_NFT_EXPR("ct");
gpl-2.0
CyanogenMod/android_kernel_cyanogen_msm8916
drivers/acpi/numa.c
2210
8840
/* * acpi_numa.c - ACPI NUMA support * * Copyright (C) 2002 Takayoshi Kochi <t-kochi@bq.jp.nec.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/acpi.h> #include <linux/numa.h> #include <acpi/acpi_bus.h> #define PREFIX "ACPI: " #define ACPI_NUMA 0x80000000 #define _COMPONENT ACPI_NUMA ACPI_MODULE_NAME("numa"); static nodemask_t nodes_found_map = NODE_MASK_NONE; /* maps to convert between proximity domain and logical node ID */ static int pxm_to_node_map[MAX_PXM_DOMAINS] = { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE }; static int node_to_pxm_map[MAX_NUMNODES] = { [0 ... 
MAX_NUMNODES - 1] = PXM_INVAL }; unsigned char acpi_srat_revision __initdata; int pxm_to_node(int pxm) { if (pxm < 0) return NUMA_NO_NODE; return pxm_to_node_map[pxm]; } int node_to_pxm(int node) { if (node < 0) return PXM_INVAL; return node_to_pxm_map[node]; } void __acpi_map_pxm_to_node(int pxm, int node) { if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm]) pxm_to_node_map[pxm] = node; if (node_to_pxm_map[node] == PXM_INVAL || pxm < node_to_pxm_map[node]) node_to_pxm_map[node] = pxm; } int acpi_map_pxm_to_node(int pxm) { int node = pxm_to_node_map[pxm]; if (node < 0) { if (nodes_weight(nodes_found_map) >= MAX_NUMNODES) return NUMA_NO_NODE; node = first_unset_node(nodes_found_map); __acpi_map_pxm_to_node(pxm, node); node_set(node, nodes_found_map); } return node; } static void __init acpi_table_print_srat_entry(struct acpi_subtable_header *header) { ACPI_FUNCTION_NAME("acpi_table_print_srat_entry"); if (!header) return; switch (header->type) { case ACPI_SRAT_TYPE_CPU_AFFINITY: #ifdef ACPI_DEBUG_OUTPUT { struct acpi_srat_cpu_affinity *p = (struct acpi_srat_cpu_affinity *)header; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n", p->apic_id, p->local_sapic_eid, p->proximity_domain_lo, (p->flags & ACPI_SRAT_CPU_ENABLED)? "enabled" : "disabled")); } #endif /* ACPI_DEBUG_OUTPUT */ break; case ACPI_SRAT_TYPE_MEMORY_AFFINITY: #ifdef ACPI_DEBUG_OUTPUT { struct acpi_srat_mem_affinity *p = (struct acpi_srat_mem_affinity *)header; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n", (unsigned long)p->base_address, (unsigned long)p->length, p->proximity_domain, (p->flags & ACPI_SRAT_MEM_ENABLED)? "enabled" : "disabled", (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)? " hot-pluggable" : "", (p->flags & ACPI_SRAT_MEM_NON_VOLATILE)? 
" non-volatile" : "")); } #endif /* ACPI_DEBUG_OUTPUT */ break; case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY: #ifdef ACPI_DEBUG_OUTPUT { struct acpi_srat_x2apic_cpu_affinity *p = (struct acpi_srat_x2apic_cpu_affinity *)header; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "SRAT Processor (x2apicid[0x%08x]) in" " proximity domain %d %s\n", p->apic_id, p->proximity_domain, (p->flags & ACPI_SRAT_CPU_ENABLED) ? "enabled" : "disabled")); } #endif /* ACPI_DEBUG_OUTPUT */ break; default: printk(KERN_WARNING PREFIX "Found unsupported SRAT entry (type = 0x%x)\n", header->type); break; } } /* * A lot of BIOS fill in 10 (= no distance) everywhere. This messes * up the NUMA heuristics which wants the local node to have a smaller * distance than the others. * Do some quick checks here and only use the SLIT if it passes. */ static __init int slit_valid(struct acpi_table_slit *slit) { int i, j; int d = slit->locality_count; for (i = 0; i < d; i++) { for (j = 0; j < d; j++) { u8 val = slit->entry[d*i + j]; if (i == j) { if (val != LOCAL_DISTANCE) return 0; } else if (val <= LOCAL_DISTANCE) return 0; } } return 1; } static int __init acpi_parse_slit(struct acpi_table_header *table) { struct acpi_table_slit *slit; if (!table) return -EINVAL; slit = (struct acpi_table_slit *)table; if (!slit_valid(slit)) { printk(KERN_INFO "ACPI: SLIT table looks invalid. 
Not used.\n"); return -EINVAL; } acpi_numa_slit_init(slit); return 0; } void __init __attribute__ ((weak)) acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa) { printk(KERN_WARNING PREFIX "Found unsupported x2apic [0x%08x] SRAT entry\n", pa->apic_id); return; } static int __init acpi_parse_x2apic_affinity(struct acpi_subtable_header *header, const unsigned long end) { struct acpi_srat_x2apic_cpu_affinity *processor_affinity; processor_affinity = (struct acpi_srat_x2apic_cpu_affinity *)header; if (!processor_affinity) return -EINVAL; acpi_table_print_srat_entry(header); /* let architecture-dependent part to do it */ acpi_numa_x2apic_affinity_init(processor_affinity); return 0; } static int __init acpi_parse_processor_affinity(struct acpi_subtable_header *header, const unsigned long end) { struct acpi_srat_cpu_affinity *processor_affinity; processor_affinity = (struct acpi_srat_cpu_affinity *)header; if (!processor_affinity) return -EINVAL; acpi_table_print_srat_entry(header); /* let architecture-dependent part to do it */ acpi_numa_processor_affinity_init(processor_affinity); return 0; } static int __initdata parsed_numa_memblks; static int __init acpi_parse_memory_affinity(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_srat_mem_affinity *memory_affinity; memory_affinity = (struct acpi_srat_mem_affinity *)header; if (!memory_affinity) return -EINVAL; acpi_table_print_srat_entry(header); /* let architecture-dependent part to do it */ if (!acpi_numa_memory_affinity_init(memory_affinity)) parsed_numa_memblks++; return 0; } static int __init acpi_parse_srat(struct acpi_table_header *table) { struct acpi_table_srat *srat; if (!table) return -EINVAL; srat = (struct acpi_table_srat *)table; acpi_srat_revision = srat->header.revision; /* Real work done in acpi_table_parse_srat below. 
*/ return 0; } static int __init acpi_table_parse_srat(enum acpi_srat_type id, acpi_tbl_entry_handler handler, unsigned int max_entries) { return acpi_table_parse_entries(ACPI_SIG_SRAT, sizeof(struct acpi_table_srat), id, handler, max_entries); } int __init acpi_numa_init(void) { int cnt = 0; /* * Should not limit number with cpu num that is from NR_CPUS or nr_cpus= * SRAT cpu entries could have different order with that in MADT. * So go over all cpu entries in SRAT to get apicid to node mapping. */ /* SRAT: Static Resource Affinity Table */ if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) { acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY, acpi_parse_x2apic_affinity, 0); acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY, acpi_parse_processor_affinity, 0); cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY, acpi_parse_memory_affinity, NR_NODE_MEMBLKS); } /* SLIT: System Locality Information Table */ acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit); acpi_numa_arch_fixup(); if (cnt < 0) return cnt; else if (!parsed_numa_memblks) return -ENOENT; return 0; } int acpi_get_pxm(acpi_handle h) { unsigned long long pxm; acpi_status status; acpi_handle handle; acpi_handle phandle = h; do { handle = phandle; status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm); if (ACPI_SUCCESS(status)) return pxm; status = acpi_get_parent(handle, &phandle); } while (ACPI_SUCCESS(status)); return -1; } int acpi_get_node(acpi_handle *handle) { int pxm, node = -1; pxm = acpi_get_pxm(handle); if (pxm >= 0 && pxm < MAX_PXM_DOMAINS) node = acpi_map_pxm_to_node(pxm); return node; } EXPORT_SYMBOL(acpi_get_node);
gpl-2.0
schqiushui/kernel_lollipop_sense_a52
sound/isa/azt2320.c
2722
9842
/* card-azt2320.c - driver for Aztech Systems AZT2320 based soundcards. Copyright (C) 1999-2000 by Massimo Piccioni <dafastidio@libero.it> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* This driver should provide support for most Aztech AZT2320 based cards. Several AZT2316 chips are also supported/tested, but autoprobe doesn't work: all module option have to be set. No docs available for us at Aztech headquarters !!! Unbelievable ... No other help obtained. Thanks to Rainer Wiesner <rainer.wiesner@01019freenet.de> for the WSS activation method (full-duplex audio!). 
*/ #include <asm/io.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/pnp.h> #include <linux/module.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/wss.h> #include <sound/mpu401.h> #include <sound/opl3.h> #define PFX "azt2320: " MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>"); MODULE_DESCRIPTION("Aztech Systems AZT2320"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Aztech Systems,PRO16V}," "{Aztech Systems,AZT2320}," "{Aztech Systems,AZT3300}," "{Aztech Systems,AZT2320}," "{Aztech Systems,AZT3000}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP; /* Enable this card */ static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static long wss_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static long mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* Pnp setup */ static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* Pnp setup */ static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* PnP setup */ static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* PnP setup */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for azt2320 based soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for azt2320 based soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable azt2320 based soundcard."); struct snd_card_azt2320 { int dev_no; struct pnp_dev *dev; struct pnp_dev *devmpu; struct snd_wss *chip; }; static struct pnp_card_device_id snd_azt2320_pnpids[] = { /* PRO16V */ { .id = "AZT1008", .devs = { { "AZT1008" }, { "AZT2001" }, } }, /* Aztech Sound Galaxy 16 */ { .id = 
"AZT2320", .devs = { { "AZT0001" }, { "AZT0002" }, } }, /* Packard Bell Sound III 336 AM/SP */ { .id = "AZT3000", .devs = { { "AZT1003" }, { "AZT2001" }, } }, /* AT3300 */ { .id = "AZT3002", .devs = { { "AZT1004" }, { "AZT2001" }, } }, /* --- */ { .id = "AZT3005", .devs = { { "AZT1003" }, { "AZT2001" }, } }, /* --- */ { .id = "AZT3011", .devs = { { "AZT1003" }, { "AZT2001" }, } }, { .id = "" } /* end */ }; MODULE_DEVICE_TABLE(pnp_card, snd_azt2320_pnpids); #define DRIVER_NAME "snd-card-azt2320" static int snd_card_azt2320_pnp(int dev, struct snd_card_azt2320 *acard, struct pnp_card_link *card, const struct pnp_card_device_id *id) { struct pnp_dev *pdev; int err; acard->dev = pnp_request_card_device(card, id->devs[0].id, NULL); if (acard->dev == NULL) return -ENODEV; acard->devmpu = pnp_request_card_device(card, id->devs[1].id, NULL); pdev = acard->dev; err = pnp_activate_dev(pdev); if (err < 0) { snd_printk(KERN_ERR PFX "AUDIO pnp configure failure\n"); return err; } port[dev] = pnp_port_start(pdev, 0); fm_port[dev] = pnp_port_start(pdev, 1); wss_port[dev] = pnp_port_start(pdev, 2); dma1[dev] = pnp_dma(pdev, 0); dma2[dev] = pnp_dma(pdev, 1); irq[dev] = pnp_irq(pdev, 0); pdev = acard->devmpu; if (pdev != NULL) { err = pnp_activate_dev(pdev); if (err < 0) goto __mpu_error; mpu_port[dev] = pnp_port_start(pdev, 0); mpu_irq[dev] = pnp_irq(pdev, 0); } else { __mpu_error: if (pdev) { pnp_release_card_device(pdev); snd_printk(KERN_ERR PFX "MPU401 pnp configure failure, skipping\n"); } acard->devmpu = NULL; mpu_port[dev] = -1; } return 0; } /* same of snd_sbdsp_command by Jaroslav Kysela */ static int snd_card_azt2320_command(unsigned long port, unsigned char val) { int i; unsigned long limit; limit = jiffies + HZ / 10; for (i = 50000; i && time_after(limit, jiffies); i--) if (!(inb(port + 0x0c) & 0x80)) { outb(val, port + 0x0c); return 0; } return -EBUSY; } static int snd_card_azt2320_enable_wss(unsigned long port) { int error; if ((error = snd_card_azt2320_command(port, 
0x09))) return error; if ((error = snd_card_azt2320_command(port, 0x00))) return error; mdelay(5); return 0; } static int snd_card_azt2320_probe(int dev, struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { int error; struct snd_card *card; struct snd_card_azt2320 *acard; struct snd_wss *chip; struct snd_opl3 *opl3; error = snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(struct snd_card_azt2320), &card); if (error < 0) return error; acard = card->private_data; if ((error = snd_card_azt2320_pnp(dev, acard, pcard, pid))) { snd_card_free(card); return error; } snd_card_set_dev(card, &pcard->card->dev); if ((error = snd_card_azt2320_enable_wss(port[dev]))) { snd_card_free(card); return error; } error = snd_wss_create(card, wss_port[dev], -1, irq[dev], dma1[dev], dma2[dev], WSS_HW_DETECT, 0, &chip); if (error < 0) { snd_card_free(card); return error; } strcpy(card->driver, "AZT2320"); strcpy(card->shortname, "Aztech AZT2320"); sprintf(card->longname, "%s, WSS at 0x%lx, irq %i, dma %i&%i", card->shortname, chip->port, irq[dev], dma1[dev], dma2[dev]); error = snd_wss_pcm(chip, 0, NULL); if (error < 0) { snd_card_free(card); return error; } error = snd_wss_mixer(chip); if (error < 0) { snd_card_free(card); return error; } error = snd_wss_timer(chip, 0, NULL); if (error < 0) { snd_card_free(card); return error; } if (mpu_port[dev] > 0 && mpu_port[dev] != SNDRV_AUTO_PORT) { if (snd_mpu401_uart_new(card, 0, MPU401_HW_AZT2320, mpu_port[dev], 0, mpu_irq[dev], NULL) < 0) snd_printk(KERN_ERR PFX "no MPU-401 device at 0x%lx\n", mpu_port[dev]); } if (fm_port[dev] > 0 && fm_port[dev] != SNDRV_AUTO_PORT) { if (snd_opl3_create(card, fm_port[dev], fm_port[dev] + 2, OPL3_HW_AUTO, 0, &opl3) < 0) { snd_printk(KERN_ERR PFX "no OPL device at 0x%lx-0x%lx\n", fm_port[dev], fm_port[dev] + 2); } else { if ((error = snd_opl3_timer_new(opl3, 1, 2)) < 0) { snd_card_free(card); return error; } if ((error = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) { snd_card_free(card); 
return error; } } } if ((error = snd_card_register(card)) < 0) { snd_card_free(card); return error; } pnp_set_card_drvdata(pcard, card); return 0; } static unsigned int azt2320_devices; static int snd_azt2320_pnp_detect(struct pnp_card_link *card, const struct pnp_card_device_id *id) { static int dev; int res; for ( ; dev < SNDRV_CARDS; dev++) { if (!enable[dev]) continue; res = snd_card_azt2320_probe(dev, card, id); if (res < 0) return res; dev++; azt2320_devices++; return 0; } return -ENODEV; } static void snd_azt2320_pnp_remove(struct pnp_card_link *pcard) { snd_card_free(pnp_get_card_drvdata(pcard)); pnp_set_card_drvdata(pcard, NULL); } #ifdef CONFIG_PM static int snd_azt2320_pnp_suspend(struct pnp_card_link *pcard, pm_message_t state) { struct snd_card *card = pnp_get_card_drvdata(pcard); struct snd_card_azt2320 *acard = card->private_data; struct snd_wss *chip = acard->chip; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); chip->suspend(chip); return 0; } static int snd_azt2320_pnp_resume(struct pnp_card_link *pcard) { struct snd_card *card = pnp_get_card_drvdata(pcard); struct snd_card_azt2320 *acard = card->private_data; struct snd_wss *chip = acard->chip; chip->resume(chip); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif static struct pnp_card_driver azt2320_pnpc_driver = { .flags = PNP_DRIVER_RES_DISABLE, .name = "azt2320", .id_table = snd_azt2320_pnpids, .probe = snd_azt2320_pnp_detect, .remove = snd_azt2320_pnp_remove, #ifdef CONFIG_PM .suspend = snd_azt2320_pnp_suspend, .resume = snd_azt2320_pnp_resume, #endif }; static int __init alsa_card_azt2320_init(void) { int err; err = pnp_register_card_driver(&azt2320_pnpc_driver); if (err) return err; if (!azt2320_devices) { pnp_unregister_card_driver(&azt2320_pnpc_driver); #ifdef MODULE snd_printk(KERN_ERR "no AZT2320 based soundcards found\n"); #endif return -ENODEV; } return 0; } static void __exit alsa_card_azt2320_exit(void) { pnp_unregister_card_driver(&azt2320_pnpc_driver); } 
module_init(alsa_card_azt2320_init) module_exit(alsa_card_azt2320_exit)
gpl-2.0
gp-b2g/gp-peak-kernel
arch/x86/kernel/apic/apic_noop.c
2978
4659
/* * NOOP APIC driver. * * Does almost nothing and should be substituted by a real apic driver via * probe routine. * * Though in case if apic is disabled (for some reason) we try * to not uglify the caller's code and allow to call (some) apic routines * like self-ipi, etc... */ #include <linux/threads.h> #include <linux/cpumask.h> #include <linux/module.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/ctype.h> #include <linux/init.h> #include <linux/errno.h> #include <asm/fixmap.h> #include <asm/mpspec.h> #include <asm/apicdef.h> #include <asm/apic.h> #include <asm/setup.h> #include <linux/smp.h> #include <asm/ipi.h> #include <linux/interrupt.h> #include <asm/acpi.h> #include <asm/e820.h> static void noop_init_apic_ldr(void) { } static void noop_send_IPI_mask(const struct cpumask *cpumask, int vector) { } static void noop_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) { } static void noop_send_IPI_allbutself(int vector) { } static void noop_send_IPI_all(int vector) { } static void noop_send_IPI_self(int vector) { } static void noop_apic_wait_icr_idle(void) { } static void noop_apic_icr_write(u32 low, u32 id) { } static int noop_wakeup_secondary_cpu(int apicid, unsigned long start_eip) { return -1; } static u32 noop_safe_apic_wait_icr_idle(void) { return 0; } static u64 noop_apic_icr_read(void) { return 0; } static int noop_phys_pkg_id(int cpuid_apic, int index_msb) { return 0; } static unsigned int noop_get_apic_id(unsigned long x) { return 0; } static int noop_probe(void) { /* * NOOP apic should not ever be * enabled via probe routine */ return 0; } static int noop_apic_id_registered(void) { /* * if we would be really "pedantic" * we should pass read_apic_id() here * but since NOOP suppose APIC ID = 0 * lets save a few cycles */ return physid_isset(0, phys_cpu_present_map); } static const struct cpumask *noop_target_cpus(void) { /* only BSP here */ return cpumask_of(0); } static unsigned long 
noop_check_apicid_used(physid_mask_t *map, int apicid) { return physid_isset(apicid, *map); } static unsigned long noop_check_apicid_present(int bit) { return physid_isset(bit, phys_cpu_present_map); } static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask) { if (cpu != 0) pr_warning("APIC: Vector allocated for non-BSP cpu\n"); cpumask_clear(retmask); cpumask_set_cpu(cpu, retmask); } static u32 noop_apic_read(u32 reg) { WARN_ON_ONCE((cpu_has_apic && !disable_apic)); return 0; } static void noop_apic_write(u32 reg, u32 v) { WARN_ON_ONCE(cpu_has_apic && !disable_apic); } struct apic apic_noop = { .name = "noop", .probe = noop_probe, .acpi_madt_oem_check = NULL, .apic_id_registered = noop_apic_id_registered, .irq_delivery_mode = dest_LowestPrio, /* logical delivery broadcast to all CPUs: */ .irq_dest_mode = 1, .target_cpus = noop_target_cpus, .disable_esr = 0, .dest_logical = APIC_DEST_LOGICAL, .check_apicid_used = noop_check_apicid_used, .check_apicid_present = noop_check_apicid_present, .vector_allocation_domain = noop_vector_allocation_domain, .init_apic_ldr = noop_init_apic_ldr, .ioapic_phys_id_map = default_ioapic_phys_id_map, .setup_apic_routing = NULL, .multi_timer_check = NULL, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = physid_set_mask_of_physid, .setup_portio_remap = NULL, .check_phys_apicid_present = default_check_phys_apicid_present, .enable_apic_mode = NULL, .phys_pkg_id = noop_phys_pkg_id, .mps_oem_check = NULL, .get_apic_id = noop_get_apic_id, .set_apic_id = NULL, .apic_id_mask = 0x0F << 24, .cpu_mask_to_apicid = default_cpu_mask_to_apicid, .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, .send_IPI_mask = noop_send_IPI_mask, .send_IPI_mask_allbutself = noop_send_IPI_mask_allbutself, .send_IPI_allbutself = noop_send_IPI_allbutself, .send_IPI_all = noop_send_IPI_all, .send_IPI_self = noop_send_IPI_self, .wakeup_secondary_cpu = noop_wakeup_secondary_cpu, /* should be safe */ 
.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, .smp_callin_clear_local_apic = NULL, .inquire_remote_apic = NULL, .read = noop_apic_read, .write = noop_apic_write, .icr_read = noop_apic_icr_read, .icr_write = noop_apic_icr_write, .wait_icr_idle = noop_apic_wait_icr_idle, .safe_wait_icr_idle = noop_safe_apic_wait_icr_idle, #ifdef CONFIG_X86_32 .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, #endif };
gpl-2.0
silence-star/android_kernel_nubia_NX503A
drivers/net/ethernet/sun/sungem.c
4770
77950
/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $ * sungem.c: Sun GEM ethernet driver. * * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com) * * Support for Apple GMAC and assorted PHYs, WOL, Power Management * (C) 2001,2002,2003 Benjamin Herrenscmidt (benh@kernel.crashing.org) * (C) 2004,2005 Benjamin Herrenscmidt, IBM Corp. * * NAPI and NETPOLL support * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com) * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/crc32.h> #include <linux/random.h> #include <linux/workqueue.h> #include <linux/if_vlan.h> #include <linux/bitops.h> #include <linux/mm.h> #include <linux/gfp.h> #include <asm/io.h> #include <asm/byteorder.h> #include <asm/uaccess.h> #include <asm/irq.h> #ifdef CONFIG_SPARC #include <asm/idprom.h> #include <asm/prom.h> #endif #ifdef CONFIG_PPC_PMAC #include <asm/pci-bridge.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> #endif #include <linux/sungem_phy.h> #include "sungem.h" /* Stripping FCS is causing problems, disabled for now */ #undef STRIP_FCS #define DEFAULT_MSG (NETIF_MSG_DRV | \ NETIF_MSG_PROBE | \ NETIF_MSG_LINK) #define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \ SUPPORTED_Pause | SUPPORTED_Autoneg) #define DRV_NAME "sungem" #define DRV_VERSION "1.0" #define DRV_AUTHOR "David S. 
Miller <davem@redhat.com>" static char version[] __devinitdata = DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n"; MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver"); MODULE_LICENSE("GPL"); #define GEM_MODULE_NAME "gem" static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = { { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* These models only differ from the original GEM in * that their tx/rx fifos are of a different size and * they only support 10/100 speeds. -DaveM * * Apple's GMAC does support gigabit on machines with * the BCM54xx PHYs. -BenH */ { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, {0, } }; MODULE_DEVICE_TABLE(pci, gem_pci_tbl); static u16 __phy_read(struct gem *gp, int phy_addr, int reg) { u32 cmd; int limit = 10000; cmd = (1 << 30); cmd |= (2 << 28); cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; cmd |= (reg << 18) & MIF_FRAME_REGAD; cmd |= (MIF_FRAME_TAMSB); writel(cmd, gp->regs + MIF_FRAME); while (--limit) { cmd = readl(gp->regs + MIF_FRAME); if (cmd & MIF_FRAME_TALSB) break; udelay(10); } if (!limit) cmd = 0xffff; return cmd & MIF_FRAME_DATA; } static inline int _phy_read(struct net_device *dev, int mii_id, int reg) { struct gem *gp = netdev_priv(dev); return __phy_read(gp, mii_id, reg); } static inline u16 phy_read(struct gem *gp, int reg) { return __phy_read(gp, gp->mii_phy_addr, reg); } static void __phy_write(struct 
gem *gp, int phy_addr, int reg, u16 val) { u32 cmd; int limit = 10000; cmd = (1 << 30); cmd |= (1 << 28); cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; cmd |= (reg << 18) & MIF_FRAME_REGAD; cmd |= (MIF_FRAME_TAMSB); cmd |= (val & MIF_FRAME_DATA); writel(cmd, gp->regs + MIF_FRAME); while (limit--) { cmd = readl(gp->regs + MIF_FRAME); if (cmd & MIF_FRAME_TALSB) break; udelay(10); } } static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val) { struct gem *gp = netdev_priv(dev); __phy_write(gp, mii_id, reg, val & 0xffff); } static inline void phy_write(struct gem *gp, int reg, u16 val) { __phy_write(gp, gp->mii_phy_addr, reg, val); } static inline void gem_enable_ints(struct gem *gp) { /* Enable all interrupts but TXDONE */ writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK); } static inline void gem_disable_ints(struct gem *gp) { /* Disable all interrupts, including TXDONE */ writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK); (void)readl(gp->regs + GREG_IMASK); /* write posting */ } static void gem_get_cell(struct gem *gp) { BUG_ON(gp->cell_enabled < 0); gp->cell_enabled++; #ifdef CONFIG_PPC_PMAC if (gp->cell_enabled == 1) { mb(); pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1); udelay(10); } #endif /* CONFIG_PPC_PMAC */ } /* Turn off the chip's clock */ static void gem_put_cell(struct gem *gp) { BUG_ON(gp->cell_enabled <= 0); gp->cell_enabled--; #ifdef CONFIG_PPC_PMAC if (gp->cell_enabled == 0) { mb(); pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0); udelay(10); } #endif /* CONFIG_PPC_PMAC */ } static inline void gem_netif_stop(struct gem *gp) { gp->dev->trans_start = jiffies; /* prevent tx timeout */ napi_disable(&gp->napi); netif_tx_disable(gp->dev); } static inline void gem_netif_start(struct gem *gp) { /* NOTE: unconditional netif_wake_queue is only * appropriate so long as all callers are assured to * have free tx slots. 
*/ netif_wake_queue(gp->dev); napi_enable(&gp->napi); } static void gem_schedule_reset(struct gem *gp) { gp->reset_task_pending = 1; schedule_work(&gp->reset_task); } static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits) { if (netif_msg_intr(gp)) printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name); } static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) { u32 pcs_istat = readl(gp->regs + PCS_ISTAT); u32 pcs_miistat; if (netif_msg_intr(gp)) printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n", gp->dev->name, pcs_istat); if (!(pcs_istat & PCS_ISTAT_LSC)) { netdev_err(dev, "PCS irq but no link status change???\n"); return 0; } /* The link status bit latches on zero, so you must * read it twice in such a case to see a transition * to the link being up. */ pcs_miistat = readl(gp->regs + PCS_MIISTAT); if (!(pcs_miistat & PCS_MIISTAT_LS)) pcs_miistat |= (readl(gp->regs + PCS_MIISTAT) & PCS_MIISTAT_LS); if (pcs_miistat & PCS_MIISTAT_ANC) { /* The remote-fault indication is only valid * when autoneg has completed. */ if (pcs_miistat & PCS_MIISTAT_RF) netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n"); else netdev_info(dev, "PCS AutoNEG complete\n"); } if (pcs_miistat & PCS_MIISTAT_LS) { netdev_info(dev, "PCS link is now up\n"); netif_carrier_on(gp->dev); } else { netdev_info(dev, "PCS link is now down\n"); netif_carrier_off(gp->dev); /* If this happens and the link timer is not running, * reset so we re-negotiate. */ if (!timer_pending(&gp->link_timer)) return 1; } return 0; } static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) { u32 txmac_stat = readl(gp->regs + MAC_TXSTAT); if (netif_msg_intr(gp)) printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n", gp->dev->name, txmac_stat); /* Defer timer expiration is quite normal, * don't even log the event. 
*/ if ((txmac_stat & MAC_TXSTAT_DTE) && !(txmac_stat & ~MAC_TXSTAT_DTE)) return 0; if (txmac_stat & MAC_TXSTAT_URUN) { netdev_err(dev, "TX MAC xmit underrun\n"); dev->stats.tx_fifo_errors++; } if (txmac_stat & MAC_TXSTAT_MPE) { netdev_err(dev, "TX MAC max packet size error\n"); dev->stats.tx_errors++; } /* The rest are all cases of one of the 16-bit TX * counters expiring. */ if (txmac_stat & MAC_TXSTAT_NCE) dev->stats.collisions += 0x10000; if (txmac_stat & MAC_TXSTAT_ECE) { dev->stats.tx_aborted_errors += 0x10000; dev->stats.collisions += 0x10000; } if (txmac_stat & MAC_TXSTAT_LCE) { dev->stats.tx_aborted_errors += 0x10000; dev->stats.collisions += 0x10000; } /* We do not keep track of MAC_TXSTAT_FCE and * MAC_TXSTAT_PCE events. */ return 0; } /* When we get a RX fifo overflow, the RX unit in GEM is probably hung * so we do the following. * * If any part of the reset goes wrong, we return 1 and that causes the * whole chip to be reset. */ static int gem_rxmac_reset(struct gem *gp) { struct net_device *dev = gp->dev; int limit, i; u64 desc_dma; u32 val; /* First, reset & disable MAC RX. */ writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); for (limit = 0; limit < 5000; limit++) { if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD)) break; udelay(10); } if (limit == 5000) { netdev_err(dev, "RX MAC will not reset, resetting whole chip\n"); return 1; } writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); for (limit = 0; limit < 5000; limit++) { if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB)) break; udelay(10); } if (limit == 5000) { netdev_err(dev, "RX MAC will not disable, resetting whole chip\n"); return 1; } /* Second, disable RX DMA. */ writel(0, gp->regs + RXDMA_CFG); for (limit = 0; limit < 5000; limit++) { if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE)) break; udelay(10); } if (limit == 5000) { netdev_err(dev, "RX DMA will not disable, resetting whole chip\n"); return 1; } udelay(5000); /* Execute RX reset command. 
*/ writel(gp->swrst_base | GREG_SWRST_RXRST, gp->regs + GREG_SWRST); for (limit = 0; limit < 5000; limit++) { if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST)) break; udelay(10); } if (limit == 5000) { netdev_err(dev, "RX reset command will not execute, resetting whole chip\n"); return 1; } /* Refresh the RX ring. */ for (i = 0; i < RX_RING_SIZE; i++) { struct gem_rxd *rxd = &gp->init_block->rxd[i]; if (gp->rx_skbs[i] == NULL) { netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n"); return 1; } rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); } gp->rx_new = gp->rx_old = 0; /* Now we must reprogram the rest of RX unit. */ desc_dma = (u64) gp->gblock_dvma; desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); writel(val, gp->regs + RXDMA_CFG); if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) writel(((5 & RXDMA_BLANK_IPKTS) | ((8 << 12) & RXDMA_BLANK_ITIME)), gp->regs + RXDMA_BLANK); else writel(((5 & RXDMA_BLANK_IPKTS) | ((4 << 12) & RXDMA_BLANK_ITIME)), gp->regs + RXDMA_BLANK); val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); writel(val, gp->regs + RXDMA_PTHRESH); val = readl(gp->regs + RXDMA_CFG); writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); val = readl(gp->regs + MAC_RXCFG); writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); return 0; } static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) { u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT); int ret = 0; if (netif_msg_intr(gp)) printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n", gp->dev->name, rxmac_stat); if (rxmac_stat & MAC_RXSTAT_OFLW) { u32 smac = readl(gp->regs + 
MAC_SMACHINE); netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac); dev->stats.rx_over_errors++; dev->stats.rx_fifo_errors++; ret = gem_rxmac_reset(gp); } if (rxmac_stat & MAC_RXSTAT_ACE) dev->stats.rx_frame_errors += 0x10000; if (rxmac_stat & MAC_RXSTAT_CCE) dev->stats.rx_crc_errors += 0x10000; if (rxmac_stat & MAC_RXSTAT_LCE) dev->stats.rx_length_errors += 0x10000; /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE * events. */ return ret; } static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) { u32 mac_cstat = readl(gp->regs + MAC_CSTAT); if (netif_msg_intr(gp)) printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n", gp->dev->name, mac_cstat); /* This interrupt is just for pause frame and pause * tracking. It is useful for diagnostics and debug * but probably by default we will mask these events. */ if (mac_cstat & MAC_CSTAT_PS) gp->pause_entered++; if (mac_cstat & MAC_CSTAT_PRCV) gp->pause_last_time_recvd = (mac_cstat >> 16); return 0; } static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) { u32 mif_status = readl(gp->regs + MIF_STATUS); u32 reg_val, changed_bits; reg_val = (mif_status & MIF_STATUS_DATA) >> 16; changed_bits = (mif_status & MIF_STATUS_STAT); gem_handle_mif_event(gp, reg_val, changed_bits); return 0; } static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) { u32 pci_estat = readl(gp->regs + GREG_PCIESTAT); if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { netdev_err(dev, "PCI error [%04x]", pci_estat); if (pci_estat & GREG_PCIESTAT_BADACK) pr_cont(" <No ACK64# during ABS64 cycle>"); if (pci_estat & GREG_PCIESTAT_DTRTO) pr_cont(" <Delayed transaction timeout>"); if (pci_estat & GREG_PCIESTAT_OTHER) pr_cont(" <other>"); pr_cont("\n"); } else { pci_estat |= GREG_PCIESTAT_OTHER; netdev_err(dev, "PCI error\n"); } if (pci_estat & GREG_PCIESTAT_OTHER) { u16 pci_cfg_stat; /* Interrogate PCI config space for the 
* true cause. */ pci_read_config_word(gp->pdev, PCI_STATUS, &pci_cfg_stat); netdev_err(dev, "Read PCI cfg space status [%04x]\n", pci_cfg_stat); if (pci_cfg_stat & PCI_STATUS_PARITY) netdev_err(dev, "PCI parity error detected\n"); if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT) netdev_err(dev, "PCI target abort\n"); if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT) netdev_err(dev, "PCI master acks target abort\n"); if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT) netdev_err(dev, "PCI master abort\n"); if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR) netdev_err(dev, "PCI system error SERR#\n"); if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY) netdev_err(dev, "PCI parity error\n"); /* Write the error bits back to clear them. */ pci_cfg_stat &= (PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY); pci_write_config_word(gp->pdev, PCI_STATUS, pci_cfg_stat); } /* For all PCI errors, we should reset the chip. */ return 1; } /* All non-normal interrupt conditions get serviced here. * Returns non-zero if we should just exit the interrupt * handler right now (ie. if we reset the card which invalidates * all of the other original irq status bits). */ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status) { if (gem_status & GREG_STAT_RXNOBUF) { /* Frame arrived, no free RX buffers available. 
*/ if (netif_msg_rx_err(gp)) printk(KERN_DEBUG "%s: no buffer for rx frame\n", gp->dev->name); dev->stats.rx_dropped++; } if (gem_status & GREG_STAT_RXTAGERR) { /* corrupt RX tag framing */ if (netif_msg_rx_err(gp)) printk(KERN_DEBUG "%s: corrupt rx tag framing\n", gp->dev->name); dev->stats.rx_errors++; return 1; } if (gem_status & GREG_STAT_PCS) { if (gem_pcs_interrupt(dev, gp, gem_status)) return 1; } if (gem_status & GREG_STAT_TXMAC) { if (gem_txmac_interrupt(dev, gp, gem_status)) return 1; } if (gem_status & GREG_STAT_RXMAC) { if (gem_rxmac_interrupt(dev, gp, gem_status)) return 1; } if (gem_status & GREG_STAT_MAC) { if (gem_mac_interrupt(dev, gp, gem_status)) return 1; } if (gem_status & GREG_STAT_MIF) { if (gem_mif_interrupt(dev, gp, gem_status)) return 1; } if (gem_status & GREG_STAT_PCIERR) { if (gem_pci_interrupt(dev, gp, gem_status)) return 1; } return 0; } static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status) { int entry, limit; entry = gp->tx_old; limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT); while (entry != limit) { struct sk_buff *skb; struct gem_txd *txd; dma_addr_t dma_addr; u32 dma_len; int frag; if (netif_msg_tx_done(gp)) printk(KERN_DEBUG "%s: tx done, slot %d\n", gp->dev->name, entry); skb = gp->tx_skbs[entry]; if (skb_shinfo(skb)->nr_frags) { int last = entry + skb_shinfo(skb)->nr_frags; int walk = entry; int incomplete = 0; last &= (TX_RING_SIZE - 1); for (;;) { walk = NEXT_TX(walk); if (walk == limit) incomplete = 1; if (walk == last) break; } if (incomplete) break; } gp->tx_skbs[entry] = NULL; dev->stats.tx_bytes += skb->len; for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { txd = &gp->init_block->txd[entry]; dma_addr = le64_to_cpu(txd->buffer); dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ; pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE); entry = NEXT_TX(entry); } dev->stats.tx_packets++; dev_kfree_skb(skb); } gp->tx_old = entry; /* Need to make the 
tx_old update visible to gem_start_xmit() * before checking for netif_queue_stopped(). Without the * memory barrier, there is a small possibility that gem_start_xmit() * will miss it and cause the queue to be stopped forever. */ smp_mb(); if (unlikely(netif_queue_stopped(dev) && TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) { struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); __netif_tx_lock(txq, smp_processor_id()); if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) netif_wake_queue(dev); __netif_tx_unlock(txq); } } static __inline__ void gem_post_rxds(struct gem *gp, int limit) { int cluster_start, curr, count, kick; cluster_start = curr = (gp->rx_new & ~(4 - 1)); count = 0; kick = -1; wmb(); while (curr != limit) { curr = NEXT_RX(curr); if (++count == 4) { struct gem_rxd *rxd = &gp->init_block->rxd[cluster_start]; for (;;) { rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); rxd++; cluster_start = NEXT_RX(cluster_start); if (cluster_start == curr) break; } kick = curr; count = 0; } } if (kick >= 0) { mb(); writel(kick, gp->regs + RXDMA_KICK); } } #define ALIGNED_RX_SKB_ADDR(addr) \ ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr)) static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size, gfp_t gfp_flags) { struct sk_buff *skb = alloc_skb(size + 64, gfp_flags); if (likely(skb)) { unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data); skb_reserve(skb, offset); skb->dev = dev; } return skb; } static int gem_rx(struct gem *gp, int work_to_do) { struct net_device *dev = gp->dev; int entry, drops, work_done = 0; u32 done; __sum16 csum; if (netif_msg_rx_status(gp)) printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n", gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new); entry = gp->rx_new; drops = 0; done = readl(gp->regs + RXDMA_DONE); for (;;) { struct gem_rxd *rxd = &gp->init_block->rxd[entry]; struct sk_buff *skb; u64 status = le64_to_cpu(rxd->status_word); dma_addr_t 
dma_addr; int len; if ((status & RXDCTRL_OWN) != 0) break; if (work_done >= RX_RING_SIZE || work_done >= work_to_do) break; /* When writing back RX descriptor, GEM writes status * then buffer address, possibly in separate transactions. * If we don't wait for the chip to write both, we could * post a new buffer to this descriptor then have GEM spam * on the buffer address. We sync on the RX completion * register to prevent this from happening. */ if (entry == done) { done = readl(gp->regs + RXDMA_DONE); if (entry == done) break; } /* We can now account for the work we're about to do */ work_done++; skb = gp->rx_skbs[entry]; len = (status & RXDCTRL_BUFSZ) >> 16; if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) { dev->stats.rx_errors++; if (len < ETH_ZLEN) dev->stats.rx_length_errors++; if (len & RXDCTRL_BAD) dev->stats.rx_crc_errors++; /* We'll just return it to GEM. */ drop_it: dev->stats.rx_dropped++; goto next; } dma_addr = le64_to_cpu(rxd->buffer); if (len > RX_COPY_THRESHOLD) { struct sk_buff *new_skb; new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); if (new_skb == NULL) { drops++; goto drop_it; } pci_unmap_page(gp->pdev, dma_addr, RX_BUF_ALLOC_SIZE(gp), PCI_DMA_FROMDEVICE); gp->rx_skbs[entry] = new_skb; skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET)); rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev, virt_to_page(new_skb->data), offset_in_page(new_skb->data), RX_BUF_ALLOC_SIZE(gp), PCI_DMA_FROMDEVICE)); skb_reserve(new_skb, RX_OFFSET); /* Trim the original skb for the netif. */ skb_trim(skb, len); } else { struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2); if (copy_skb == NULL) { drops++; goto drop_it; } skb_reserve(copy_skb, 2); skb_put(copy_skb, len); pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); skb_copy_from_linear_data(skb, copy_skb->data, len); pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); /* We'll reuse the original ring buffer. 
*/ skb = copy_skb; } csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff); skb->csum = csum_unfold(csum); skb->ip_summed = CHECKSUM_COMPLETE; skb->protocol = eth_type_trans(skb, gp->dev); napi_gro_receive(&gp->napi, skb); dev->stats.rx_packets++; dev->stats.rx_bytes += len; next: entry = NEXT_RX(entry); } gem_post_rxds(gp, entry); gp->rx_new = entry; if (drops) netdev_info(gp->dev, "Memory squeeze, deferring packet\n"); return work_done; } static int gem_poll(struct napi_struct *napi, int budget) { struct gem *gp = container_of(napi, struct gem, napi); struct net_device *dev = gp->dev; int work_done; work_done = 0; do { /* Handle anomalies */ if (unlikely(gp->status & GREG_STAT_ABNORMAL)) { struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); int reset; /* We run the abnormal interrupt handling code with * the Tx lock. It only resets the Rx portion of the * chip, but we need to guard it against DMA being * restarted by the link poll timer */ __netif_tx_lock(txq, smp_processor_id()); reset = gem_abnormal_irq(dev, gp, gp->status); __netif_tx_unlock(txq); if (reset) { gem_schedule_reset(gp); napi_complete(napi); return work_done; } } /* Run TX completion thread */ gem_tx(dev, gp, gp->status); /* Run RX thread. We don't use any locking here, * code willing to do bad things - like cleaning the * rx ring - must call napi_disable(), which * schedule_timeout()'s if polling is already disabled. 
*/ work_done += gem_rx(gp, budget - work_done); if (work_done >= budget) return work_done; gp->status = readl(gp->regs + GREG_STAT); } while (gp->status & GREG_STAT_NAPI); napi_complete(napi); gem_enable_ints(gp); return work_done; } static irqreturn_t gem_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct gem *gp = netdev_priv(dev); if (napi_schedule_prep(&gp->napi)) { u32 gem_status = readl(gp->regs + GREG_STAT); if (unlikely(gem_status == 0)) { napi_enable(&gp->napi); return IRQ_NONE; } if (netif_msg_intr(gp)) printk(KERN_DEBUG "%s: gem_interrupt() gem_status: 0x%x\n", gp->dev->name, gem_status); gp->status = gem_status; gem_disable_ints(gp); __napi_schedule(&gp->napi); } /* If polling was disabled at the time we received that * interrupt, we may return IRQ_HANDLED here while we * should return IRQ_NONE. No big deal... */ return IRQ_HANDLED; } #ifdef CONFIG_NET_POLL_CONTROLLER static void gem_poll_controller(struct net_device *dev) { struct gem *gp = netdev_priv(dev); disable_irq(gp->pdev->irq); gem_interrupt(gp->pdev->irq, dev); enable_irq(gp->pdev->irq); } #endif static void gem_tx_timeout(struct net_device *dev) { struct gem *gp = netdev_priv(dev); netdev_err(dev, "transmit timed out, resetting\n"); netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n", readl(gp->regs + TXDMA_CFG), readl(gp->regs + MAC_TXSTAT), readl(gp->regs + MAC_TXCFG)); netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n", readl(gp->regs + RXDMA_CFG), readl(gp->regs + MAC_RXSTAT), readl(gp->regs + MAC_RXCFG)); gem_schedule_reset(gp); } static __inline__ int gem_intme(int entry) { /* Algorithm: IRQ every 1/2 of descriptors. 
*/ if (!(entry & ((TX_RING_SIZE>>1)-1))) return 1; return 0; } static netdev_tx_t gem_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct gem *gp = netdev_priv(dev); int entry; u64 ctrl; ctrl = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) { const u64 csum_start_off = skb_checksum_start_offset(skb); const u64 csum_stuff_off = csum_start_off + skb->csum_offset; ctrl = (TXDCTRL_CENAB | (csum_start_off << 15) | (csum_stuff_off << 21)); } if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) { /* This is a hard error, log it. */ if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); } return NETDEV_TX_BUSY; } entry = gp->tx_new; gp->tx_skbs[entry] = skb; if (skb_shinfo(skb)->nr_frags == 0) { struct gem_txd *txd = &gp->init_block->txd[entry]; dma_addr_t mapping; u32 len; len = skb->len; mapping = pci_map_page(gp->pdev, virt_to_page(skb->data), offset_in_page(skb->data), len, PCI_DMA_TODEVICE); ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len; if (gem_intme(entry)) ctrl |= TXDCTRL_INTME; txd->buffer = cpu_to_le64(mapping); wmb(); txd->control_word = cpu_to_le64(ctrl); entry = NEXT_TX(entry); } else { struct gem_txd *txd; u32 first_len; u64 intme; dma_addr_t first_mapping; int frag, first_entry = entry; intme = 0; if (gem_intme(entry)) intme |= TXDCTRL_INTME; /* We must give this initial chunk to the device last. * Otherwise we could race with the device. 
*/ first_len = skb_headlen(skb); first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data), offset_in_page(skb->data), first_len, PCI_DMA_TODEVICE); entry = NEXT_TX(entry); for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; u32 len; dma_addr_t mapping; u64 this_ctrl; len = skb_frag_size(this_frag); mapping = skb_frag_dma_map(&gp->pdev->dev, this_frag, 0, len, DMA_TO_DEVICE); this_ctrl = ctrl; if (frag == skb_shinfo(skb)->nr_frags - 1) this_ctrl |= TXDCTRL_EOF; txd = &gp->init_block->txd[entry]; txd->buffer = cpu_to_le64(mapping); wmb(); txd->control_word = cpu_to_le64(this_ctrl | len); if (gem_intme(entry)) intme |= TXDCTRL_INTME; entry = NEXT_TX(entry); } txd = &gp->init_block->txd[first_entry]; txd->buffer = cpu_to_le64(first_mapping); wmb(); txd->control_word = cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len); } gp->tx_new = entry; if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) { netif_stop_queue(dev); /* netif_stop_queue() must be done before checking * checking tx index in TX_BUFFS_AVAIL() below, because * in gem_tx(), we update tx_old before checking for * netif_queue_stopped(). */ smp_mb(); if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) netif_wake_queue(dev); } if (netif_msg_tx_queued(gp)) printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n", dev->name, entry, skb->len); mb(); writel(gp->tx_new, gp->regs + TXDMA_KICK); return NETDEV_TX_OK; } static void gem_pcs_reset(struct gem *gp) { int limit; u32 val; /* Reset PCS unit. */ val = readl(gp->regs + PCS_MIICTRL); val |= PCS_MIICTRL_RST; writel(val, gp->regs + PCS_MIICTRL); limit = 32; while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) { udelay(100); if (limit-- <= 0) break; } if (limit < 0) netdev_warn(gp->dev, "PCS reset bit would not clear\n"); } static void gem_pcs_reinit_adv(struct gem *gp) { u32 val; /* Make sure PCS is disabled while changing advertisement * configuration. 
*/ val = readl(gp->regs + PCS_CFG); val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO); writel(val, gp->regs + PCS_CFG); /* Advertise all capabilities except asymmetric * pause. */ val = readl(gp->regs + PCS_MIIADV); val |= (PCS_MIIADV_FD | PCS_MIIADV_HD | PCS_MIIADV_SP | PCS_MIIADV_AP); writel(val, gp->regs + PCS_MIIADV); /* Enable and restart auto-negotiation, disable wrapback/loopback, * and re-enable PCS. */ val = readl(gp->regs + PCS_MIICTRL); val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE); val &= ~PCS_MIICTRL_WB; writel(val, gp->regs + PCS_MIICTRL); val = readl(gp->regs + PCS_CFG); val |= PCS_CFG_ENABLE; writel(val, gp->regs + PCS_CFG); /* Make sure serialink loopback is off. The meaning * of this bit is logically inverted based upon whether * you are in Serialink or SERDES mode. */ val = readl(gp->regs + PCS_SCTRL); if (gp->phy_type == phy_serialink) val &= ~PCS_SCTRL_LOOP; else val |= PCS_SCTRL_LOOP; writel(val, gp->regs + PCS_SCTRL); } #define STOP_TRIES 32 static void gem_reset(struct gem *gp) { int limit; u32 val; /* Make sure we won't get any more interrupts */ writel(0xffffffff, gp->regs + GREG_IMASK); /* Reset the chip */ writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST, gp->regs + GREG_SWRST); limit = STOP_TRIES; do { udelay(20); val = readl(gp->regs + GREG_SWRST); if (limit-- <= 0) break; } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)); if (limit < 0) netdev_err(gp->dev, "SW reset is ghetto\n"); if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes) gem_pcs_reinit_adv(gp); } static void gem_start_dma(struct gem *gp) { u32 val; /* We are ready to rock, turn everything on. 
 */
	/* Turn the Tx/Rx DMA engines on first, then the MAC blocks. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);

	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);

	val = readl(gp->regs + MAC_TXCFG);
	writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);

	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	/* Posted-write flush: read back so the enables reach the chip
	 * before the settle delay and before interrupts are unmasked.
	 */
	(void) readl(gp->regs + MAC_RXCFG);
	udelay(100);

	gem_enable_ints(gp);

	/* Kick the RX engine: hand it all but the last 4 descriptors. */
	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
}

/* DMA won't be actually stopped before about 4ms tho ... */
static void gem_stop_dma(struct gem *gp)
{
	u32 val;

	/* We are done rocking, turn everything off. Mirror image of
	 * gem_start_dma(): clear the same enable bits.
	 */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);

	val = readl(gp->regs + RXDMA_CFG);
	writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);

	val = readl(gp->regs + MAC_TXCFG);
	writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);

	val = readl(gp->regs + MAC_RXCFG);
	writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	/* Posted-write flush */
	(void) readl(gp->regs + MAC_RXCFG);

	/* Need to wait a bit ...
done by the caller */
}

// XXX dbl check what that function should do when called on PCS PHY
/* Sanitize and apply link settings (optionally from an ethtool request),
 * then (re)start autonegotiation or force a mode on an MII PHY, and
 * re-arm the link poll timer. With ep == NULL current settings are kept.
 */
static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
{
	u32 advertise, features;
	int autoneg;
	int speed;
	int duplex;

	/* PCS / SerDes PHYs have no MII registers to program here; skip
	 * straight to re-arming the timer.
	 */
	if (gp->phy_type != phy_mii_mdio0 &&
	    gp->phy_type != phy_mii_mdio1)
		goto non_mii;

	/* Setup advertise */
	if (found_mii_phy(gp))
		features = gp->phy_mii.def->features;
	else
		features = 0;

	advertise = features & ADVERTISE_MASK;
	if (gp->phy_mii.advertising != 0)
		advertise &= gp->phy_mii.advertising;

	autoneg = gp->want_autoneg;
	speed = gp->phy_mii.speed;
	duplex = gp->phy_mii.duplex;

	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	if (ep->autoneg == AUTONEG_ENABLE) {
		advertise = ep->advertising;
		autoneg = 1;
	} else {
		autoneg = 0;
		speed = ethtool_cmd_speed(ep);
		duplex = ep->duplex;
	}

start_aneg:
	/* Sanitize settings based on PHY capabilities: step the speed
	 * down (1000 -> 100 -> 10) until the PHY supports it, and fall
	 * back to half duplex if full is not supported at any speed.
	 */
	if ((features & SUPPORTED_Autoneg) == 0)
		autoneg = 0;
	if (speed == SPEED_1000 &&
	    !(features & (SUPPORTED_1000baseT_Half |
			  SUPPORTED_1000baseT_Full)))
		speed = SPEED_100;
	if (speed == SPEED_100 &&
	    !(features & (SUPPORTED_100baseT_Half |
			  SUPPORTED_100baseT_Full)))
		speed = SPEED_10;
	if (duplex == DUPLEX_FULL &&
	    !(features & (SUPPORTED_1000baseT_Full |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Full)))
		duplex = DUPLEX_HALF;
	if (speed == 0)
		speed = SPEED_10;

	/* If we are asleep, we don't try to actually setup the PHY, we
	 * just store the settings
	 */
	if (!netif_device_present(gp->dev)) {
		gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
		gp->phy_mii.speed = speed;
		gp->phy_mii.duplex = duplex;
		return;
	}

	/* Configure PHY & start aneg */
	gp->want_autoneg = autoneg;
	if (autoneg) {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
		gp->lstate = link_aneg;
	} else {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
		gp->lstate = link_force_ok;
	}

non_mii:
	/* Re-arm the link poll timer (fires in 1.2 s). */
	gp->timer_ticks = 0;
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}

/* A link-up
condition has occurred, initialize and enable the * rest of the chip. */ static int gem_set_link_modes(struct gem *gp) { struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0); int full_duplex, speed, pause; u32 val; full_duplex = 0; speed = SPEED_10; pause = 0; if (found_mii_phy(gp)) { if (gp->phy_mii.def->ops->read_link(&gp->phy_mii)) return 1; full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL); speed = gp->phy_mii.speed; pause = gp->phy_mii.pause; } else if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes) { u32 pcs_lpa = readl(gp->regs + PCS_MIILP); if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes) full_duplex = 1; speed = SPEED_1000; } netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n", speed, (full_duplex ? "full" : "half")); /* We take the tx queue lock to avoid collisions between * this code, the tx path and the NAPI-driven error path */ __netif_tx_lock(txq, smp_processor_id()); val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU); if (full_duplex) { val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL); } else { /* MAC_TXCFG_NBO must be zero. */ } writel(val, gp->regs + MAC_TXCFG); val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED); if (!full_duplex && (gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1)) { val |= MAC_XIFCFG_DISE; } else if (full_duplex) { val |= MAC_XIFCFG_FLED; } if (speed == SPEED_1000) val |= (MAC_XIFCFG_GMII); writel(val, gp->regs + MAC_XIFCFG); /* If gigabit and half-duplex, enable carrier extension * mode. Else, disable it. 
*/ if (speed == SPEED_1000 && !full_duplex) { val = readl(gp->regs + MAC_TXCFG); writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG); val = readl(gp->regs + MAC_RXCFG); writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG); } else { val = readl(gp->regs + MAC_TXCFG); writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG); val = readl(gp->regs + MAC_RXCFG); writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG); } if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes) { u32 pcs_lpa = readl(gp->regs + PCS_MIILP); if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP)) pause = 1; } if (!full_duplex) writel(512, gp->regs + MAC_STIME); else writel(64, gp->regs + MAC_STIME); val = readl(gp->regs + MAC_MCCFG); if (pause) val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE); else val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE); writel(val, gp->regs + MAC_MCCFG); gem_start_dma(gp); __netif_tx_unlock(txq); if (netif_msg_link(gp)) { if (pause) { netdev_info(gp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n", gp->rx_fifo_sz, gp->rx_pause_off, gp->rx_pause_on); } else { netdev_info(gp->dev, "Pause is disabled\n"); } } return 0; } static int gem_mdio_link_not_up(struct gem *gp) { switch (gp->lstate) { case link_force_ret: netif_info(gp, link, gp->dev, "Autoneg failed again, keeping forced mode\n"); gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, gp->last_forced_speed, DUPLEX_HALF); gp->timer_ticks = 5; gp->lstate = link_force_ok; return 0; case link_aneg: /* We try forced modes after a failed aneg only on PHYs that don't * have "magic_aneg" bit set, which means they internally do the * while forced-mode thingy. On these, we just restart aneg */ if (gp->phy_mii.def->magic_aneg) return 1; netif_info(gp, link, gp->dev, "switching to forced 100bt\n"); /* Try forced modes. */ gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100, DUPLEX_HALF); gp->timer_ticks = 5; gp->lstate = link_force_try; return 0; case link_force_try: /* Downgrade from 100 to 10 Mbps if necessary. 
	 * If already at 10Mbps, warn user about the
	 * situation every 10 ticks.
	 */
	if (gp->phy_mii.speed == SPEED_100) {
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
						   SPEED_10,
						   DUPLEX_HALF);
		gp->timer_ticks = 5;
		netif_info(gp, link, gp->dev,
			   "switching to forced 10bt\n");
		return 0;
	} else
		return 1;

	default:
		return 0;
	}
}

/* Link poll timer callback (armed via gp->link_timer): checks link
 * state and drives the aneg/forced-mode state machine in gp->lstate.
 */
static void gem_link_timer(unsigned long data)
{
	struct gem *gp = (struct gem *) data;
	struct net_device *dev = gp->dev;
	int restart_aneg = 0;

	/* There's no point doing anything if we're going to be reset */
	if (gp->reset_task_pending)
		return;

	/* Internal PCS / SerDes: read the link bit straight from the
	 * PCS MII status register; re-read once if it reports down.
	 */
	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 val = readl(gp->regs + PCS_MIISTAT);

		if (!(val & PCS_MIISTAT_LS))
			val = readl(gp->regs + PCS_MIISTAT);

		if ((val & PCS_MIISTAT_LS) != 0) {
			/* Already up: nothing to program, just re-arm. */
			if (gp->lstate == link_up)
				goto restart;

			gp->lstate = link_up;
			netif_carrier_on(dev);
			(void)gem_set_link_modes(gp);
		}
		goto restart;
	}
	if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
		/* Ok, here we got a link. If we had it due to a forced
		 * fallback, and we were configured for autoneg, we do
		 * retry a short autoneg pass.
If you know your hub is * broken, use ethtool ;) */ if (gp->lstate == link_force_try && gp->want_autoneg) { gp->lstate = link_force_ret; gp->last_forced_speed = gp->phy_mii.speed; gp->timer_ticks = 5; if (netif_msg_link(gp)) netdev_info(dev, "Got link after fallback, retrying autoneg once...\n"); gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising); } else if (gp->lstate != link_up) { gp->lstate = link_up; netif_carrier_on(dev); if (gem_set_link_modes(gp)) restart_aneg = 1; } } else { /* If the link was previously up, we restart the * whole process */ if (gp->lstate == link_up) { gp->lstate = link_down; netif_info(gp, link, dev, "Link down\n"); netif_carrier_off(dev); gem_schedule_reset(gp); /* The reset task will restart the timer */ return; } else if (++gp->timer_ticks > 10) { if (found_mii_phy(gp)) restart_aneg = gem_mdio_link_not_up(gp); else restart_aneg = 1; } } if (restart_aneg) { gem_begin_auto_negotiation(gp, NULL); return; } restart: mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); } static void gem_clean_rings(struct gem *gp) { struct gem_init_block *gb = gp->init_block; struct sk_buff *skb; int i; dma_addr_t dma_addr; for (i = 0; i < RX_RING_SIZE; i++) { struct gem_rxd *rxd; rxd = &gb->rxd[i]; if (gp->rx_skbs[i] != NULL) { skb = gp->rx_skbs[i]; dma_addr = le64_to_cpu(rxd->buffer); pci_unmap_page(gp->pdev, dma_addr, RX_BUF_ALLOC_SIZE(gp), PCI_DMA_FROMDEVICE); dev_kfree_skb_any(skb); gp->rx_skbs[i] = NULL; } rxd->status_word = 0; wmb(); rxd->buffer = 0; } for (i = 0; i < TX_RING_SIZE; i++) { if (gp->tx_skbs[i] != NULL) { struct gem_txd *txd; int frag; skb = gp->tx_skbs[i]; gp->tx_skbs[i] = NULL; for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { int ent = i & (TX_RING_SIZE - 1); txd = &gb->txd[ent]; dma_addr = le64_to_cpu(txd->buffer); pci_unmap_page(gp->pdev, dma_addr, le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ, PCI_DMA_TODEVICE); if (frag != skb_shinfo(skb)->nr_frags) i++; } dev_kfree_skb_any(skb); } } } static 
void gem_init_rings(struct gem *gp) { struct gem_init_block *gb = gp->init_block; struct net_device *dev = gp->dev; int i; dma_addr_t dma_addr; gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0; gem_clean_rings(gp); gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN, (unsigned)VLAN_ETH_FRAME_LEN); for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb; struct gem_rxd *rxd = &gb->rxd[i]; skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL); if (!skb) { rxd->buffer = 0; rxd->status_word = 0; continue; } gp->rx_skbs[i] = skb; skb_put(skb, (gp->rx_buf_sz + RX_OFFSET)); dma_addr = pci_map_page(gp->pdev, virt_to_page(skb->data), offset_in_page(skb->data), RX_BUF_ALLOC_SIZE(gp), PCI_DMA_FROMDEVICE); rxd->buffer = cpu_to_le64(dma_addr); wmb(); rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); skb_reserve(skb, RX_OFFSET); } for (i = 0; i < TX_RING_SIZE; i++) { struct gem_txd *txd = &gb->txd[i]; txd->control_word = 0; wmb(); txd->buffer = 0; } wmb(); } /* Init PHY interface and start link poll state machine */ static void gem_init_phy(struct gem *gp) { u32 mifcfg; /* Revert MIF CFG setting done on stop_phy */ mifcfg = readl(gp->regs + MIF_CFG); mifcfg &= ~MIF_CFG_BBMODE; writel(mifcfg, gp->regs + MIF_CFG); if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) { int i; /* Those delay sucks, the HW seem to love them though, I'll * serisouly consider breaking some locks here to be able * to schedule instead */ for (i = 0; i < 3; i++) { #ifdef CONFIG_PPC_PMAC pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0); msleep(20); #endif /* Some PHYs used by apple have problem getting back to us, * we do an additional reset here */ phy_write(gp, MII_BMCR, BMCR_RESET); msleep(20); if (phy_read(gp, MII_BMCR) != 0xffff) break; if (i == 2) netdev_warn(gp->dev, "GMAC PHY not responding !\n"); } } if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { u32 val; /* Init datapath mode register. 
*/ if (gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) { val = PCS_DMODE_MGM; } else if (gp->phy_type == phy_serialink) { val = PCS_DMODE_SM | PCS_DMODE_GMOE; } else { val = PCS_DMODE_ESM; } writel(val, gp->regs + PCS_DMODE); } if (gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) { /* Reset and detect MII PHY */ sungem_phy_probe(&gp->phy_mii, gp->mii_phy_addr); /* Init PHY */ if (gp->phy_mii.def && gp->phy_mii.def->ops->init) gp->phy_mii.def->ops->init(&gp->phy_mii); } else { gem_pcs_reset(gp); gem_pcs_reinit_adv(gp); } /* Default aneg parameters */ gp->timer_ticks = 0; gp->lstate = link_down; netif_carrier_off(gp->dev); /* Print things out */ if (gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) netdev_info(gp->dev, "Found %s PHY\n", gp->phy_mii.def ? gp->phy_mii.def->name : "no"); gem_begin_auto_negotiation(gp, NULL); } static void gem_init_dma(struct gem *gp) { u64 desc_dma = (u64) gp->gblock_dvma; u32 val; val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE); writel(val, gp->regs + TXDMA_CFG); writel(desc_dma >> 32, gp->regs + TXDMA_DBHI); writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW); desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); writel(0, gp->regs + TXDMA_KICK); val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); writel(val, gp->regs + RXDMA_CFG); writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); writel(val, gp->regs + RXDMA_PTHRESH); if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) writel(((5 & RXDMA_BLANK_IPKTS) | ((8 << 12) & RXDMA_BLANK_ITIME)), gp->regs + RXDMA_BLANK); else writel(((5 & RXDMA_BLANK_IPKTS) | ((4 << 12) & RXDMA_BLANK_ITIME)), gp->regs + RXDMA_BLANK); } static u32 gem_setup_multicast(struct gem *gp) { u32 
rxcfg = 0; int i; if ((gp->dev->flags & IFF_ALLMULTI) || (netdev_mc_count(gp->dev) > 256)) { for (i=0; i<16; i++) writel(0xffff, gp->regs + MAC_HASH0 + (i << 2)); rxcfg |= MAC_RXCFG_HFE; } else if (gp->dev->flags & IFF_PROMISC) { rxcfg |= MAC_RXCFG_PROM; } else { u16 hash_table[16]; u32 crc; struct netdev_hw_addr *ha; int i; memset(hash_table, 0, sizeof(hash_table)); netdev_for_each_mc_addr(ha, gp->dev) { crc = ether_crc_le(6, ha->addr); crc >>= 24; hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); } for (i=0; i<16; i++) writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2)); rxcfg |= MAC_RXCFG_HFE; } return rxcfg; } static void gem_init_mac(struct gem *gp) { unsigned char *e = &gp->dev->dev_addr[0]; writel(0x1bf0, gp->regs + MAC_SNDPAUSE); writel(0x00, gp->regs + MAC_IPG0); writel(0x08, gp->regs + MAC_IPG1); writel(0x04, gp->regs + MAC_IPG2); writel(0x40, gp->regs + MAC_STIME); writel(0x40, gp->regs + MAC_MINFSZ); /* Ethernet payload + header + FCS + optional VLAN tag. */ writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ); writel(0x07, gp->regs + MAC_PASIZE); writel(0x04, gp->regs + MAC_JAMSIZE); writel(0x10, gp->regs + MAC_ATTLIM); writel(0x8808, gp->regs + MAC_MCTYPE); writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED); writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); writel(0, gp->regs + MAC_ADDR3); writel(0, gp->regs + MAC_ADDR4); writel(0, gp->regs + MAC_ADDR5); writel(0x0001, gp->regs + MAC_ADDR6); writel(0xc200, gp->regs + MAC_ADDR7); writel(0x0180, gp->regs + MAC_ADDR8); writel(0, gp->regs + MAC_AFILT0); writel(0, gp->regs + MAC_AFILT1); writel(0, gp->regs + MAC_AFILT2); writel(0, gp->regs + MAC_AF21MSK); writel(0, gp->regs + MAC_AF0MSK); gp->mac_rx_cfg = gem_setup_multicast(gp); #ifdef STRIP_FCS gp->mac_rx_cfg |= MAC_RXCFG_SFCS; #endif writel(0, gp->regs + MAC_NCOLL); writel(0, gp->regs + MAC_FASUCC); writel(0, gp->regs + MAC_ECOLL); 
writel(0, gp->regs + MAC_LCOLL); writel(0, gp->regs + MAC_DTIMER); writel(0, gp->regs + MAC_PATMPS); writel(0, gp->regs + MAC_RFCTR); writel(0, gp->regs + MAC_LERR); writel(0, gp->regs + MAC_AERR); writel(0, gp->regs + MAC_FCSERR); writel(0, gp->regs + MAC_RXCVERR); /* Clear RX/TX/MAC/XIF config, we will set these up and enable * them once a link is established. */ writel(0, gp->regs + MAC_TXCFG); writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG); writel(0, gp->regs + MAC_MCCFG); writel(0, gp->regs + MAC_XIFCFG); /* Setup MAC interrupts. We want to get all of the interesting * counter expiration events, but we do not want to hear about * normal rx/tx as the DMA engine tells us that. */ writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK); writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); /* Don't enable even the PAUSE interrupts for now, we * make no use of those events other than to record them. */ writel(0xffffffff, gp->regs + MAC_MCMASK); /* Don't enable GEM's WOL in normal operations */ if (gp->has_wol) writel(0, gp->regs + WOL_WAKECSR); } static void gem_init_pause_thresholds(struct gem *gp) { u32 cfg; /* Calculate pause thresholds. Setting the OFF threshold to the * full RX fifo size effectively disables PAUSE generation which * is what we do for 10/100 only GEMs which have FIFOs too small * to make real gains from PAUSE. 
*/ if (gp->rx_fifo_sz <= (2 * 1024)) { gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz; } else { int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63; int off = (gp->rx_fifo_sz - (max_frame * 2)); int on = off - max_frame; gp->rx_pause_off = off; gp->rx_pause_on = on; } /* Configure the chip "burst" DMA mode & enable some * HW bug fixes on Apple version */ cfg = 0; if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX; #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) cfg |= GREG_CFG_IBURST; #endif cfg |= ((31 << 1) & GREG_CFG_TXDMALIM); cfg |= ((31 << 6) & GREG_CFG_RXDMALIM); writel(cfg, gp->regs + GREG_CFG); /* If Infinite Burst didn't stick, then use different * thresholds (and Apple bug fixes don't exist) */ if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) { cfg = ((2 << 1) & GREG_CFG_TXDMALIM); cfg |= ((8 << 6) & GREG_CFG_RXDMALIM); writel(cfg, gp->regs + GREG_CFG); } } static int gem_check_invariants(struct gem *gp) { struct pci_dev *pdev = gp->pdev; u32 mif_cfg; /* On Apple's sungem, we can't rely on registers as the chip * was been powered down by the firmware. The PHY is looked * up later on. */ if (pdev->vendor == PCI_VENDOR_ID_APPLE) { gp->phy_type = phy_mii_mdio0; gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; gp->swrst_base = 0; mif_cfg = readl(gp->regs + MIF_CFG); mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1); mif_cfg |= MIF_CFG_MDI0; writel(mif_cfg, gp->regs + MIF_CFG); writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE); writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG); /* We hard-code the PHY address so we can properly bring it out of * reset later on, we can't really probe it at this point, though * that isn't an issue. 
*/ if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC) gp->mii_phy_addr = 1; else gp->mii_phy_addr = 0; return 0; } mif_cfg = readl(gp->regs + MIF_CFG); if (pdev->vendor == PCI_VENDOR_ID_SUN && pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) { /* One of the MII PHYs _must_ be present * as this chip has no gigabit PHY. */ if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) { pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n", mif_cfg); return -1; } } /* Determine initial PHY interface type guess. MDIO1 is the * external PHY and thus takes precedence over MDIO0. */ if (mif_cfg & MIF_CFG_MDI1) { gp->phy_type = phy_mii_mdio1; mif_cfg |= MIF_CFG_PSELECT; writel(mif_cfg, gp->regs + MIF_CFG); } else if (mif_cfg & MIF_CFG_MDI0) { gp->phy_type = phy_mii_mdio0; mif_cfg &= ~MIF_CFG_PSELECT; writel(mif_cfg, gp->regs + MIF_CFG); } else { #ifdef CONFIG_SPARC const char *p; p = of_get_property(gp->of_node, "shared-pins", NULL); if (p && !strcmp(p, "serdes")) gp->phy_type = phy_serdes; else #endif gp->phy_type = phy_serialink; } if (gp->phy_type == phy_mii_mdio1 || gp->phy_type == phy_mii_mdio0) { int i; for (i = 0; i < 32; i++) { gp->mii_phy_addr = i; if (phy_read(gp, MII_BMCR) != 0xffff) break; } if (i == 32) { if (pdev->device != PCI_DEVICE_ID_SUN_GEM) { pr_err("RIO MII phy will not respond\n"); return -1; } gp->phy_type = phy_serdes; } } /* Fetch the FIFO configurations now too. 
*/ gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; if (pdev->vendor == PCI_VENDOR_ID_SUN) { if (pdev->device == PCI_DEVICE_ID_SUN_GEM) { if (gp->tx_fifo_sz != (9 * 1024) || gp->rx_fifo_sz != (20 * 1024)) { pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n", gp->tx_fifo_sz, gp->rx_fifo_sz); return -1; } gp->swrst_base = 0; } else { if (gp->tx_fifo_sz != (2 * 1024) || gp->rx_fifo_sz != (2 * 1024)) { pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n", gp->tx_fifo_sz, gp->rx_fifo_sz); return -1; } gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT; } } return 0; } static void gem_reinit_chip(struct gem *gp) { /* Reset the chip */ gem_reset(gp); /* Make sure ints are disabled */ gem_disable_ints(gp); /* Allocate & setup ring buffers */ gem_init_rings(gp); /* Configure pause thresholds */ gem_init_pause_thresholds(gp); /* Init DMA & MAC engines */ gem_init_dma(gp); gem_init_mac(gp); } static void gem_stop_phy(struct gem *gp, int wol) { u32 mifcfg; /* Let the chip settle down a bit, it seems that helps * for sleep mode on some models */ msleep(10); /* Make sure we aren't polling PHY status change. 
We * don't currently use that feature though */ mifcfg = readl(gp->regs + MIF_CFG); mifcfg &= ~MIF_CFG_POLL; writel(mifcfg, gp->regs + MIF_CFG); if (wol && gp->has_wol) { unsigned char *e = &gp->dev->dev_addr[0]; u32 csr; /* Setup wake-on-lan for MAGIC packet */ writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0); writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1); writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2); writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT); csr = WOL_WAKECSR_ENABLE; if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0) csr |= WOL_WAKECSR_MII; writel(csr, gp->regs + WOL_WAKECSR); } else { writel(0, gp->regs + MAC_RXCFG); (void)readl(gp->regs + MAC_RXCFG); /* Machine sleep will die in strange ways if we * dont wait a bit here, looks like the chip takes * some time to really shut down */ msleep(10); } writel(0, gp->regs + MAC_TXCFG); writel(0, gp->regs + MAC_XIFCFG); writel(0, gp->regs + TXDMA_CFG); writel(0, gp->regs + RXDMA_CFG); if (!wol) { gem_reset(gp); writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST); writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend) gp->phy_mii.def->ops->suspend(&gp->phy_mii); /* According to Apple, we must set the MDIO pins to this begnign * state or we may 1) eat more current, 2) damage some PHYs */ writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG); writel(0, gp->regs + MIF_BBCLK); writel(0, gp->regs + MIF_BBDATA); writel(0, gp->regs + MIF_BBOENAB); writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG); (void) readl(gp->regs + MAC_XIFCFG); } } static int gem_do_start(struct net_device *dev) { struct gem *gp = netdev_priv(dev); int rc; /* Enable the cell */ gem_get_cell(gp); /* Make sure PCI access and bus master are enabled */ rc = pci_enable_device(gp->pdev); if (rc) { netdev_err(dev, "Failed to enable chip on PCI bus !\n"); /* Put cell and forget it for now, it will be 
considered as * still asleep, a new sleep cycle may bring it back */ gem_put_cell(gp); return -ENXIO; } pci_set_master(gp->pdev); /* Init & setup chip hardware */ gem_reinit_chip(gp); /* An interrupt might come in handy */ rc = request_irq(gp->pdev->irq, gem_interrupt, IRQF_SHARED, dev->name, (void *)dev); if (rc) { netdev_err(dev, "failed to request irq !\n"); gem_reset(gp); gem_clean_rings(gp); gem_put_cell(gp); return rc; } /* Mark us as attached again if we come from resume(), this has * no effect if we weren't detatched and needs to be done now. */ netif_device_attach(dev); /* Restart NAPI & queues */ gem_netif_start(gp); /* Detect & init PHY, start autoneg etc... this will * eventually result in starting DMA operations when * the link is up */ gem_init_phy(gp); return 0; } static void gem_do_stop(struct net_device *dev, int wol) { struct gem *gp = netdev_priv(dev); /* Stop NAPI and stop tx queue */ gem_netif_stop(gp); /* Make sure ints are disabled. We don't care about * synchronizing as NAPI is disabled, thus a stray * interrupt will do nothing bad (our irq handler * just schedules NAPI) */ gem_disable_ints(gp); /* Stop the link timer */ del_timer_sync(&gp->link_timer); /* We cannot cancel the reset task while holding the * rtnl lock, we'd get an A->B / B->A deadlock stituation * if we did. This is not an issue however as the reset * task is synchronized vs. us (rtnl_lock) and will do * nothing if the device is down or suspended. We do * still clear reset_task_pending to avoid a spurrious * reset later on in case we do resume before it gets * scheduled. 
*/ gp->reset_task_pending = 0; /* If we are going to sleep with WOL */ gem_stop_dma(gp); msleep(10); if (!wol) gem_reset(gp); msleep(10); /* Get rid of rings */ gem_clean_rings(gp); /* No irq needed anymore */ free_irq(gp->pdev->irq, (void *) dev); /* Shut the PHY down eventually and setup WOL */ gem_stop_phy(gp, wol); /* Make sure bus master is disabled */ pci_disable_device(gp->pdev); /* Cell not needed neither if no WOL */ if (!wol) gem_put_cell(gp); } static void gem_reset_task(struct work_struct *work) { struct gem *gp = container_of(work, struct gem, reset_task); /* Lock out the network stack (essentially shield ourselves * against a racing open, close, control call, or suspend */ rtnl_lock(); /* Skip the reset task if suspended or closed, or if it's * been cancelled by gem_do_stop (see comment there) */ if (!netif_device_present(gp->dev) || !netif_running(gp->dev) || !gp->reset_task_pending) { rtnl_unlock(); return; } /* Stop the link timer */ del_timer_sync(&gp->link_timer); /* Stop NAPI and tx */ gem_netif_stop(gp); /* Reset the chip & rings */ gem_reinit_chip(gp); if (gp->lstate == link_up) gem_set_link_modes(gp); /* Restart NAPI and Tx */ gem_netif_start(gp); /* We are back ! 
 */
	gp->reset_task_pending = 0;

	/* If the link is not up, restart autoneg, else restart the
	 * polling timer
	 */
	if (gp->lstate != link_up)
		gem_begin_auto_negotiation(gp, NULL);
	else
		mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));

	rtnl_unlock();
}

static int gem_open(struct net_device *dev)
{
	/* We allow open while suspended, we just do nothing, the
	 * chip will be initialized in resume()
	 */
	if (netif_device_present(dev))
		return gem_do_start(dev);
	return 0;
}

static int gem_close(struct net_device *dev)
{
	/* Nothing to tear down when detached (suspended); see resume. */
	if (netif_device_present(dev))
		gem_do_stop(dev, 0);
	return 0;
}

#ifdef CONFIG_PM
/* Legacy PCI suspend hook: detach from the stack, stop the chip and
 * optionally arm wake-on-LAN. `state` is unused.
 */
static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = netdev_priv(dev);

	/* Lock the network stack first to avoid racing with open/close,
	 * reset task and setting calls
	 */
	rtnl_lock();

	/* Not running, mark ourselves non-present, no need for
	 * a lock here
	 */
	if (!netif_running(dev)) {
		netif_device_detach(dev);
		rtnl_unlock();
		return 0;
	}
	netdev_info(dev, "suspending, WakeOnLan %s\n",
		    (gp->wake_on_lan && netif_running(dev)) ?
		    "enabled" : "disabled");

	/* Tell the network stack we're gone. gem_do_stop() below will
	 * synchronize with TX, stop NAPI etc...
	 */
	netif_device_detach(dev);

	/* Switch off chip, remember WOL setting */
	gp->asleep_wol = !!gp->wake_on_lan;
	gem_do_stop(dev, gp->asleep_wol);

	/* Unlock the network stack */
	rtnl_unlock();

	return 0;
}

/* Legacy PCI resume hook: re-attach and restart the chip if the
 * interface was up when we suspended.
 */
static int gem_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = netdev_priv(dev);

	/* See locking comment in gem_suspend */
	rtnl_lock();

	/* Not running, mark ourselves present, no need for
	 * a lock here
	 */
	if (!netif_running(dev)) {
		netif_device_attach(dev);
		rtnl_unlock();
		return 0;
	}

	/* Restart chip. If that fails there isn't much we can do, we
	 * leave things stopped.
	 */
	gem_do_start(dev);

	/* If we had WOL enabled, the cell clock was never turned off during
	 * sleep, so we end up beeing unbalanced.
Fix that here */ if (gp->asleep_wol) gem_put_cell(gp); /* Unlock the network stack */ rtnl_unlock(); return 0; } #endif /* CONFIG_PM */ static struct net_device_stats *gem_get_stats(struct net_device *dev) { struct gem *gp = netdev_priv(dev); /* I have seen this being called while the PM was in progress, * so we shield against this. Let's also not poke at registers * while the reset task is going on. * * TODO: Move stats collection elsewhere (link timer ?) and * make this a nop to avoid all those synchro issues */ if (!netif_device_present(dev) || !netif_running(dev)) goto bail; /* Better safe than sorry... */ if (WARN_ON(!gp->cell_enabled)) goto bail; dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR); writel(0, gp->regs + MAC_FCSERR); dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR); writel(0, gp->regs + MAC_AERR); dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR); writel(0, gp->regs + MAC_LERR); dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL); dev->stats.collisions += (readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL)); writel(0, gp->regs + MAC_ECOLL); writel(0, gp->regs + MAC_LCOLL); bail: return &dev->stats; } static int gem_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *macaddr = (struct sockaddr *) addr; struct gem *gp = netdev_priv(dev); unsigned char *e = &dev->dev_addr[0]; if (!is_valid_ether_addr(macaddr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len); /* We'll just catch it later when the device is up'd or resumed */ if (!netif_running(dev) || !netif_device_present(dev)) return 0; /* Better safe than sorry... 
*/ if (WARN_ON(!gp->cell_enabled)) return 0; writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); return 0; } static void gem_set_multicast(struct net_device *dev) { struct gem *gp = netdev_priv(dev); u32 rxcfg, rxcfg_new; int limit = 10000; if (!netif_running(dev) || !netif_device_present(dev)) return; /* Better safe than sorry... */ if (gp->reset_task_pending || WARN_ON(!gp->cell_enabled)) return; rxcfg = readl(gp->regs + MAC_RXCFG); rxcfg_new = gem_setup_multicast(gp); #ifdef STRIP_FCS rxcfg_new |= MAC_RXCFG_SFCS; #endif gp->mac_rx_cfg = rxcfg_new; writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) { if (!limit--) break; udelay(10); } rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE); rxcfg |= rxcfg_new; writel(rxcfg, gp->regs + MAC_RXCFG); } /* Jumbo-grams don't seem to work :-( */ #define GEM_MIN_MTU 68 #if 1 #define GEM_MAX_MTU 1500 #else #define GEM_MAX_MTU 9000 #endif static int gem_change_mtu(struct net_device *dev, int new_mtu) { struct gem *gp = netdev_priv(dev); if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU) return -EINVAL; dev->mtu = new_mtu; /* We'll just catch it later when the device is up'd or resumed */ if (!netif_running(dev) || !netif_device_present(dev)) return 0; /* Better safe than sorry... 
*/ if (WARN_ON(!gp->cell_enabled)) return 0; gem_netif_stop(gp); gem_reinit_chip(gp); if (gp->lstate == link_up) gem_set_link_modes(gp); gem_netif_start(gp); return 0; } static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct gem *gp = netdev_priv(dev); strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info)); } static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct gem *gp = netdev_priv(dev); if (gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) { if (gp->phy_mii.def) cmd->supported = gp->phy_mii.def->features; else cmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full); /* XXX hardcoded stuff for now */ cmd->port = PORT_MII; cmd->transceiver = XCVR_EXTERNAL; cmd->phy_address = 0; /* XXX fixed PHYAD */ /* Return current PHY settings */ cmd->autoneg = gp->want_autoneg; ethtool_cmd_speed_set(cmd, gp->phy_mii.speed); cmd->duplex = gp->phy_mii.duplex; cmd->advertising = gp->phy_mii.advertising; /* If we started with a forced mode, we don't have a default * advertise set, we need to return something sensible so * userland can re-enable autoneg properly. */ if (cmd->advertising == 0) cmd->advertising = cmd->supported; } else { // XXX PCS ? 
cmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg); cmd->advertising = cmd->supported; ethtool_cmd_speed_set(cmd, 0); cmd->duplex = cmd->port = cmd->phy_address = cmd->transceiver = cmd->autoneg = 0; /* serdes means usually a Fibre connector, with most fixed */ if (gp->phy_type == phy_serdes) { cmd->port = PORT_FIBRE; cmd->supported = (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE | SUPPORTED_Autoneg | SUPPORTED_Pause | SUPPORTED_Asym_Pause); cmd->advertising = cmd->supported; cmd->transceiver = XCVR_INTERNAL; if (gp->lstate == link_up) ethtool_cmd_speed_set(cmd, SPEED_1000); cmd->duplex = DUPLEX_FULL; cmd->autoneg = 1; } } cmd->maxtxpkt = cmd->maxrxpkt = 0; return 0; } static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct gem *gp = netdev_priv(dev); u32 speed = ethtool_cmd_speed(cmd); /* Verify the settings we care about. */ if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE) return -EINVAL; if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0) return -EINVAL; if (cmd->autoneg == AUTONEG_DISABLE && ((speed != SPEED_1000 && speed != SPEED_100 && speed != SPEED_10) || (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL))) return -EINVAL; /* Apply settings and restart link process. 
*/ if (netif_device_present(gp->dev)) { del_timer_sync(&gp->link_timer); gem_begin_auto_negotiation(gp, cmd); } return 0; } static int gem_nway_reset(struct net_device *dev) { struct gem *gp = netdev_priv(dev); if (!gp->want_autoneg) return -EINVAL; /* Restart link process */ if (netif_device_present(gp->dev)) { del_timer_sync(&gp->link_timer); gem_begin_auto_negotiation(gp, NULL); } return 0; } static u32 gem_get_msglevel(struct net_device *dev) { struct gem *gp = netdev_priv(dev); return gp->msg_enable; } static void gem_set_msglevel(struct net_device *dev, u32 value) { struct gem *gp = netdev_priv(dev); gp->msg_enable = value; } /* Add more when I understand how to program the chip */ /* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */ #define WOL_SUPPORTED_MASK (WAKE_MAGIC) static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct gem *gp = netdev_priv(dev); /* Add more when I understand how to program the chip */ if (gp->has_wol) { wol->supported = WOL_SUPPORTED_MASK; wol->wolopts = gp->wake_on_lan; } else { wol->supported = 0; wol->wolopts = 0; } } static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct gem *gp = netdev_priv(dev); if (!gp->has_wol) return -EOPNOTSUPP; gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK; return 0; } static const struct ethtool_ops gem_ethtool_ops = { .get_drvinfo = gem_get_drvinfo, .get_link = ethtool_op_get_link, .get_settings = gem_get_settings, .set_settings = gem_set_settings, .nway_reset = gem_nway_reset, .get_msglevel = gem_get_msglevel, .set_msglevel = gem_set_msglevel, .get_wol = gem_get_wol, .set_wol = gem_set_wol, }; static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct gem *gp = netdev_priv(dev); struct mii_ioctl_data *data = if_mii(ifr); int rc = -EOPNOTSUPP; /* For SIOCGMIIREG and SIOCSMIIREG the core checks for us that * netif_device_present() is true and holds rtnl_lock for us * so we have nothing to worry about */ switch (cmd) { 
case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = gp->mii_phy_addr; /* Fallthrough... */ case SIOCGMIIREG: /* Read MII PHY register. */ data->val_out = __phy_read(gp, data->phy_id & 0x1f, data->reg_num & 0x1f); rc = 0; break; case SIOCSMIIREG: /* Write MII PHY register. */ __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in); rc = 0; break; } return rc; } #if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC)) /* Fetch MAC address from vital product data of PCI ROM. */ static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr) { int this_offset; for (this_offset = 0x20; this_offset < len; this_offset++) { void __iomem *p = rom_base + this_offset; int i; if (readb(p + 0) != 0x90 || readb(p + 1) != 0x00 || readb(p + 2) != 0x09 || readb(p + 3) != 0x4e || readb(p + 4) != 0x41 || readb(p + 5) != 0x06) continue; this_offset += 6; p += 6; for (i = 0; i < 6; i++) dev_addr[i] = readb(p + i); return 1; } return 0; } static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr) { size_t size; void __iomem *p = pci_map_rom(pdev, &size); if (p) { int found; found = readb(p) == 0x55 && readb(p + 1) == 0xaa && find_eth_addr_in_vpd(p, (64 * 1024), dev_addr); pci_unmap_rom(pdev, p); if (found) return; } /* Sun MAC prefix then 3 random bytes. 
*/ dev_addr[0] = 0x08; dev_addr[1] = 0x00; dev_addr[2] = 0x20; get_random_bytes(dev_addr + 3, 3); } #endif /* not Sparc and not PPC */ static int __devinit gem_get_device_address(struct gem *gp) { #if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC) struct net_device *dev = gp->dev; const unsigned char *addr; addr = of_get_property(gp->of_node, "local-mac-address", NULL); if (addr == NULL) { #ifdef CONFIG_SPARC addr = idprom->id_ethaddr; #else printk("\n"); pr_err("%s: can't get mac-address\n", dev->name); return -1; #endif } memcpy(dev->dev_addr, addr, 6); #else get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr); #endif return 0; } static void gem_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); if (dev) { struct gem *gp = netdev_priv(dev); unregister_netdev(dev); /* Ensure reset task is truely gone */ cancel_work_sync(&gp->reset_task); /* Free resources */ pci_free_consistent(pdev, sizeof(struct gem_init_block), gp->init_block, gp->gblock_dvma); iounmap(gp->regs); pci_release_regions(pdev); free_netdev(dev); pci_set_drvdata(pdev, NULL); } } static const struct net_device_ops gem_netdev_ops = { .ndo_open = gem_open, .ndo_stop = gem_close, .ndo_start_xmit = gem_start_xmit, .ndo_get_stats = gem_get_stats, .ndo_set_rx_mode = gem_set_multicast, .ndo_do_ioctl = gem_ioctl, .ndo_tx_timeout = gem_tx_timeout, .ndo_change_mtu = gem_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = gem_set_mac_address, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = gem_poll_controller, #endif }; static int __devinit gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned long gemreg_base, gemreg_len; struct net_device *dev; struct gem *gp; int err, pci_using_dac; printk_once(KERN_INFO "%s", version); /* Apple gmac note: during probe, the chip is powered up by * the arch code to allow the code below to work (and to let * the chip be probed on the config space. 
It won't stay powered * up until the interface is brought up however, so we can't rely * on register configuration done at this point. */ err = pci_enable_device(pdev); if (err) { pr_err("Cannot enable MMIO operation, aborting\n"); return err; } pci_set_master(pdev); /* Configure DMA attributes. */ /* All of the GEM documentation states that 64-bit DMA addressing * is fully supported and should work just fine. However the * front end for RIO based GEMs is different and only supports * 32-bit addressing. * * For now we assume the various PPC GEMs are 32-bit only as well. */ if (pdev->vendor == PCI_VENDOR_ID_SUN && pdev->device == PCI_DEVICE_ID_SUN_GEM && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { pci_using_dac = 1; } else { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { pr_err("No usable DMA configuration, aborting\n"); goto err_disable_device; } pci_using_dac = 0; } gemreg_base = pci_resource_start(pdev, 0); gemreg_len = pci_resource_len(pdev, 0); if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) { pr_err("Cannot find proper PCI device base address, aborting\n"); err = -ENODEV; goto err_disable_device; } dev = alloc_etherdev(sizeof(*gp)); if (!dev) { err = -ENOMEM; goto err_disable_device; } SET_NETDEV_DEV(dev, &pdev->dev); gp = netdev_priv(dev); err = pci_request_regions(pdev, DRV_NAME); if (err) { pr_err("Cannot obtain PCI resources, aborting\n"); goto err_out_free_netdev; } gp->pdev = pdev; dev->base_addr = (long) pdev; gp->dev = dev; gp->msg_enable = DEFAULT_MSG; init_timer(&gp->link_timer); gp->link_timer.function = gem_link_timer; gp->link_timer.data = (unsigned long) gp; INIT_WORK(&gp->reset_task, gem_reset_task); gp->lstate = link_down; gp->timer_ticks = 0; netif_carrier_off(dev); gp->regs = ioremap(gemreg_base, gemreg_len); if (!gp->regs) { pr_err("Cannot map device registers, aborting\n"); err = -EIO; goto err_out_free_res; } /* On Apple, we want a reference to the Open Firmware device-tree * node. We use it for clock control. 
*/ #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC) gp->of_node = pci_device_to_OF_node(pdev); #endif /* Only Apple version supports WOL afaik */ if (pdev->vendor == PCI_VENDOR_ID_APPLE) gp->has_wol = 1; /* Make sure cell is enabled */ gem_get_cell(gp); /* Make sure everything is stopped and in init state */ gem_reset(gp); /* Fill up the mii_phy structure (even if we won't use it) */ gp->phy_mii.dev = dev; gp->phy_mii.mdio_read = _phy_read; gp->phy_mii.mdio_write = _phy_write; #ifdef CONFIG_PPC_PMAC gp->phy_mii.platform_data = gp->of_node; #endif /* By default, we start with autoneg */ gp->want_autoneg = 1; /* Check fifo sizes, PHY type, etc... */ if (gem_check_invariants(gp)) { err = -ENODEV; goto err_out_iounmap; } /* It is guaranteed that the returned buffer will be at least * PAGE_SIZE aligned. */ gp->init_block = (struct gem_init_block *) pci_alloc_consistent(pdev, sizeof(struct gem_init_block), &gp->gblock_dvma); if (!gp->init_block) { pr_err("Cannot allocate init block, aborting\n"); err = -ENOMEM; goto err_out_iounmap; } if (gem_get_device_address(gp)) goto err_out_free_consistent; dev->netdev_ops = &gem_netdev_ops; netif_napi_add(dev, &gp->napi, gem_poll, 64); dev->ethtool_ops = &gem_ethtool_ops; dev->watchdog_timeo = 5 * HZ; dev->irq = pdev->irq; dev->dma = 0; /* Set that now, in case PM kicks in now */ pci_set_drvdata(pdev, dev); /* We can do scatter/gather and HW checksum */ dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; dev->features |= dev->hw_features | NETIF_F_RXCSUM; if (pci_using_dac) dev->features |= NETIF_F_HIGHDMA; /* Register with kernel */ if (register_netdev(dev)) { pr_err("Cannot register net device, aborting\n"); err = -ENOMEM; goto err_out_free_consistent; } /* Undo the get_cell with appropriate locking (we could use * ndo_init/uninit but that would be even more clumsy imho) */ rtnl_lock(); gem_put_cell(gp); rtnl_unlock(); netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n", dev->dev_addr); return 0; 
err_out_free_consistent: gem_remove_one(pdev); err_out_iounmap: gem_put_cell(gp); iounmap(gp->regs); err_out_free_res: pci_release_regions(pdev); err_out_free_netdev: free_netdev(dev); err_disable_device: pci_disable_device(pdev); return err; } static struct pci_driver gem_driver = { .name = GEM_MODULE_NAME, .id_table = gem_pci_tbl, .probe = gem_init_one, .remove = gem_remove_one, #ifdef CONFIG_PM .suspend = gem_suspend, .resume = gem_resume, #endif /* CONFIG_PM */ }; static int __init gem_init(void) { return pci_register_driver(&gem_driver); } static void __exit gem_cleanup(void) { pci_unregister_driver(&gem_driver); } module_init(gem_init); module_exit(gem_cleanup);
gpl-2.0
UberSlim/KernelSanders_L90
tools/perf/builtin-report.c
4770
21708
/* * builtin-report.c * * Builtin report command: Analyze the perf.data input file, * look up and read DSOs and symbol information and display * a histogram of results, along various sorting keys. */ #include "builtin.h" #include "util/util.h" #include "util/annotate.h" #include "util/color.h" #include <linux/list.h> #include "util/cache.h" #include <linux/rbtree.h> #include "util/symbol.h" #include "util/callchain.h" #include "util/strlist.h" #include "util/values.h" #include "perf.h" #include "util/debug.h" #include "util/evlist.h" #include "util/evsel.h" #include "util/header.h" #include "util/session.h" #include "util/tool.h" #include "util/parse-options.h" #include "util/parse-events.h" #include "util/thread.h" #include "util/sort.h" #include "util/hist.h" #include <linux/bitmap.h> struct perf_report { struct perf_tool tool; struct perf_session *session; char const *input_name; bool force, use_tui, use_gtk, use_stdio; bool hide_unresolved; bool dont_use_callchains; bool show_full_info; bool show_threads; bool inverted_callchain; struct perf_read_values show_threads_values; const char *pretty_printing_style; symbol_filter_t annotate_init; const char *cpu_list; const char *symbol_filter_str; DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); }; static int perf_report__add_branch_hist_entry(struct perf_tool *tool, struct addr_location *al, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) { struct perf_report *rep = container_of(tool, struct perf_report, tool); struct symbol *parent = NULL; int err = 0; unsigned i; struct hist_entry *he; struct branch_info *bi, *bx; if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) { err = machine__resolve_callchain(machine, evsel, al->thread, sample->callchain, &parent); if (err) return err; } bi = machine__resolve_bstack(machine, al->thread, sample->branch_stack); if (!bi) return -ENOMEM; for (i = 0; i < sample->branch_stack->nr; i++) { if (rep->hide_unresolved && !(bi[i].from.sym && 
bi[i].to.sym)) continue; /* * The report shows the percentage of total branches captured * and not events sampled. Thus we use a pseudo period of 1. */ he = __hists__add_branch_entry(&evsel->hists, al, parent, &bi[i], 1); if (he) { struct annotation *notes; err = -ENOMEM; bx = he->branch_info; if (bx->from.sym && use_browser > 0) { notes = symbol__annotation(bx->from.sym); if (!notes->src && symbol__alloc_hist(bx->from.sym) < 0) goto out; err = symbol__inc_addr_samples(bx->from.sym, bx->from.map, evsel->idx, bx->from.al_addr); if (err) goto out; } if (bx->to.sym && use_browser > 0) { notes = symbol__annotation(bx->to.sym); if (!notes->src && symbol__alloc_hist(bx->to.sym) < 0) goto out; err = symbol__inc_addr_samples(bx->to.sym, bx->to.map, evsel->idx, bx->to.al_addr); if (err) goto out; } evsel->hists.stats.total_period += 1; hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE); err = 0; } else return -ENOMEM; } out: return err; } static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, struct addr_location *al, struct perf_sample *sample, struct machine *machine) { struct symbol *parent = NULL; int err = 0; struct hist_entry *he; if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) { err = machine__resolve_callchain(machine, evsel, al->thread, sample->callchain, &parent); if (err) return err; } he = __hists__add_entry(&evsel->hists, al, parent, sample->period); if (he == NULL) return -ENOMEM; if (symbol_conf.use_callchain) { err = callchain_append(he->callchain, &evsel->hists.callchain_cursor, sample->period); if (err) return err; } /* * Only in the newt browser we are doing integrated annotation, * so we don't allocated the extra space needed because the stdio * code will not use it. 
*/ if (al->sym != NULL && use_browser > 0) { struct annotation *notes = symbol__annotation(he->ms.sym); assert(evsel != NULL); err = -ENOMEM; if (notes->src == NULL && symbol__alloc_hist(he->ms.sym) < 0) goto out; err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); } evsel->hists.stats.total_period += sample->period; hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE); out: return err; } static int process_sample_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) { struct perf_report *rep = container_of(tool, struct perf_report, tool); struct addr_location al; if (perf_event__preprocess_sample(event, machine, &al, sample, rep->annotate_init) < 0) { fprintf(stderr, "problem processing %d event, skipping it.\n", event->header.type); return -1; } if (al.filtered || (rep->hide_unresolved && al.sym == NULL)) return 0; if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap)) return 0; if (sort__branch_mode == 1) { if (perf_report__add_branch_hist_entry(tool, &al, sample, evsel, machine)) { pr_debug("problem adding lbr entry, skipping event\n"); return -1; } } else { if (al.map != NULL) al.map->dso->hit = 1; if (perf_evsel__add_hist_entry(evsel, &al, sample, machine)) { pr_debug("problem incrementing symbol period, skipping event\n"); return -1; } } return 0; } static int process_read_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample __used, struct perf_evsel *evsel, struct machine *machine __used) { struct perf_report *rep = container_of(tool, struct perf_report, tool); if (rep->show_threads) { const char *name = evsel ? event_name(evsel) : "unknown"; perf_read_values_add_value(&rep->show_threads_values, event->read.pid, event->read.tid, event->read.id, name, event->read.value); } dump_printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid, evsel ? 
event_name(evsel) : "FAIL", event->read.value); return 0; } static int perf_report__setup_sample_type(struct perf_report *rep) { struct perf_session *self = rep->session; if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) { if (sort__has_parent) { ui__warning("Selected --sort parent, but no " "callchain data. Did you call " "'perf record' without -g?\n"); return -EINVAL; } if (symbol_conf.use_callchain) { ui__warning("Selected -g but no callchain data. Did " "you call 'perf record' without -g?\n"); return -1; } } else if (!rep->dont_use_callchains && callchain_param.mode != CHAIN_NONE && !symbol_conf.use_callchain) { symbol_conf.use_callchain = true; if (callchain_register_param(&callchain_param) < 0) { ui__warning("Can't register callchain " "params.\n"); return -EINVAL; } } if (sort__branch_mode == 1) { if (!(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) { fprintf(stderr, "selected -b but no branch data." " Did you call perf record without" " -b?\n"); return -1; } } return 0; } extern volatile int session_done; static void sig_handler(int sig __used) { session_done = 1; } static size_t hists__fprintf_nr_sample_events(struct hists *self, const char *evname, FILE *fp) { size_t ret; char unit; unsigned long nr_events = self->stats.nr_events[PERF_RECORD_SAMPLE]; nr_events = convert_unit(nr_events, &unit); ret = fprintf(fp, "# Events: %lu%c", nr_events, unit); if (evname != NULL) ret += fprintf(fp, " %s", evname); return ret + fprintf(fp, "\n#\n"); } static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist, struct perf_report *rep, const char *help) { struct perf_evsel *pos; list_for_each_entry(pos, &evlist->entries, node) { struct hists *hists = &pos->hists; const char *evname = event_name(pos); hists__fprintf_nr_sample_events(hists, evname, stdout); hists__fprintf(hists, NULL, false, true, 0, 0, stdout); fprintf(stdout, "\n\n"); } if (sort_order == default_sort_order && parent_pattern == default_parent_pattern) { fprintf(stdout, "#\n# (%s)\n#\n", help); 
if (rep->show_threads) { bool style = !strcmp(rep->pretty_printing_style, "raw"); perf_read_values_display(stdout, &rep->show_threads_values, style); perf_read_values_destroy(&rep->show_threads_values); } } return 0; } static int __cmd_report(struct perf_report *rep) { int ret = -EINVAL; u64 nr_samples; struct perf_session *session = rep->session; struct perf_evsel *pos; struct map *kernel_map; struct kmap *kernel_kmap; const char *help = "For a higher level overview, try: perf report --sort comm,dso"; signal(SIGINT, sig_handler); if (rep->cpu_list) { ret = perf_session__cpu_bitmap(session, rep->cpu_list, rep->cpu_bitmap); if (ret) goto out_delete; } if (use_browser <= 0) perf_session__fprintf_info(session, stdout, rep->show_full_info); if (rep->show_threads) perf_read_values_init(&rep->show_threads_values); ret = perf_report__setup_sample_type(rep); if (ret) goto out_delete; ret = perf_session__process_events(session, &rep->tool); if (ret) goto out_delete; kernel_map = session->host_machine.vmlinux_maps[MAP__FUNCTION]; kernel_kmap = map__kmap(kernel_map); if (kernel_map == NULL || (kernel_map->dso->hit && (kernel_kmap->ref_reloc_sym == NULL || kernel_kmap->ref_reloc_sym->addr == 0))) { const char *desc = "As no suitable kallsyms nor vmlinux was found, kernel samples\n" "can't be resolved."; if (kernel_map) { const struct dso *kdso = kernel_map->dso; if (!RB_EMPTY_ROOT(&kdso->symbols[MAP__FUNCTION])) { desc = "If some relocation was applied (e.g. 
" "kexec) symbols may be misresolved."; } } ui__warning( "Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n" "Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n" "Samples in kernel modules can't be resolved as well.\n\n", desc); } if (dump_trace) { perf_session__fprintf_nr_events(session, stdout); goto out_delete; } if (verbose > 3) perf_session__fprintf(session, stdout); if (verbose > 2) perf_session__fprintf_dsos(session, stdout); nr_samples = 0; list_for_each_entry(pos, &session->evlist->entries, node) { struct hists *hists = &pos->hists; if (pos->idx == 0) hists->symbol_filter_str = rep->symbol_filter_str; hists__collapse_resort(hists); hists__output_resort(hists); nr_samples += hists->stats.nr_events[PERF_RECORD_SAMPLE]; } if (nr_samples == 0) { ui__warning("The %s file has no samples!\n", session->filename); goto out_delete; } if (use_browser > 0) { if (use_browser == 1) { perf_evlist__tui_browse_hists(session->evlist, help, NULL, NULL, 0); } else if (use_browser == 2) { perf_evlist__gtk_browse_hists(session->evlist, help, NULL, NULL, 0); } } else perf_evlist__tty_browse_hists(session->evlist, rep, help); out_delete: /* * Speed up the exit process, for large files this can * take quite a while. * * XXX Enable this when using valgrind or if we ever * librarize this command. * * Also experiment with obstacks to see how much speed * up we'll get here. 
* * perf_session__delete(session); */ return ret; } static int parse_callchain_opt(const struct option *opt, const char *arg, int unset) { struct perf_report *rep = (struct perf_report *)opt->value; char *tok, *tok2; char *endptr; /* * --no-call-graph */ if (unset) { rep->dont_use_callchains = true; return 0; } symbol_conf.use_callchain = true; if (!arg) return 0; tok = strtok((char *)arg, ","); if (!tok) return -1; /* get the output mode */ if (!strncmp(tok, "graph", strlen(arg))) callchain_param.mode = CHAIN_GRAPH_ABS; else if (!strncmp(tok, "flat", strlen(arg))) callchain_param.mode = CHAIN_FLAT; else if (!strncmp(tok, "fractal", strlen(arg))) callchain_param.mode = CHAIN_GRAPH_REL; else if (!strncmp(tok, "none", strlen(arg))) { callchain_param.mode = CHAIN_NONE; symbol_conf.use_callchain = false; return 0; } else return -1; /* get the min percentage */ tok = strtok(NULL, ","); if (!tok) goto setup; callchain_param.min_percent = strtod(tok, &endptr); if (tok == endptr) return -1; /* get the print limit */ tok2 = strtok(NULL, ","); if (!tok2) goto setup; if (tok2[0] != 'c') { callchain_param.print_limit = strtoul(tok2, &endptr, 0); tok2 = strtok(NULL, ","); if (!tok2) goto setup; } /* get the call chain order */ if (!strcmp(tok2, "caller")) callchain_param.order = ORDER_CALLER; else if (!strcmp(tok2, "callee")) callchain_param.order = ORDER_CALLEE; else return -1; setup: if (callchain_register_param(&callchain_param) < 0) { fprintf(stderr, "Can't register callchain params\n"); return -1; } return 0; } static int parse_branch_mode(const struct option *opt __used, const char *str __used, int unset) { sort__branch_mode = !unset; return 0; } int cmd_report(int argc, const char **argv, const char *prefix __used) { struct perf_session *session; struct stat st; bool has_br_stack = false; int ret = -1; char callchain_default_opt[] = "fractal,0.5,callee"; const char * const report_usage[] = { "perf report [<options>]", NULL }; struct perf_report report = { .tool = { 
.sample = process_sample_event, .mmap = perf_event__process_mmap, .comm = perf_event__process_comm, .exit = perf_event__process_task, .fork = perf_event__process_task, .lost = perf_event__process_lost, .read = process_read_event, .attr = perf_event__process_attr, .event_type = perf_event__process_event_type, .tracing_data = perf_event__process_tracing_data, .build_id = perf_event__process_build_id, .ordered_samples = true, .ordering_requires_timestamps = true, }, .pretty_printing_style = "normal", }; const struct option options[] = { OPT_STRING('i', "input", &report.input_name, "file", "input file name"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, "file", "vmlinux pathname"), OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file", "kallsyms pathname"), OPT_BOOLEAN('f', "force", &report.force, "don't complain, do it"), OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, "load module symbols - WARNING: use only with -k and LIVE kernel"), OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, "Show a column with the number of samples"), OPT_BOOLEAN('T', "threads", &report.show_threads, "Show per-thread event counters"), OPT_STRING(0, "pretty", &report.pretty_printing_style, "key", "pretty printing style key: normal raw"), OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"), OPT_BOOLEAN(0, "gtk", &report.use_gtk, "Use the GTK2 interface"), OPT_BOOLEAN(0, "stdio", &report.use_stdio, "Use the stdio interface"), OPT_STRING('s', "sort", &sort_order, "key[,key2...]", "sort by key(s): pid, comm, dso, symbol, parent, dso_to," " dso_from, symbol_to, symbol_from, mispredict"), OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization, "Show sample percentage for different cpu modes"), OPT_STRING('p', "parent", &parent_pattern, "regex", "regex filter to identify parent, 
see: '--sort parent'"), OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other, "Only display entries with parent-match"), OPT_CALLBACK_DEFAULT('g', "call-graph", &report, "output_type,min_percent[,print_limit],call_order", "Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold, optional print limit and callchain order. " "Default: fractal,0.5,callee", &parse_callchain_opt, callchain_default_opt), OPT_BOOLEAN('G', "inverted", &report.inverted_callchain, "alias for inverted call graph"), OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", "only consider symbols in these dsos"), OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", "only consider symbols in these comms"), OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", "only consider these symbols"), OPT_STRING(0, "symbol-filter", &report.symbol_filter_str, "filter", "only show symbols that (partially) match with this filter"), OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str, "width[,width...]", "don't try to adjust column width, use these fixed values"), OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator", "separator for columns, no spaces will be added between " "columns '.' 
is reserved."), OPT_BOOLEAN('U', "hide-unresolved", &report.hide_unresolved, "Only display entries resolved to a symbol"), OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", "Look for files with symbols relative to this directory"), OPT_STRING('C', "cpu", &report.cpu_list, "cpu", "list of cpus to profile"), OPT_BOOLEAN('I', "show-info", &report.show_full_info, "Display extended information about perf.data file"), OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src, "Interleave source code with assembly code (default)"), OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw, "Display raw encoding of assembly instructions (default)"), OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", "Specify disassembler style (e.g. -M intel for intel syntax)"), OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period, "Show a column with the sum of periods"), OPT_CALLBACK_NOOPT('b', "branch-stack", &sort__branch_mode, "", "use branch records for histogram filling", parse_branch_mode), OPT_END() }; argc = parse_options(argc, argv, options, report_usage, 0); if (report.use_stdio) use_browser = 0; else if (report.use_tui) use_browser = 1; else if (report.use_gtk) use_browser = 2; if (report.inverted_callchain) callchain_param.order = ORDER_CALLER; if (!report.input_name || !strlen(report.input_name)) { if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) report.input_name = "-"; else report.input_name = "perf.data"; } session = perf_session__new(report.input_name, O_RDONLY, report.force, false, &report.tool); if (session == NULL) return -ENOMEM; report.session = session; has_br_stack = perf_header__has_feat(&session->header, HEADER_BRANCH_STACK); if (sort__branch_mode == -1 && has_br_stack) sort__branch_mode = 1; /* sort__branch_mode could be 0 if --no-branch-stack */ if (sort__branch_mode == 1) { /* * if no sort_order is provided, then specify * branch-mode specific order */ if (sort_order == default_sort_order) sort_order = 
"comm,dso_from,symbol_from," "dso_to,symbol_to"; } if (strcmp(report.input_name, "-") != 0) { if (report.use_gtk) perf_gtk_setup_browser(argc, argv, true); else setup_browser(true); } else { use_browser = 0; } /* * Only in the newt browser we are doing integrated annotation, * so don't allocate extra space that won't be used in the stdio * implementation. */ if (use_browser > 0) { symbol_conf.priv_size = sizeof(struct annotation); report.annotate_init = symbol__annotate_init; /* * For searching by name on the "Browse map details". * providing it only in verbose mode not to bloat too * much struct symbol. */ if (verbose) { /* * XXX: Need to provide a less kludgy way to ask for * more space per symbol, the u32 is for the index on * the ui browser. * See symbol__browser_index. */ symbol_conf.priv_size += sizeof(u32); symbol_conf.sort_by_name = true; } } if (symbol__init() < 0) goto error; setup_sorting(report_usage, options); if (parent_pattern != default_parent_pattern) { if (sort_dimension__add("parent") < 0) goto error; /* * Only show the parent fields if we explicitly * sort that way. If we only use parent machinery * for filtering, we don't want it. 
*/ if (!strstr(sort_order, "parent")) sort_parent.elide = 1; } else symbol_conf.exclude_other = false; if (argc) { /* * Special case: if there's an argument left then assume that * it's a symbol filter: */ if (argc > 1) usage_with_options(report_usage, options); report.symbol_filter_str = argv[0]; } sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout); if (sort__branch_mode == 1) { sort_entry__setup_elide(&sort_dso_from, symbol_conf.dso_from_list, "dso_from", stdout); sort_entry__setup_elide(&sort_dso_to, symbol_conf.dso_to_list, "dso_to", stdout); sort_entry__setup_elide(&sort_sym_from, symbol_conf.sym_from_list, "sym_from", stdout); sort_entry__setup_elide(&sort_sym_to, symbol_conf.sym_to_list, "sym_to", stdout); } else { sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", stdout); sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout); } ret = __cmd_report(&report); error: perf_session__delete(session); return ret; }
gpl-2.0
mpokwsths/hammerhead_kernel
drivers/video/vt8500lcdfb.c
5026
12194
/* * linux/drivers/video/vt8500lcdfb.c * * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com> * * Based on skeletonfb.c and pxafb.c * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/wait.h> #include <mach/vt8500fb.h> #include "vt8500lcdfb.h" #include "wmt_ge_rops.h" #define to_vt8500lcd_info(__info) container_of(__info, \ struct vt8500lcd_info, fb) static int vt8500lcd_set_par(struct fb_info *info) { struct vt8500lcd_info *fbi = to_vt8500lcd_info(info); int reg_bpp = 5; /* 16bpp */ int i; unsigned long control0; if (!fbi) return -EINVAL; if (info->var.bits_per_pixel <= 8) { /* palettized */ info->var.red.offset = 0; info->var.red.length = info->var.bits_per_pixel; info->var.red.msb_right = 0; info->var.green.offset = 0; info->var.green.length = info->var.bits_per_pixel; info->var.green.msb_right = 0; info->var.blue.offset = 0; info->var.blue.length = info->var.bits_per_pixel; info->var.blue.msb_right = 0; info->var.transp.offset = 0; info->var.transp.length = 0; info->var.transp.msb_right = 0; info->fix.visual = FB_VISUAL_PSEUDOCOLOR; info->fix.line_length = info->var.xres_virtual / (8/info->var.bits_per_pixel); } else { /* non-palettized */ info->var.transp.offset = 0; info->var.transp.length = 0; 
info->var.transp.msb_right = 0; if (info->var.bits_per_pixel == 16) { /* RGB565 */ info->var.red.offset = 11; info->var.red.length = 5; info->var.red.msb_right = 0; info->var.green.offset = 5; info->var.green.length = 6; info->var.green.msb_right = 0; info->var.blue.offset = 0; info->var.blue.length = 5; info->var.blue.msb_right = 0; } else { /* Equal depths per channel */ info->var.red.offset = info->var.bits_per_pixel * 2 / 3; info->var.red.length = info->var.bits_per_pixel / 3; info->var.red.msb_right = 0; info->var.green.offset = info->var.bits_per_pixel / 3; info->var.green.length = info->var.bits_per_pixel / 3; info->var.green.msb_right = 0; info->var.blue.offset = 0; info->var.blue.length = info->var.bits_per_pixel / 3; info->var.blue.msb_right = 0; } info->fix.visual = FB_VISUAL_TRUECOLOR; info->fix.line_length = info->var.bits_per_pixel > 16 ? info->var.xres_virtual << 2 : info->var.xres_virtual << 1; } for (i = 0; i < 8; i++) { if (bpp_values[i] == info->var.bits_per_pixel) { reg_bpp = i; continue; } } control0 = readl(fbi->regbase) & ~0xf; writel(0, fbi->regbase); while (readl(fbi->regbase + 0x38) & 0x10) /* wait */; writel((((info->var.hsync_len - 1) & 0x3f) << 26) | ((info->var.left_margin & 0xff) << 18) | (((info->var.xres - 1) & 0x3ff) << 8) | (info->var.right_margin & 0xff), fbi->regbase + 0x4); writel((((info->var.vsync_len - 1) & 0x3f) << 26) | ((info->var.upper_margin & 0xff) << 18) | (((info->var.yres - 1) & 0x3ff) << 8) | (info->var.lower_margin & 0xff), fbi->regbase + 0x8); writel((((info->var.yres - 1) & 0x400) << 2) | ((info->var.xres - 1) & 0x400), fbi->regbase + 0x10); writel(0x80000000, fbi->regbase + 0x20); writel(control0 | (reg_bpp << 1) | 0x100, fbi->regbase); return 0; } static inline u_int chan_to_field(u_int chan, struct fb_bitfield *bf) { chan &= 0xffff; chan >>= 16 - bf->length; return chan << bf->offset; } static int vt8500lcd_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info 
*info) { struct vt8500lcd_info *fbi = to_vt8500lcd_info(info); int ret = 1; unsigned int val; if (regno >= 256) return -EINVAL; if (info->var.grayscale) red = green = blue = (19595 * red + 38470 * green + 7471 * blue) >> 16; switch (fbi->fb.fix.visual) { case FB_VISUAL_TRUECOLOR: if (regno < 16) { u32 *pal = fbi->fb.pseudo_palette; val = chan_to_field(red, &fbi->fb.var.red); val |= chan_to_field(green, &fbi->fb.var.green); val |= chan_to_field(blue, &fbi->fb.var.blue); pal[regno] = val; ret = 0; } break; case FB_VISUAL_STATIC_PSEUDOCOLOR: case FB_VISUAL_PSEUDOCOLOR: writew((red & 0xf800) | ((green >> 5) & 0x7e0) | ((blue >> 11) & 0x1f), fbi->palette_cpu + sizeof(u16) * regno); break; } return ret; } static int vt8500lcd_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { int ret = 0; struct vt8500lcd_info *fbi = to_vt8500lcd_info(info); if (cmd == FBIO_WAITFORVSYNC) { /* Unmask End of Frame interrupt */ writel(0xffffffff ^ (1 << 3), fbi->regbase + 0x3c); ret = wait_event_interruptible_timeout(fbi->wait, readl(fbi->regbase + 0x38) & (1 << 3), HZ / 10); /* Mask back to reduce unwanted interrupt traffic */ writel(0xffffffff, fbi->regbase + 0x3c); if (ret < 0) return ret; if (ret == 0) return -ETIMEDOUT; } return ret; } static int vt8500lcd_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { unsigned pixlen = info->fix.line_length / info->var.xres_virtual; unsigned off = pixlen * var->xoffset + info->fix.line_length * var->yoffset; struct vt8500lcd_info *fbi = to_vt8500lcd_info(info); writel((1 << 31) | (((info->var.xres_virtual - info->var.xres) * pixlen / 4) << 20) | (off >> 2), fbi->regbase + 0x20); return 0; } /* * vt8500lcd_blank(): * Blank the display by setting all palette values to zero. Note, * True Color modes do not really use the palette, so this will not * blank the display in all modes. 
*/ static int vt8500lcd_blank(int blank, struct fb_info *info) { int i; switch (blank) { case FB_BLANK_POWERDOWN: case FB_BLANK_VSYNC_SUSPEND: case FB_BLANK_HSYNC_SUSPEND: case FB_BLANK_NORMAL: if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR || info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) for (i = 0; i < 256; i++) vt8500lcd_setcolreg(i, 0, 0, 0, 0, info); case FB_BLANK_UNBLANK: if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR || info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) fb_set_cmap(&info->cmap, info); } return 0; } static struct fb_ops vt8500lcd_ops = { .owner = THIS_MODULE, .fb_set_par = vt8500lcd_set_par, .fb_setcolreg = vt8500lcd_setcolreg, .fb_fillrect = wmt_ge_fillrect, .fb_copyarea = wmt_ge_copyarea, .fb_imageblit = sys_imageblit, .fb_sync = wmt_ge_sync, .fb_ioctl = vt8500lcd_ioctl, .fb_pan_display = vt8500lcd_pan_display, .fb_blank = vt8500lcd_blank, }; static irqreturn_t vt8500lcd_handle_irq(int irq, void *dev_id) { struct vt8500lcd_info *fbi = dev_id; if (readl(fbi->regbase + 0x38) & (1 << 3)) wake_up_interruptible(&fbi->wait); writel(0xffffffff, fbi->regbase + 0x38); return IRQ_HANDLED; } static int __devinit vt8500lcd_probe(struct platform_device *pdev) { struct vt8500lcd_info *fbi; struct resource *res; struct vt8500fb_platform_data *pdata = pdev->dev.platform_data; void *addr; int irq, ret; ret = -ENOMEM; fbi = NULL; fbi = kzalloc(sizeof(struct vt8500lcd_info) + sizeof(u32) * 16, GFP_KERNEL); if (!fbi) { dev_err(&pdev->dev, "Failed to initialize framebuffer device\n"); ret = -ENOMEM; goto failed; } strcpy(fbi->fb.fix.id, "VT8500 LCD"); fbi->fb.fix.type = FB_TYPE_PACKED_PIXELS; fbi->fb.fix.xpanstep = 0; fbi->fb.fix.ypanstep = 1; fbi->fb.fix.ywrapstep = 0; fbi->fb.fix.accel = FB_ACCEL_NONE; fbi->fb.var.nonstd = 0; fbi->fb.var.activate = FB_ACTIVATE_NOW; fbi->fb.var.height = -1; fbi->fb.var.width = -1; fbi->fb.var.vmode = FB_VMODE_NONINTERLACED; fbi->fb.fbops = &vt8500lcd_ops; fbi->fb.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | 
FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_YPAN | FBINFO_VIRTFB | FBINFO_PARTIAL_PAN_OK; fbi->fb.node = -1; addr = fbi; addr = addr + sizeof(struct vt8500lcd_info); fbi->fb.pseudo_palette = addr; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "no I/O memory resource defined\n"); ret = -ENODEV; goto failed_fbi; } res = request_mem_region(res->start, resource_size(res), "vt8500lcd"); if (res == NULL) { dev_err(&pdev->dev, "failed to request I/O memory\n"); ret = -EBUSY; goto failed_fbi; } fbi->regbase = ioremap(res->start, resource_size(res)); if (fbi->regbase == NULL) { dev_err(&pdev->dev, "failed to map I/O memory\n"); ret = -EBUSY; goto failed_free_res; } fbi->fb.fix.smem_start = pdata->video_mem_phys; fbi->fb.fix.smem_len = pdata->video_mem_len; fbi->fb.screen_base = pdata->video_mem_virt; fbi->palette_size = PAGE_ALIGN(512); fbi->palette_cpu = dma_alloc_coherent(&pdev->dev, fbi->palette_size, &fbi->palette_phys, GFP_KERNEL); if (fbi->palette_cpu == NULL) { dev_err(&pdev->dev, "Failed to allocate palette buffer\n"); ret = -ENOMEM; goto failed_free_io; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "no IRQ defined\n"); ret = -ENODEV; goto failed_free_palette; } ret = request_irq(irq, vt8500lcd_handle_irq, 0, "LCD", fbi); if (ret) { dev_err(&pdev->dev, "request_irq failed: %d\n", ret); ret = -EBUSY; goto failed_free_palette; } init_waitqueue_head(&fbi->wait); if (fb_alloc_cmap(&fbi->fb.cmap, 256, 0) < 0) { dev_err(&pdev->dev, "Failed to allocate color map\n"); ret = -ENOMEM; goto failed_free_irq; } fb_videomode_to_var(&fbi->fb.var, &pdata->mode); fbi->fb.var.bits_per_pixel = pdata->bpp; fbi->fb.var.xres_virtual = pdata->xres_virtual; fbi->fb.var.yres_virtual = pdata->yres_virtual; ret = vt8500lcd_set_par(&fbi->fb); if (ret) { dev_err(&pdev->dev, "Failed to set parameters\n"); goto failed_free_cmap; } writel(fbi->fb.fix.smem_start >> 22, fbi->regbase + 0x1c); writel((fbi->palette_phys & 0xfffffe00) | 
1, fbi->regbase + 0x18); platform_set_drvdata(pdev, fbi); ret = register_framebuffer(&fbi->fb); if (ret < 0) { dev_err(&pdev->dev, "Failed to register framebuffer device: %d\n", ret); goto failed_free_cmap; } /* * Ok, now enable the LCD controller */ writel(readl(fbi->regbase) | 1, fbi->regbase); return 0; failed_free_cmap: if (fbi->fb.cmap.len) fb_dealloc_cmap(&fbi->fb.cmap); failed_free_irq: free_irq(irq, fbi); failed_free_palette: dma_free_coherent(&pdev->dev, fbi->palette_size, fbi->palette_cpu, fbi->palette_phys); failed_free_io: iounmap(fbi->regbase); failed_free_res: release_mem_region(res->start, resource_size(res)); failed_fbi: platform_set_drvdata(pdev, NULL); kfree(fbi); failed: return ret; } static int __devexit vt8500lcd_remove(struct platform_device *pdev) { struct vt8500lcd_info *fbi = platform_get_drvdata(pdev); struct resource *res; int irq; unregister_framebuffer(&fbi->fb); writel(0, fbi->regbase); if (fbi->fb.cmap.len) fb_dealloc_cmap(&fbi->fb.cmap); irq = platform_get_irq(pdev, 0); free_irq(irq, fbi); dma_free_coherent(&pdev->dev, fbi->palette_size, fbi->palette_cpu, fbi->palette_phys); iounmap(fbi->regbase); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); kfree(fbi); return 0; } static struct platform_driver vt8500lcd_driver = { .probe = vt8500lcd_probe, .remove = __devexit_p(vt8500lcd_remove), .driver = { .owner = THIS_MODULE, .name = "vt8500-lcd", }, }; module_platform_driver(vt8500lcd_driver); MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>"); MODULE_DESCRIPTION("LCD controller driver for VIA VT8500"); MODULE_LICENSE("GPL");
gpl-2.0
asis92/kernel-lp-lg-d802
drivers/video/omap/lcd_osk.c
5026
2897
/* * LCD panel support for the TI OMAP OSK board * * Copyright (C) 2004 Nokia Corporation * Author: Imre Deak <imre.deak@nokia.com> * Adapted for OSK by <dirk.behme@de.bosch.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/platform_device.h> #include <asm/gpio.h> #include <plat/mux.h> #include "omapfb.h" static int osk_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) { /* gpio2 was allocated in board init */ return 0; } static void osk_panel_cleanup(struct lcd_panel *panel) { } static int osk_panel_enable(struct lcd_panel *panel) { /* configure PWL pin */ omap_cfg_reg(PWL); /* Enable PWL unit */ omap_writeb(0x01, OMAP_PWL_CLK_ENABLE); /* Set PWL level */ omap_writeb(0xFF, OMAP_PWL_ENABLE); /* set GPIO2 high (lcd power enabled) */ gpio_set_value(2, 1); return 0; } static void osk_panel_disable(struct lcd_panel *panel) { /* Set PWL level to zero */ omap_writeb(0x00, OMAP_PWL_ENABLE); /* Disable PWL unit */ omap_writeb(0x00, OMAP_PWL_CLK_ENABLE); /* set GPIO2 low */ gpio_set_value(2, 0); } static unsigned long osk_panel_get_caps(struct lcd_panel *panel) { return 0; } struct lcd_panel osk_panel = { .name = "osk", .config = OMAP_LCDC_PANEL_TFT, .bpp = 16, .data_lines = 16, .x_res = 240, .y_res = 320, .pixel_clock = 12500, .hsw = 40, .hfp = 40, .hbp = 72, .vsw = 1, 
.vfp = 1, .vbp = 0, .pcd = 12, .init = osk_panel_init, .cleanup = osk_panel_cleanup, .enable = osk_panel_enable, .disable = osk_panel_disable, .get_caps = osk_panel_get_caps, }; static int osk_panel_probe(struct platform_device *pdev) { omapfb_register_panel(&osk_panel); return 0; } static int osk_panel_remove(struct platform_device *pdev) { return 0; } static int osk_panel_suspend(struct platform_device *pdev, pm_message_t mesg) { return 0; } static int osk_panel_resume(struct platform_device *pdev) { return 0; } static struct platform_driver osk_panel_driver = { .probe = osk_panel_probe, .remove = osk_panel_remove, .suspend = osk_panel_suspend, .resume = osk_panel_resume, .driver = { .name = "lcd_osk", .owner = THIS_MODULE, }, }; module_platform_driver(osk_panel_driver);
gpl-2.0
nekromant/linux-rlx-upstream
sound/isa/ad1816a/ad1816a.c
5026
8779
/* card-ad1816a.c - driver for ADI SoundPort AD1816A based soundcards. Copyright (C) 2000 by Massimo Piccioni <dafastidio@libero.it> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/pnp.h> #include <linux/module.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/ad1816a.h> #include <sound/mpu401.h> #include <sound/opl3.h> #define PFX "ad1816a: " MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>"); MODULE_DESCRIPTION("AD1816A, AD1815"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Highscreen,Sound-Boostar 16 3D}," "{Analog Devices,AD1815}," "{Analog Devices,AD1816A}," "{TerraTec,Base 64}," "{TerraTec,AudioSystem EWS64S}," "{Aztech/Newcom SC-16 3D}," "{Shark Predator ISA}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 1-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP; /* Enable this card */ static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static long mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* Pnp setup */ static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* Pnp setup */ 
static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* PnP setup */ static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* PnP setup */ static int clockfreq[SNDRV_CARDS]; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for ad1816a based soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for ad1816a based soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable ad1816a based soundcard."); module_param_array(clockfreq, int, NULL, 0444); MODULE_PARM_DESC(clockfreq, "Clock frequency for ad1816a driver (default = 0)."); struct snd_card_ad1816a { struct pnp_dev *dev; struct pnp_dev *devmpu; }; static struct pnp_card_device_id snd_ad1816a_pnpids[] = { /* Analog Devices AD1815 */ { .id = "ADS7150", .devs = { { .id = "ADS7150" }, { .id = "ADS7151" } } }, /* Analog Device AD1816? */ { .id = "ADS7180", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } }, /* Analog Devices AD1816A - added by Kenneth Platz <kxp@atl.hp.com> */ { .id = "ADS7181", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } }, /* Analog Devices AD1816A - Aztech/Newcom SC-16 3D */ { .id = "AZT1022", .devs = { { .id = "AZT1018" }, { .id = "AZT2002" } } }, /* Highscreen Sound-Boostar 16 3D - added by Stefan Behnel */ { .id = "LWC1061", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } }, /* Highscreen Sound-Boostar 16 3D */ { .id = "MDK1605", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } }, /* Shark Predator ISA - added by Ken Arromdee */ { .id = "SMM7180", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } }, /* Analog Devices AD1816A - Terratec AudioSystem EWS64 S */ { .id = "TER1112", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } }, /* Analog Devices AD1816A - Terratec AudioSystem EWS64 S */ { .id = "TER1112", .devs = { { .id = "TER1100" }, { .id = "TER1101" } } }, /* Analog Devices AD1816A - Terratec Base 64 */ { .id = "TER1411", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } 
}, /* end */ { .id = "" } }; MODULE_DEVICE_TABLE(pnp_card, snd_ad1816a_pnpids); #define DRIVER_NAME "snd-card-ad1816a" static int __devinit snd_card_ad1816a_pnp(int dev, struct snd_card_ad1816a *acard, struct pnp_card_link *card, const struct pnp_card_device_id *id) { struct pnp_dev *pdev; int err; acard->dev = pnp_request_card_device(card, id->devs[0].id, NULL); if (acard->dev == NULL) return -EBUSY; acard->devmpu = pnp_request_card_device(card, id->devs[1].id, NULL); if (acard->devmpu == NULL) { mpu_port[dev] = -1; snd_printk(KERN_WARNING PFX "MPU401 device busy, skipping.\n"); } pdev = acard->dev; err = pnp_activate_dev(pdev); if (err < 0) { printk(KERN_ERR PFX "AUDIO PnP configure failure\n"); return -EBUSY; } port[dev] = pnp_port_start(pdev, 2); fm_port[dev] = pnp_port_start(pdev, 1); dma1[dev] = pnp_dma(pdev, 0); dma2[dev] = pnp_dma(pdev, 1); irq[dev] = pnp_irq(pdev, 0); if (acard->devmpu == NULL) return 0; pdev = acard->devmpu; err = pnp_activate_dev(pdev); if (err < 0) { printk(KERN_ERR PFX "MPU401 PnP configure failure\n"); mpu_port[dev] = -1; acard->devmpu = NULL; } else { mpu_port[dev] = pnp_port_start(pdev, 0); mpu_irq[dev] = pnp_irq(pdev, 0); } return 0; } static int __devinit snd_card_ad1816a_probe(int dev, struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { int error; struct snd_card *card; struct snd_card_ad1816a *acard; struct snd_ad1816a *chip; struct snd_opl3 *opl3; struct snd_timer *timer; error = snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(struct snd_card_ad1816a), &card); if (error < 0) return error; acard = card->private_data; if ((error = snd_card_ad1816a_pnp(dev, acard, pcard, pid))) { snd_card_free(card); return error; } snd_card_set_dev(card, &pcard->card->dev); if ((error = snd_ad1816a_create(card, port[dev], irq[dev], dma1[dev], dma2[dev], &chip)) < 0) { snd_card_free(card); return error; } if (clockfreq[dev] >= 5000 && clockfreq[dev] <= 100000) chip->clock_freq = clockfreq[dev]; strcpy(card->driver, 
"AD1816A"); strcpy(card->shortname, "ADI SoundPort AD1816A"); sprintf(card->longname, "%s, SS at 0x%lx, irq %d, dma %d&%d", card->shortname, chip->port, irq[dev], dma1[dev], dma2[dev]); if ((error = snd_ad1816a_pcm(chip, 0, NULL)) < 0) { snd_card_free(card); return error; } if ((error = snd_ad1816a_mixer(chip)) < 0) { snd_card_free(card); return error; } error = snd_ad1816a_timer(chip, 0, &timer); if (error < 0) { snd_card_free(card); return error; } if (mpu_port[dev] > 0) { if (snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401, mpu_port[dev], 0, mpu_irq[dev], NULL) < 0) printk(KERN_ERR PFX "no MPU-401 device at 0x%lx.\n", mpu_port[dev]); } if (fm_port[dev] > 0) { if (snd_opl3_create(card, fm_port[dev], fm_port[dev] + 2, OPL3_HW_AUTO, 0, &opl3) < 0) { printk(KERN_ERR PFX "no OPL device at 0x%lx-0x%lx.\n", fm_port[dev], fm_port[dev] + 2); } else { error = snd_opl3_hwdep_new(opl3, 0, 1, NULL); if (error < 0) { snd_card_free(card); return error; } } } if ((error = snd_card_register(card)) < 0) { snd_card_free(card); return error; } pnp_set_card_drvdata(pcard, card); return 0; } static unsigned int __devinitdata ad1816a_devices; static int __devinit snd_ad1816a_pnp_detect(struct pnp_card_link *card, const struct pnp_card_device_id *id) { static int dev; int res; for ( ; dev < SNDRV_CARDS; dev++) { if (!enable[dev]) continue; res = snd_card_ad1816a_probe(dev, card, id); if (res < 0) return res; dev++; ad1816a_devices++; return 0; } return -ENODEV; } static void __devexit snd_ad1816a_pnp_remove(struct pnp_card_link * pcard) { snd_card_free(pnp_get_card_drvdata(pcard)); pnp_set_card_drvdata(pcard, NULL); } static struct pnp_card_driver ad1816a_pnpc_driver = { .flags = PNP_DRIVER_RES_DISABLE, .name = "ad1816a", .id_table = snd_ad1816a_pnpids, .probe = snd_ad1816a_pnp_detect, .remove = __devexit_p(snd_ad1816a_pnp_remove), /* FIXME: suspend/resume */ }; static int __init alsa_card_ad1816a_init(void) { int err; err = pnp_register_card_driver(&ad1816a_pnpc_driver); if (err) return 
err; if (!ad1816a_devices) { pnp_unregister_card_driver(&ad1816a_pnpc_driver); #ifdef MODULE printk(KERN_ERR "no AD1816A based soundcards found.\n"); #endif /* MODULE */ return -ENODEV; } return 0; } static void __exit alsa_card_ad1816a_exit(void) { pnp_unregister_card_driver(&ad1816a_pnpc_driver); } module_init(alsa_card_ad1816a_init) module_exit(alsa_card_ad1816a_exit)
gpl-2.0
kprkpr/kernel-e400
drivers/staging/comedi/drivers/comedi_bond.c
8098
15371
/* comedi/drivers/comedi_bond.c A Comedi driver to 'bond' or merge multiple drivers and devices as one. COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> Copyright (C) 2005 Calin A. Culianu <calin@ajvar.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: comedi_bond Description: A driver to 'bond' (merge) multiple subdevices from multiple devices together as one. Devices: Author: ds Updated: Mon, 10 Oct 00:18:25 -0500 Status: works This driver allows you to 'bond' (merge) multiple comedi subdevices (coming from possibly difference boards and/or drivers) together. For example, if you had a board with 2 different DIO subdevices, and another with 1 DIO subdevice, you could 'bond' them with this driver so that they look like one big fat DIO subdevice. This makes writing applications slightly easier as you don't have to worry about managing different subdevices in the application -- you just worry about indexing one linear array of channel id's. Right now only DIO subdevices are supported as that's the personal itch I am scratching with this driver. If you want to add support for AI and AO subdevs, go right on ahead and do so! Commands aren't supported -- although it would be cool if they were. Configuration Options: List of comedi-minors to bond. 
All subdevices of the same type within each minor will be concatenated together in the order given here. */ #include <linux/string.h> #include <linux/slab.h> #include "../comedi.h" #include "../comedilib.h" #include "../comedidev.h" /* The maxiumum number of channels per subdevice. */ #define MAX_CHANS 256 #define MODULE_NAME "comedi_bond" MODULE_LICENSE("GPL"); #ifndef STR # define STR1(x) #x # define STR(x) STR1(x) #endif static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "If true, print extra cryptic debugging output useful" "only to developers."); #define LOG_MSG(x...) printk(KERN_INFO MODULE_NAME": "x) #define DEBUG(x...) \ do { \ if (debug) \ printk(KERN_DEBUG MODULE_NAME": DEBUG: "x); \ } while (0) #define WARNING(x...) printk(KERN_WARNING MODULE_NAME ": WARNING: "x) #define ERROR(x...) printk(KERN_ERR MODULE_NAME ": INTERNAL ERROR: "x) MODULE_AUTHOR("Calin A. Culianu"); MODULE_DESCRIPTION(MODULE_NAME "A driver for COMEDI to bond multiple COMEDI " "devices together as one. In the words of John Lennon: " "'And the world will live as one...'"); /* * Board descriptions for two imaginary boards. Describing the * boards in this way is optional, and completely driver-dependent. * Some drivers use arrays such as this, other do not. */ struct BondingBoard { const char *name; }; static const struct BondingBoard bondingBoards[] = { { .name = MODULE_NAME, }, }; /* * Useful for shorthand access to the particular board structure */ #define thisboard ((const struct BondingBoard *)dev->board_ptr) struct BondedDevice { struct comedi_device *dev; unsigned minor; unsigned subdev; unsigned subdev_type; unsigned nchans; unsigned chanid_offset; /* The offset into our unified linear channel-id's of chanid 0 on this subdevice. */ }; /* this structure is for data unique to this hardware driver. If several hardware drivers keep similar information in this structure, feel free to suggest moving the variable to the struct comedi_device struct. 
*/ struct Private { # define MAX_BOARD_NAME 256 char name[MAX_BOARD_NAME]; struct BondedDevice **devs; unsigned ndevs; struct BondedDevice *chanIdDevMap[MAX_CHANS]; unsigned nchans; }; /* * most drivers define the following macro to make it easy to * access the private structure. */ #define devpriv ((struct Private *)dev->private) /* * The struct comedi_driver structure tells the Comedi core module * which functions to call to configure/deconfigure (attach/detach) * the board, and also about the kernel module that contains * the device code. */ static int bonding_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int bonding_detach(struct comedi_device *dev); /** Build Private array of all devices.. */ static int doDevConfig(struct comedi_device *dev, struct comedi_devconfig *it); static void doDevUnconfig(struct comedi_device *dev); /* Ugly implementation of realloc that always copies memory around -- I'm lazy, * what can I say? I like to do wasteful memcopies.. :) */ static void *Realloc(const void *ptr, size_t len, size_t old_len); static struct comedi_driver driver_bonding = { .driver_name = MODULE_NAME, .module = THIS_MODULE, .attach = bonding_attach, .detach = bonding_detach, /* It is not necessary to implement the following members if you are * writing a driver for a ISA PnP or PCI card */ /* Most drivers will support multiple types of boards by * having an array of board structures. These were defined * in skel_boards[] above. Note that the element 'name' * was first in the structure -- Comedi uses this fact to * extract the name of the board without knowing any details * about the structure except for its length. * When a device is attached (by comedi_config), the name * of the device is given to Comedi, and Comedi tries to * match it by going through the list of board names. If * there is a match, the address of the pointer is put * into dev->board_ptr and driver->attach() is called. 
* * Note that these are not necessary if you can determine * the type of board in software. ISA PnP, PCI, and PCMCIA * devices are such boards. */ .board_name = &bondingBoards[0].name, .offset = sizeof(struct BondingBoard), .num_names = ARRAY_SIZE(bondingBoards), }; static int bonding_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int bonding_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); /* * Attach is called by the Comedi core to configure the driver * for a particular board. If you specified a board_name array * in the driver structure, dev->board_ptr contains that * address. */ static int bonding_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; LOG_MSG("comedi%d\n", dev->minor); /* * Allocate the private structure area. alloc_private() is a * convenient macro defined in comedidev.h. */ if (alloc_private(dev, sizeof(struct Private)) < 0) return -ENOMEM; /* * Setup our bonding from config params.. sets up our Private struct.. */ if (!doDevConfig(dev, it)) return -EINVAL; /* * Initialize dev->board_name. Note that we can use the "thisboard" * macro now, since we just initialized it in the last line. */ dev->board_name = devpriv->name; /* * Allocate the subdevice structures. alloc_subdevice() is a * convenient macro defined in comedidev.h. */ if (alloc_subdevices(dev, 1) < 0) return -ENOMEM; s = dev->subdevices + 0; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = devpriv->nchans; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = bonding_dio_insn_bits; s->insn_config = bonding_dio_insn_config; LOG_MSG("attached with %u DIO channels coming from %u different " "subdevices all bonded together. " "John Lennon would be proud!\n", devpriv->nchans, devpriv->ndevs); return 1; } /* * _detach is called to deconfigure a device. 
It should deallocate * resources. * This function is also called when _attach() fails, so it should be * careful not to release resources that were not necessarily * allocated by _attach(). dev->private and dev->subdevices are * deallocated automatically by the core. */ static int bonding_detach(struct comedi_device *dev) { LOG_MSG("comedi%d: remove\n", dev->minor); doDevUnconfig(dev); return 0; } /* DIO devices are slightly special. Although it is possible to * implement the insn_read/insn_write interface, it is much more * useful to applications if you implement the insn_bits interface. * This allows packed reading/writing of the DIO channels. The * comedi core can convert between insn_bits and insn_read/write */ static int bonding_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { #define LSAMPL_BITS (sizeof(unsigned int)*8) unsigned nchans = LSAMPL_BITS, num_done = 0, i; if (insn->n != 2) return -EINVAL; if (devpriv->nchans < nchans) nchans = devpriv->nchans; /* The insn data is a mask in data[0] and the new data * in data[1], each channel cooresponding to a bit. */ for (i = 0; num_done < nchans && i < devpriv->ndevs; ++i) { struct BondedDevice *bdev = devpriv->devs[i]; /* Grab the channel mask and data of only the bits corresponding to this subdevice.. need to shift them to zero position of course. */ /* Bits corresponding to this subdev. */ unsigned int subdevMask = ((1 << bdev->nchans) - 1); unsigned int writeMask, dataBits; /* Argh, we have >= LSAMPL_BITS chans.. 
take all bits */ if (bdev->nchans >= LSAMPL_BITS) subdevMask = (unsigned int)(-1); writeMask = (data[0] >> num_done) & subdevMask; dataBits = (data[1] >> num_done) & subdevMask; /* Read/Write the new digital lines */ if (comedi_dio_bitfield(bdev->dev, bdev->subdev, writeMask, &dataBits) != 2) return -EINVAL; /* Make room for the new bits in data[1], the return value */ data[1] &= ~(subdevMask << num_done); /* Put the bits in the return value */ data[1] |= (dataBits & subdevMask) << num_done; /* Save the new bits to the saved state.. */ s->state = data[1]; num_done += bdev->nchans; } return insn->n; } static int bonding_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int chan = CR_CHAN(insn->chanspec), ret, io_bits = s->io_bits; unsigned int io; struct BondedDevice *bdev; if (chan < 0 || chan >= devpriv->nchans) return -EINVAL; bdev = devpriv->chanIdDevMap[chan]; /* The input or output configuration of each digital line is * configured by a special insn_config instruction. chanspec * contains the channel to be changed, and data[0] contains the * value COMEDI_INPUT or COMEDI_OUTPUT. */ switch (data[0]) { case INSN_CONFIG_DIO_OUTPUT: io = COMEDI_OUTPUT; /* is this really necessary? */ io_bits |= 1 << chan; break; case INSN_CONFIG_DIO_INPUT: io = COMEDI_INPUT; /* is this really necessary? */ io_bits &= ~(1 << chan); break; case INSN_CONFIG_DIO_QUERY: data[1] = (io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT; return insn->n; break; default: return -EINVAL; break; } /* 'real' channel id for this subdev.. */ chan -= bdev->chanid_offset; ret = comedi_dio_config(bdev->dev, bdev->subdev, chan, io); if (ret != 1) return -EINVAL; /* Finally, save the new io_bits values since we didn't get an error above. 
*/ s->io_bits = io_bits; return insn->n; } static void *Realloc(const void *oldmem, size_t newlen, size_t oldlen) { void *newmem = kmalloc(newlen, GFP_KERNEL); if (newmem && oldmem) memcpy(newmem, oldmem, min(oldlen, newlen)); kfree(oldmem); return newmem; } static int doDevConfig(struct comedi_device *dev, struct comedi_devconfig *it) { int i; struct comedi_device *devs_opened[COMEDI_NUM_BOARD_MINORS]; memset(devs_opened, 0, sizeof(devs_opened)); devpriv->name[0] = 0; /* Loop through all comedi devices specified on the command-line, building our device list */ for (i = 0; i < COMEDI_NDEVCONFOPTS && (!i || it->options[i]); ++i) { char file[] = "/dev/comediXXXXXX"; int minor = it->options[i]; struct comedi_device *d; int sdev = -1, nchans, tmp; struct BondedDevice *bdev = NULL; if (minor < 0 || minor >= COMEDI_NUM_BOARD_MINORS) { ERROR("Minor %d is invalid!\n", minor); return 0; } if (minor == dev->minor) { ERROR("Cannot bond this driver to itself!\n"); return 0; } if (devs_opened[minor]) { ERROR("Minor %d specified more than once!\n", minor); return 0; } snprintf(file, sizeof(file), "/dev/comedi%u", minor); file[sizeof(file) - 1] = 0; d = devs_opened[minor] = comedi_open(file); if (!d) { ERROR("Minor %u could not be opened\n", minor); return 0; } /* Do DIO, as that's all we support now.. */ while ((sdev = comedi_find_subdevice_by_type(d, COMEDI_SUBD_DIO, sdev + 1)) > -1) { nchans = comedi_get_n_channels(d, sdev); if (nchans <= 0) { ERROR("comedi_get_n_channels() returned %d " "on minor %u subdev %d!\n", nchans, minor, sdev); return 0; } bdev = kmalloc(sizeof(*bdev), GFP_KERNEL); if (!bdev) { ERROR("Out of memory.\n"); return 0; } bdev->dev = d; bdev->minor = minor; bdev->subdev = sdev; bdev->subdev_type = COMEDI_SUBD_DIO; bdev->nchans = nchans; bdev->chanid_offset = devpriv->nchans; /* map channel id's to BondedDevice * pointer.. */ while (nchans--) devpriv->chanIdDevMap[devpriv->nchans++] = bdev; /* Now put bdev pointer at end of devpriv->devs array * list.. 
*/ /* ergh.. ugly.. we need to realloc :( */ tmp = devpriv->ndevs * sizeof(bdev); devpriv->devs = Realloc(devpriv->devs, ++devpriv->ndevs * sizeof(bdev), tmp); if (!devpriv->devs) { ERROR("Could not allocate memory. " "Out of memory?"); return 0; } devpriv->devs[devpriv->ndevs - 1] = bdev; { /** Append dev:subdev to devpriv->name */ char buf[20]; int left = MAX_BOARD_NAME - strlen(devpriv->name) - 1; snprintf(buf, sizeof(buf), "%d:%d ", dev->minor, bdev->subdev); buf[sizeof(buf) - 1] = 0; strncat(devpriv->name, buf, left); } } } if (!devpriv->nchans) { ERROR("No channels found!\n"); return 0; } return 1; } static void doDevUnconfig(struct comedi_device *dev) { unsigned long devs_closed = 0; if (devpriv) { while (devpriv->ndevs-- && devpriv->devs) { struct BondedDevice *bdev; bdev = devpriv->devs[devpriv->ndevs]; if (!bdev) continue; if (!(devs_closed & (0x1 << bdev->minor))) { comedi_close(bdev->dev); devs_closed |= (0x1 << bdev->minor); } kfree(bdev); } kfree(devpriv->devs); devpriv->devs = NULL; kfree(devpriv); dev->private = NULL; } } static int __init init(void) { return comedi_driver_register(&driver_bonding); } static void __exit cleanup(void) { comedi_driver_unregister(&driver_bonding); } module_init(init); module_exit(cleanup);
gpl-2.0
Nothing-Dev/MaxiKernel_condor
net/ipv4/tunnel4.c
9378
4220
/* tunnel4.c: Generic IP tunnel transformer.
 *
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/xfrm.h>

/* Priority-sorted handler chains: one for IPv4-in-IPv4 and one for
 * IPv6-in-IPv4 tunnels.  Readers walk them under RCU; writers serialize
 * on tunnel4_mutex. */
static struct xfrm_tunnel __rcu *tunnel4_handlers __read_mostly;
static struct xfrm_tunnel __rcu *tunnel64_handlers __read_mostly;
static DEFINE_MUTEX(tunnel4_mutex);

/* Pick the handler chain for the given address family. */
static inline struct xfrm_tunnel __rcu **fam_handlers(unsigned short family)
{
	return (family == AF_INET) ? &tunnel4_handlers : &tunnel64_handlers;
}

/*
 * Insert a tunnel handler into the family's chain, keeping the chain
 * sorted by ascending priority.  Fails with -EEXIST if a handler of the
 * same priority is already registered.
 */
int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family)
{
	struct xfrm_tunnel __rcu **pp;
	struct xfrm_tunnel *cur;
	int ret = -EEXIST;
	int prio = handler->priority;

	mutex_lock(&tunnel4_mutex);

	/* Find the insertion slot: first entry with a higher priority. */
	for (pp = fam_handlers(family);
	     (cur = rcu_dereference_protected(*pp,
			lockdep_is_held(&tunnel4_mutex))) != NULL;
	     pp = &cur->next) {
		if (cur->priority > prio)
			break;
		if (cur->priority == prio)
			goto err;
	}

	/* Link the new handler in; publish it to readers last. */
	handler->next = *pp;
	rcu_assign_pointer(*pp, handler);

	ret = 0;

err:
	mutex_unlock(&tunnel4_mutex);
	return ret;
}
EXPORT_SYMBOL(xfrm4_tunnel_register);

/*
 * Unlink a previously registered tunnel handler.  Waits for in-flight
 * RCU readers before returning.  Returns -ENOENT if it was not found.
 */
int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family)
{
	struct xfrm_tunnel __rcu **pp;
	struct xfrm_tunnel *cur;
	int ret = -ENOENT;

	mutex_lock(&tunnel4_mutex);

	for (pp = fam_handlers(family);
	     (cur = rcu_dereference_protected(*pp,
			lockdep_is_held(&tunnel4_mutex))) != NULL;
	     pp = &cur->next) {
		if (cur == handler) {
			*pp = handler->next;
			ret = 0;
			break;
		}
	}

	mutex_unlock(&tunnel4_mutex);

	/* Ensure no reader still holds a reference to the handler. */
	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(xfrm4_tunnel_deregister);

#define for_each_tunnel_rcu(head, handler)		\
	for (handler = rcu_dereference(head);		\
	     handler != NULL;				\
	     handler = rcu_dereference(handler->next))	\

/* Receive path for IPv4-in-IPv4: offer the skb to each handler in
 * priority order; a handler returning 0 consumed it. */
static int tunnel4_rcv(struct sk_buff *skb)
{
	struct xfrm_tunnel *tun;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto drop;

	for_each_tunnel_rcu(tunnel4_handlers, tun)
		if (!tun->handler(skb))
			return 0;

	/* Nobody claimed it: tell the sender the port is unreachable. */
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	kfree_skb(skb);
	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
/* Receive path for IPv6-in-IPv4, mirroring tunnel4_rcv(). */
static int tunnel64_rcv(struct sk_buff *skb)
{
	struct xfrm_tunnel *tun;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto drop;

	for_each_tunnel_rcu(tunnel64_handlers, tun)
		if (!tun->handler(skb))
			return 0;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	kfree_skb(skb);
	return 0;
}
#endif

/* ICMP error path: first handler that recognizes the error wins. */
static void tunnel4_err(struct sk_buff *skb, u32 info)
{
	struct xfrm_tunnel *tun;

	for_each_tunnel_rcu(tunnel4_handlers, tun)
		if (!tun->err_handler(skb, info))
			break;
}

#if IS_ENABLED(CONFIG_IPV6)
static void tunnel64_err(struct sk_buff *skb, u32 info)
{
	struct xfrm_tunnel *tun;

	for_each_tunnel_rcu(tunnel64_handlers, tun)
		if (!tun->err_handler(skb, info))
			break;
}
#endif

static const struct net_protocol tunnel4_protocol = {
	.handler	=	tunnel4_rcv,
	.err_handler	=	tunnel4_err,
	.no_policy	=	1,
	.netns_ok	=	1,
};

#if IS_ENABLED(CONFIG_IPV6)
static const struct net_protocol tunnel64_protocol = {
	.handler	=	tunnel64_rcv,
	.err_handler	=	tunnel64_err,
	.no_policy	=	1,
	.netns_ok	=	1,
};
#endif

/* Register our inet protocol handlers; unwind IPIP on IPv6 failure. */
static int __init tunnel4_init(void)
{
	if (inet_add_protocol(&tunnel4_protocol, IPPROTO_IPIP)) {
		pr_err("%s: can't add protocol\n", __func__);
		return -EAGAIN;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (inet_add_protocol(&tunnel64_protocol, IPPROTO_IPV6)) {
		pr_err("tunnel64 init: can't add protocol\n");
		inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP);
		return -EAGAIN;
	}
#endif
	return 0;
}

static void __exit tunnel4_fini(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6))
		pr_err("tunnel64 close: can't remove protocol\n");
#endif
	if (inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP))
		pr_err("tunnel4 close: can't remove protocol\n");
}

module_init(tunnel4_init);
module_exit(tunnel4_fini);
MODULE_LICENSE("GPL");
gpl-2.0
emwno/android_kernel_N7100
arch/x86/boot/cpucheck.c
12194
6054
/* -*- linux-c -*- ------------------------------------------------------- * * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007 rPath, Inc. - All Rights Reserved * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2. * * ----------------------------------------------------------------------- */ /* * Check for obligatory CPU features and abort if the features are not * present. This code should be compilable as 16-, 32- or 64-bit * code, so be very careful with types and inline assembly. * * This code should not contain any messages; that requires an * additional wrapper. * * As written, this code is not safe for inclusion into the kernel * proper (after FPU initialization, in particular). */ #ifdef _SETUP # include "boot.h" #endif #include <linux/types.h> #include <asm/processor-flags.h> #include <asm/required-features.h> #include <asm/msr-index.h> struct cpu_features cpu; static u32 cpu_vendor[3]; static u32 err_flags[NCAPINTS]; static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY; static const u32 req_flags[NCAPINTS] = { REQUIRED_MASK0, REQUIRED_MASK1, 0, /* REQUIRED_MASK2 not implemented in this file */ 0, /* REQUIRED_MASK3 not implemented in this file */ REQUIRED_MASK4, 0, /* REQUIRED_MASK5 not implemented in this file */ REQUIRED_MASK6, 0, /* REQUIRED_MASK7 not implemented in this file */ }; #define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a)) static int is_amd(void) { return cpu_vendor[0] == A32('A', 'u', 't', 'h') && cpu_vendor[1] == A32('e', 'n', 't', 'i') && cpu_vendor[2] == A32('c', 'A', 'M', 'D'); } static int is_centaur(void) { return cpu_vendor[0] == A32('C', 'e', 'n', 't') && cpu_vendor[1] == A32('a', 'u', 'r', 'H') && cpu_vendor[2] == A32('a', 'u', 'l', 's'); } static int is_transmeta(void) { return cpu_vendor[0] == A32('G', 'e', 'n', 'u') && cpu_vendor[1] == A32('i', 'n', 'e', 'T') && cpu_vendor[2] == A32('M', 'x', '8', '6'); } static int 
has_fpu(void) { u16 fcw = -1, fsw = -1; u32 cr0; asm("movl %%cr0,%0" : "=r" (cr0)); if (cr0 & (X86_CR0_EM|X86_CR0_TS)) { cr0 &= ~(X86_CR0_EM|X86_CR0_TS); asm volatile("movl %0,%%cr0" : : "r" (cr0)); } asm volatile("fninit ; fnstsw %0 ; fnstcw %1" : "+m" (fsw), "+m" (fcw)); return fsw == 0 && (fcw & 0x103f) == 0x003f; } static int has_eflag(u32 mask) { u32 f0, f1; asm("pushfl ; " "pushfl ; " "popl %0 ; " "movl %0,%1 ; " "xorl %2,%1 ; " "pushl %1 ; " "popfl ; " "pushfl ; " "popl %1 ; " "popfl" : "=&r" (f0), "=&r" (f1) : "ri" (mask)); return !!((f0^f1) & mask); } static void get_flags(void) { u32 max_intel_level, max_amd_level; u32 tfms; if (has_fpu()) set_bit(X86_FEATURE_FPU, cpu.flags); if (has_eflag(X86_EFLAGS_ID)) { asm("cpuid" : "=a" (max_intel_level), "=b" (cpu_vendor[0]), "=d" (cpu_vendor[1]), "=c" (cpu_vendor[2]) : "a" (0)); if (max_intel_level >= 0x00000001 && max_intel_level <= 0x0000ffff) { asm("cpuid" : "=a" (tfms), "=c" (cpu.flags[4]), "=d" (cpu.flags[0]) : "a" (0x00000001) : "ebx"); cpu.level = (tfms >> 8) & 15; cpu.model = (tfms >> 4) & 15; if (cpu.level >= 6) cpu.model += ((tfms >> 16) & 0xf) << 4; } asm("cpuid" : "=a" (max_amd_level) : "a" (0x80000000) : "ebx", "ecx", "edx"); if (max_amd_level >= 0x80000001 && max_amd_level <= 0x8000ffff) { u32 eax = 0x80000001; asm("cpuid" : "+a" (eax), "=c" (cpu.flags[6]), "=d" (cpu.flags[1]) : : "ebx"); } } } /* Returns a bitmask of which words we have error bits in */ static int check_flags(void) { u32 err; int i; err = 0; for (i = 0; i < NCAPINTS; i++) { err_flags[i] = req_flags[i] & ~cpu.flags[i]; if (err_flags[i]) err |= 1 << i; } return err; } /* * Returns -1 on error. * * *cpu_level is set to the current CPU level; *req_level to the required * level. x86-64 is considered level 64 for this purpose. * * *err_flags_ptr is set to the flags error array if there are flags missing. 
*/ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) { int err; memset(&cpu.flags, 0, sizeof cpu.flags); cpu.level = 3; if (has_eflag(X86_EFLAGS_AC)) cpu.level = 4; get_flags(); err = check_flags(); if (test_bit(X86_FEATURE_LM, cpu.flags)) cpu.level = 64; if (err == 0x01 && !(err_flags[0] & ~((1 << X86_FEATURE_XMM)|(1 << X86_FEATURE_XMM2))) && is_amd()) { /* If this is an AMD and we're only missing SSE+SSE2, try to turn them on */ u32 ecx = MSR_K7_HWCR; u32 eax, edx; asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); eax &= ~(1 << 15); asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); get_flags(); /* Make sure it really did something */ err = check_flags(); } else if (err == 0x01 && !(err_flags[0] & ~(1 << X86_FEATURE_CX8)) && is_centaur() && cpu.model >= 6) { /* If this is a VIA C3, we might have to enable CX8 explicitly */ u32 ecx = MSR_VIA_FCR; u32 eax, edx; asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); eax |= (1<<1)|(1<<7); asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); set_bit(X86_FEATURE_CX8, cpu.flags); err = check_flags(); } else if (err == 0x01 && is_transmeta()) { /* Transmeta might have masked feature bits in word 0 */ u32 ecx = 0x80860004; u32 eax, edx; u32 level = 1; asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx)); asm("cpuid" : "+a" (level), "=d" (cpu.flags[0]) : : "ecx", "ebx"); asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); err = check_flags(); } if (err_flags_ptr) *err_flags_ptr = err ? err_flags : NULL; if (cpu_level_ptr) *cpu_level_ptr = cpu.level; if (req_level_ptr) *req_level_ptr = req_level; return (cpu.level < req_level || err) ? -1 : 0; }
gpl-2.0
TheTypoMaster/ubuntu-utopic
drivers/hwmon/amc6821.c
163
28656
/* * amc6821.c - Part of lm_sensors, Linux kernel modules for hardware * monitoring * Copyright (C) 2009 T. Mertelj <tomaz.mertelj@guest.arnes.si> * * Based on max6650.c: * Copyright (C) 2007 Hans J. Koch <hjk@hansjkoch.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> /* Needed for KERN_INFO */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> /* * Addresses to scan. */ static const unsigned short normal_i2c[] = {0x18, 0x19, 0x1a, 0x2c, 0x2d, 0x2e, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END}; /* * Insmod parameters */ static int pwminv; /*Inverted PWM output. 
*/ module_param(pwminv, int, S_IRUGO); static int init = 1; /*Power-on initialization.*/ module_param(init, int, S_IRUGO); enum chips { amc6821 }; #define AMC6821_REG_DEV_ID 0x3D #define AMC6821_REG_COMP_ID 0x3E #define AMC6821_REG_CONF1 0x00 #define AMC6821_REG_CONF2 0x01 #define AMC6821_REG_CONF3 0x3F #define AMC6821_REG_CONF4 0x04 #define AMC6821_REG_STAT1 0x02 #define AMC6821_REG_STAT2 0x03 #define AMC6821_REG_TDATA_LOW 0x08 #define AMC6821_REG_TDATA_HI 0x09 #define AMC6821_REG_LTEMP_HI 0x0A #define AMC6821_REG_RTEMP_HI 0x0B #define AMC6821_REG_LTEMP_LIMIT_MIN 0x15 #define AMC6821_REG_LTEMP_LIMIT_MAX 0x14 #define AMC6821_REG_RTEMP_LIMIT_MIN 0x19 #define AMC6821_REG_RTEMP_LIMIT_MAX 0x18 #define AMC6821_REG_LTEMP_CRIT 0x1B #define AMC6821_REG_RTEMP_CRIT 0x1D #define AMC6821_REG_PSV_TEMP 0x1C #define AMC6821_REG_DCY 0x22 #define AMC6821_REG_LTEMP_FAN_CTRL 0x24 #define AMC6821_REG_RTEMP_FAN_CTRL 0x25 #define AMC6821_REG_DCY_LOW_TEMP 0x21 #define AMC6821_REG_TACH_LLIMITL 0x10 #define AMC6821_REG_TACH_LLIMITH 0x11 #define AMC6821_REG_TACH_HLIMITL 0x12 #define AMC6821_REG_TACH_HLIMITH 0x13 #define AMC6821_CONF1_START 0x01 #define AMC6821_CONF1_FAN_INT_EN 0x02 #define AMC6821_CONF1_FANIE 0x04 #define AMC6821_CONF1_PWMINV 0x08 #define AMC6821_CONF1_FAN_FAULT_EN 0x10 #define AMC6821_CONF1_FDRC0 0x20 #define AMC6821_CONF1_FDRC1 0x40 #define AMC6821_CONF1_THERMOVIE 0x80 #define AMC6821_CONF2_PWM_EN 0x01 #define AMC6821_CONF2_TACH_MODE 0x02 #define AMC6821_CONF2_TACH_EN 0x04 #define AMC6821_CONF2_RTFIE 0x08 #define AMC6821_CONF2_LTOIE 0x10 #define AMC6821_CONF2_RTOIE 0x20 #define AMC6821_CONF2_PSVIE 0x40 #define AMC6821_CONF2_RST 0x80 #define AMC6821_CONF3_THERM_FAN_EN 0x80 #define AMC6821_CONF3_REV_MASK 0x0F #define AMC6821_CONF4_OVREN 0x10 #define AMC6821_CONF4_TACH_FAST 0x20 #define AMC6821_CONF4_PSPR 0x40 #define AMC6821_CONF4_MODE 0x80 #define AMC6821_STAT1_RPM_ALARM 0x01 #define AMC6821_STAT1_FANS 0x02 #define AMC6821_STAT1_RTH 0x04 #define AMC6821_STAT1_RTL 0x08 
#define AMC6821_STAT1_R_THERM 0x10 #define AMC6821_STAT1_RTF 0x20 #define AMC6821_STAT1_LTH 0x40 #define AMC6821_STAT1_LTL 0x80 #define AMC6821_STAT2_RTC 0x08 #define AMC6821_STAT2_LTC 0x10 #define AMC6821_STAT2_LPSV 0x20 #define AMC6821_STAT2_L_THERM 0x40 #define AMC6821_STAT2_THERM_IN 0x80 enum {IDX_TEMP1_INPUT = 0, IDX_TEMP1_MIN, IDX_TEMP1_MAX, IDX_TEMP1_CRIT, IDX_TEMP2_INPUT, IDX_TEMP2_MIN, IDX_TEMP2_MAX, IDX_TEMP2_CRIT, TEMP_IDX_LEN, }; static const u8 temp_reg[] = {AMC6821_REG_LTEMP_HI, AMC6821_REG_LTEMP_LIMIT_MIN, AMC6821_REG_LTEMP_LIMIT_MAX, AMC6821_REG_LTEMP_CRIT, AMC6821_REG_RTEMP_HI, AMC6821_REG_RTEMP_LIMIT_MIN, AMC6821_REG_RTEMP_LIMIT_MAX, AMC6821_REG_RTEMP_CRIT, }; enum {IDX_FAN1_INPUT = 0, IDX_FAN1_MIN, IDX_FAN1_MAX, FAN1_IDX_LEN, }; static const u8 fan_reg_low[] = {AMC6821_REG_TDATA_LOW, AMC6821_REG_TACH_LLIMITL, AMC6821_REG_TACH_HLIMITL, }; static const u8 fan_reg_hi[] = {AMC6821_REG_TDATA_HI, AMC6821_REG_TACH_LLIMITH, AMC6821_REG_TACH_HLIMITH, }; static int amc6821_probe( struct i2c_client *client, const struct i2c_device_id *id); static int amc6821_detect( struct i2c_client *client, struct i2c_board_info *info); static int amc6821_init_client(struct i2c_client *client); static int amc6821_remove(struct i2c_client *client); static struct amc6821_data *amc6821_update_device(struct device *dev); /* * Driver data (common to all clients) */ static const struct i2c_device_id amc6821_id[] = { { "amc6821", amc6821 }, { } }; MODULE_DEVICE_TABLE(i2c, amc6821_id); static struct i2c_driver amc6821_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "amc6821", }, .probe = amc6821_probe, .remove = amc6821_remove, .id_table = amc6821_id, .detect = amc6821_detect, .address_list = normal_i2c, }; /* * Client data (each client gets its own) */ struct amc6821_data { struct device *hwmon_dev; struct mutex update_lock; char valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ /* register values */ int temp[TEMP_IDX_LEN]; 
u16 fan[FAN1_IDX_LEN]; u8 fan1_div; u8 pwm1; u8 temp1_auto_point_temp[3]; u8 temp2_auto_point_temp[3]; u8 pwm1_auto_point_pwm[3]; u8 pwm1_enable; u8 pwm1_auto_channels_temp; u8 stat1; u8 stat2; }; static ssize_t get_temp( struct device *dev, struct device_attribute *devattr, char *buf) { struct amc6821_data *data = amc6821_update_device(dev); int ix = to_sensor_dev_attr(devattr)->index; return sprintf(buf, "%d\n", data->temp[ix] * 1000); } static ssize_t set_temp( struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct amc6821_data *data = i2c_get_clientdata(client); int ix = to_sensor_dev_attr(attr)->index; long val; int ret = kstrtol(buf, 10, &val); if (ret) return ret; val = clamp_val(val / 1000, -128, 127); mutex_lock(&data->update_lock); data->temp[ix] = val; if (i2c_smbus_write_byte_data(client, temp_reg[ix], data->temp[ix])) { dev_err(&client->dev, "Register write error, aborting.\n"); count = -EIO; } mutex_unlock(&data->update_lock); return count; } static ssize_t get_temp_alarm( struct device *dev, struct device_attribute *devattr, char *buf) { struct amc6821_data *data = amc6821_update_device(dev); int ix = to_sensor_dev_attr(devattr)->index; u8 flag; switch (ix) { case IDX_TEMP1_MIN: flag = data->stat1 & AMC6821_STAT1_LTL; break; case IDX_TEMP1_MAX: flag = data->stat1 & AMC6821_STAT1_LTH; break; case IDX_TEMP1_CRIT: flag = data->stat2 & AMC6821_STAT2_LTC; break; case IDX_TEMP2_MIN: flag = data->stat1 & AMC6821_STAT1_RTL; break; case IDX_TEMP2_MAX: flag = data->stat1 & AMC6821_STAT1_RTH; break; case IDX_TEMP2_CRIT: flag = data->stat2 & AMC6821_STAT2_RTC; break; default: dev_dbg(dev, "Unknown attr->index (%d).\n", ix); return -EINVAL; } if (flag) return sprintf(buf, "1"); else return sprintf(buf, "0"); } static ssize_t get_temp2_fault( struct device *dev, struct device_attribute *devattr, char *buf) { struct amc6821_data *data = amc6821_update_device(dev); if (data->stat1 & 
AMC6821_STAT1_RTF) return sprintf(buf, "1"); else return sprintf(buf, "0"); } static ssize_t get_pwm1( struct device *dev, struct device_attribute *devattr, char *buf) { struct amc6821_data *data = amc6821_update_device(dev); return sprintf(buf, "%d\n", data->pwm1); } static ssize_t set_pwm1( struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct amc6821_data *data = i2c_get_clientdata(client); long val; int ret = kstrtol(buf, 10, &val); if (ret) return ret; mutex_lock(&data->update_lock); data->pwm1 = clamp_val(val , 0, 255); i2c_smbus_write_byte_data(client, AMC6821_REG_DCY, data->pwm1); mutex_unlock(&data->update_lock); return count; } static ssize_t get_pwm1_enable( struct device *dev, struct device_attribute *devattr, char *buf) { struct amc6821_data *data = amc6821_update_device(dev); return sprintf(buf, "%d\n", data->pwm1_enable); } static ssize_t set_pwm1_enable( struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct amc6821_data *data = i2c_get_clientdata(client); long val; int config = kstrtol(buf, 10, &val); if (config) return config; mutex_lock(&data->update_lock); config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1); if (config < 0) { dev_err(&client->dev, "Error reading configuration register, aborting.\n"); count = config; goto unlock; } switch (val) { case 1: config &= ~AMC6821_CONF1_FDRC0; config &= ~AMC6821_CONF1_FDRC1; break; case 2: config &= ~AMC6821_CONF1_FDRC0; config |= AMC6821_CONF1_FDRC1; break; case 3: config |= AMC6821_CONF1_FDRC0; config |= AMC6821_CONF1_FDRC1; break; default: count = -EINVAL; goto unlock; } if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF1, config)) { dev_err(&client->dev, "Configuration register write error, aborting.\n"); count = -EIO; } unlock: mutex_unlock(&data->update_lock); return count; } static ssize_t 
get_pwm1_auto_channels_temp( struct device *dev, struct device_attribute *devattr, char *buf) { struct amc6821_data *data = amc6821_update_device(dev); return sprintf(buf, "%d\n", data->pwm1_auto_channels_temp); } static ssize_t get_temp_auto_point_temp( struct device *dev, struct device_attribute *devattr, char *buf) { int ix = to_sensor_dev_attr_2(devattr)->index; int nr = to_sensor_dev_attr_2(devattr)->nr; struct amc6821_data *data = amc6821_update_device(dev); switch (nr) { case 1: return sprintf(buf, "%d\n", data->temp1_auto_point_temp[ix] * 1000); case 2: return sprintf(buf, "%d\n", data->temp2_auto_point_temp[ix] * 1000); default: dev_dbg(dev, "Unknown attr->nr (%d).\n", nr); return -EINVAL; } } static ssize_t get_pwm1_auto_point_pwm( struct device *dev, struct device_attribute *devattr, char *buf) { int ix = to_sensor_dev_attr(devattr)->index; struct amc6821_data *data = amc6821_update_device(dev); return sprintf(buf, "%d\n", data->pwm1_auto_point_pwm[ix]); } static inline ssize_t set_slope_register(struct i2c_client *client, u8 reg, u8 dpwm, u8 *ptemp) { int dt; u8 tmp; dt = ptemp[2]-ptemp[1]; for (tmp = 4; tmp > 0; tmp--) { if (dt * (0x20 >> tmp) >= dpwm) break; } tmp |= (ptemp[1] & 0x7C) << 1; if (i2c_smbus_write_byte_data(client, reg, tmp)) { dev_err(&client->dev, "Register write error, aborting.\n"); return -EIO; } return 0; } static ssize_t set_temp_auto_point_temp( struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct amc6821_data *data = amc6821_update_device(dev); int ix = to_sensor_dev_attr_2(attr)->index; int nr = to_sensor_dev_attr_2(attr)->nr; u8 *ptemp; u8 reg; int dpwm; long val; int ret = kstrtol(buf, 10, &val); if (ret) return ret; switch (nr) { case 1: ptemp = data->temp1_auto_point_temp; reg = AMC6821_REG_LTEMP_FAN_CTRL; break; case 2: ptemp = data->temp2_auto_point_temp; reg = AMC6821_REG_RTEMP_FAN_CTRL; break; default: dev_dbg(dev, "Unknown attr->nr 
(%d).\n", nr); return -EINVAL; } mutex_lock(&data->update_lock); data->valid = 0; switch (ix) { case 0: ptemp[0] = clamp_val(val / 1000, 0, data->temp1_auto_point_temp[1]); ptemp[0] = clamp_val(ptemp[0], 0, data->temp2_auto_point_temp[1]); ptemp[0] = clamp_val(ptemp[0], 0, 63); if (i2c_smbus_write_byte_data( client, AMC6821_REG_PSV_TEMP, ptemp[0])) { dev_err(&client->dev, "Register write error, aborting.\n"); count = -EIO; } goto EXIT; case 1: ptemp[1] = clamp_val(val / 1000, (ptemp[0] & 0x7C) + 4, 124); ptemp[1] &= 0x7C; ptemp[2] = clamp_val(ptemp[2], ptemp[1] + 1, 255); break; case 2: ptemp[2] = clamp_val(val / 1000, ptemp[1]+1, 255); break; default: dev_dbg(dev, "Unknown attr->index (%d).\n", ix); count = -EINVAL; goto EXIT; } dpwm = data->pwm1_auto_point_pwm[2] - data->pwm1_auto_point_pwm[1]; if (set_slope_register(client, reg, dpwm, ptemp)) count = -EIO; EXIT: mutex_unlock(&data->update_lock); return count; } static ssize_t set_pwm1_auto_point_pwm( struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct amc6821_data *data = i2c_get_clientdata(client); int dpwm; long val; int ret = kstrtol(buf, 10, &val); if (ret) return ret; mutex_lock(&data->update_lock); data->pwm1_auto_point_pwm[1] = clamp_val(val, 0, 254); if (i2c_smbus_write_byte_data(client, AMC6821_REG_DCY_LOW_TEMP, data->pwm1_auto_point_pwm[1])) { dev_err(&client->dev, "Register write error, aborting.\n"); count = -EIO; goto EXIT; } dpwm = data->pwm1_auto_point_pwm[2] - data->pwm1_auto_point_pwm[1]; if (set_slope_register(client, AMC6821_REG_LTEMP_FAN_CTRL, dpwm, data->temp1_auto_point_temp)) { count = -EIO; goto EXIT; } if (set_slope_register(client, AMC6821_REG_RTEMP_FAN_CTRL, dpwm, data->temp2_auto_point_temp)) { count = -EIO; goto EXIT; } EXIT: data->valid = 0; mutex_unlock(&data->update_lock); return count; } static ssize_t get_fan( struct device *dev, struct device_attribute *devattr, char *buf) { struct 
amc6821_data *data = amc6821_update_device(dev); int ix = to_sensor_dev_attr(devattr)->index; if (0 == data->fan[ix]) return sprintf(buf, "0"); return sprintf(buf, "%d\n", (int)(6000000 / data->fan[ix])); } static ssize_t get_fan1_fault( struct device *dev, struct device_attribute *devattr, char *buf) { struct amc6821_data *data = amc6821_update_device(dev); if (data->stat1 & AMC6821_STAT1_FANS) return sprintf(buf, "1"); else return sprintf(buf, "0"); } static ssize_t set_fan( struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct amc6821_data *data = i2c_get_clientdata(client); long val; int ix = to_sensor_dev_attr(attr)->index; int ret = kstrtol(buf, 10, &val); if (ret) return ret; val = 1 > val ? 0xFFFF : 6000000/val; mutex_lock(&data->update_lock); data->fan[ix] = (u16) clamp_val(val, 1, 0xFFFF); if (i2c_smbus_write_byte_data(client, fan_reg_low[ix], data->fan[ix] & 0xFF)) { dev_err(&client->dev, "Register write error, aborting.\n"); count = -EIO; goto EXIT; } if (i2c_smbus_write_byte_data(client, fan_reg_hi[ix], data->fan[ix] >> 8)) { dev_err(&client->dev, "Register write error, aborting.\n"); count = -EIO; } EXIT: mutex_unlock(&data->update_lock); return count; } static ssize_t get_fan1_div( struct device *dev, struct device_attribute *devattr, char *buf) { struct amc6821_data *data = amc6821_update_device(dev); return sprintf(buf, "%d\n", data->fan1_div); } static ssize_t set_fan1_div( struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct amc6821_data *data = i2c_get_clientdata(client); long val; int config = kstrtol(buf, 10, &val); if (config) return config; mutex_lock(&data->update_lock); config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF4); if (config < 0) { dev_err(&client->dev, "Error reading configuration register, aborting.\n"); count = config; goto EXIT; } switch (val) { case 
2: config &= ~AMC6821_CONF4_PSPR; data->fan1_div = 2; break; case 4: config |= AMC6821_CONF4_PSPR; data->fan1_div = 4; break; default: count = -EINVAL; goto EXIT; } if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF4, config)) { dev_err(&client->dev, "Configuration register write error, aborting.\n"); count = -EIO; } EXIT: mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, get_temp, NULL, IDX_TEMP1_INPUT); static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR, get_temp, set_temp, IDX_TEMP1_MIN); static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, get_temp, set_temp, IDX_TEMP1_MAX); static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR, get_temp, set_temp, IDX_TEMP1_CRIT); static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, get_temp_alarm, NULL, IDX_TEMP1_MIN); static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, get_temp_alarm, NULL, IDX_TEMP1_MAX); static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, get_temp_alarm, NULL, IDX_TEMP1_CRIT); static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, get_temp, NULL, IDX_TEMP2_INPUT); static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp, set_temp, IDX_TEMP2_MIN); static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR, get_temp, set_temp, IDX_TEMP2_MAX); static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO | S_IWUSR, get_temp, set_temp, IDX_TEMP2_CRIT); static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, get_temp2_fault, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, get_temp_alarm, NULL, IDX_TEMP2_MIN); static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, get_temp_alarm, NULL, IDX_TEMP2_MAX); static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, get_temp_alarm, NULL, IDX_TEMP2_CRIT); static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, IDX_FAN1_INPUT); static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO | S_IWUSR, get_fan, set_fan, IDX_FAN1_MIN); static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO | S_IWUSR, get_fan, set_fan, IDX_FAN1_MAX); static 
SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, get_fan1_fault, NULL, 0); static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR, get_fan1_div, set_fan1_div, 0); static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm1, set_pwm1, 0); static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, get_pwm1_enable, set_pwm1_enable, 0); static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IRUGO, get_pwm1_auto_point_pwm, NULL, 0); static SENSOR_DEVICE_ATTR(pwm1_auto_point2_pwm, S_IWUSR | S_IRUGO, get_pwm1_auto_point_pwm, set_pwm1_auto_point_pwm, 1); static SENSOR_DEVICE_ATTR(pwm1_auto_point3_pwm, S_IRUGO, get_pwm1_auto_point_pwm, NULL, 2); static SENSOR_DEVICE_ATTR(pwm1_auto_channels_temp, S_IRUGO, get_pwm1_auto_channels_temp, NULL, 0); static SENSOR_DEVICE_ATTR_2(temp1_auto_point1_temp, S_IRUGO, get_temp_auto_point_temp, NULL, 1, 0); static SENSOR_DEVICE_ATTR_2(temp1_auto_point2_temp, S_IWUSR | S_IRUGO, get_temp_auto_point_temp, set_temp_auto_point_temp, 1, 1); static SENSOR_DEVICE_ATTR_2(temp1_auto_point3_temp, S_IWUSR | S_IRUGO, get_temp_auto_point_temp, set_temp_auto_point_temp, 1, 2); static SENSOR_DEVICE_ATTR_2(temp2_auto_point1_temp, S_IWUSR | S_IRUGO, get_temp_auto_point_temp, set_temp_auto_point_temp, 2, 0); static SENSOR_DEVICE_ATTR_2(temp2_auto_point2_temp, S_IWUSR | S_IRUGO, get_temp_auto_point_temp, set_temp_auto_point_temp, 2, 1); static SENSOR_DEVICE_ATTR_2(temp2_auto_point3_temp, S_IWUSR | S_IRUGO, get_temp_auto_point_temp, set_temp_auto_point_temp, 2, 2); static struct attribute *amc6821_attrs[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_crit.dev_attr.attr, &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp2_min.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, 
&sensor_dev_attr_temp2_crit.dev_attr.attr, &sensor_dev_attr_temp2_min_alarm.dev_attr.attr, &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp2_fault.dev_attr.attr, &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan1_min.dev_attr.attr, &sensor_dev_attr_fan1_max.dev_attr.attr, &sensor_dev_attr_fan1_fault.dev_attr.attr, &sensor_dev_attr_fan1_div.dev_attr.attr, &sensor_dev_attr_pwm1.dev_attr.attr, &sensor_dev_attr_pwm1_enable.dev_attr.attr, &sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr, &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr, &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr, &sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr, &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_temp1_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_temp1_auto_point3_temp.dev_attr.attr, &sensor_dev_attr_temp2_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_temp2_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_temp2_auto_point3_temp.dev_attr.attr, NULL }; static struct attribute_group amc6821_attr_grp = { .attrs = amc6821_attrs, }; /* Return 0 if detection is successful, -ENODEV otherwise */ static int amc6821_detect( struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int address = client->addr; int dev_id, comp_id; dev_dbg(&adapter->dev, "amc6821_detect called.\n"); if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_dbg(&adapter->dev, "amc6821: I2C bus doesn't support byte mode, " "skipping.\n"); return -ENODEV; } dev_id = i2c_smbus_read_byte_data(client, AMC6821_REG_DEV_ID); comp_id = i2c_smbus_read_byte_data(client, AMC6821_REG_COMP_ID); if (dev_id != 0x21 || comp_id != 0x49) { dev_dbg(&adapter->dev, "amc6821: detection failed at 0x%02x.\n", address); return -ENODEV; } /* * Bit 7 of the address register is ignored, so we can check the * ID registers again */ dev_id = 
i2c_smbus_read_byte_data(client, 0x80 | AMC6821_REG_DEV_ID); comp_id = i2c_smbus_read_byte_data(client, 0x80 | AMC6821_REG_COMP_ID); if (dev_id != 0x21 || comp_id != 0x49) { dev_dbg(&adapter->dev, "amc6821: detection failed at 0x%02x.\n", address); return -ENODEV; } dev_info(&adapter->dev, "amc6821: chip found at 0x%02x.\n", address); strlcpy(info->type, "amc6821", I2C_NAME_SIZE); return 0; } static int amc6821_probe( struct i2c_client *client, const struct i2c_device_id *id) { struct amc6821_data *data; int err; data = devm_kzalloc(&client->dev, sizeof(struct amc6821_data), GFP_KERNEL); if (!data) return -ENOMEM; i2c_set_clientdata(client, data); mutex_init(&data->update_lock); /* * Initialize the amc6821 chip */ err = amc6821_init_client(client); if (err) return err; err = sysfs_create_group(&client->dev.kobj, &amc6821_attr_grp); if (err) return err; data->hwmon_dev = hwmon_device_register(&client->dev); if (!IS_ERR(data->hwmon_dev)) return 0; err = PTR_ERR(data->hwmon_dev); dev_err(&client->dev, "error registering hwmon device.\n"); sysfs_remove_group(&client->dev.kobj, &amc6821_attr_grp); return err; } static int amc6821_remove(struct i2c_client *client) { struct amc6821_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &amc6821_attr_grp); return 0; } static int amc6821_init_client(struct i2c_client *client) { int config; int err = -EIO; if (init) { config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF4); if (config < 0) { dev_err(&client->dev, "Error reading configuration register, aborting.\n"); return err; } config |= AMC6821_CONF4_MODE; if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF4, config)) { dev_err(&client->dev, "Configuration register write error, aborting.\n"); return err; } config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF3); if (config < 0) { dev_err(&client->dev, "Error reading configuration register, aborting.\n"); return err; } dev_info(&client->dev, 
"Revision %d\n", config & 0x0f); config &= ~AMC6821_CONF3_THERM_FAN_EN; if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF3, config)) { dev_err(&client->dev, "Configuration register write error, aborting.\n"); return err; } config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF2); if (config < 0) { dev_err(&client->dev, "Error reading configuration register, aborting.\n"); return err; } config &= ~AMC6821_CONF2_RTFIE; config &= ~AMC6821_CONF2_LTOIE; config &= ~AMC6821_CONF2_RTOIE; if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF2, config)) { dev_err(&client->dev, "Configuration register write error, aborting.\n"); return err; } config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1); if (config < 0) { dev_err(&client->dev, "Error reading configuration register, aborting.\n"); return err; } config &= ~AMC6821_CONF1_THERMOVIE; config &= ~AMC6821_CONF1_FANIE; config |= AMC6821_CONF1_START; if (pwminv) config |= AMC6821_CONF1_PWMINV; else config &= ~AMC6821_CONF1_PWMINV; if (i2c_smbus_write_byte_data( client, AMC6821_REG_CONF1, config)) { dev_err(&client->dev, "Configuration register write error, aborting.\n"); return err; } } return 0; } static struct amc6821_data *amc6821_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct amc6821_data *data = i2c_get_clientdata(client); int timeout = HZ; u8 reg; int i; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + timeout) || !data->valid) { for (i = 0; i < TEMP_IDX_LEN; i++) data->temp[i] = i2c_smbus_read_byte_data(client, temp_reg[i]); data->stat1 = i2c_smbus_read_byte_data(client, AMC6821_REG_STAT1); data->stat2 = i2c_smbus_read_byte_data(client, AMC6821_REG_STAT2); data->pwm1 = i2c_smbus_read_byte_data(client, AMC6821_REG_DCY); for (i = 0; i < FAN1_IDX_LEN; i++) { data->fan[i] = i2c_smbus_read_byte_data( client, fan_reg_low[i]); data->fan[i] += i2c_smbus_read_byte_data( client, fan_reg_hi[i]) << 8; } data->fan1_div = 
i2c_smbus_read_byte_data(client, AMC6821_REG_CONF4); data->fan1_div = data->fan1_div & AMC6821_CONF4_PSPR ? 4 : 2; data->pwm1_auto_point_pwm[0] = 0; data->pwm1_auto_point_pwm[2] = 255; data->pwm1_auto_point_pwm[1] = i2c_smbus_read_byte_data(client, AMC6821_REG_DCY_LOW_TEMP); data->temp1_auto_point_temp[0] = i2c_smbus_read_byte_data(client, AMC6821_REG_PSV_TEMP); data->temp2_auto_point_temp[0] = data->temp1_auto_point_temp[0]; reg = i2c_smbus_read_byte_data(client, AMC6821_REG_LTEMP_FAN_CTRL); data->temp1_auto_point_temp[1] = (reg & 0xF8) >> 1; reg &= 0x07; reg = 0x20 >> reg; if (reg > 0) data->temp1_auto_point_temp[2] = data->temp1_auto_point_temp[1] + (data->pwm1_auto_point_pwm[2] - data->pwm1_auto_point_pwm[1]) / reg; else data->temp1_auto_point_temp[2] = 255; reg = i2c_smbus_read_byte_data(client, AMC6821_REG_RTEMP_FAN_CTRL); data->temp2_auto_point_temp[1] = (reg & 0xF8) >> 1; reg &= 0x07; reg = 0x20 >> reg; if (reg > 0) data->temp2_auto_point_temp[2] = data->temp2_auto_point_temp[1] + (data->pwm1_auto_point_pwm[2] - data->pwm1_auto_point_pwm[1]) / reg; else data->temp2_auto_point_temp[2] = 255; reg = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1); reg = (reg >> 5) & 0x3; switch (reg) { case 0: /*open loop: software sets pwm1*/ data->pwm1_auto_channels_temp = 0; data->pwm1_enable = 1; break; case 2: /*closed loop: remote T (temp2)*/ data->pwm1_auto_channels_temp = 2; data->pwm1_enable = 2; break; case 3: /*closed loop: local and remote T (temp2)*/ data->pwm1_auto_channels_temp = 3; data->pwm1_enable = 3; break; case 1: /* * semi-open loop: software sets rpm, chip controls * pwm1, currently not implemented */ data->pwm1_auto_channels_temp = 0; data->pwm1_enable = 0; break; } data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } module_i2c_driver(amc6821_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("T. Mertelj <tomaz.mertelj@guest.arnes.si>"); MODULE_DESCRIPTION("Texas Instruments amc6821 hwmon driver");
gpl-2.0
netico-solutions/linux-am335x
drivers/mmc/host/au1xmmc.c
163
29005
/* * linux/drivers/mmc/host/au1xmmc.c - AU1XX0 MMC driver * * Copyright (c) 2005, Advanced Micro Devices, Inc. * * Developed with help from the 2.4.30 MMC AU1XXX controller including * the following copyright notices: * Copyright (c) 2003-2004 Embedded Edge, LLC. * Portions Copyright (C) 2002 Embedix, Inc * Copyright 2002 Hewlett-Packard Company * 2.6 version of this driver inspired by: * (drivers/mmc/wbsd.c) Copyright (C) 2004-2005 Pierre Ossman, * All Rights Reserved. * (drivers/mmc/pxa.c) Copyright (C) 2003 Russell King, * All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* Why don't we use the SD controllers' carddetect feature? * * From the AU1100 MMC application guide: * If the Au1100-based design is intended to support both MultiMediaCards * and 1- or 4-data bit SecureDigital cards, then the solution is to * connect a weak (560KOhm) pull-up resistor to connector pin 1. * In doing so, a MMC card never enters SPI-mode communications, * but now the SecureDigital card-detect feature of CD/DAT3 is ineffective * (the low to high transition will not occur). */ #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> #include <linux/leds.h> #include <linux/mmc/host.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-au1x00/au1xxx_dbdma.h> #include <asm/mach-au1x00/au1100_mmc.h> #define DRIVER_NAME "au1xxx-mmc" /* Set this to enable special debugging macros */ /* #define DEBUG */ #ifdef DEBUG #define DBG(fmt, idx, args...) \ pr_debug("au1xmmc(%d): DEBUG: " fmt, idx, ##args) #else #define DBG(fmt, idx, args...) 
do {} while (0) #endif /* Hardware definitions */ #define AU1XMMC_DESCRIPTOR_COUNT 1 /* max DMA seg size: 64KB on Au1100, 4MB on Au1200 */ #define AU1100_MMC_DESCRIPTOR_SIZE 0x0000ffff #define AU1200_MMC_DESCRIPTOR_SIZE 0x003fffff #define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \ MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \ MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36) /* This gives us a hard value for the stop command that we can write directly * to the command register. */ #define STOP_CMD \ (SD_CMD_RT_1B | SD_CMD_CT_7 | (0xC << SD_CMD_CI_SHIFT) | SD_CMD_GO) /* This is the set of interrupts that we configure by default. */ #define AU1XMMC_INTERRUPTS \ (SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_RAT | \ SD_CONFIG_CR | SD_CONFIG_I) /* The poll event (looking for insert/remove events runs twice a second. */ #define AU1XMMC_DETECT_TIMEOUT (HZ/2) struct au1xmmc_host { struct mmc_host *mmc; struct mmc_request *mrq; u32 flags; u32 iobase; u32 clock; u32 bus_width; u32 power_mode; int status; struct { int len; int dir; } dma; struct { int index; int offset; int len; } pio; u32 tx_chan; u32 rx_chan; int irq; struct tasklet_struct finish_task; struct tasklet_struct data_task; struct au1xmmc_platform_data *platdata; struct platform_device *pdev; struct resource *ioarea; }; /* Status flags used by the host structure */ #define HOST_F_XMIT 0x0001 #define HOST_F_RECV 0x0002 #define HOST_F_DMA 0x0010 #define HOST_F_DBDMA 0x0020 #define HOST_F_ACTIVE 0x0100 #define HOST_F_STOP 0x1000 #define HOST_S_IDLE 0x0001 #define HOST_S_CMD 0x0002 #define HOST_S_DATA 0x0003 #define HOST_S_STOP 0x0004 /* Easy access macros */ #define HOST_STATUS(h) ((h)->iobase + SD_STATUS) #define HOST_CONFIG(h) ((h)->iobase + SD_CONFIG) #define HOST_ENABLE(h) ((h)->iobase + SD_ENABLE) #define HOST_TXPORT(h) ((h)->iobase + SD_TXPORT) #define HOST_RXPORT(h) ((h)->iobase + SD_RXPORT) #define HOST_CMDARG(h) ((h)->iobase + SD_CMDARG) #define HOST_BLKSIZE(h) ((h)->iobase + SD_BLKSIZE) 
#define HOST_CMD(h) ((h)->iobase + SD_CMD) #define HOST_CONFIG2(h) ((h)->iobase + SD_CONFIG2) #define HOST_TIMEOUT(h) ((h)->iobase + SD_TIMEOUT) #define HOST_DEBUG(h) ((h)->iobase + SD_DEBUG) #define DMA_CHANNEL(h) \ (((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan) static inline int has_dbdma(void) { switch (alchemy_get_cputype()) { case ALCHEMY_CPU_AU1200: case ALCHEMY_CPU_AU1300: return 1; default: return 0; } } static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask) { u32 val = au_readl(HOST_CONFIG(host)); val |= mask; au_writel(val, HOST_CONFIG(host)); au_sync(); } static inline void FLUSH_FIFO(struct au1xmmc_host *host) { u32 val = au_readl(HOST_CONFIG2(host)); au_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host)); au_sync_delay(1); /* SEND_STOP will turn off clock control - this re-enables it */ val &= ~SD_CONFIG2_DF; au_writel(val, HOST_CONFIG2(host)); au_sync(); } static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask) { u32 val = au_readl(HOST_CONFIG(host)); val &= ~mask; au_writel(val, HOST_CONFIG(host)); au_sync(); } static inline void SEND_STOP(struct au1xmmc_host *host) { u32 config2; WARN_ON(host->status != HOST_S_DATA); host->status = HOST_S_STOP; config2 = au_readl(HOST_CONFIG2(host)); au_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host)); au_sync(); /* Send the stop command */ au_writel(STOP_CMD, HOST_CMD(host)); } static void au1xmmc_set_power(struct au1xmmc_host *host, int state) { if (host->platdata && host->platdata->set_power) host->platdata->set_power(host->mmc, state); } static int au1xmmc_card_inserted(struct mmc_host *mmc) { struct au1xmmc_host *host = mmc_priv(mmc); if (host->platdata && host->platdata->card_inserted) return !!host->platdata->card_inserted(host->mmc); return -ENOSYS; } static int au1xmmc_card_readonly(struct mmc_host *mmc) { struct au1xmmc_host *host = mmc_priv(mmc); if (host->platdata && host->platdata->card_readonly) return !!host->platdata->card_readonly(mmc); return -ENOSYS; } static void 
au1xmmc_finish_request(struct au1xmmc_host *host) { struct mmc_request *mrq = host->mrq; host->mrq = NULL; host->flags &= HOST_F_ACTIVE | HOST_F_DMA; host->dma.len = 0; host->dma.dir = 0; host->pio.index = 0; host->pio.offset = 0; host->pio.len = 0; host->status = HOST_S_IDLE; mmc_request_done(host->mmc, mrq); } static void au1xmmc_tasklet_finish(unsigned long param) { struct au1xmmc_host *host = (struct au1xmmc_host *) param; au1xmmc_finish_request(host); } static int au1xmmc_send_command(struct au1xmmc_host *host, int wait, struct mmc_command *cmd, struct mmc_data *data) { u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT); switch (mmc_resp_type(cmd)) { case MMC_RSP_NONE: break; case MMC_RSP_R1: mmccmd |= SD_CMD_RT_1; break; case MMC_RSP_R1B: mmccmd |= SD_CMD_RT_1B; break; case MMC_RSP_R2: mmccmd |= SD_CMD_RT_2; break; case MMC_RSP_R3: mmccmd |= SD_CMD_RT_3; break; default: pr_info("au1xmmc: unhandled response type %02x\n", mmc_resp_type(cmd)); return -EINVAL; } if (data) { if (data->flags & MMC_DATA_READ) { if (data->blocks > 1) mmccmd |= SD_CMD_CT_4; else mmccmd |= SD_CMD_CT_2; } else if (data->flags & MMC_DATA_WRITE) { if (data->blocks > 1) mmccmd |= SD_CMD_CT_3; else mmccmd |= SD_CMD_CT_1; } } au_writel(cmd->arg, HOST_CMDARG(host)); au_sync(); if (wait) IRQ_OFF(host, SD_CONFIG_CR); au_writel((mmccmd | SD_CMD_GO), HOST_CMD(host)); au_sync(); /* Wait for the command to go on the line */ while (au_readl(HOST_CMD(host)) & SD_CMD_GO) /* nop */; /* Wait for the command to come back */ if (wait) { u32 status = au_readl(HOST_STATUS(host)); while (!(status & SD_STATUS_CR)) status = au_readl(HOST_STATUS(host)); /* Clear the CR status */ au_writel(SD_STATUS_CR, HOST_STATUS(host)); IRQ_ON(host, SD_CONFIG_CR); } return 0; } static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) { struct mmc_request *mrq = host->mrq; struct mmc_data *data; u32 crc; WARN_ON((host->status != HOST_S_DATA) && (host->status != HOST_S_STOP)); if (host->mrq == NULL) return; data = 
mrq->cmd->data; if (status == 0) status = au_readl(HOST_STATUS(host)); /* The transaction is really over when the SD_STATUS_DB bit is clear */ while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB)) status = au_readl(HOST_STATUS(host)); data->error = 0; dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir); /* Process any errors */ crc = (status & (SD_STATUS_WC | SD_STATUS_RC)); if (host->flags & HOST_F_XMIT) crc |= ((status & 0x07) == 0x02) ? 0 : 1; if (crc) data->error = -EILSEQ; /* Clear the CRC bits */ au_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host)); data->bytes_xfered = 0; if (!data->error) { if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) { u32 chan = DMA_CHANNEL(host); chan_tab_t *c = *((chan_tab_t **)chan); au1x_dma_chan_t *cp = c->chan_ptr; data->bytes_xfered = cp->ddma_bytecnt; } else data->bytes_xfered = (data->blocks * data->blksz) - host->pio.len; } au1xmmc_finish_request(host); } static void au1xmmc_tasklet_data(unsigned long param) { struct au1xmmc_host *host = (struct au1xmmc_host *)param; u32 status = au_readl(HOST_STATUS(host)); au1xmmc_data_complete(host, status); } #define AU1XMMC_MAX_TRANSFER 8 static void au1xmmc_send_pio(struct au1xmmc_host *host) { struct mmc_data *data; int sg_len, max, count; unsigned char *sg_ptr, val; u32 status; struct scatterlist *sg; data = host->mrq->data; if (!(host->flags & HOST_F_XMIT)) return; /* This is the pointer to the data buffer */ sg = &data->sg[host->pio.index]; sg_ptr = sg_virt(sg) + host->pio.offset; /* This is the space left inside the buffer */ sg_len = data->sg[host->pio.index].length - host->pio.offset; /* Check if we need less than the size of the sg_buffer */ max = (sg_len > host->pio.len) ? 
host->pio.len : sg_len; if (max > AU1XMMC_MAX_TRANSFER) max = AU1XMMC_MAX_TRANSFER; for (count = 0; count < max; count++) { status = au_readl(HOST_STATUS(host)); if (!(status & SD_STATUS_TH)) break; val = *sg_ptr++; au_writel((unsigned long)val, HOST_TXPORT(host)); au_sync(); } host->pio.len -= count; host->pio.offset += count; if (count == sg_len) { host->pio.index++; host->pio.offset = 0; } if (host->pio.len == 0) { IRQ_OFF(host, SD_CONFIG_TH); if (host->flags & HOST_F_STOP) SEND_STOP(host); tasklet_schedule(&host->data_task); } } static void au1xmmc_receive_pio(struct au1xmmc_host *host) { struct mmc_data *data; int max, count, sg_len = 0; unsigned char *sg_ptr = NULL; u32 status, val; struct scatterlist *sg; data = host->mrq->data; if (!(host->flags & HOST_F_RECV)) return; max = host->pio.len; if (host->pio.index < host->dma.len) { sg = &data->sg[host->pio.index]; sg_ptr = sg_virt(sg) + host->pio.offset; /* This is the space left inside the buffer */ sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset; /* Check if we need less than the size of the sg_buffer */ if (sg_len < max) max = sg_len; } if (max > AU1XMMC_MAX_TRANSFER) max = AU1XMMC_MAX_TRANSFER; for (count = 0; count < max; count++) { status = au_readl(HOST_STATUS(host)); if (!(status & SD_STATUS_NE)) break; if (status & SD_STATUS_RC) { DBG("RX CRC Error [%d + %d].\n", host->pdev->id, host->pio.len, count); break; } if (status & SD_STATUS_RO) { DBG("RX Overrun [%d + %d]\n", host->pdev->id, host->pio.len, count); break; } else if (status & SD_STATUS_RU) { DBG("RX Underrun [%d + %d]\n", host->pdev->id, host->pio.len, count); break; } val = au_readl(HOST_RXPORT(host)); if (sg_ptr) *sg_ptr++ = (unsigned char)(val & 0xFF); } host->pio.len -= count; host->pio.offset += count; if (sg_len && count == sg_len) { host->pio.index++; host->pio.offset = 0; } if (host->pio.len == 0) { /* IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); */ IRQ_OFF(host, SD_CONFIG_NE); if (host->flags & HOST_F_STOP) 
SEND_STOP(host); tasklet_schedule(&host->data_task); } } /* This is called when a command has been completed - grab the response * and check for errors. Then start the data transfer if it is indicated. */ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) { struct mmc_request *mrq = host->mrq; struct mmc_command *cmd; u32 r[4]; int i, trans; if (!host->mrq) return; cmd = mrq->cmd; cmd->error = 0; if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) { r[0] = au_readl(host->iobase + SD_RESP3); r[1] = au_readl(host->iobase + SD_RESP2); r[2] = au_readl(host->iobase + SD_RESP1); r[3] = au_readl(host->iobase + SD_RESP0); /* The CRC is omitted from the response, so really * we only got 120 bytes, but the engine expects * 128 bits, so we have to shift things up. */ for (i = 0; i < 4; i++) { cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8; if (i != 3) cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24; } } else { /* Techincally, we should be getting all 48 bits of * the response (SD_RESP1 + SD_RESP2), but because * our response omits the CRC, our data ends up * being shifted 8 bits to the right. In this case, * that means that the OSR data starts at bit 31, * so we can just read RESP0 and return that. 
*/ cmd->resp[0] = au_readl(host->iobase + SD_RESP0); } } /* Figure out errors */ if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC)) cmd->error = -EILSEQ; trans = host->flags & (HOST_F_XMIT | HOST_F_RECV); if (!trans || cmd->error) { IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); tasklet_schedule(&host->finish_task); return; } host->status = HOST_S_DATA; if ((host->flags & (HOST_F_DMA | HOST_F_DBDMA))) { u32 channel = DMA_CHANNEL(host); /* Start the DBDMA as soon as the buffer gets something in it */ if (host->flags & HOST_F_RECV) { u32 mask = SD_STATUS_DB | SD_STATUS_NE; while((status & mask) != mask) status = au_readl(HOST_STATUS(host)); } au1xxx_dbdma_start(channel); } } static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate) { unsigned int pbus = get_au1x00_speed(); unsigned int divisor; u32 config; /* From databook: * divisor = ((((cpuclock / sbus_divisor) / 2) / mmcclock) / 2) - 1 */ pbus /= ((au_readl(SYS_POWERCTRL) & 0x3) + 2); pbus /= 2; divisor = ((pbus / rate) / 2) - 1; config = au_readl(HOST_CONFIG(host)); config &= ~(SD_CONFIG_DIV); config |= (divisor & SD_CONFIG_DIV) | SD_CONFIG_DE; au_writel(config, HOST_CONFIG(host)); au_sync(); } static int au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) { int datalen = data->blocks * data->blksz; if (data->flags & MMC_DATA_READ) host->flags |= HOST_F_RECV; else host->flags |= HOST_F_XMIT; if (host->mrq->stop) host->flags |= HOST_F_STOP; host->dma.dir = DMA_BIDIRECTIONAL; host->dma.len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir); if (host->dma.len == 0) return -ETIMEDOUT; au_writel(data->blksz - 1, HOST_BLKSIZE(host)); if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) { int i; u32 channel = DMA_CHANNEL(host); au1xxx_dbdma_stop(channel); for (i = 0; i < host->dma.len; i++) { u32 ret = 0, flags = DDMA_FLAGS_NOIE; struct scatterlist *sg = &data->sg[i]; int sg_len = sg->length; int len = (datalen > sg_len) ? 
sg_len : datalen; if (i == host->dma.len - 1) flags = DDMA_FLAGS_IE; if (host->flags & HOST_F_XMIT) { ret = au1xxx_dbdma_put_source(channel, sg_phys(sg), len, flags); } else { ret = au1xxx_dbdma_put_dest(channel, sg_phys(sg), len, flags); } if (!ret) goto dataerr; datalen -= len; } } else { host->pio.index = 0; host->pio.offset = 0; host->pio.len = datalen; if (host->flags & HOST_F_XMIT) IRQ_ON(host, SD_CONFIG_TH); else IRQ_ON(host, SD_CONFIG_NE); /* IRQ_ON(host, SD_CONFIG_RA | SD_CONFIG_RF); */ } return 0; dataerr: dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir); return -ETIMEDOUT; } /* This actually starts a command or data transaction */ static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq) { struct au1xmmc_host *host = mmc_priv(mmc); int ret = 0; WARN_ON(irqs_disabled()); WARN_ON(host->status != HOST_S_IDLE); host->mrq = mrq; host->status = HOST_S_CMD; /* fail request immediately if no card is present */ if (0 == au1xmmc_card_inserted(mmc)) { mrq->cmd->error = -ENOMEDIUM; au1xmmc_finish_request(host); return; } if (mrq->data) { FLUSH_FIFO(host); ret = au1xmmc_prepare_data(host, mrq->data); } if (!ret) ret = au1xmmc_send_command(host, 0, mrq->cmd, mrq->data); if (ret) { mrq->cmd->error = ret; au1xmmc_finish_request(host); } } static void au1xmmc_reset_controller(struct au1xmmc_host *host) { /* Apply the clock */ au_writel(SD_ENABLE_CE, HOST_ENABLE(host)); au_sync_delay(1); au_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host)); au_sync_delay(5); au_writel(~0, HOST_STATUS(host)); au_sync(); au_writel(0, HOST_BLKSIZE(host)); au_writel(0x001fffff, HOST_TIMEOUT(host)); au_sync(); au_writel(SD_CONFIG2_EN, HOST_CONFIG2(host)); au_sync(); au_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host)); au_sync_delay(1); au_writel(SD_CONFIG2_EN, HOST_CONFIG2(host)); au_sync(); /* Configure interrupts */ au_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host)); au_sync(); } static void au1xmmc_set_ios(struct mmc_host *mmc, struct 
mmc_ios *ios) { struct au1xmmc_host *host = mmc_priv(mmc); u32 config2; if (ios->power_mode == MMC_POWER_OFF) au1xmmc_set_power(host, 0); else if (ios->power_mode == MMC_POWER_ON) { au1xmmc_set_power(host, 1); } if (ios->clock && ios->clock != host->clock) { au1xmmc_set_clock(host, ios->clock); host->clock = ios->clock; } config2 = au_readl(HOST_CONFIG2(host)); switch (ios->bus_width) { case MMC_BUS_WIDTH_8: config2 |= SD_CONFIG2_BB; break; case MMC_BUS_WIDTH_4: config2 &= ~SD_CONFIG2_BB; config2 |= SD_CONFIG2_WB; break; case MMC_BUS_WIDTH_1: config2 &= ~(SD_CONFIG2_WB | SD_CONFIG2_BB); break; } au_writel(config2, HOST_CONFIG2(host)); au_sync(); } #define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT) #define STATUS_DATA_IN (SD_STATUS_NE) #define STATUS_DATA_OUT (SD_STATUS_TH) static irqreturn_t au1xmmc_irq(int irq, void *dev_id) { struct au1xmmc_host *host = dev_id; u32 status; status = au_readl(HOST_STATUS(host)); if (!(status & SD_STATUS_I)) return IRQ_NONE; /* not ours */ if (status & SD_STATUS_SI) /* SDIO */ mmc_signal_sdio_irq(host->mmc); if (host->mrq && (status & STATUS_TIMEOUT)) { if (status & SD_STATUS_RAT) host->mrq->cmd->error = -ETIMEDOUT; else if (status & SD_STATUS_DT) host->mrq->data->error = -ETIMEDOUT; /* In PIO mode, interrupts might still be enabled */ IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH); /* IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); */ tasklet_schedule(&host->finish_task); } #if 0 else if (status & SD_STATUS_DD) { /* Sometimes we get a DD before a NE in PIO mode */ if (!(host->flags & HOST_F_DMA) && (status & SD_STATUS_NE)) au1xmmc_receive_pio(host); else { au1xmmc_data_complete(host, status); /* tasklet_schedule(&host->data_task); */ } } #endif else if (status & SD_STATUS_CR) { if (host->status == HOST_S_CMD) au1xmmc_cmd_complete(host, status); } else if (!(host->flags & HOST_F_DMA)) { if ((host->flags & HOST_F_XMIT) && (status & STATUS_DATA_OUT)) au1xmmc_send_pio(host); else if ((host->flags & HOST_F_RECV) && (status & 
STATUS_DATA_IN)) au1xmmc_receive_pio(host); } else if (status & 0x203F3C70) { DBG("Unhandled status %8.8x\n", host->pdev->id, status); } au_writel(status, HOST_STATUS(host)); au_sync(); return IRQ_HANDLED; } /* 8bit memory DMA device */ static dbdev_tab_t au1xmmc_mem_dbdev = { .dev_id = DSCR_CMD0_ALWAYS, .dev_flags = DEV_FLAGS_ANYUSE, .dev_tsize = 0, .dev_devwidth = 8, .dev_physaddr = 0x00000000, .dev_intlevel = 0, .dev_intpolarity = 0, }; static int memid; static void au1xmmc_dbdma_callback(int irq, void *dev_id) { struct au1xmmc_host *host = (struct au1xmmc_host *)dev_id; /* Avoid spurious interrupts */ if (!host->mrq) return; if (host->flags & HOST_F_STOP) SEND_STOP(host); tasklet_schedule(&host->data_task); } static int au1xmmc_dbdma_init(struct au1xmmc_host *host) { struct resource *res; int txid, rxid; res = platform_get_resource(host->pdev, IORESOURCE_DMA, 0); if (!res) return -ENODEV; txid = res->start; res = platform_get_resource(host->pdev, IORESOURCE_DMA, 1); if (!res) return -ENODEV; rxid = res->start; if (!memid) return -ENODEV; host->tx_chan = au1xxx_dbdma_chan_alloc(memid, txid, au1xmmc_dbdma_callback, (void *)host); if (!host->tx_chan) { dev_err(&host->pdev->dev, "cannot allocate TX DMA\n"); return -ENODEV; } host->rx_chan = au1xxx_dbdma_chan_alloc(rxid, memid, au1xmmc_dbdma_callback, (void *)host); if (!host->rx_chan) { dev_err(&host->pdev->dev, "cannot allocate RX DMA\n"); au1xxx_dbdma_chan_free(host->tx_chan); return -ENODEV; } au1xxx_dbdma_set_devwidth(host->tx_chan, 8); au1xxx_dbdma_set_devwidth(host->rx_chan, 8); au1xxx_dbdma_ring_alloc(host->tx_chan, AU1XMMC_DESCRIPTOR_COUNT); au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT); /* DBDMA is good to go */ host->flags |= HOST_F_DMA | HOST_F_DBDMA; return 0; } static void au1xmmc_dbdma_shutdown(struct au1xmmc_host *host) { if (host->flags & HOST_F_DMA) { host->flags &= ~HOST_F_DMA; au1xxx_dbdma_chan_free(host->tx_chan); au1xxx_dbdma_chan_free(host->rx_chan); } } static void 
au1xmmc_enable_sdio_irq(struct mmc_host *mmc, int en) { struct au1xmmc_host *host = mmc_priv(mmc); if (en) IRQ_ON(host, SD_CONFIG_SI); else IRQ_OFF(host, SD_CONFIG_SI); } static const struct mmc_host_ops au1xmmc_ops = { .request = au1xmmc_request, .set_ios = au1xmmc_set_ios, .get_ro = au1xmmc_card_readonly, .get_cd = au1xmmc_card_inserted, .enable_sdio_irq = au1xmmc_enable_sdio_irq, }; static int au1xmmc_probe(struct platform_device *pdev) { struct mmc_host *mmc; struct au1xmmc_host *host; struct resource *r; int ret, iflag; mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev); if (!mmc) { dev_err(&pdev->dev, "no memory for mmc_host\n"); ret = -ENOMEM; goto out0; } host = mmc_priv(mmc); host->mmc = mmc; host->platdata = pdev->dev.platform_data; host->pdev = pdev; ret = -ENODEV; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { dev_err(&pdev->dev, "no mmio defined\n"); goto out1; } host->ioarea = request_mem_region(r->start, resource_size(r), pdev->name); if (!host->ioarea) { dev_err(&pdev->dev, "mmio already in use\n"); goto out1; } host->iobase = (unsigned long)ioremap(r->start, 0x3c); if (!host->iobase) { dev_err(&pdev->dev, "cannot remap mmio\n"); goto out2; } r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!r) { dev_err(&pdev->dev, "no IRQ defined\n"); goto out3; } host->irq = r->start; mmc->ops = &au1xmmc_ops; mmc->f_min = 450000; mmc->f_max = 24000000; mmc->max_blk_size = 2048; mmc->max_blk_count = 512; mmc->ocr_avail = AU1XMMC_OCR; mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT; iflag = IRQF_SHARED; /* Au1100/Au1200: one int for both ctrls */ switch (alchemy_get_cputype()) { case ALCHEMY_CPU_AU1100: mmc->max_seg_size = AU1100_MMC_DESCRIPTOR_SIZE; break; case ALCHEMY_CPU_AU1200: mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE; break; case ALCHEMY_CPU_AU1300: iflag = 0; /* nothing is shared */ mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE; mmc->f_max = 52000000; if (host->ioarea->start 
== AU1100_SD0_PHYS_ADDR) mmc->caps |= MMC_CAP_8_BIT_DATA; break; } ret = request_irq(host->irq, au1xmmc_irq, iflag, DRIVER_NAME, host); if (ret) { dev_err(&pdev->dev, "cannot grab IRQ\n"); goto out3; } host->status = HOST_S_IDLE; /* board-specific carddetect setup, if any */ if (host->platdata && host->platdata->cd_setup) { ret = host->platdata->cd_setup(mmc, 1); if (ret) { dev_warn(&pdev->dev, "board CD setup failed\n"); mmc->caps |= MMC_CAP_NEEDS_POLL; } } else mmc->caps |= MMC_CAP_NEEDS_POLL; /* platform may not be able to use all advertised caps */ if (host->platdata) mmc->caps &= ~(host->platdata->mask_host_caps); tasklet_init(&host->data_task, au1xmmc_tasklet_data, (unsigned long)host); tasklet_init(&host->finish_task, au1xmmc_tasklet_finish, (unsigned long)host); if (has_dbdma()) { ret = au1xmmc_dbdma_init(host); if (ret) pr_info(DRIVER_NAME ": DBDMA init failed; using PIO\n"); } #ifdef CONFIG_LEDS_CLASS if (host->platdata && host->platdata->led) { struct led_classdev *led = host->platdata->led; led->name = mmc_hostname(mmc); led->brightness = LED_OFF; led->default_trigger = mmc_hostname(mmc); ret = led_classdev_register(mmc_dev(mmc), led); if (ret) goto out5; } #endif au1xmmc_reset_controller(host); ret = mmc_add_host(mmc); if (ret) { dev_err(&pdev->dev, "cannot add mmc host\n"); goto out6; } platform_set_drvdata(pdev, host); pr_info(DRIVER_NAME ": MMC Controller %d set up at %8.8X" " (mode=%s)\n", pdev->id, host->iobase, host->flags & HOST_F_DMA ? 
"dma" : "pio"); return 0; /* all ok */ out6: #ifdef CONFIG_LEDS_CLASS if (host->platdata && host->platdata->led) led_classdev_unregister(host->platdata->led); out5: #endif au_writel(0, HOST_ENABLE(host)); au_writel(0, HOST_CONFIG(host)); au_writel(0, HOST_CONFIG2(host)); au_sync(); if (host->flags & HOST_F_DBDMA) au1xmmc_dbdma_shutdown(host); tasklet_kill(&host->data_task); tasklet_kill(&host->finish_task); if (host->platdata && host->platdata->cd_setup && !(mmc->caps & MMC_CAP_NEEDS_POLL)) host->platdata->cd_setup(mmc, 0); free_irq(host->irq, host); out3: iounmap((void *)host->iobase); out2: release_resource(host->ioarea); kfree(host->ioarea); out1: mmc_free_host(mmc); out0: return ret; } static int au1xmmc_remove(struct platform_device *pdev) { struct au1xmmc_host *host = platform_get_drvdata(pdev); if (host) { mmc_remove_host(host->mmc); #ifdef CONFIG_LEDS_CLASS if (host->platdata && host->platdata->led) led_classdev_unregister(host->platdata->led); #endif if (host->platdata && host->platdata->cd_setup && !(host->mmc->caps & MMC_CAP_NEEDS_POLL)) host->platdata->cd_setup(host->mmc, 0); au_writel(0, HOST_ENABLE(host)); au_writel(0, HOST_CONFIG(host)); au_writel(0, HOST_CONFIG2(host)); au_sync(); tasklet_kill(&host->data_task); tasklet_kill(&host->finish_task); if (host->flags & HOST_F_DBDMA) au1xmmc_dbdma_shutdown(host); au1xmmc_set_power(host, 0); free_irq(host->irq, host); iounmap((void *)host->iobase); release_resource(host->ioarea); kfree(host->ioarea); mmc_free_host(host->mmc); } return 0; } #ifdef CONFIG_PM static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state) { struct au1xmmc_host *host = platform_get_drvdata(pdev); int ret; ret = mmc_suspend_host(host->mmc); if (ret) return ret; au_writel(0, HOST_CONFIG2(host)); au_writel(0, HOST_CONFIG(host)); au_writel(0xffffffff, HOST_STATUS(host)); au_writel(0, HOST_ENABLE(host)); au_sync(); return 0; } static int au1xmmc_resume(struct platform_device *pdev) { struct au1xmmc_host *host = 
platform_get_drvdata(pdev); au1xmmc_reset_controller(host); return mmc_resume_host(host->mmc); } #else #define au1xmmc_suspend NULL #define au1xmmc_resume NULL #endif static struct platform_driver au1xmmc_driver = { .probe = au1xmmc_probe, .remove = au1xmmc_remove, .suspend = au1xmmc_suspend, .resume = au1xmmc_resume, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, }; static int __init au1xmmc_init(void) { if (has_dbdma()) { /* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride * of 8 bits. And since devices are shared, we need to create * our own to avoid freaking out other devices. */ memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev); if (!memid) pr_err("au1xmmc: cannot add memory dbdma\n"); } return platform_driver_register(&au1xmmc_driver); } static void __exit au1xmmc_exit(void) { if (has_dbdma() && memid) au1xxx_ddma_del_device(memid); platform_driver_unregister(&au1xmmc_driver); } module_init(au1xmmc_init); module_exit(au1xmmc_exit); MODULE_AUTHOR("Advanced Micro Devices, Inc"); MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:au1xxx-mmc");
gpl-2.0
Jairus980/kernel_hltexx
kernel/smp.c
163
22229
/* * Generic helpers for smp ipi calls * * (C) Jens Axboe <jens.axboe@oracle.com> 2008 */ #include <linux/rcupdate.h> #include <linux/rculist.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/percpu.h> #include <linux/init.h> #include <linux/gfp.h> #include <linux/smp.h> #include <linux/cpu.h> #ifdef CONFIG_USE_GENERIC_SMP_HELPERS static struct { struct list_head queue; raw_spinlock_t lock; } call_function __cacheline_aligned_in_smp = { .queue = LIST_HEAD_INIT(call_function.queue), .lock = __RAW_SPIN_LOCK_UNLOCKED(call_function.lock), }; enum { CSD_FLAG_LOCK = 0x01, }; struct call_function_data { struct call_single_data csd; atomic_t refs; cpumask_var_t cpumask; cpumask_var_t cpumask_ipi; }; static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data); struct call_single_queue { struct list_head list; raw_spinlock_t lock; }; static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue); static int hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) { long cpu = (long)hcpu; struct call_function_data *cfd = &per_cpu(cfd_data, cpu); switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, cpu_to_node(cpu))) return notifier_from_errno(-ENOMEM); if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL, cpu_to_node(cpu))) return notifier_from_errno(-ENOMEM); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: case CPU_DEAD: case CPU_DEAD_FROZEN: free_cpumask_var(cfd->cpumask); free_cpumask_var(cfd->cpumask_ipi); break; #endif }; return NOTIFY_OK; } static struct notifier_block __cpuinitdata hotplug_cfd_notifier = { .notifier_call = hotplug_cfd, }; void __init call_function_init(void) { void *cpu = (void *)(long)smp_processor_id(); int i; for_each_possible_cpu(i) { struct call_single_queue *q = &per_cpu(call_single_queue, i); raw_spin_lock_init(&q->lock); INIT_LIST_HEAD(&q->list); } 
hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu); register_cpu_notifier(&hotplug_cfd_notifier); } /* * csd_lock/csd_unlock used to serialize access to per-cpu csd resources * * For non-synchronous ipi calls the csd can still be in use by the * previous function call. For multi-cpu calls its even more interesting * as we'll have to ensure no other cpu is observing our csd. */ static void csd_lock_wait(struct call_single_data *data) { while (data->flags & CSD_FLAG_LOCK) cpu_relax(); } static void csd_lock(struct call_single_data *data) { csd_lock_wait(data); data->flags = CSD_FLAG_LOCK; /* * prevent CPU from reordering the above assignment * to ->flags with any subsequent assignments to other * fields of the specified call_single_data structure: */ smp_mb(); } static void csd_unlock(struct call_single_data *data) { WARN_ON(!(data->flags & CSD_FLAG_LOCK)); /* * ensure we're all done before releasing data: */ smp_mb(); data->flags &= ~CSD_FLAG_LOCK; } /* * Insert a previously allocated call_single_data element * for execution on the given CPU. data must already have * ->func, ->info, and ->flags set. */ static void generic_exec_single(int cpu, struct call_single_data *data, int wait) { struct call_single_queue *dst = &per_cpu(call_single_queue, cpu); unsigned long flags; int ipi; raw_spin_lock_irqsave(&dst->lock, flags); ipi = list_empty(&dst->list); list_add_tail(&data->list, &dst->list); raw_spin_unlock_irqrestore(&dst->lock, flags); /* * The list addition should be visible before sending the IPI * handler locks the list to pull the entry off it because of * normal cache coherency rules implied by spinlocks. * * If IPIs can go out of order to the cache coherency protocol * in an architecture, sufficient synchronisation should be added * to arch code to make it appear to obey cache coherency WRT * locking and barrier primitives. Generic code isn't really * equipped to do the right thing... 
*/ if (ipi) arch_send_call_function_single_ipi(cpu); if (wait) csd_lock_wait(data); } /* * Invoked by arch to handle an IPI for call function. Must be called with * interrupts disabled. */ void generic_smp_call_function_interrupt(void) { struct call_function_data *data; int cpu = smp_processor_id(); /* * Shouldn't receive this interrupt on a cpu that is not yet online. */ WARN_ON_ONCE(!cpu_online(cpu)); /* * Ensure entry is visible on call_function_queue after we have * entered the IPI. See comment in smp_call_function_many. * If we don't have this, then we may miss an entry on the list * and never get another IPI to process it. */ smp_mb(); /* * It's ok to use list_for_each_rcu() here even though we may * delete 'pos', since list_del_rcu() doesn't clear ->next */ list_for_each_entry_rcu(data, &call_function.queue, csd.list) { int refs; smp_call_func_t func; /* * Since we walk the list without any locks, we might * see an entry that was completed, removed from the * list and is in the process of being reused. * * We must check that the cpu is in the cpumask before * checking the refs, and both must be set before * executing the callback on this cpu. */ if (!cpumask_test_cpu(cpu, data->cpumask)) continue; smp_rmb(); if (atomic_read(&data->refs) == 0) continue; func = data->csd.func; /* save for later warn */ func(data->csd.info); /* * If the cpu mask is not still set then func enabled * interrupts (BUG), and this cpu took another smp call * function interrupt and executed func(info) twice * on this cpu. That nested execution decremented refs. 
*/ if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) { WARN(1, "%pf enabled interrupts and double executed\n", func); continue; } refs = atomic_dec_return(&data->refs); WARN_ON(refs < 0); if (refs) continue; WARN_ON(!cpumask_empty(data->cpumask)); raw_spin_lock(&call_function.lock); list_del_rcu(&data->csd.list); raw_spin_unlock(&call_function.lock); csd_unlock(&data->csd); } } /* * Invoked by arch to handle an IPI for call function single. Must be * called from the arch with interrupts disabled. */ void generic_smp_call_function_single_interrupt(void) { struct call_single_queue *q = &__get_cpu_var(call_single_queue); unsigned int data_flags; LIST_HEAD(list); /* * Shouldn't receive this interrupt on a cpu that is not yet online. */ WARN_ON_ONCE(!cpu_online(smp_processor_id())); raw_spin_lock(&q->lock); list_replace_init(&q->list, &list); raw_spin_unlock(&q->lock); while (!list_empty(&list)) { struct call_single_data *data; data = list_entry(list.next, struct call_single_data, list); list_del(&data->list); /* * 'data' can be invalid after this call if flags == 0 * (when called through generic_exec_single()), * so save them away before making the call: */ data_flags = data->flags; data->func(data->info); /* * Unlocked CSDs are valid through generic_exec_single(): */ if (data_flags & CSD_FLAG_LOCK) csd_unlock(data); } } static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data); /* * smp_call_function_single - Run a function on a specific CPU * @func: The function to run. This must be fast and non-blocking. * @info: An arbitrary pointer to pass to the function. * @wait: If true, wait until function has completed on other CPUs. * * Returns 0 on success, else a negative status code. 
*/ int smp_call_function_single(int cpu, smp_call_func_t func, void *info, int wait) { struct call_single_data d = { .flags = 0, }; unsigned long flags; int this_cpu; int err = 0; /* * prevent preemption and reschedule on another processor, * as well as CPU removal */ this_cpu = get_cpu(); if (cpu == this_cpu) { local_irq_save(flags); func(info); local_irq_restore(flags); } else { /* * Can deadlock when called with interrupts disabled. * We allow cpu's that are not yet online though, as no one else * can send smp call function interrupt to this cpu and as such * deadlocks can't happen. */ WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() && !oops_in_progress); if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) { struct call_single_data *data = &d; if (!wait) data = &__get_cpu_var(csd_data); csd_lock(data); data->func = func; data->info = info; generic_exec_single(cpu, data, wait); } else { err = -ENXIO; /* CPU not online */ } } put_cpu(); return err; } EXPORT_SYMBOL(smp_call_function_single); /* * smp_call_function_any - Run a function on any of the given cpus * @mask: The mask of cpus it can run on. * @func: The function to run. This must be fast and non-blocking. * @info: An arbitrary pointer to pass to the function. * @wait: If true, wait until function has completed. * * Returns 0 on success, else a negative status code (if no cpus were online). * Note that @wait will be implicitly turned on in case of allocation failures, * since we fall back to on-stack allocation. * * Selection preference: * 1) current cpu if in @mask * 2) any cpu of current node if in @mask * 3) any other online cpu in @mask */ int smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, void *info, int wait) { unsigned int cpu; const struct cpumask *nodemask; int ret; /* Try for same CPU (cheapest) */ cpu = get_cpu(); if (cpumask_test_cpu(cpu, mask)) goto call; /* Try for same node. 
*/ nodemask = cpumask_of_node(cpu_to_node(cpu)); for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids; cpu = cpumask_next_and(cpu, nodemask, mask)) { if (cpu_online(cpu)) goto call; } /* Any online will do: smp_call_function_single handles nr_cpu_ids. */ cpu = cpumask_any_and(mask, cpu_online_mask); call: ret = smp_call_function_single(cpu, func, info, wait); put_cpu(); return ret; } EXPORT_SYMBOL_GPL(smp_call_function_any); /** * __smp_call_function_single(): Run a function on a specific CPU * @cpu: The CPU to run on. * @data: Pre-allocated and setup data structure * @wait: If true, wait until function has completed on specified CPU. * * Like smp_call_function_single(), but allow caller to pass in a * pre-allocated data structure. Useful for embedding @data inside * other structures, for instance. */ void __smp_call_function_single(int cpu, struct call_single_data *data, int wait) { unsigned int this_cpu; unsigned long flags; this_cpu = get_cpu(); if (cpu == this_cpu) { local_irq_save(flags); data->func(data->info); local_irq_restore(flags); } else { /* * Can deadlock when called with interrupts disabled. * We allow cpu's that are not yet online though, as no one else * can send smp call function interrupt to this cpu and as such * deadlocks can't happen. */ WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled() && !oops_in_progress); csd_lock(data); generic_exec_single(cpu, data, wait); } put_cpu(); } /** * smp_call_function_many(): Run a function on a set of other CPUs. * @mask: The set of cpus to run on (only runs on online subset). * @func: The function to run. This must be fast and non-blocking. * @info: An arbitrary pointer to pass to the function. * @wait: If true, wait (atomically) until function has completed * on other CPUs. * * If @wait is true, then returns once @func has returned. * * You must not call this function with disabled interrupts or from a * hardware interrupt handler or from a bottom half handler. 
Preemption * must be disabled when calling this function. */ void smp_call_function_many(const struct cpumask *mask, smp_call_func_t func, void *info, bool wait) { struct call_function_data *data; unsigned long flags; int refs, cpu, next_cpu, this_cpu = smp_processor_id(); /* * Can deadlock when called with interrupts disabled. * We allow cpu's that are not yet online though, as no one else can * send smp call function interrupt to this cpu and as such deadlocks * can't happen. */ WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() && !oops_in_progress && !early_boot_irqs_disabled); /* Try to fastpath. So, what's a CPU they want? Ignoring this one. */ cpu = cpumask_first_and(mask, cpu_online_mask); if (cpu == this_cpu) cpu = cpumask_next_and(cpu, mask, cpu_online_mask); /* No online cpus? We're done. */ if (cpu >= nr_cpu_ids) return; /* Do we have another CPU which isn't us? */ next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask); if (next_cpu == this_cpu) next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask); /* Fastpath: do that cpu by itself. */ if (next_cpu >= nr_cpu_ids) { smp_call_function_single(cpu, func, info, wait); return; } data = &__get_cpu_var(cfd_data); csd_lock(&data->csd); /* This BUG_ON verifies our reuse assertions and can be removed */ BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask)); /* * The global call function queue list add and delete are protected * by a lock, but the list is traversed without any lock, relying * on the rcu list add and delete to allow safe concurrent traversal. * We reuse the call function data without waiting for any grace * period after some other cpu removes it from the global queue. * This means a cpu might find our data block as it is being * filled out. * * We hold off the interrupt handler on the other cpu by * ordering our writes to the cpu mask vs our setting of the * refs counter. 
We assert only the cpu owning the data block * will set a bit in cpumask, and each bit will only be cleared * by the subject cpu. Each cpu must first find its bit is * set and then check that refs is set indicating the element is * ready to be processed, otherwise it must skip the entry. * * On the previous iteration refs was set to 0 by another cpu. * To avoid the use of transitivity, set the counter to 0 here * so the wmb will pair with the rmb in the interrupt handler. */ atomic_set(&data->refs, 0); /* convert 3rd to 1st party write */ data->csd.func = func; data->csd.info = info; /* Ensure 0 refs is visible before mask. Also orders func and info */ smp_wmb(); /* We rely on the "and" being processed before the store */ cpumask_and(data->cpumask, mask, cpu_online_mask); cpumask_clear_cpu(this_cpu, data->cpumask); refs = cpumask_weight(data->cpumask); /* Some callers race with other cpus changing the passed mask */ if (unlikely(!refs)) { csd_unlock(&data->csd); return; } /* * After we put an entry into the list, data->cpumask * may be cleared again when another CPU sends another IPI for * a SMP function call, so data->cpumask will be zero. */ cpumask_copy(data->cpumask_ipi, data->cpumask); raw_spin_lock_irqsave(&call_function.lock, flags); /* * Place entry at the _HEAD_ of the list, so that any cpu still * observing the entry in generic_smp_call_function_interrupt() * will not miss any other list entries: */ list_add_rcu(&data->csd.list, &call_function.queue); /* * We rely on the wmb() in list_add_rcu to complete our writes * to the cpumask before this write to refs, which indicates * data is on the list and is ready to be processed. */ atomic_set(&data->refs, refs); raw_spin_unlock_irqrestore(&call_function.lock, flags); /* * Make the list addition visible before sending the ipi. * (IPIs must obey or appear to obey normal Linux cache * coherency rules -- see comment in generic_exec_single). 
*/ smp_mb(); /* Send a message to all CPUs in the map */ arch_send_call_function_ipi_mask(data->cpumask_ipi); /* Optionally wait for the CPUs to complete */ if (wait) csd_lock_wait(&data->csd); } EXPORT_SYMBOL(smp_call_function_many); /** * smp_call_function(): Run a function on all other CPUs. * @func: The function to run. This must be fast and non-blocking. * @info: An arbitrary pointer to pass to the function. * @wait: If true, wait (atomically) until function has completed * on other CPUs. * * Returns 0. * * If @wait is true, then returns once @func has returned; otherwise * it returns just before the target cpu calls @func. * * You must not call this function with disabled interrupts or from a * hardware interrupt handler or from a bottom half handler. */ int smp_call_function(smp_call_func_t func, void *info, int wait) { preempt_disable(); smp_call_function_many(cpu_online_mask, func, info, wait); preempt_enable(); return 0; } EXPORT_SYMBOL(smp_call_function); void ipi_call_lock(void) { raw_spin_lock(&call_function.lock); } void ipi_call_unlock(void) { raw_spin_unlock(&call_function.lock); } void ipi_call_lock_irq(void) { raw_spin_lock_irq(&call_function.lock); } void ipi_call_unlock_irq(void) { raw_spin_unlock_irq(&call_function.lock); } #endif /* USE_GENERIC_SMP_HELPERS */ /* Setup configured maximum number of CPUs to activate */ unsigned int setup_max_cpus = NR_CPUS; EXPORT_SYMBOL(setup_max_cpus); /* * Setup routine for controlling SMP activation * * Command-line option of "nosmp" or "maxcpus=0" will disable SMP * activation entirely (the MPS table probe still happens, though). * * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer * greater than 0, limits the maximum number of CPUs activated in * SMP mode to <NUM>. 
*/ void __weak arch_disable_smp_support(void) { } static int __init nosmp(char *str) { setup_max_cpus = 0; arch_disable_smp_support(); return 0; } early_param("nosmp", nosmp); /* this is hard limit */ static int __init nrcpus(char *str) { int nr_cpus; get_option(&str, &nr_cpus); if (nr_cpus > 0 && nr_cpus < nr_cpu_ids) nr_cpu_ids = nr_cpus; return 0; } early_param("nr_cpus", nrcpus); static int __init maxcpus(char *str) { get_option(&str, &setup_max_cpus); if (setup_max_cpus == 0) arch_disable_smp_support(); return 0; } early_param("maxcpus", maxcpus); /* Setup number of possible processor ids */ int nr_cpu_ids __read_mostly = NR_CPUS; EXPORT_SYMBOL(nr_cpu_ids); /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */ void __init setup_nr_cpu_ids(void) { nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1; } /* Called by boot processor to activate the rest. */ void __init smp_init(void) { unsigned int cpu; /* FIXME: This should be done in userspace --RR */ for_each_present_cpu(cpu) { if (num_online_cpus() >= setup_max_cpus) break; if (!cpu_online(cpu)) cpu_up(cpu); } /* Any cleanup work */ printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus()); smp_cpus_done(setup_max_cpus); } /* * Call a function on all processors. May be used during early boot while * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead * of local_irq_disable/enable(). */ int on_each_cpu(void (*func) (void *info), void *info, int wait) { unsigned long flags; int ret = 0; preempt_disable(); ret = smp_call_function(func, info, wait); local_irq_save(flags); func(info); local_irq_restore(flags); preempt_enable(); return ret; } EXPORT_SYMBOL(on_each_cpu); /** * on_each_cpu_mask(): Run a function on processors specified by * cpumask, which may include the local processor. * @mask: The set of cpus to run on (only runs on online subset). * @func: The function to run. This must be fast and non-blocking. 
* @info: An arbitrary pointer to pass to the function. * @wait: If true, wait (atomically) until function has completed * on other CPUs. * * If @wait is true, then returns once @func has returned. * * You must not call this function with disabled interrupts or * from a hardware interrupt handler or from a bottom half handler. */ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func, void *info, bool wait) { int cpu = get_cpu(); smp_call_function_many(mask, func, info, wait); if (cpumask_test_cpu(cpu, mask)) { local_irq_disable(); func(info); local_irq_enable(); } put_cpu(); } EXPORT_SYMBOL(on_each_cpu_mask); /* * on_each_cpu_cond(): Call a function on each processor for which * the supplied function cond_func returns true, optionally waiting * for all the required CPUs to finish. This may include the local * processor. * @cond_func: A callback function that is passed a cpu id and * the the info parameter. The function is called * with preemption disabled. The function should * return a blooean value indicating whether to IPI * the specified CPU. * @func: The function to run on all applicable CPUs. * This must be fast and non-blocking. * @info: An arbitrary pointer to pass to both functions. * @wait: If true, wait (atomically) until function has * completed on other CPUs. * @gfp_flags: GFP flags to use when allocating the cpumask * used internally by the function. * * The function might sleep if the GFP flags indicates a non * atomic allocation is allowed. * * Preemption is disabled to protect against CPUs going offline but not online. * CPUs going online during the call will not be seen or sent an IPI. * * You must not call this function with disabled interrupts or * from a hardware interrupt handler or from a bottom half handler. 
*/ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), smp_call_func_t func, void *info, bool wait, gfp_t gfp_flags) { cpumask_var_t cpus; int cpu, ret; might_sleep_if(gfp_flags & __GFP_WAIT); if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) { preempt_disable(); for_each_online_cpu(cpu) if (cond_func(cpu, info)) cpumask_set_cpu(cpu, cpus); on_each_cpu_mask(cpus, func, info, wait); preempt_enable(); free_cpumask_var(cpus); } else { /* * No free cpumask, bother. No matter, we'll * just have to IPI them one by one. */ preempt_disable(); for_each_online_cpu(cpu) if (cond_func(cpu, info)) { ret = smp_call_function_single(cpu, func, info, wait); WARN_ON_ONCE(!ret); } preempt_enable(); } } EXPORT_SYMBOL(on_each_cpu_cond);
gpl-2.0
jmw7912/wat-0016-kernel-2.6.37
drivers/power/ds2782_battery.c
163
9702
/* * I2C client/driver for the Maxim/Dallas DS2782 Stand-Alone Fuel Gauge IC * * Copyright (C) 2009 Bluewater Systems Ltd * * Author: Ryan Mallon <ryan@bluewatersys.com> * * DS2786 added by Yulia Vilensky <vilensky@compulab.co.il> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/swab.h> #include <linux/i2c.h> #include <linux/idr.h> #include <linux/power_supply.h> #include <linux/slab.h> #include <linux/ds2782_battery.h> #define DS2782_REG_RARC 0x06 /* Remaining active relative capacity */ #define DS278x_REG_VOLT_MSB 0x0c #define DS278x_REG_TEMP_MSB 0x0a #define DS278x_REG_CURRENT_MSB 0x0e /* EEPROM Block */ #define DS2782_REG_RSNSP 0x69 /* Sense resistor value */ /* Current unit measurement in uA for a 1 milli-ohm sense resistor */ #define DS2782_CURRENT_UNITS 1563 #define DS2786_REG_RARC 0x02 /* Remaining active relative capacity */ #define DS2786_CURRENT_UNITS 25 struct ds278x_info; struct ds278x_battery_ops { int (*get_battery_current)(struct ds278x_info *info, int *current_uA); int (*get_battery_voltage)(struct ds278x_info *info, int *voltage_uV); int (*get_battery_capacity)(struct ds278x_info *info, int *capacity); }; #define to_ds278x_info(x) container_of(x, struct ds278x_info, battery) struct ds278x_info { struct i2c_client *client; struct power_supply battery; struct ds278x_battery_ops *ops; int id; int rsns; }; static DEFINE_IDR(battery_id); static DEFINE_MUTEX(battery_lock); static inline int ds278x_read_reg(struct ds278x_info *info, int reg, u8 *val) { int ret; ret = i2c_smbus_read_byte_data(info->client, reg); if (ret < 0) { dev_err(&info->client->dev, "register read failed\n"); return ret; } *val = ret; return 0; } static inline int ds278x_read_reg16(struct ds278x_info *info, int reg_msb, s16 
*val) { int ret; ret = swab16(i2c_smbus_read_word_data(info->client, reg_msb)); if (ret < 0) { dev_err(&info->client->dev, "register read failed\n"); return ret; } *val = ret; return 0; } static int ds278x_get_temp(struct ds278x_info *info, int *temp) { s16 raw; int err; /* * Temperature is measured in units of 0.125 degrees celcius, the * power_supply class measures temperature in tenths of degrees * celsius. The temperature value is stored as a 10 bit number, plus * sign in the upper bits of a 16 bit register. */ err = ds278x_read_reg16(info, DS278x_REG_TEMP_MSB, &raw); if (err) return err; *temp = ((raw / 32) * 125) / 100; return 0; } static int ds2782_get_current(struct ds278x_info *info, int *current_uA) { int sense_res; int err; u8 sense_res_raw; s16 raw; /* * The units of measurement for current are dependent on the value of * the sense resistor. */ err = ds278x_read_reg(info, DS2782_REG_RSNSP, &sense_res_raw); if (err) return err; if (sense_res_raw == 0) { dev_err(&info->client->dev, "sense resistor value is 0\n"); return -ENXIO; } sense_res = 1000 / sense_res_raw; dev_dbg(&info->client->dev, "sense resistor = %d milli-ohms\n", sense_res); err = ds278x_read_reg16(info, DS278x_REG_CURRENT_MSB, &raw); if (err) return err; *current_uA = raw * (DS2782_CURRENT_UNITS / sense_res); return 0; } static int ds2782_get_voltage(struct ds278x_info *info, int *voltage_uV) { s16 raw; int err; /* * Voltage is measured in units of 4.88mV. 
The voltage is stored as * a 10-bit number plus sign, in the upper bits of a 16-bit register */ err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw); if (err) return err; *voltage_uV = (raw / 32) * 4800; return 0; } static int ds2782_get_capacity(struct ds278x_info *info, int *capacity) { int err; u8 raw; err = ds278x_read_reg(info, DS2782_REG_RARC, &raw); if (err) return err; *capacity = raw; return 0; } static int ds2786_get_current(struct ds278x_info *info, int *current_uA) { int err; s16 raw; err = ds278x_read_reg16(info, DS278x_REG_CURRENT_MSB, &raw); if (err) return err; *current_uA = (raw / 16) * (DS2786_CURRENT_UNITS / info->rsns); return 0; } static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uV) { s16 raw; int err; /* * Voltage is measured in units of 1.22mV. The voltage is stored as * a 10-bit number plus sign, in the upper bits of a 16-bit register */ err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw); if (err) return err; *voltage_uV = (raw / 8) * 1220; return 0; } static int ds2786_get_capacity(struct ds278x_info *info, int *capacity) { int err; u8 raw; err = ds278x_read_reg(info, DS2786_REG_RARC, &raw); if (err) return err; /* Relative capacity is displayed with resolution 0.5 % */ *capacity = raw/2 ; return 0; } static int ds278x_get_status(struct ds278x_info *info, int *status) { int err; int current_uA; int capacity; err = info->ops->get_battery_current(info, &current_uA); if (err) return err; err = info->ops->get_battery_capacity(info, &capacity); if (err) return err; if (capacity == 100) *status = POWER_SUPPLY_STATUS_FULL; else if (current_uA == 0) *status = POWER_SUPPLY_STATUS_NOT_CHARGING; else if (current_uA < 0) *status = POWER_SUPPLY_STATUS_DISCHARGING; else *status = POWER_SUPPLY_STATUS_CHARGING; return 0; } static int ds278x_battery_get_property(struct power_supply *psy, enum power_supply_property prop, union power_supply_propval *val) { struct ds278x_info *info = to_ds278x_info(psy); int ret; switch (prop) { 
case POWER_SUPPLY_PROP_STATUS: ret = ds278x_get_status(info, &val->intval); break; case POWER_SUPPLY_PROP_CAPACITY: ret = info->ops->get_battery_capacity(info, &val->intval); break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: ret = info->ops->get_battery_voltage(info, &val->intval); break; case POWER_SUPPLY_PROP_CURRENT_NOW: ret = info->ops->get_battery_current(info, &val->intval); break; case POWER_SUPPLY_PROP_TEMP: ret = ds278x_get_temp(info, &val->intval); break; default: ret = -EINVAL; } return ret; } static enum power_supply_property ds278x_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_TEMP, }; static void ds278x_power_supply_init(struct power_supply *battery) { battery->type = POWER_SUPPLY_TYPE_BATTERY; battery->properties = ds278x_battery_props; battery->num_properties = ARRAY_SIZE(ds278x_battery_props); battery->get_property = ds278x_battery_get_property; battery->external_power_changed = NULL; } static int ds278x_battery_remove(struct i2c_client *client) { struct ds278x_info *info = i2c_get_clientdata(client); power_supply_unregister(&info->battery); kfree(info->battery.name); mutex_lock(&battery_lock); idr_remove(&battery_id, info->id); mutex_unlock(&battery_lock); kfree(info); return 0; } enum ds278x_num_id { DS2782 = 0, DS2786, }; static struct ds278x_battery_ops ds278x_ops[] = { [DS2782] = { .get_battery_current = ds2782_get_current, .get_battery_voltage = ds2782_get_voltage, .get_battery_capacity = ds2782_get_capacity, }, [DS2786] = { .get_battery_current = ds2786_get_current, .get_battery_voltage = ds2786_get_voltage, .get_battery_capacity = ds2786_get_capacity, } }; static int ds278x_battery_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct ds278x_platform_data *pdata = client->dev.platform_data; struct ds278x_info *info; int ret; int num; /* * ds2786 should have the sense resistor value set * in the platform data */ if 
(id->driver_data == DS2786 && !pdata) { dev_err(&client->dev, "missing platform data for ds2786\n"); return -EINVAL; } /* Get an ID for this battery */ ret = idr_pre_get(&battery_id, GFP_KERNEL); if (ret == 0) { ret = -ENOMEM; goto fail_id; } mutex_lock(&battery_lock); ret = idr_get_new(&battery_id, client, &num); mutex_unlock(&battery_lock); if (ret < 0) goto fail_id; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { ret = -ENOMEM; goto fail_info; } info->battery.name = kasprintf(GFP_KERNEL, "%s-%d", client->name, num); if (!info->battery.name) { ret = -ENOMEM; goto fail_name; } if (id->driver_data == DS2786) info->rsns = pdata->rsns; i2c_set_clientdata(client, info); info->client = client; info->id = num; info->ops = &ds278x_ops[id->driver_data]; ds278x_power_supply_init(&info->battery); ret = power_supply_register(&client->dev, &info->battery); if (ret) { dev_err(&client->dev, "failed to register battery\n"); goto fail_register; } return 0; fail_register: kfree(info->battery.name); fail_name: kfree(info); fail_info: mutex_lock(&battery_lock); idr_remove(&battery_id, num); mutex_unlock(&battery_lock); fail_id: return ret; } static const struct i2c_device_id ds278x_id[] = { {"ds2782", DS2782}, {"ds2786", DS2786}, {}, }; static struct i2c_driver ds278x_battery_driver = { .driver = { .name = "ds2782-battery", }, .probe = ds278x_battery_probe, .remove = ds278x_battery_remove, .id_table = ds278x_id, }; static int __init ds278x_init(void) { return i2c_add_driver(&ds278x_battery_driver); } module_init(ds278x_init); static void __exit ds278x_exit(void) { i2c_del_driver(&ds278x_battery_driver); } module_exit(ds278x_exit); MODULE_AUTHOR("Ryan Mallon <ryan@bluewatersys.com>"); MODULE_DESCRIPTION("Maxim/Dallas DS2782 Stand-Alone Fuel Gauage IC driver"); MODULE_LICENSE("GPL");
gpl-2.0
kevinleegithup/mysql-5.6.17
strings/str2int.c
163
6739
/* Copyright (c) 2000-2003, 2006 MySQL AB Use is subject to license terms. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* str2int(src, radix, lower, upper, &val) converts the string pointed to by src to an integer and stores it in val. It skips leading spaces and tabs (but not newlines, formfeeds, backspaces), then it accepts an optional sign and a sequence of digits in the specified radix. The result should satisfy lower <= *val <= upper. The result is a pointer to the first character after the number; trailing spaces will NOT be skipped. If an error is detected, the result will be NullS, the value put in val will be 0, and errno will be set to EDOM if there are no digits ERANGE if the result would overflow or otherwise fail to lie within the specified bounds. Check that the bounds are right for your machine. This looks amazingly complicated for what you probably thought was an easy task. Coping with integer overflow and the asymmetric range of twos complement machines is anything but easy. So that users of atoi and atol can check whether an error occured, I have taken a wholly unprecedented step: errno is CLEARED if this call has no problems. */ #include <my_global.h> #include "m_string.h" #include "m_ctype.h" #include "my_sys.h" /* defines errno */ #include <errno.h> #define char_val(X) (X >= '0' && X <= '9' ? X-'0' :\ X >= 'A' && X <= 'Z' ? X-'A'+10 :\ X >= 'a' && X <= 'z' ? 
X-'a'+10 :\ '\177') char *str2int(register const char *src, register int radix, long int lower, long int upper, long int *val) { int sign; /* is number negative (+1) or positive (-1) */ int n; /* number of digits yet to be converted */ long limit; /* "largest" possible valid input */ long scale; /* the amount to multiply next digit by */ long sofar; /* the running value */ register int d; /* (negative of) next digit */ char *start; int digits[32]; /* Room for numbers */ /* Make sure *val is sensible in case of error */ *val = 0; /* Check that the radix is in the range 2..36 */ #ifndef DBUG_OFF if (radix < 2 || radix > 36) { errno=EDOM; return NullS; } #endif /* The basic problem is: how do we handle the conversion of a number without resorting to machine-specific code to check for overflow? Obviously, we have to ensure that no calculation can overflow. We are guaranteed that the "lower" and "upper" arguments are valid machine integers. On sign-and-magnitude, twos-complement, and ones-complement machines all, if +|n| is representable, so is -|n|, but on twos complement machines the converse is not true. So the "maximum" representable number has a negative representative. Limit is set to min(-|lower|,-|upper|); this is the "largest" number we are concerned with. */ /* Calculate Limit using Scale as a scratch variable */ if ((limit = lower) > 0) limit = -limit; if ((scale = upper) > 0) scale = -scale; if (scale < limit) limit = scale; /* Skip leading spaces and check for a sign. Note: because on a 2s complement machine MinLong is a valid integer but |MinLong| is not, we have to keep the current converted value (and the scale!) as *negative* numbers, so the sign is the opposite of what you might expect. */ while (my_isspace(&my_charset_latin1,*src)) src++; sign = -1; if (*src == '+') src++; else if (*src == '-') src++, sign = 1; /* Skip leading zeros so that we never compute a power of radix in scale that we won't have a need for. 
Otherwise sticking enough 0s in front of a number could cause the multiplication to overflow when it neededn't. */ start=(char*) src; while (*src == '0') src++; /* Move over the remaining digits. We have to convert from left to left in order to avoid overflow. Answer is after last digit. */ for (n = 0; (digits[n]=char_val(*src)) < radix && n < 20; n++,src++) ; /* Check that there is at least one digit */ if (start == src) { errno=EDOM; return NullS; } /* The invariant we want to maintain is that src is just to the right of n digits, we've converted k digits to sofar, scale = -radix**k, and scale < sofar < 0. Now if the final number is to be within the original Limit, we must have (to the left)*scale+sofar >= Limit, or (to the left)*scale >= Limit-sofar, i.e. the digits to the left of src must form an integer <= (Limit-sofar)/(scale). In particular, this is true of the next digit. In our incremental calculation of Limit, IT IS VITAL that (-|N|)/(-|D|) = |N|/|D| */ for (sofar = 0, scale = -1; --n >= 1;) { if ((long) -(d=digits[n]) < limit) { errno=ERANGE; return NullS; } limit = (limit+d)/radix, sofar += d*scale; scale *= radix; } if (n == 0) { if ((long) -(d=digits[n]) < limit) /* get last digit */ { errno=ERANGE; return NullS; } sofar+=d*scale; } /* Now it might still happen that sofar = -32768 or its equivalent, so we can't just multiply by the sign and check that the result is in the range lower..upper. All of this caution is a right pain in the neck. If only there were a standard routine which says generate thus and such a signal on integer overflow... But not enough machines can do it *SIGH*. 
*/ if (sign < 0) { if (sofar < -LONG_MAX || (sofar= -sofar) > upper) { errno=ERANGE; return NullS; } } else if (sofar < lower) { errno=ERANGE; return NullS; } *val = sofar; errno=0; /* indicate that all went well */ return (char*) src; } /* Theese are so slow compared with ordinary, optimized atoi */ #ifdef WANT_OUR_ATOI int atoi(const char *src) { long val; str2int(src, 10, (long) INT_MIN, (long) INT_MAX, &val); return (int) val; } long atol(const char *src) { long val; str2int(src, 10, LONG_MIN, LONG_MAX, &val); return val; } #endif /* WANT_OUR_ATOI */
gpl-2.0
avareldalton85/rt-linux-rpi2
drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
419
24998
/****************************************************************************** * * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * ******************************************************************************/ #define _USB_OPS_LINUX_C_ #include <drv_types.h> #include <recv_osdep.h> #include <rtw_sreset.h> static void interrupt_handler_8188eu(struct adapter *adapt, u16 pkt_len, u8 *pbuf) { struct hal_data_8188e *haldata = GET_HAL_DATA(adapt); if (pkt_len != INTERRUPT_MSG_FORMAT_LEN) { DBG_88E("%s Invalid interrupt content length (%d)!\n", __func__, pkt_len); return; } /* HISR */ memcpy(&(haldata->IntArray[0]), &(pbuf[USB_INTR_CONTENT_HISR_OFFSET]), 4); memcpy(&(haldata->IntArray[1]), &(pbuf[USB_INTR_CONTENT_HISRE_OFFSET]), 4); /* C2H Event */ if (pbuf[0] != 0) memcpy(&(haldata->C2hArray[0]), &(pbuf[USB_INTR_CONTENT_C2H_OFFSET]), 16); } static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb) { u8 *pbuf; u8 shift_sz = 0; u16 pkt_cnt; u32 pkt_offset, skb_len, alloc_sz; s32 transfer_len; struct recv_stat *prxstat; struct phy_stat *pphy_status = NULL; struct sk_buff *pkt_copy = NULL; struct recv_frame *precvframe = NULL; struct rx_pkt_attrib *pattrib = NULL; struct hal_data_8188e *haldata = GET_HAL_DATA(adapt); struct recv_priv *precvpriv = &adapt->recvpriv; struct __queue *pfree_recv_queue = 
&precvpriv->free_recv_queue; transfer_len = (s32)pskb->len; pbuf = pskb->data; prxstat = (struct recv_stat *)pbuf; pkt_cnt = (le32_to_cpu(prxstat->rxdw2) >> 16) & 0xff; do { RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recvbuf2recvframe: rxdesc=offsset 0:0x%08x, 4:0x%08x, 8:0x%08x, C:0x%08x\n", prxstat->rxdw0, prxstat->rxdw1, prxstat->rxdw2, prxstat->rxdw4)); prxstat = (struct recv_stat *)pbuf; precvframe = rtw_alloc_recvframe(pfree_recv_queue); if (precvframe == NULL) { RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvbuf2recvframe: precvframe==NULL\n")); DBG_88E("%s()-%d: rtw_alloc_recvframe() failed! RX Drop!\n", __func__, __LINE__); goto _exit_recvbuf2recvframe; } INIT_LIST_HEAD(&precvframe->list); precvframe->len = 0; update_recvframe_attrib_88e(precvframe, prxstat); pattrib = &precvframe->attrib; if ((pattrib->crc_err) || (pattrib->icv_err)) { DBG_88E("%s: RX Warning! crc_err=%d icv_err=%d, skip!\n", __func__, pattrib->crc_err, pattrib->icv_err); rtw_free_recvframe(precvframe, pfree_recv_queue); goto _exit_recvbuf2recvframe; } if ((pattrib->physt) && (pattrib->pkt_rpt_type == NORMAL_RX)) pphy_status = (struct phy_stat *)(pbuf + RXDESC_OFFSET); pkt_offset = RXDESC_SIZE + pattrib->drvinfo_sz + pattrib->shift_sz + pattrib->pkt_len; if ((pattrib->pkt_len <= 0) || (pkt_offset > transfer_len)) { RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recvbuf2recvframe: pkt_len<=0\n")); DBG_88E("%s()-%d: RX Warning!,pkt_len<=0 or pkt_offset> transfoer_len\n", __func__, __LINE__); rtw_free_recvframe(precvframe, pfree_recv_queue); goto _exit_recvbuf2recvframe; } /* Modified by Albert 20101213 */ /* For 8 bytes IP header alignment. */ if (pattrib->qos) /* Qos data, wireless lan header length is 26 */ shift_sz = 6; else shift_sz = 0; skb_len = pattrib->pkt_len; /* for first fragment packet, driver need allocate 1536+drvinfo_sz+RXDESC_SIZE to defrag packet. 
*/ /* modify alloc_sz for recvive crc error packet by thomas 2011-06-02 */ if ((pattrib->mfrag == 1) && (pattrib->frag_num == 0)) { if (skb_len <= 1650) alloc_sz = 1664; else alloc_sz = skb_len + 14; } else { alloc_sz = skb_len; /* 6 is for IP header 8 bytes alignment in QoS packet case. */ /* 8 is for skb->data 4 bytes alignment. */ alloc_sz += 14; } pkt_copy = netdev_alloc_skb(adapt->pnetdev, alloc_sz); if (pkt_copy) { pkt_copy->dev = adapt->pnetdev; precvframe->pkt = pkt_copy; precvframe->rx_head = pkt_copy->data; precvframe->rx_end = pkt_copy->data + alloc_sz; skb_reserve(pkt_copy, 8 - ((size_t)(pkt_copy->data) & 7));/* force pkt_copy->data at 8-byte alignment address */ skb_reserve(pkt_copy, shift_sz);/* force ip_hdr at 8-byte alignment address according to shift_sz. */ memcpy(pkt_copy->data, (pbuf + pattrib->drvinfo_sz + RXDESC_SIZE), skb_len); precvframe->rx_tail = pkt_copy->data; precvframe->rx_data = pkt_copy->data; } else { if ((pattrib->mfrag == 1) && (pattrib->frag_num == 0)) { DBG_88E("recvbuf2recvframe: alloc_skb fail , drop frag frame\n"); rtw_free_recvframe(precvframe, pfree_recv_queue); goto _exit_recvbuf2recvframe; } precvframe->pkt = skb_clone(pskb, GFP_ATOMIC); if (precvframe->pkt) { precvframe->rx_tail = pbuf + pattrib->drvinfo_sz + RXDESC_SIZE; precvframe->rx_head = precvframe->rx_tail; precvframe->rx_data = precvframe->rx_tail; precvframe->rx_end = pbuf + pattrib->drvinfo_sz + RXDESC_SIZE + alloc_sz; } else { DBG_88E("recvbuf2recvframe: skb_clone fail\n"); rtw_free_recvframe(precvframe, pfree_recv_queue); goto _exit_recvbuf2recvframe; } } recvframe_put(precvframe, skb_len); switch (haldata->UsbRxAggMode) { case USB_RX_AGG_DMA: case USB_RX_AGG_MIX: pkt_offset = (u16) round_up(pkt_offset, 128); break; case USB_RX_AGG_USB: pkt_offset = (u16) round_up(pkt_offset, 4); break; case USB_RX_AGG_DISABLE: default: break; } if (pattrib->pkt_rpt_type == NORMAL_RX) { /* Normal rx packet */ if (pattrib->physt) update_recvframe_phyinfo_88e(precvframe, 
(struct phy_stat *)pphy_status); if (rtw_recv_entry(precvframe) != _SUCCESS) { RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvbuf2recvframe: rtw_recv_entry(precvframe) != _SUCCESS\n")); } } else { /* enqueue recvframe to txrtp queue */ if (pattrib->pkt_rpt_type == TX_REPORT1) { /* CCX-TXRPT ack for xmit mgmt frames. */ handle_txrpt_ccx_88e(adapt, precvframe->rx_data); } else if (pattrib->pkt_rpt_type == TX_REPORT2) { ODM_RA_TxRPT2Handle_8188E( &haldata->odmpriv, precvframe->rx_data, pattrib->pkt_len, pattrib->MacIDValidEntry[0], pattrib->MacIDValidEntry[1] ); } else if (pattrib->pkt_rpt_type == HIS_REPORT) { interrupt_handler_8188eu(adapt, pattrib->pkt_len, precvframe->rx_data); } rtw_free_recvframe(precvframe, pfree_recv_queue); } pkt_cnt--; transfer_len -= pkt_offset; pbuf += pkt_offset; precvframe = NULL; pkt_copy = NULL; if (transfer_len > 0 && pkt_cnt == 0) pkt_cnt = (le32_to_cpu(prxstat->rxdw2)>>16) & 0xff; } while ((transfer_len > 0) && (pkt_cnt > 0)); _exit_recvbuf2recvframe: return _SUCCESS; } unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr) { unsigned int pipe = 0, ep_num = 0; struct usb_device *pusbd = pdvobj->pusbdev; if (addr == RECV_BULK_IN_ADDR) { pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]); } else if (addr == RECV_INT_IN_ADDR) { pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[1]); } else if (addr < HW_QUEUE_ENTRY) { ep_num = pdvobj->Queue2Pipe[addr]; pipe = usb_sndbulkpipe(pusbd, ep_num); } return pipe; } static int usbctrl_vendorreq(struct adapter *adapt, u8 request, u16 value, u16 index, void *pdata, u16 len, u8 requesttype) { struct dvobj_priv *dvobjpriv = adapter_to_dvobj(adapt); struct usb_device *udev = dvobjpriv->pusbdev; unsigned int pipe; int status = 0; u8 reqtype; u8 *pIo_buf; int vendorreq_times = 0; if ((adapt->bSurpriseRemoved) || (adapt->pwrctrlpriv.pnp_bstop_trx)) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usbctrl_vendorreq:(adapt->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n")); status = 
-EPERM; goto exit; } if (len > MAX_VENDOR_REQ_CMD_SIZE) { DBG_88E("[%s] Buffer len error ,vendor request failed\n", __func__); status = -EINVAL; goto exit; } _enter_critical_mutex(&dvobjpriv->usb_vendor_req_mutex, NULL); /* Acquire IO memory for vendorreq */ pIo_buf = dvobjpriv->usb_vendor_req_buf; if (pIo_buf == NULL) { DBG_88E("[%s] pIo_buf == NULL\n", __func__); status = -ENOMEM; goto release_mutex; } while (++vendorreq_times <= MAX_USBCTRL_VENDORREQ_TIMES) { memset(pIo_buf, 0, len); if (requesttype == 0x01) { pipe = usb_rcvctrlpipe(udev, 0);/* read_in */ reqtype = REALTEK_USB_VENQT_READ; } else { pipe = usb_sndctrlpipe(udev, 0);/* write_out */ reqtype = REALTEK_USB_VENQT_WRITE; memcpy(pIo_buf, pdata, len); } status = usb_control_msg(udev, pipe, request, reqtype, value, index, pIo_buf, len, RTW_USB_CONTROL_MSG_TIMEOUT); if (status == len) { /* Success this control transfer. */ if (requesttype == 0x01) memcpy(pdata, pIo_buf, len); } else { /* error cases */ DBG_88E("reg 0x%x, usb %s %u fail, status:%d value=0x%x, vendorreq_times:%d\n", value, (requesttype == 0x01) ? "read" : "write", len, status, *(u32 *)pdata, vendorreq_times); if (status < 0) { if (status == (-ESHUTDOWN) || status == -ENODEV) { adapt->bSurpriseRemoved = true; } else { struct hal_data_8188e *haldata = GET_HAL_DATA(adapt); haldata->srestpriv.Wifi_Error_Status = USB_VEN_REQ_CMD_FAIL; } } else { /* status != len && status >= 0 */ if (status > 0) { if (requesttype == 0x01) { /* For Control read transfer, we have to copy the read data from pIo_buf to pdata. 
*/ memcpy(pdata, pIo_buf, len); } } } } /* firmware download is checksumed, don't retry */ if ((value >= FW_8188E_START_ADDRESS && value <= FW_8188E_END_ADDRESS) || status == len) break; } release_mutex: mutex_unlock(&dvobjpriv->usb_vendor_req_mutex); exit: return status; } u8 usb_read8(struct adapter *adapter, u32 addr) { u8 request; u8 requesttype; u16 wvalue; u16 index; u16 len; u8 data = 0; request = 0x05; requesttype = 0x01;/* read_in */ index = 0;/* n/a */ wvalue = (u16)(addr&0x0000ffff); len = 1; usbctrl_vendorreq(adapter, request, wvalue, index, &data, len, requesttype); return data; } u16 usb_read16(struct adapter *adapter, u32 addr) { u8 request; u8 requesttype; u16 wvalue; u16 index; u16 len; __le32 data; request = 0x05; requesttype = 0x01;/* read_in */ index = 0;/* n/a */ wvalue = (u16)(addr&0x0000ffff); len = 2; usbctrl_vendorreq(adapter, request, wvalue, index, &data, len, requesttype); return (u16)(le32_to_cpu(data)&0xffff); } u32 usb_read32(struct adapter *adapter, u32 addr) { u8 request; u8 requesttype; u16 wvalue; u16 index; u16 len; __le32 data; request = 0x05; requesttype = 0x01;/* read_in */ index = 0;/* n/a */ wvalue = (u16)(addr&0x0000ffff); len = 4; usbctrl_vendorreq(adapter, request, wvalue, index, &data, len, requesttype); return le32_to_cpu(data); } static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs) { struct recv_buf *precvbuf = (struct recv_buf *)purb->context; struct adapter *adapt = (struct adapter *)precvbuf->adapter; struct recv_priv *precvpriv = &adapt->recvpriv; RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete!!!\n")); precvpriv->rx_pending_cnt--; if (adapt->bSurpriseRemoved || adapt->bDriverStopped || adapt->bReadPortCancel) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete:bDriverStopped(%d) OR bSurpriseRemoved(%d)\n", adapt->bDriverStopped, adapt->bSurpriseRemoved)); precvbuf->reuse = true; DBG_88E("%s() RX Warning! 
bDriverStopped(%d) OR bSurpriseRemoved(%d) bReadPortCancel(%d)\n", __func__, adapt->bDriverStopped, adapt->bSurpriseRemoved, adapt->bReadPortCancel); return; } if (purb->status == 0) { /* SUCCESS */ if ((purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete: (purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)\n")); precvbuf->reuse = true; usb_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf); DBG_88E("%s()-%d: RX Warning!\n", __func__, __LINE__); } else { skb_put(precvbuf->pskb, purb->actual_length); skb_queue_tail(&precvpriv->rx_skb_queue, precvbuf->pskb); if (skb_queue_len(&precvpriv->rx_skb_queue) <= 1) tasklet_schedule(&precvpriv->recv_tasklet); precvbuf->pskb = NULL; precvbuf->reuse = false; usb_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf); } } else { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete : purb->status(%d) != 0\n", purb->status)); DBG_88E("###=> usb_read_port_complete => urb status(%d)\n", purb->status); skb_put(precvbuf->pskb, purb->actual_length); precvbuf->pskb = NULL; switch (purb->status) { case -EINVAL: case -EPIPE: case -ENODEV: case -ESHUTDOWN: adapt->bSurpriseRemoved = true; case -ENOENT: adapt->bDriverStopped = true; RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete:bDriverStopped=true\n")); break; case -EPROTO: case -EOVERFLOW: { struct hal_data_8188e *haldata = GET_HAL_DATA(adapt); haldata->srestpriv.Wifi_Error_Status = USB_READ_PORT_FAIL; } precvbuf->reuse = true; usb_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf); break; case -EINPROGRESS: DBG_88E("ERROR: URB IS IN PROGRESS!\n"); break; default: break; } } } u32 usb_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *rmem) { struct urb *purb = NULL; struct recv_buf *precvbuf = (struct recv_buf *)rmem; struct dvobj_priv *pdvobj = adapter_to_dvobj(adapter); 
struct recv_priv *precvpriv = &adapter->recvpriv; struct usb_device *pusbd = pdvobj->pusbdev; int err; unsigned int pipe; size_t tmpaddr = 0; size_t alignment = 0; u32 ret = _SUCCESS; if (adapter->bDriverStopped || adapter->bSurpriseRemoved || adapter->pwrctrlpriv.pnp_bstop_trx) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port:(adapt->bDriverStopped ||adapt->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n")); return _FAIL; } if (!precvbuf) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port:precvbuf==NULL\n")); return _FAIL; } if ((!precvbuf->reuse) || (precvbuf->pskb == NULL)) { precvbuf->pskb = skb_dequeue(&precvpriv->free_recv_skb_queue); if (NULL != precvbuf->pskb) precvbuf->reuse = true; } /* re-assign for linux based on skb */ if ((!precvbuf->reuse) || (precvbuf->pskb == NULL)) { precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ); if (precvbuf->pskb == NULL) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("init_recvbuf(): alloc_skb fail!\n")); DBG_88E("#### usb_read_port() alloc_skb fail!#####\n"); return _FAIL; } tmpaddr = (size_t)precvbuf->pskb->data; alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1); skb_reserve(precvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment)); } else { /* reuse skb */ precvbuf->reuse = false; } precvpriv->rx_pending_cnt++; purb = precvbuf->purb; /* translate DMA FIFO addr to pipehandle */ pipe = ffaddr2pipehdl(pdvobj, addr); usb_fill_bulk_urb(purb, pusbd, pipe, precvbuf->pskb->data, MAX_RECVBUF_SZ, usb_read_port_complete, precvbuf);/* context is precvbuf */ err = usb_submit_urb(purb, GFP_ATOMIC); if ((err) && (err != (-EPERM))) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("cannot submit rx in-token(err=0x%.8x), URB_STATUS =0x%.8x", err, purb->status)); DBG_88E("cannot submit rx in-token(err = 0x%08x),urb_status = %d\n", err, purb->status); ret = _FAIL; } return ret; } void usb_read_port_cancel(struct adapter *padapter) { int i; struct recv_buf *precvbuf; precvbuf = (struct 
recv_buf *)padapter->recvpriv.precv_buf; DBG_88E("%s\n", __func__); padapter->bReadPortCancel = true; for (i = 0; i < NR_RECVBUFF; i++) { precvbuf->reuse = true; if (precvbuf->purb) usb_kill_urb(precvbuf->purb); precvbuf++; } } int usb_write8(struct adapter *adapter, u32 addr, u8 val) { u8 request; u8 requesttype; u16 wvalue; u16 index; u16 len; u8 data; int ret; request = 0x05; requesttype = 0x00;/* write_out */ index = 0;/* n/a */ wvalue = (u16)(addr&0x0000ffff); len = 1; data = val; ret = usbctrl_vendorreq(adapter, request, wvalue, index, &data, len, requesttype); return ret; } int usb_write16(struct adapter *adapter, u32 addr, u16 val) { u8 request; u8 requesttype; u16 wvalue; u16 index; u16 len; __le32 data; int ret; request = 0x05; requesttype = 0x00;/* write_out */ index = 0;/* n/a */ wvalue = (u16)(addr&0x0000ffff); len = 2; data = cpu_to_le32(val & 0x0000ffff); ret = usbctrl_vendorreq(adapter, request, wvalue, index, &data, len, requesttype); return ret; } int usb_write32(struct adapter *adapter, u32 addr, u32 val) { u8 request; u8 requesttype; u16 wvalue; u16 index; u16 len; __le32 data; int ret; request = 0x05; requesttype = 0x00;/* write_out */ index = 0;/* n/a */ wvalue = (u16)(addr&0x0000ffff); len = 4; data = cpu_to_le32(val); ret = usbctrl_vendorreq(adapter, request, wvalue, index, &data, len, requesttype); return ret; } int usb_writeN(struct adapter *adapter, u32 addr, u32 length, u8 *pdata) { u8 request; u8 requesttype; u16 wvalue; u16 index; u16 len; u8 buf[VENDOR_CMD_MAX_DATA_LEN] = {0}; int ret; request = 0x05; requesttype = 0x00;/* write_out */ index = 0;/* n/a */ wvalue = (u16)(addr&0x0000ffff); len = length; memcpy(buf, pdata, len); ret = usbctrl_vendorreq(adapter, request, wvalue, index, buf, len, requesttype); return RTW_STATUS_CODE(ret); } static void usb_write_port_complete(struct urb *purb, struct pt_regs *regs) { struct xmit_buf *pxmitbuf = (struct xmit_buf *)purb->context; struct adapter *padapter = pxmitbuf->padapter; struct 
xmit_priv *pxmitpriv = &padapter->xmitpriv; switch (pxmitbuf->flags) { case VO_QUEUE_INX: pxmitpriv->voq_cnt--; break; case VI_QUEUE_INX: pxmitpriv->viq_cnt--; break; case BE_QUEUE_INX: pxmitpriv->beq_cnt--; break; case BK_QUEUE_INX: pxmitpriv->bkq_cnt--; break; case HIGH_QUEUE_INX: #ifdef CONFIG_88EU_AP_MODE rtw_chk_hi_queue_cmd(padapter); #endif break; default: break; } if (padapter->bSurpriseRemoved || padapter->bDriverStopped || padapter->bWritePortCancel) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port_complete:bDriverStopped(%d) OR bSurpriseRemoved(%d)", padapter->bDriverStopped, padapter->bSurpriseRemoved)); DBG_88E("%s(): TX Warning! bDriverStopped(%d) OR bSurpriseRemoved(%d) bWritePortCancel(%d) pxmitbuf->ext_tag(%x)\n", __func__, padapter->bDriverStopped, padapter->bSurpriseRemoved, padapter->bReadPortCancel, pxmitbuf->ext_tag); goto check_completion; } if (purb->status) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port_complete : purb->status(%d) != 0\n", purb->status)); DBG_88E("###=> urb_write_port_complete status(%d)\n", purb->status); if ((purb->status == -EPIPE) || (purb->status == -EPROTO)) { sreset_set_wifi_error_status(padapter, USB_WRITE_PORT_FAIL); } else if (purb->status == -EINPROGRESS) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port_complete: EINPROGESS\n")); goto check_completion; } else if (purb->status == -ENOENT) { DBG_88E("%s: -ENOENT\n", __func__); goto check_completion; } else if (purb->status == -ECONNRESET) { DBG_88E("%s: -ECONNRESET\n", __func__); goto check_completion; } else if (purb->status == -ESHUTDOWN) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port_complete: ESHUTDOWN\n")); padapter->bDriverStopped = true; RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port_complete:bDriverStopped = true\n")); goto check_completion; } else { padapter->bSurpriseRemoved = true; DBG_88E("bSurpriseRemoved = true\n"); RT_TRACE(_module_hci_ops_os_c_, _drv_err_, 
("usb_write_port_complete:bSurpriseRemoved = true\n")); goto check_completion; } } check_completion: rtw_sctx_done_err(&pxmitbuf->sctx, purb->status ? RTW_SCTX_DONE_WRITE_PORT_ERR : RTW_SCTX_DONE_SUCCESS); rtw_free_xmitbuf(pxmitpriv, pxmitbuf); tasklet_hi_schedule(&pxmitpriv->xmit_tasklet); } u32 usb_write_port(struct adapter *padapter, u32 addr, u32 cnt, u8 *wmem) { unsigned long irqL; unsigned int pipe; int status; u32 ret = _FAIL; struct urb *purb = NULL; struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter); struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct xmit_buf *pxmitbuf = (struct xmit_buf *)wmem; struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitbuf->priv_data; struct usb_device *pusbd = pdvobj->pusbdev; RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("+usb_write_port\n")); if ((padapter->bDriverStopped) || (padapter->bSurpriseRemoved) || (padapter->pwrctrlpriv.pnp_bstop_trx)) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port:( padapter->bDriverStopped ||padapter->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n")); rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_TX_DENY); goto exit; } spin_lock_irqsave(&pxmitpriv->lock, irqL); switch (addr) { case VO_QUEUE_INX: pxmitpriv->voq_cnt++; pxmitbuf->flags = VO_QUEUE_INX; break; case VI_QUEUE_INX: pxmitpriv->viq_cnt++; pxmitbuf->flags = VI_QUEUE_INX; break; case BE_QUEUE_INX: pxmitpriv->beq_cnt++; pxmitbuf->flags = BE_QUEUE_INX; break; case BK_QUEUE_INX: pxmitpriv->bkq_cnt++; pxmitbuf->flags = BK_QUEUE_INX; break; case HIGH_QUEUE_INX: pxmitbuf->flags = HIGH_QUEUE_INX; break; default: pxmitbuf->flags = MGT_QUEUE_INX; break; } spin_unlock_irqrestore(&pxmitpriv->lock, irqL); purb = pxmitbuf->pxmit_urb[0]; /* translate DMA FIFO addr to pipehandle */ pipe = ffaddr2pipehdl(pdvobj, addr); usb_fill_bulk_urb(purb, pusbd, pipe, pxmitframe->buf_addr, /* pxmitbuf->pbuf */ cnt, usb_write_port_complete, pxmitbuf);/* context is pxmitbuf */ status = usb_submit_urb(purb, GFP_ATOMIC); if 
(status) { rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_WRITE_PORT_ERR); DBG_88E("usb_write_port, status =%d\n", status); RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port(): usb_submit_urb, status =%x\n", status)); switch (status) { case -ENODEV: padapter->bDriverStopped = true; break; default: break; } goto exit; } ret = _SUCCESS; /* We add the URB_ZERO_PACKET flag to urb so that the host will send the zero packet automatically. */ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("-usb_write_port\n")); exit: if (ret != _SUCCESS) rtw_free_xmitbuf(pxmitpriv, pxmitbuf); return ret; } void usb_write_port_cancel(struct adapter *padapter) { int i, j; struct xmit_buf *pxmitbuf = (struct xmit_buf *)padapter->xmitpriv.pxmitbuf; DBG_88E("%s\n", __func__); padapter->bWritePortCancel = true; for (i = 0; i < NR_XMITBUFF; i++) { for (j = 0; j < 8; j++) { if (pxmitbuf->pxmit_urb[j]) usb_kill_urb(pxmitbuf->pxmit_urb[j]); } pxmitbuf++; } pxmitbuf = (struct xmit_buf *)padapter->xmitpriv.pxmit_extbuf; for (i = 0; i < NR_XMIT_EXTBUFF; i++) { for (j = 0; j < 8; j++) { if (pxmitbuf->pxmit_urb[j]) usb_kill_urb(pxmitbuf->pxmit_urb[j]); } pxmitbuf++; } } void rtl8188eu_recv_tasklet(void *priv) { struct sk_buff *pskb; struct adapter *adapt = (struct adapter *)priv; struct recv_priv *precvpriv = &adapt->recvpriv; while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) { if ((adapt->bDriverStopped) || (adapt->bSurpriseRemoved)) { DBG_88E("recv_tasklet => bDriverStopped or bSurpriseRemoved\n"); dev_kfree_skb_any(pskb); break; } recvbuf2recvframe(adapt, pskb); skb_reset_tail_pointer(pskb); pskb->len = 0; skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb); } } void rtl8188eu_xmit_tasklet(void *priv) { int ret = false; struct adapter *adapt = (struct adapter *)priv; struct xmit_priv *pxmitpriv = &adapt->xmitpriv; if (check_fwstate(&adapt->mlmepriv, _FW_UNDER_SURVEY)) return; while (1) { if ((adapt->bDriverStopped) || (adapt->bSurpriseRemoved) || (adapt->bWritePortCancel)) { 
DBG_88E("xmit_tasklet => bDriverStopped or bSurpriseRemoved or bWritePortCancel\n"); break; } ret = rtl8188eu_xmitframe_complete(adapt, pxmitpriv, NULL); if (!ret) break; } }
gpl-2.0
mukulsoni/android_kernel_samsung_ms013g
drivers/media/isdbt/fc8300/fc8300_tun_table.c
931
229011
/***************************************************************************** Copyright(c) 2013 FCI Inc. All Rights Reserved File name : fc8300_tun_table.c Description : source of FC8300 tuner driver This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA History : ---------------------------------------------------------------------- ******************************************************************************/ #include "fci_types.h" u32 ch_mode_0[7][57][20] = { { {473143, 0x6e, 0x40, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {479143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {485143, 0x75, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {491143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {497143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {503143, 0x75, 0x31, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {509143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {515143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 
0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {521143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {527143, 0x77, 0x62, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {533143, 0x77, 0x53, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {539143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {545143, 0x76, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {551143, 0x76, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {557143, 0x76, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {563143, 0x76, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {569143, 0x77, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {575143, 0x77, 0x51, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {581143, 0x77, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {587143, 0x77, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {593143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {599143, 0x78, 0x51, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {605143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {611143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 
0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {617143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {623143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {629143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {635143, 0x79, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {641143, 0x79, 0x51, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {647143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {653143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {659143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {665143, 0x78, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {671143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {677143, 0x78, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {683143, 0x78, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {689143, 0x77, 0x62, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {695143, 0x77, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {701143, 0x77, 0x52, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {707143, 0x77, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 
0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {713143, 0x77, 0x62, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {719143, 0x76, 0x42, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {725143, 0x76, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {731143, 0x76, 0x42, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {737143, 0x76, 0x52, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {743143, 0x76, 0x52, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {749143, 0x75, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {755143, 0x75, 0x51, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {761143, 0x75, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {767143, 0x75, 0x52, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {773143, 0x74, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {779143, 0x74, 0x31, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {785143, 0x74, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {791143, 0x74, 0x52, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {797143, 0x73, 0x41, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {803143, 0x73, 0x42, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 
0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {809143, 0x72, 0x41, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b} }, { {473143, 0x6E, 0x40, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {479143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {485143, 0x75, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {491143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {497143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {503143, 0x75, 0x31, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {509143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {515143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {521143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {527143, 0x77, 0x62, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {533143, 0x77, 0x53, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {539143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {545143, 0x76, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {551143, 0x76, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {557143, 0x76, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 
0x31, 0x14, 0x0f, 0x0e, 0x0b}, {563143, 0x76, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {569143, 0x77, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {575143, 0x77, 0x51, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {581143, 0x77, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {587143, 0x77, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {593143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {599143, 0x78, 0x51, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {605143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {611143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {617143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {623143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {629143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {635143, 0x79, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {641143, 0x79, 0x51, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {647143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {653143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 
0x0f, 0x0e, 0x0b}, {659143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {665143, 0x78, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {671143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {677143, 0x78, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {683143, 0x78, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {689143, 0x77, 0x62, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {695143, 0x77, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {701143, 0x77, 0x52, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {707143, 0x77, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {713143, 0x77, 0x62, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {719143, 0x76, 0x42, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {725143, 0x76, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {731143, 0x76, 0x42, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {737143, 0x76, 0x52, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {743143, 0x76, 0x52, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {749143, 0x75, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 
0x0b}, {755143, 0x75, 0x51, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {761143, 0x75, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {767143, 0x75, 0x52, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {773143, 0x74, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {779143, 0x74, 0x31, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {785143, 0x74, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {791143, 0x74, 0x52, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {797143, 0x73, 0x41, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {803143, 0x73, 0x42, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {809143, 0x72, 0x41, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b} }, { {473143, 0x6E, 0x40, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {479143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {485143, 0x75, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {491143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {497143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {503143, 0x75, 0x31, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, 
{509143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {515143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {521143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {527143, 0x77, 0x62, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {533143, 0x77, 0x53, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {539143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {545143, 0x76, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {551143, 0x76, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {557143, 0x76, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {563143, 0x76, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {569143, 0x77, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {575143, 0x77, 0x51, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {581143, 0x77, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {587143, 0x77, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {593143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {599143, 0x78, 0x51, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {605143, 0x79, 
0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {611143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {617143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {623143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {629143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {635143, 0x79, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {641143, 0x79, 0x51, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {647143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {653143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {659143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {665143, 0x78, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {671143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {677143, 0x78, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {683143, 0x78, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {689143, 0x77, 0x62, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {695143, 0x77, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {701143, 0x77, 0x52, 0x32, 
0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {707143, 0x77, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {713143, 0x77, 0x62, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {719143, 0x76, 0x42, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {725143, 0x76, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {731143, 0x76, 0x42, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {737143, 0x76, 0x52, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {743143, 0x76, 0x52, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {749143, 0x75, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {755143, 0x75, 0x51, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {761143, 0x75, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {767143, 0x75, 0x52, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {773143, 0x74, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {779143, 0x74, 0x31, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {785143, 0x74, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {791143, 0x74, 0x52, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {797143, 0x73, 0x41, 0x53, 0x74, 0x21, 
0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {803143, 0x73, 0x42, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {809143, 0x72, 0x41, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b} }, { {473143, 0x6E, 0x40, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {479143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {485143, 0x75, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {491143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {497143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {503143, 0x75, 0x31, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {509143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {515143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {521143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {527143, 0x77, 0x62, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {533143, 0x77, 0x53, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {539143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {545143, 0x76, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {551143, 0x76, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 
0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {557143, 0x76, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {563143, 0x76, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {569143, 0x77, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {575143, 0x77, 0x51, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {581143, 0x77, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {587143, 0x77, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {593143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {599143, 0x78, 0x51, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {605143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {611143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {617143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {623143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {629143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {635143, 0x79, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {641143, 0x79, 0x51, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {647143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 
0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {653143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {659143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {665143, 0x78, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {671143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {677143, 0x78, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {683143, 0x78, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {689143, 0x77, 0x62, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {695143, 0x77, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {701143, 0x77, 0x52, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {707143, 0x77, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {713143, 0x77, 0x62, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {719143, 0x76, 0x42, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {725143, 0x76, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {731143, 0x76, 0x42, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {737143, 0x76, 0x52, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {743143, 0x76, 0x52, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 
0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {749143, 0x75, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {755143, 0x75, 0x51, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {761143, 0x75, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {767143, 0x75, 0x52, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {773143, 0x74, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {779143, 0x74, 0x31, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {785143, 0x74, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {791143, 0x74, 0x52, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {797143, 0x73, 0x41, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {803143, 0x73, 0x42, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {809143, 0x72, 0x41, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b} }, { {473143, 0x6e, 0x40, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {479143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {485143, 0x75, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {491143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {497143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 
0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {503143, 0x75, 0x31, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {509143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {515143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {521143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {527143, 0x77, 0x62, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {533143, 0x77, 0x53, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {539143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {545143, 0x76, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {551143, 0x76, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {557143, 0x76, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {563143, 0x76, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {569143, 0x77, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {575143, 0x77, 0x51, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {581143, 0x77, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {587143, 0x77, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {593143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 
0x14, 0x0f, 0x0e, 0x0b}, {599143, 0x78, 0x51, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {605143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {611143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {617143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {623143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {629143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {635143, 0x79, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {641143, 0x79, 0x51, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {647143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {653143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {659143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {665143, 0x78, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {671143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {677143, 0x78, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {683143, 0x78, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {689143, 0x77, 0x62, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 
0x0e, 0x0b}, {695143, 0x77, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {701143, 0x77, 0x52, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {707143, 0x77, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {713143, 0x77, 0x62, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {719143, 0x76, 0x42, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {725143, 0x76, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {731143, 0x76, 0x42, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {737143, 0x76, 0x52, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {743143, 0x76, 0x52, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {749143, 0x75, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {755143, 0x75, 0x51, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {761143, 0x75, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {767143, 0x75, 0x52, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {773143, 0x74, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {779143, 0x74, 0x31, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {785143, 0x74, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, 
{791143, 0x74, 0x52, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {797143, 0x73, 0x41, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {803143, 0x73, 0x42, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {809143, 0x72, 0x41, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b} }, { {473143, 0x6e, 0x40, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {479143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {485143, 0x75, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {491143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {497143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {503143, 0x75, 0x31, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x81, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {509143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x81, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {515143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {521143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {527143, 0x77, 0x62, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {533143, 0x77, 0x53, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {539143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {545143, 
0x76, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {551143, 0x76, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {557143, 0x76, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {563143, 0x76, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {569143, 0x77, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {575143, 0x77, 0x51, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {581143, 0x77, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {587143, 0x77, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {593143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {599143, 0x78, 0x51, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {605143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {611143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {617143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {623143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {629143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {635143, 0x79, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {641143, 0x79, 0x51, 
0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {647143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x81, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {653143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x81, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {659143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {665143, 0x78, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {671143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {677143, 0x78, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {683143, 0x78, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x13, 0x81, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {689143, 0x77, 0x62, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x13, 0x81, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {695143, 0x77, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x13, 0x81, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {701143, 0x77, 0x52, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {707143, 0x77, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {713143, 0x77, 0x62, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {719143, 0x76, 0x42, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {725143, 0x76, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {731143, 0x76, 0x42, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {737143, 0x76, 0x52, 0x43, 0x74, 
0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {743143, 0x76, 0x52, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {749143, 0x75, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {755143, 0x75, 0x51, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {761143, 0x75, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {767143, 0x75, 0x52, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {773143, 0x74, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {779143, 0x74, 0x31, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {785143, 0x74, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {791143, 0x74, 0x52, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {797143, 0x73, 0x41, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {803143, 0x73, 0x42, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {809143, 0x72, 0x41, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b} }, { {473143, 0x6E, 0x40, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {479143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {485143, 0x75, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {491143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 
0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {497143, 0x75, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {503143, 0x75, 0x31, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {509143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {515143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {521143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {527143, 0x77, 0x62, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {533143, 0x77, 0x53, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {539143, 0x76, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {545143, 0x76, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {551143, 0x76, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {557143, 0x76, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {563143, 0x76, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {569143, 0x77, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {575143, 0x77, 0x51, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {581143, 0x77, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {587143, 0x77, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 
0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {593143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {599143, 0x78, 0x51, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {605143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {611143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {617143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {623143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {629143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {635143, 0x79, 0x41, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {641143, 0x79, 0x51, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {647143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {653143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {659143, 0x79, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {665143, 0x78, 0x42, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {671143, 0x78, 0x52, 0x32, 0x73, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {677143, 0x78, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {683143, 0x78, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 
0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {689143, 0x77, 0x62, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {695143, 0x77, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {701143, 0x77, 0x52, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {707143, 0x77, 0x63, 0x32, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {713143, 0x77, 0x62, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {719143, 0x76, 0x42, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {725143, 0x76, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {731143, 0x76, 0x42, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {737143, 0x76, 0x52, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {743143, 0x76, 0x52, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {749143, 0x75, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {755143, 0x75, 0x51, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {761143, 0x75, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {767143, 0x75, 0x52, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {773143, 0x74, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {779143, 0x74, 0x31, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 
0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {785143, 0x74, 0x41, 0x43, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {791143, 0x74, 0x52, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {797143, 0x73, 0x41, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {803143, 0x73, 0x42, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {809143, 0x72, 0x41, 0x53, 0x74, 0x21, 0x78, 0x84, 0x33, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b} } }; u32 ch_mode_1[7][9][16] = { { {210429, 0x72, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {216000, 0x72, 0x52, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {219000, 0x71, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {219429, 0x71, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {219857, 0x71, 0x64, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {220286, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {220714, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {221143, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {221571, 0x6f, 0x53, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31} }, { {210429, 0x72, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {216000, 0x72, 0x52, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {219000, 0x71, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {219429, 0x71, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 
0x31}, {219857, 0x71, 0x64, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {220286, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {220714, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {221143, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {221571, 0x6f, 0x53, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31} }, { {210429, 0x72, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {216000, 0x72, 0x52, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {219000, 0x71, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {219429, 0x71, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {219857, 0x71, 0x64, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {220286, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {220714, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {221143, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {221571, 0x6f, 0x53, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31} }, { {210429, 0x72, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {216000, 0x72, 0x52, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {219000, 0x71, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {219429, 0x71, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {219857, 0x71, 0x64, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {220286, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 
0x87, 0x10, 0x31}, {220714, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {221143, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {221571, 0x6f, 0x53, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31} }, { {210429, 0x72, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {216000, 0x72, 0x52, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {219000, 0x71, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {219429, 0x71, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {219857, 0x71, 0x64, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {220286, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {220714, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {221143, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {221571, 0x6f, 0x53, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31} }, { {210429, 0x72, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {216000, 0x72, 0x52, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {219000, 0x71, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {219429, 0x71, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {219857, 0x71, 0x64, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {220286, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {220714, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {221143, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 
0x70, 0x20, 0x87, 0x10, 0x31}, {221571, 0x6f, 0x53, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31} }, { {210429, 0x72, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {216000, 0x72, 0x52, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {219000, 0x71, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {219429, 0x71, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {219857, 0x71, 0x64, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {220286, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {220714, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {221143, 0x70, 0x63, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31}, {221571, 0x6f, 0x53, 0x42, 0x74, 0x21, 0x68, 0x84, 0x32, 0x64, 0x71, 0x70, 0x20, 0x87, 0x10, 0x31} } }; u32 ch_mode_4[7][57][20] = { { {473143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {479143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {485143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {491143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {497143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {503143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {509143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {515143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 
0x75, 0x90, 0x40, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {521143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {527143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {533143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {539143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {545143, 0x6F, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {551143, 0x6F, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {557143, 0x6F, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {563143, 0x70, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {569143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {575143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {581143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {587143, 0x71, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {593143, 0x72, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {599143, 0x72, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {605143, 0x72, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {611143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 
0x60, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {617143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {623143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {629143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {635143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {641143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {647143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {653143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {659143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {665143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {671143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {677143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {683143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {689143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {695143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {701143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {707143, 0x72, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1A, 
0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {713143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {719143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {725143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {731143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {737143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {743143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {749143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {755143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {761143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {767143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {773143, 0x6E, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {779143, 0x6E, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {785143, 0x6E, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {791143, 0x6E, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {797143, 0x6D, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {803143, 0x6D, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1A, 0x80, 0x10, 
0x31, 0x18, 0x0c, 0x0e, 0x0b}, {809143, 0x6C, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1A, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b} }, { {473143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {479143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {485143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {491143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {497143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {503143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {509143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {515143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {521143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {527143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {533143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {539143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {545143, 0x6F, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {551143, 0x6F, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {557143, 0x6F, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 
0x0c, 0x0e, 0x0b}, {563143, 0x70, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {569143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {575143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {581143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {587143, 0x71, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {593143, 0x72, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {599143, 0x72, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {605143, 0x72, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {611143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {617143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {623143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {629143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {635143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {641143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {647143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {653143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 
0x0b}, {659143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {665143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {671143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {677143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {683143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {689143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {695143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {701143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {707143, 0x72, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {713143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {719143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {725143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {731143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {737143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {743143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {749143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {755143, 
0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {761143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {767143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {773143, 0x6E, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {779143, 0x6E, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {785143, 0x6E, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {791143, 0x6E, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {797143, 0x6D, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {803143, 0x6D, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {809143, 0x6C, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b} }, { {473143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {479143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {485143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {491143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {497143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {503143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {509143, 0x6F, 0x21, 
0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {515143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {521143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {527143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {533143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {539143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {545143, 0x6F, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {551143, 0x6F, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {557143, 0x6F, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {563143, 0x70, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {569143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {575143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {581143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {587143, 0x71, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {593143, 0x72, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {599143, 0x72, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {605143, 0x72, 0x21, 0x33, 0x74, 
0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {611143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {617143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {623143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {629143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {635143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {641143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {647143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {653143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {659143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {665143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {671143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {677143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {683143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {689143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {695143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {701143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 
0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {707143, 0x72, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {713143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {719143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {725143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {731143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {737143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {743143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {749143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {755143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {761143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {767143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {773143, 0x6E, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {779143, 0x6E, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {785143, 0x6E, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {791143, 0x6E, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {797143, 0x6D, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 
0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {803143, 0x6D, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {809143, 0x6C, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b} }, { {473143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {479143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {485143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {491143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {497143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {503143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {509143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {515143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {521143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {527143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {533143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {539143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {545143, 0x6F, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {551143, 0x6F, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 
0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {557143, 0x6F, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {563143, 0x70, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {569143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {575143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {581143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {587143, 0x71, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {593143, 0x72, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {599143, 0x72, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {605143, 0x72, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {611143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {617143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {623143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {629143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {635143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {641143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {647143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 
0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {653143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {659143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {665143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {671143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {677143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {683143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {689143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {695143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {701143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {707143, 0x72, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {713143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {719143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {725143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {731143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {737143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {743143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 
0x31, 0x18, 0x0c, 0x0e, 0x0b}, {749143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {755143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {761143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {767143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {773143, 0x6E, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {779143, 0x6E, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {785143, 0x6E, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {791143, 0x6E, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {797143, 0x6D, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {803143, 0x6D, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {809143, 0x6C, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b} }, { {473143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {479143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {485143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {491143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {497143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 
0x0c, 0x0e, 0x0b}, {503143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {509143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {515143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {521143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {527143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {533143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {539143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {545143, 0x6f, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {551143, 0x6f, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {557143, 0x6f, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {563143, 0x70, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {569143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {575143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {581143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {587143, 0x71, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {593143, 0x72, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 
0x0b}, {599143, 0x72, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {605143, 0x72, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {611143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {617143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {623143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {629143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {635143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {641143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {647143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {653143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {659143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {665143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {671143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {677143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {683143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {689143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {695143, 
0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {701143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {707143, 0x72, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {713143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {719143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {725143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {731143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {737143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {743143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {749143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {755143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {761143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {767143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {773143, 0x6e, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {779143, 0x6e, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {785143, 0x6e, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {791143, 0x6e, 0x21, 
0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {797143, 0x6d, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {803143, 0x6d, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {809143, 0x6c, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b} }, { {473143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {479143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {485143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {491143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {497143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {503143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x81, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {509143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x81, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {515143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {521143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {527143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {533143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {539143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {545143, 0x6f, 0x21, 0x43, 0x74, 
0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {551143, 0x6f, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {557143, 0x6f, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {563143, 0x70, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {569143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {575143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {581143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {587143, 0x71, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {593143, 0x72, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {599143, 0x72, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {605143, 0x72, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {611143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {617143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {623143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {629143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {635143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {641143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 
0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {647143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x81, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {653143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x81, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {659143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {665143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {671143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {677143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {683143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x13, 0x81, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {689143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x13, 0x81, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {695143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x13, 0x81, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {701143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {707143, 0x72, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {713143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {719143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {725143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {731143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {737143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 
0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {743143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {749143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {755143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {761143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {767143, 0x6f, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {773143, 0x6e, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {779143, 0x6e, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {785143, 0x6e, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {791143, 0x6e, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {797143, 0x6d, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {803143, 0x6d, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {809143, 0x6c, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b} }, { {473143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {479143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {485143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {491143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 
0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {497143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {503143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {509143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {515143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {521143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {527143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {533143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {539143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {545143, 0x6F, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {551143, 0x6F, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {557143, 0x6F, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {563143, 0x70, 0x21, 0x43, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {569143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {575143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {581143, 0x70, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {587143, 0x71, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 
0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {593143, 0x72, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {599143, 0x72, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {605143, 0x72, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {611143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {617143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {623143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {629143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {635143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {641143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {647143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {653143, 0x74, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {659143, 0x73, 0x21, 0x33, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {665143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {671143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {677143, 0x74, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {683143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 
0x31, 0x18, 0x0c, 0x0e, 0x0b}, {689143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {695143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {701143, 0x73, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {707143, 0x72, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {713143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {719143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {725143, 0x71, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {731143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {737143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {743143, 0x70, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {749143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {755143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {761143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {767143, 0x6F, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {773143, 0x6E, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {779143, 0x6E, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 
0x0c, 0x0e, 0x0b}, {785143, 0x6E, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {791143, 0x6E, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {797143, 0x6D, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {803143, 0x6D, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {809143, 0x6C, 0x21, 0x53, 0x74, 0x43, 0x78, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b} } };
/*
 * ch_mode_5: 7 banks x 2 rows x 16 entries.  All seven banks hold
 * identical data: two rows keyed by 210429 and 216000, differing only
 * in their second entry (0x31 vs 0x22).
 *
 * NOTE(review): the first entry of each row appears to be a frequency
 * (plausibly kHz) and the remaining 15 entries appear to be chip
 * register values -- the code that consumes this table is not visible
 * in this chunk, so confirm that reading against the caller.
 *
 * NOTE(review): this is a writable global with external linkage; if
 * nothing outside this file takes its address or modifies it, it
 * should be declared "static const".
 */
u32 ch_mode_5[7][2][16] = {
	{ {210429, 0x6b, 0x31, 0x42, 0x74, 0x42, 0x58, 0x86, 0x44, 0x64, 0x68, 0x70, 0x20, 0x87, 0x10, 0x31},
	{216000, 0x6b, 0x22, 0x42, 0x74, 0x42, 0x58, 0x86, 0x44, 0x64, 0x68, 0x70, 0x20, 0x87, 0x10, 0x31} },
	{ {210429, 0x6b, 0x31, 0x42, 0x74, 0x42, 0x58, 0x86, 0x44, 0x64, 0x68, 0x70, 0x20, 0x87, 0x10, 0x31},
	{216000, 0x6b, 0x22, 0x42, 0x74, 0x42, 0x58, 0x86, 0x44, 0x64, 0x68, 0x70, 0x20, 0x87, 0x10, 0x31} },
	{ {210429, 0x6b, 0x31, 0x42, 0x74, 0x42, 0x58, 0x86, 0x44, 0x64, 0x68, 0x70, 0x20, 0x87, 0x10, 0x31},
	{216000, 0x6b, 0x22, 0x42, 0x74, 0x42, 0x58, 0x86, 0x44, 0x64, 0x68, 0x70, 0x20, 0x87, 0x10, 0x31} },
	{ {210429, 0x6b, 0x31, 0x42, 0x74, 0x42, 0x58, 0x86, 0x44, 0x64, 0x68, 0x70, 0x20, 0x87, 0x10, 0x31},
	{216000, 0x6b, 0x22, 0x42, 0x74, 0x42, 0x58, 0x86, 0x44, 0x64, 0x68, 0x70, 0x20, 0x87, 0x10, 0x31} },
	{ {210429, 0x6b, 0x31, 0x42, 0x74, 0x42, 0x58, 0x86, 0x44, 0x64, 0x68, 0x70, 0x20, 0x87, 0x10, 0x31},
	{216000, 0x6b, 0x22, 0x42, 0x74, 0x42, 0x58, 0x86, 0x44, 0x64, 0x68, 0x70, 0x20, 0x87, 0x10, 0x31} },
	{ {210429, 0x6b, 0x31, 0x42, 0x74, 0x42, 0x58, 0x86, 0x44, 0x64, 0x68, 0x70, 0x20, 0x87, 0x10, 0x31},
	{216000, 0x6b, 0x22, 0x42, 0x74, 0x42, 0x58, 0x86, 0x44, 0x64, 0x68, 0x70, 0x20, 0x87, 0x10, 0x31} },
	{ {210429, 0x6b, 0x31, 0x42, 0x74, 0x42, 0x58, 0x86, 0x44, 0x64, 0x68, 0x70, 0x20, 0x87, 0x10, 0x31},
	{216000, 0x6b, 0x22, 0x42, 0x74, 0x42, 0x58, 0x86, 0x44, 0x64, 0x68, 0x70, 0x20, 0x87, 0x10, 0x31} }
};
/*
 * ch_mode_6: 7 banks x 113 rows x 21 entries (row key plus 20 values).
 * The initializer continues for many lines below this point; only its
 * first rows are here.  Row keys step by 6000 except for occasional
 * 8000 gaps (e.g. 159143 -> 167143), which are present in the original
 * data.  Same review notes as ch_mode_5 apply: the row key looks like
 * a frequency (confirm against the consumer), and the table likely
 * should be "static const" if it is only read from this file.
 */
u32 ch_mode_6[7][113][21] = { {
	{93143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b},
	{99143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b},
	{105143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b},
	{111143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b},
	{117143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b},
	{123143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b},
	{129143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b},
	{135143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b},
	{141143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b},
	{147143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b},
	{153143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b},
	{159143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b},
	{167143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b},
	{173143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b},
	{179143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42,
0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {185143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {191143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {195143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {201143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {207143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {213143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {219143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {225143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {231143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {237143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {243143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {249143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {255143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {261143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {267143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 
0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {273143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {279143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {285143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {291143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {297143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {303143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {309143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {315143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {321143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {327143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {333143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {339143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {345143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {351143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {357143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {363143, 
0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {369143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {375143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {381143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {387143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {393143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {399143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {405143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {411143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {417143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {423143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {429143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {435143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {441143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {447143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {453143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 
0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {459143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {465143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {473143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {479143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {485143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {491143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {497143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {503143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {509143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {515143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {521143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {527143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {533143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {539143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {545143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 
0x18, 0x0c, 0x0e, 0x0b}, {551143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {557143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {563143, 0x70, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {569143, 0x70, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {575143, 0x70, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {581143, 0x70, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {587143, 0x71, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {593143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {599143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {605143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {611143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {617143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {623143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {629143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {635143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {641143, 0x74, 0x21, 
0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {647143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {653143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {659143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {665143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {671143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {677143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {683143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {689143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {695143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {701143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {707143, 0x72, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {713143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {719143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {725143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {731143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 
0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {737143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {743143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {749143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {755143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {761143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {767143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b} }, { {93143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {99143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {105143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {111143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {117143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {123143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {129143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {135143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {141143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 
0x0e, 0x0b}, {147143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {153143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {159143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {167143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {173143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {179143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {185143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {191143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {195143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {201143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {207143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {213143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {219143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {225143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {231143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {237143, 0x6b, 0x22, 0x26, 0x42, 
0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {243143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {249143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {255143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {261143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {267143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {273143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {279143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {285143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {291143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {297143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {303143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {309143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {315143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {321143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {327143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 
0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {333143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {339143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {345143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {351143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {357143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {363143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {369143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {375143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {381143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {387143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {393143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {399143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {405143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {411143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {417143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, 
{423143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {429143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {435143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {441143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {447143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {453143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {459143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {465143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {473143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {479143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {485143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {491143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {497143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {503143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {509143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {515143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 
0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {521143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {527143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {533143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {539143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {545143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {551143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {557143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {563143, 0x70, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {569143, 0x70, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {575143, 0x70, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {581143, 0x70, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {587143, 0x71, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {593143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {599143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {605143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 
0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {611143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {617143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {623143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {629143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {635143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {641143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {647143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {653143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {659143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {665143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {671143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {677143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {683143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {689143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {695143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {701143, 
0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {707143, 0x72, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {713143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {719143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {725143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {731143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {737143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {743143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {749143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {755143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {761143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {767143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b} }, { {93143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {99143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {105143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {111143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 
0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {117143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {123143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {129143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {135143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {141143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {147143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {153143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {159143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {167143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {173143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {179143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {185143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {191143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {195143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {201143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 
0x14, 0x0f, 0x0e, 0x0b}, {207143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {213143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {219143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {225143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {231143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {237143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {243143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {249143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {255143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {261143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {267143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {273143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {279143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {285143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {291143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {297143, 0x70, 0x21, 
0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {303143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {309143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {315143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {321143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {327143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {333143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {339143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {345143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {351143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {357143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {363143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {369143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {375143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {381143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {387143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 
0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {393143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {399143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {405143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {411143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {417143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {423143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {429143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {435143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {441143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {447143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {453143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {459143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {465143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {473143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {479143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 
0x0e, 0x0b}, {485143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {491143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {497143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {503143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {509143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {515143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {521143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {527143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {533143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {539143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {545143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {551143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {557143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {563143, 0x70, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {569143, 0x70, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {575143, 0x70, 0x21, 0x36, 0x42, 
0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {581143, 0x70, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {587143, 0x71, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {593143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {599143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {605143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {611143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {617143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {623143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {629143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {635143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {641143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {647143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {653143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {659143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {665143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 
0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {671143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {677143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {683143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {689143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {695143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {701143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {707143, 0x72, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {713143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {719143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {725143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {731143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {737143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {743143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {749143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {755143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, 
{761143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {767143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b} }, { {93143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {99143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {105143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {111143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {117143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {123143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {129143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {135143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {141143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {147143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {153143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {159143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {167143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {173143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 
0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {179143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {185143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {191143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {195143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {201143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {207143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {213143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {219143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {225143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {231143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {237143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {243143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {249143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {255143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {261143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 
0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {267143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {273143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {279143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {285143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {291143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {297143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {303143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {309143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {315143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {321143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {327143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {333143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {339143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {345143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {351143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {357143, 
0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {363143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {369143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {375143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {381143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {387143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {393143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {399143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {405143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {411143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {417143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {423143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {429143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {435143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {441143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {447143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 
0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {453143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {459143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {465143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {473143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {479143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {485143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {491143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {497143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {503143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {509143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {515143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {521143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {527143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {533143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {539143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 
0x18, 0x0c, 0x0e, 0x0b}, {545143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {551143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {557143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {563143, 0x70, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {569143, 0x70, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {575143, 0x70, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {581143, 0x70, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {587143, 0x71, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {593143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {599143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {605143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {611143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {617143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {623143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {629143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {635143, 0x73, 0x21, 
0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {641143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {647143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {653143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {659143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {665143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {671143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {677143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {683143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {689143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {695143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {701143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {707143, 0x72, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {713143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {719143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {725143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 
0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {731143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {737143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {743143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {749143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {755143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {761143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {767143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b} }, { {93143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {99143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {105143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {111143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {117143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {123143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {129143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {135143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 
0x0e, 0x0b}, {141143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {147143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {153143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {159143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {167143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {173143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {179143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {185143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {191143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {195143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {201143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {207143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {213143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {219143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {225143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {231143, 0x6b, 0x22, 0x26, 0x42, 
0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {237143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {243143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {249143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {255143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {261143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {267143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {273143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {279143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {285143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {291143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {297143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {303143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {309143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {315143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {321143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 
0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {327143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {333143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {339143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {345143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {351143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {357143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {363143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {369143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {375143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {381143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {387143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {393143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {399143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {405143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {411143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, 
{417143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {423143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {429143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {435143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {441143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {447143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {453143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {459143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {465143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {473143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {479143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {485143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {491143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {497143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {503143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {509143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 
0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {515143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {521143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {527143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {533143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {539143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {545143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {551143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {557143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {563143, 0x70, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {569143, 0x70, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {575143, 0x70, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {581143, 0x70, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {587143, 0x71, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {593143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {599143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 
0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {605143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {611143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {617143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {623143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {629143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {635143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {641143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {647143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {653143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {659143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {665143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {671143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {677143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {683143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {689143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {695143, 
0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {701143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {707143, 0x72, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {713143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {719143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {725143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {731143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {737143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {743143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {749143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {755143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {761143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {767143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b} }, { {93143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {99143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {105143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 
0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {111143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {117143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {123143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {129143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {135143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {141143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {147143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {153143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {159143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {167143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {173143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {179143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {185143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {191143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {195143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 
0x14, 0x0f, 0x0e, 0x0b}, {201143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {207143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {213143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {219143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {225143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {231143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {237143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {243143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {249143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {255143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {261143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {267143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x68, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {273143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {279143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {285143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {291143, 0x70, 0x21, 
0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {297143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {303143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {309143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {315143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {321143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {327143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {333143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {339143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {345143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {351143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {357143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {363143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {369143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {375143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {381143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 
0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {387143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {393143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {399143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {405143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {411143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {417143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {423143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {429143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {435143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {441143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {447143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {453143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {459143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {465143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {473143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 
0x0e, 0x0b}, {479143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {485143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {491143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {497143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {503143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {509143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {515143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {521143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {527143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {533143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {539143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {545143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {551143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {557143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {563143, 0x70, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {569143, 0x70, 0x21, 0x36, 0x42, 
0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {575143, 0x70, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {581143, 0x70, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {587143, 0x71, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {593143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {599143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {605143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {611143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {617143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {623143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {629143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {635143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {641143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {647143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {653143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {659143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 
0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {665143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {671143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {677143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {683143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {689143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {695143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {701143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {707143, 0x72, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {713143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {719143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {725143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {731143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {737143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {743143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {749143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, 
{755143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {761143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {767143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b} }, { {93143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {99143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {105143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {111143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {117143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {123143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {129143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {135143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {141143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {147143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {153143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {159143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {167143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 
0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {173143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {179143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {185143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {191143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {195143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {201143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {207143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {213143, 0x6b, 0x31, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {219143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {225143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {231143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {237143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {243143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {249143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {255143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 
0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {261143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {267143, 0x6b, 0x22, 0x26, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x64, 0x90, 0x30, 0x20, 0x87, 0x10, 0x31, 0x14, 0x0f, 0x0e, 0x0b}, {273143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {279143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {285143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {291143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {297143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {303143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {309143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {315143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {321143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {327143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {333143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {339143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {345143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {351143, 
0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {357143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {363143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {369143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {375143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {381143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {387143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {393143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {399143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {405143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {411143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {417143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {423143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {429143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {435143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {441143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 
0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {447143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {453143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {459143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {465143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {473143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {479143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {485143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {491143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {497143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {503143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {509143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x40, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {515143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {521143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {527143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {533143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 
0x18, 0x0c, 0x0e, 0x0b}, {539143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {545143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {551143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {557143, 0x6F, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x10, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {563143, 0x70, 0x21, 0x46, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x00, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {569143, 0x70, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {575143, 0x70, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {581143, 0x70, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {587143, 0x71, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {593143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {599143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x60, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {605143, 0x72, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {611143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {617143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {623143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {629143, 0x74, 0x21, 
0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {635143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x30, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {641143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {647143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {653143, 0x74, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {659143, 0x73, 0x21, 0x36, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {665143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {671143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {677143, 0x74, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {683143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {689143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {695143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {701143, 0x73, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {707143, 0x72, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {713143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {719143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 
0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {725143, 0x71, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x20, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {731143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {737143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {743143, 0x70, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {749143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {755143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {761143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x70, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b}, {767143, 0x6F, 0x21, 0x56, 0x42, 0x74, 0x42, 0xf8, 0x86, 0x44, 0x75, 0x90, 0x50, 0x1a, 0x80, 0x10, 0x31, 0x18, 0x0c, 0x0e, 0x0b} } };
gpl-2.0
djvoleur/G_N92XP-R4_AOJ6
fs/libfs.c
1699
26346
/* * fs/libfs.c * Library for filesystems writers. */ #include <linux/export.h> #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/mount.h> #include <linux/vfs.h> #include <linux/quotaops.h> #include <linux/mutex.h> #include <linux/exportfs.h> #include <linux/writeback.h> #include <linux/buffer_head.h> /* sync_mapping_buffers */ #include <asm/uaccess.h> #include "internal.h" static inline int simple_positive(struct dentry *dentry) { return dentry->d_inode && !d_unhashed(dentry); } int simple_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode = dentry->d_inode; generic_fillattr(inode, stat); stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9); return 0; } int simple_statfs(struct dentry *dentry, struct kstatfs *buf) { buf->f_type = dentry->d_sb->s_magic; buf->f_bsize = PAGE_CACHE_SIZE; buf->f_namelen = NAME_MAX; return 0; } /* * Retaining negative dentries for an in-memory filesystem just wastes * memory and lookup time: arrange for them to be deleted immediately. */ static int simple_delete_dentry(const struct dentry *dentry) { return 1; } /* * Lookup the data. This is trivial - if the dentry didn't already * exist, we know it is negative. Set d_op to delete negative dentries. */ struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { static const struct dentry_operations simple_dentry_operations = { .d_delete = simple_delete_dentry, }; if (dentry->d_name.len > NAME_MAX) return ERR_PTR(-ENAMETOOLONG); d_set_d_op(dentry, &simple_dentry_operations); d_add(dentry, NULL); return NULL; } int dcache_dir_open(struct inode *inode, struct file *file) { static struct qstr cursor_name = QSTR_INIT(".", 1); file->private_data = d_alloc(file->f_path.dentry, &cursor_name); return file->private_data ? 
0 : -ENOMEM; } int dcache_dir_close(struct inode *inode, struct file *file) { dput(file->private_data); return 0; } loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) { struct dentry *dentry = file->f_path.dentry; mutex_lock(&dentry->d_inode->i_mutex); switch (whence) { case 1: offset += file->f_pos; case 0: if (offset >= 0) break; default: mutex_unlock(&dentry->d_inode->i_mutex); return -EINVAL; } if (offset != file->f_pos) { file->f_pos = offset; if (file->f_pos >= 2) { struct list_head *p; struct dentry *cursor = file->private_data; loff_t n = file->f_pos - 2; spin_lock(&dentry->d_lock); /* d_lock not required for cursor */ list_del(&cursor->d_u.d_child); p = dentry->d_subdirs.next; while (n && p != &dentry->d_subdirs) { struct dentry *next; next = list_entry(p, struct dentry, d_u.d_child); spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); if (simple_positive(next)) n--; spin_unlock(&next->d_lock); p = p->next; } list_add_tail(&cursor->d_u.d_child, p); spin_unlock(&dentry->d_lock); } } mutex_unlock(&dentry->d_inode->i_mutex); return offset; } /* Relationship between i_mode and the DT_xxx types */ static inline unsigned char dt_type(struct inode *inode) { return (inode->i_mode >> 12) & 15; } /* * Directory is locked and all positive dentries in it are safe, since * for ramfs-type trees they can't go away without unlink() or rmdir(), * both impossible due to the lock on directory. 
*/ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir) { struct dentry *dentry = filp->f_path.dentry; struct dentry *cursor = filp->private_data; struct list_head *p, *q = &cursor->d_u.d_child; ino_t ino; int i = filp->f_pos; switch (i) { case 0: ino = dentry->d_inode->i_ino; if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) break; filp->f_pos++; i++; /* fallthrough */ case 1: ino = parent_ino(dentry); if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0) break; filp->f_pos++; i++; /* fallthrough */ default: spin_lock(&dentry->d_lock); if (filp->f_pos == 2) list_move(q, &dentry->d_subdirs); for (p=q->next; p != &dentry->d_subdirs; p=p->next) { struct dentry *next; next = list_entry(p, struct dentry, d_u.d_child); spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); if (!simple_positive(next)) { spin_unlock(&next->d_lock); continue; } spin_unlock(&next->d_lock); spin_unlock(&dentry->d_lock); if (filldir(dirent, next->d_name.name, next->d_name.len, filp->f_pos, next->d_inode->i_ino, dt_type(next->d_inode)) < 0) return 0; spin_lock(&dentry->d_lock); spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); /* next is still alive */ list_move(q, p); spin_unlock(&next->d_lock); p = q; filp->f_pos++; } spin_unlock(&dentry->d_lock); } return 0; } ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos) { return -EISDIR; } const struct file_operations simple_dir_operations = { .open = dcache_dir_open, .release = dcache_dir_close, .llseek = dcache_dir_lseek, .read = generic_read_dir, .readdir = dcache_readdir, .fsync = noop_fsync, }; const struct inode_operations simple_dir_inode_operations = { .lookup = simple_lookup, }; static const struct super_operations simple_super_operations = { .statfs = simple_statfs, }; /* * Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that * will never be mountable) */ struct dentry *mount_pseudo(struct file_system_type *fs_type, char *name, const struct super_operations *ops, 
const struct dentry_operations *dops, unsigned long magic) { struct super_block *s; struct dentry *dentry; struct inode *root; struct qstr d_name = QSTR_INIT(name, strlen(name)); s = sget(fs_type, NULL, set_anon_super, MS_NOUSER, NULL); if (IS_ERR(s)) return ERR_CAST(s); s->s_maxbytes = MAX_LFS_FILESIZE; s->s_blocksize = PAGE_SIZE; s->s_blocksize_bits = PAGE_SHIFT; s->s_magic = magic; s->s_op = ops ? ops : &simple_super_operations; s->s_time_gran = 1; root = new_inode(s); if (!root) goto Enomem; /* * since this is the first inode, make it number 1. New inodes created * after this must take care not to collide with it (by passing * max_reserved of 1 to iunique). */ root->i_ino = 1; root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR; root->i_atime = root->i_mtime = root->i_ctime = CURRENT_TIME; dentry = __d_alloc(s, &d_name); if (!dentry) { iput(root); goto Enomem; } d_instantiate(dentry, root); s->s_root = dentry; s->s_d_op = dops; s->s_flags |= MS_ACTIVE; return dget(s->s_root); Enomem: deactivate_locked_super(s); return ERR_PTR(-ENOMEM); } int simple_open(struct inode *inode, struct file *file) { if (inode->i_private) file->private_data = inode->i_private; return 0; } int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = old_dentry->d_inode; inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; inc_nlink(inode); ihold(inode); dget(dentry); d_instantiate(dentry, inode); return 0; } int simple_empty(struct dentry *dentry) { struct dentry *child; int ret = 0; spin_lock(&dentry->d_lock); list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) { spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED); if (simple_positive(child)) { spin_unlock(&child->d_lock); goto out; } spin_unlock(&child->d_lock); } ret = 1; out: spin_unlock(&dentry->d_lock); return ret; } int simple_unlink(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; inode->i_ctime = dir->i_ctime = dir->i_mtime = 
CURRENT_TIME; drop_nlink(inode); dput(dentry); return 0; } int simple_rmdir(struct inode *dir, struct dentry *dentry) { if (!simple_empty(dentry)) return -ENOTEMPTY; drop_nlink(dentry->d_inode); simple_unlink(dir, dentry); drop_nlink(dir); return 0; } int simple_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct inode *inode = old_dentry->d_inode; int they_are_dirs = S_ISDIR(old_dentry->d_inode->i_mode); if (!simple_empty(new_dentry)) return -ENOTEMPTY; if (new_dentry->d_inode) { simple_unlink(new_dir, new_dentry); if (they_are_dirs) { drop_nlink(new_dentry->d_inode); drop_nlink(old_dir); } } else if (they_are_dirs) { drop_nlink(old_dir); inc_nlink(new_dir); } old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime = new_dir->i_mtime = inode->i_ctime = CURRENT_TIME; return 0; } /** * simple_setattr - setattr for simple filesystem * @dentry: dentry * @iattr: iattr structure * * Returns 0 on success, -error on failure. * * simple_setattr is a simple ->setattr implementation without a proper * implementation of size changes. * * It can either be used for in-memory filesystems or special files * on simple regular filesystems. Anything that needs to change on-disk * or wire state on size changes needs its own setattr method. 
*/ int simple_setattr(struct dentry *dentry, struct iattr *iattr) { struct inode *inode = dentry->d_inode; int error; error = inode_change_ok(inode, iattr); if (error) return error; if (iattr->ia_valid & ATTR_SIZE) truncate_setsize(inode, iattr->ia_size); setattr_copy(inode, iattr); mark_inode_dirty(inode); return 0; } EXPORT_SYMBOL(simple_setattr); int simple_readpage(struct file *file, struct page *page) { clear_highpage(page); flush_dcache_page(page); SetPageUptodate(page); unlock_page(page); return 0; } int simple_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { struct page *page; pgoff_t index; index = pos >> PAGE_CACHE_SHIFT; page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; *pagep = page; if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) { unsigned from = pos & (PAGE_CACHE_SIZE - 1); zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE); } return 0; } /** * simple_write_end - .write_end helper for non-block-device FSes * @available: See .write_end of address_space_operations * @file: " * @mapping: " * @pos: " * @len: " * @copied: " * @page: " * @fsdata: " * * simple_write_end does the minimum needed for updating a page after writing is * done. It has the same API signature as the .write_end of * address_space_operations vector. So it can just be set onto .write_end for * FSes that don't need any other processing. i_mutex is assumed to be held. * Block based filesystems should use generic_write_end(). * NOTE: Even though i_size might get updated by this function, mark_inode_dirty * is not called, so a filesystem that actually does store data in .write_inode * should extend on what's done here with a call to mark_inode_dirty() in the * case that i_size has changed. 
*/ int simple_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct inode *inode = page->mapping->host; loff_t last_pos = pos + copied; /* zero the stale part of the page if we did a short copy */ if (copied < len) { unsigned from = pos & (PAGE_CACHE_SIZE - 1); zero_user(page, from + copied, len - copied); } if (!PageUptodate(page)) SetPageUptodate(page); /* * No need to use i_size_read() here, the i_size * cannot change under us because we hold the i_mutex. */ if (last_pos > inode->i_size) i_size_write(inode, last_pos); set_page_dirty(page); unlock_page(page); page_cache_release(page); return copied; } /* * the inodes created here are not hashed. If you use iunique to generate * unique inode values later for this filesystem, then you must take care * to pass it an appropriate max_reserved value to avoid collisions. */ int simple_fill_super(struct super_block *s, unsigned long magic, struct tree_descr *files) { struct inode *inode; struct dentry *root; struct dentry *dentry; int i; s->s_blocksize = PAGE_CACHE_SIZE; s->s_blocksize_bits = PAGE_CACHE_SHIFT; s->s_magic = magic; s->s_op = &simple_super_operations; s->s_time_gran = 1; inode = new_inode(s); if (!inode) return -ENOMEM; /* * because the root inode is 1, the files array must not contain an * entry at index 1 */ inode->i_ino = 1; inode->i_mode = S_IFDIR | 0755; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; set_nlink(inode, 2); root = d_make_root(inode); if (!root) return -ENOMEM; for (i = 0; !files->name || files->name[0]; i++, files++) { if (!files->name) continue; /* warn if it tries to conflict with the root inode */ if (unlikely(i == 1)) printk(KERN_WARNING "%s: %s passed in a files array" "with an index of 1!\n", __func__, s->s_type->name); dentry = d_alloc_name(root, files->name); if (!dentry) goto out; inode = 
new_inode(s); if (!inode) { dput(dentry); goto out; } inode->i_mode = S_IFREG | files->mode; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inode->i_fop = files->ops; inode->i_ino = i; d_add(dentry, inode); } s->s_root = root; return 0; out: d_genocide(root); shrink_dcache_parent(root); dput(root); return -ENOMEM; } static DEFINE_SPINLOCK(pin_fs_lock); int simple_pin_fs(struct file_system_type *type, struct vfsmount **mount, int *count) { struct vfsmount *mnt = NULL; spin_lock(&pin_fs_lock); if (unlikely(!*mount)) { spin_unlock(&pin_fs_lock); mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, NULL); if (IS_ERR(mnt)) return PTR_ERR(mnt); spin_lock(&pin_fs_lock); if (!*mount) *mount = mnt; } mntget(*mount); ++*count; spin_unlock(&pin_fs_lock); mntput(mnt); return 0; } void simple_release_fs(struct vfsmount **mount, int *count) { struct vfsmount *mnt; spin_lock(&pin_fs_lock); mnt = *mount; if (!--*count) *mount = NULL; spin_unlock(&pin_fs_lock); mntput(mnt); } /** * simple_read_from_buffer - copy data from the buffer to user space * @to: the user space buffer to read to * @count: the maximum number of bytes to read * @ppos: the current position in the buffer * @from: the buffer to read from * @available: the size of the buffer * * The simple_read_from_buffer() function reads up to @count bytes from the * buffer @from at offset @ppos into the user space address starting at @to. * * On success, the number of bytes read is returned and the offset @ppos is * advanced by this number, or negative value is returned on error. 
**/ ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos, const void *from, size_t available) { loff_t pos = *ppos; size_t ret; if (pos < 0) return -EINVAL; if (pos >= available || !count) return 0; if (count > available - pos) count = available - pos; ret = copy_to_user(to, from + pos, count); if (ret == count) return -EFAULT; count -= ret; *ppos = pos + count; return count; } /** * simple_write_to_buffer - copy data from user space to the buffer * @to: the buffer to write to * @available: the size of the buffer * @ppos: the current position in the buffer * @from: the user space buffer to read from * @count: the maximum number of bytes to read * * The simple_write_to_buffer() function reads up to @count bytes from the user * space address starting at @from into the buffer @to at offset @ppos. * * On success, the number of bytes written is returned and the offset @ppos is * advanced by this number, or negative value is returned on error. **/ ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos, const void __user *from, size_t count) { loff_t pos = *ppos; size_t res; if (pos < 0) return -EINVAL; if (pos >= available || !count) return 0; if (count > available - pos) count = available - pos; res = copy_from_user(to + pos, from, count); if (res == count) return -EFAULT; count -= res; *ppos = pos + count; return count; } /** * memory_read_from_buffer - copy data from the buffer * @to: the kernel space buffer to read to * @count: the maximum number of bytes to read * @ppos: the current position in the buffer * @from: the buffer to read from * @available: the size of the buffer * * The memory_read_from_buffer() function reads up to @count bytes from the * buffer @from at offset @ppos into the kernel space address starting at @to. * * On success, the number of bytes read is returned and the offset @ppos is * advanced by this number, or negative value is returned on error. 
**/ ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, const void *from, size_t available) { loff_t pos = *ppos; if (pos < 0) return -EINVAL; if (pos >= available) return 0; if (count > available - pos) count = available - pos; memcpy(to, from + pos, count); *ppos = pos + count; return count; } /* * Transaction based IO. * The file expects a single write which triggers the transaction, and then * possibly a read which collects the result - which is stored in a * file-local buffer. */ void simple_transaction_set(struct file *file, size_t n) { struct simple_transaction_argresp *ar = file->private_data; BUG_ON(n > SIMPLE_TRANSACTION_LIMIT); /* * The barrier ensures that ar->size will really remain zero until * ar->data is ready for reading. */ smp_mb(); ar->size = n; } char *simple_transaction_get(struct file *file, const char __user *buf, size_t size) { struct simple_transaction_argresp *ar; static DEFINE_SPINLOCK(simple_transaction_lock); if (size > SIMPLE_TRANSACTION_LIMIT - 1) return ERR_PTR(-EFBIG); ar = (struct simple_transaction_argresp *)get_zeroed_page(GFP_KERNEL); if (!ar) return ERR_PTR(-ENOMEM); spin_lock(&simple_transaction_lock); /* only one write allowed per open */ if (file->private_data) { spin_unlock(&simple_transaction_lock); free_page((unsigned long)ar); return ERR_PTR(-EBUSY); } file->private_data = ar; spin_unlock(&simple_transaction_lock); if (copy_from_user(ar->data, buf, size)) return ERR_PTR(-EFAULT); return ar->data; } ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos) { struct simple_transaction_argresp *ar = file->private_data; if (!ar) return 0; return simple_read_from_buffer(buf, size, pos, ar->data, ar->size); } int simple_transaction_release(struct inode *inode, struct file *file) { free_page((unsigned long)file->private_data); return 0; } /* Simple attribute files */ struct simple_attr { int (*get)(void *, u64 *); int (*set)(void *, u64); char get_buf[24]; /* enough to 
store a u64 and "\n\0" */ char set_buf[24]; void *data; const char *fmt; /* format for read operation */ struct mutex mutex; /* protects access to these buffers */ }; /* simple_attr_open is called by an actual attribute open file operation * to set the attribute specific access operations. */ int simple_attr_open(struct inode *inode, struct file *file, int (*get)(void *, u64 *), int (*set)(void *, u64), const char *fmt) { struct simple_attr *attr; attr = kmalloc(sizeof(*attr), GFP_KERNEL); if (!attr) return -ENOMEM; attr->get = get; attr->set = set; attr->data = inode->i_private; attr->fmt = fmt; mutex_init(&attr->mutex); file->private_data = attr; return nonseekable_open(inode, file); } int simple_attr_release(struct inode *inode, struct file *file) { kfree(file->private_data); return 0; } /* read from the buffer that is filled with the get function */ ssize_t simple_attr_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { struct simple_attr *attr; size_t size; ssize_t ret; attr = file->private_data; if (!attr->get) return -EACCES; ret = mutex_lock_interruptible(&attr->mutex); if (ret) return ret; if (*ppos) { /* continued read */ size = strlen(attr->get_buf); } else { /* first read */ u64 val; ret = attr->get(attr->data, &val); if (ret) goto out; size = scnprintf(attr->get_buf, sizeof(attr->get_buf), attr->fmt, (unsigned long long)val); } ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size); out: mutex_unlock(&attr->mutex); return ret; } /* interpret the buffer as a number to call the set function with */ ssize_t simple_attr_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { struct simple_attr *attr; u64 val; size_t size; ssize_t ret; attr = file->private_data; if (!attr->set) return -EACCES; ret = mutex_lock_interruptible(&attr->mutex); if (ret) return ret; ret = -EFAULT; size = min(sizeof(attr->set_buf) - 1, len); if (copy_from_user(attr->set_buf, buf, size)) goto out; attr->set_buf[size] = '\0'; val = 
simple_strtoll(attr->set_buf, NULL, 0); ret = attr->set(attr->data, val); if (ret == 0) ret = len; /* on success, claim we got the whole input */ out: mutex_unlock(&attr->mutex); return ret; } /** * generic_fh_to_dentry - generic helper for the fh_to_dentry export operation * @sb: filesystem to do the file handle conversion on * @fid: file handle to convert * @fh_len: length of the file handle in bytes * @fh_type: type of file handle * @get_inode: filesystem callback to retrieve inode * * This function decodes @fid as long as it has one of the well-known * Linux filehandle types and calls @get_inode on it to retrieve the * inode for the object specified in the file handle. */ struct dentry *generic_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type, struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen)) { struct inode *inode = NULL; if (fh_len < 2) return NULL; switch (fh_type) { case FILEID_INO32_GEN: case FILEID_INO32_GEN_PARENT: inode = get_inode(sb, fid->i32.ino, fid->i32.gen); break; } return d_obtain_alias(inode); } EXPORT_SYMBOL_GPL(generic_fh_to_dentry); /** * generic_fh_to_parent - generic helper for the fh_to_parent export operation * @sb: filesystem to do the file handle conversion on * @fid: file handle to convert * @fh_len: length of the file handle in bytes * @fh_type: type of file handle * @get_inode: filesystem callback to retrieve inode * * This function decodes @fid as long as it has one of the well-known * Linux filehandle types and calls @get_inode on it to retrieve the * inode for the _parent_ object specified in the file handle if it * is specified in the file handle, or NULL otherwise. 
*/ struct dentry *generic_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type, struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen)) { struct inode *inode = NULL; if (fh_len <= 2) return NULL; switch (fh_type) { case FILEID_INO32_GEN_PARENT: inode = get_inode(sb, fid->i32.parent_ino, (fh_len > 3 ? fid->i32.parent_gen : 0)); break; } return d_obtain_alias(inode); } EXPORT_SYMBOL_GPL(generic_fh_to_parent); /** * generic_file_fsync - generic fsync implementation for simple filesystems * @file: file to synchronize * @datasync: only synchronize essential metadata if true * * This is a generic implementation of the fsync method for simple * filesystems which track all non-inode metadata in the buffers list * hanging off the address_space structure. */ int generic_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; int err; int ret; err = filemap_write_and_wait_range(inode->i_mapping, start, end); if (err) return err; mutex_lock(&inode->i_mutex); ret = sync_mapping_buffers(inode->i_mapping); if (!(inode->i_state & I_DIRTY)) goto out; if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) goto out; err = sync_inode_metadata(inode, 1); if (ret == 0) ret = err; out: mutex_unlock(&inode->i_mutex); return ret; } EXPORT_SYMBOL(generic_file_fsync); /** * generic_check_addressable - Check addressability of file system * @blocksize_bits: log of file system block size * @num_blocks: number of blocks in file system * * Determine whether a file system with @num_blocks blocks (and a * block size of 2**@blocksize_bits) is addressable by the sector_t * and page cache of the system. Return 0 if so and -EFBIG otherwise. 
*/ int generic_check_addressable(unsigned blocksize_bits, u64 num_blocks) { u64 last_fs_block = num_blocks - 1; u64 last_fs_page = last_fs_block >> (PAGE_CACHE_SHIFT - blocksize_bits); if (unlikely(num_blocks == 0)) return 0; if ((blocksize_bits < 9) || (blocksize_bits > PAGE_CACHE_SHIFT)) return -EINVAL; if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) || (last_fs_page > (pgoff_t)(~0ULL))) { return -EFBIG; } return 0; } EXPORT_SYMBOL(generic_check_addressable); /* * No-op implementation of ->fsync for in-memory filesystems. */ int noop_fsync(struct file *file, loff_t start, loff_t end, int datasync) { return 0; } EXPORT_SYMBOL(dcache_dir_close); EXPORT_SYMBOL(dcache_dir_lseek); EXPORT_SYMBOL(dcache_dir_open); EXPORT_SYMBOL(dcache_readdir); EXPORT_SYMBOL(generic_read_dir); EXPORT_SYMBOL(mount_pseudo); EXPORT_SYMBOL(simple_write_begin); EXPORT_SYMBOL(simple_write_end); EXPORT_SYMBOL(simple_dir_inode_operations); EXPORT_SYMBOL(simple_dir_operations); EXPORT_SYMBOL(simple_empty); EXPORT_SYMBOL(simple_fill_super); EXPORT_SYMBOL(simple_getattr); EXPORT_SYMBOL(simple_open); EXPORT_SYMBOL(simple_link); EXPORT_SYMBOL(simple_lookup); EXPORT_SYMBOL(simple_pin_fs); EXPORT_SYMBOL(simple_readpage); EXPORT_SYMBOL(simple_release_fs); EXPORT_SYMBOL(simple_rename); EXPORT_SYMBOL(simple_rmdir); EXPORT_SYMBOL(simple_statfs); EXPORT_SYMBOL(noop_fsync); EXPORT_SYMBOL(simple_unlink); EXPORT_SYMBOL(simple_read_from_buffer); EXPORT_SYMBOL(simple_write_to_buffer); EXPORT_SYMBOL(memory_read_from_buffer); EXPORT_SYMBOL(simple_transaction_set); EXPORT_SYMBOL(simple_transaction_get); EXPORT_SYMBOL(simple_transaction_read); EXPORT_SYMBOL(simple_transaction_release); EXPORT_SYMBOL_GPL(simple_attr_open); EXPORT_SYMBOL_GPL(simple_attr_release); EXPORT_SYMBOL_GPL(simple_attr_read); EXPORT_SYMBOL_GPL(simple_attr_write);
gpl-2.0
ywzjackal/dmmu_linux
arch/arm/mach-shmobile/pm-r8a7779.c
2723
5688
/* * r8a7779 Power management support * * Copyright (C) 2011 Renesas Solutions Corp. * Copyright (C) 2011 Magnus Damm * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/pm.h> #include <linux/suspend.h> #include <linux/err.h> #include <linux/pm_clock.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/console.h> #include <asm/io.h> #include <mach/common.h> #include <mach/r8a7779.h> static void __iomem *r8a7779_sysc_base; /* SYSC */ #define SYSCSR 0x00 #define SYSCISR 0x04 #define SYSCISCR 0x08 #define SYSCIER 0x0c #define SYSCIMR 0x10 #define PWRSR0 0x40 #define PWRSR1 0x80 #define PWRSR2 0xc0 #define PWRSR3 0x100 #define PWRSR4 0x140 #define PWRSR_OFFS 0x00 #define PWROFFCR_OFFS 0x04 #define PWRONCR_OFFS 0x0c #define PWRER_OFFS 0x14 #define SYSCSR_RETRIES 100 #define SYSCSR_DELAY_US 1 #define SYSCISR_RETRIES 1000 #define SYSCISR_DELAY_US 1 #if defined(CONFIG_PM) || defined(CONFIG_SMP) static DEFINE_SPINLOCK(r8a7779_sysc_lock); /* SMP CPUs + I/O devices */ static int r8a7779_sysc_pwr_on_off(struct r8a7779_pm_ch *r8a7779_ch, int sr_bit, int reg_offs) { int k; for (k = 0; k < SYSCSR_RETRIES; k++) { if (ioread32(r8a7779_sysc_base + SYSCSR) & (1 << sr_bit)) break; udelay(SYSCSR_DELAY_US); } if (k == SYSCSR_RETRIES) return -EAGAIN; iowrite32(1 << r8a7779_ch->chan_bit, r8a7779_sysc_base + r8a7779_ch->chan_offs + reg_offs); return 0; } static int r8a7779_sysc_pwr_off(struct r8a7779_pm_ch *r8a7779_ch) { return r8a7779_sysc_pwr_on_off(r8a7779_ch, 0, PWROFFCR_OFFS); } static int r8a7779_sysc_pwr_on(struct r8a7779_pm_ch *r8a7779_ch) { return r8a7779_sysc_pwr_on_off(r8a7779_ch, 1, PWRONCR_OFFS); } static int r8a7779_sysc_update(struct r8a7779_pm_ch *r8a7779_ch, int (*on_off_fn)(struct r8a7779_pm_ch *)) { unsigned int isr_mask = 1 << r8a7779_ch->isr_bit; unsigned 
int chan_mask = 1 << r8a7779_ch->chan_bit; unsigned int status; unsigned long flags; int ret = 0; int k; spin_lock_irqsave(&r8a7779_sysc_lock, flags); iowrite32(isr_mask, r8a7779_sysc_base + SYSCISCR); do { ret = on_off_fn(r8a7779_ch); if (ret) goto out; status = ioread32(r8a7779_sysc_base + r8a7779_ch->chan_offs + PWRER_OFFS); } while (status & chan_mask); for (k = 0; k < SYSCISR_RETRIES; k++) { if (ioread32(r8a7779_sysc_base + SYSCISR) & isr_mask) break; udelay(SYSCISR_DELAY_US); } if (k == SYSCISR_RETRIES) ret = -EIO; iowrite32(isr_mask, r8a7779_sysc_base + SYSCISCR); out: spin_unlock_irqrestore(&r8a7779_sysc_lock, flags); pr_debug("r8a7779 power domain %d: %02x %02x %02x %02x %02x -> %d\n", r8a7779_ch->isr_bit, ioread32(r8a7779_sysc_base + PWRSR0), ioread32(r8a7779_sysc_base + PWRSR1), ioread32(r8a7779_sysc_base + PWRSR2), ioread32(r8a7779_sysc_base + PWRSR3), ioread32(r8a7779_sysc_base + PWRSR4), ret); return ret; } int r8a7779_sysc_power_down(struct r8a7779_pm_ch *r8a7779_ch) { return r8a7779_sysc_update(r8a7779_ch, r8a7779_sysc_pwr_off); } int r8a7779_sysc_power_up(struct r8a7779_pm_ch *r8a7779_ch) { return r8a7779_sysc_update(r8a7779_ch, r8a7779_sysc_pwr_on); } static void __init r8a7779_sysc_init(void) { r8a7779_sysc_base = ioremap_nocache(0xffd85000, PAGE_SIZE); if (!r8a7779_sysc_base) panic("unable to ioremap r8a7779 SYSC hardware block\n"); /* enable all interrupt sources, but do not use interrupt handler */ iowrite32(0x0131000e, r8a7779_sysc_base + SYSCIER); iowrite32(0, r8a7779_sysc_base + SYSCIMR); } #else /* CONFIG_PM || CONFIG_SMP */ static inline void r8a7779_sysc_init(void) {} #endif /* CONFIG_PM || CONFIG_SMP */ #ifdef CONFIG_PM static int pd_power_down(struct generic_pm_domain *genpd) { return r8a7779_sysc_power_down(to_r8a7779_ch(genpd)); } static int pd_power_up(struct generic_pm_domain *genpd) { return r8a7779_sysc_power_up(to_r8a7779_ch(genpd)); } static bool pd_is_off(struct generic_pm_domain *genpd) { struct r8a7779_pm_ch *r8a7779_ch = 
to_r8a7779_ch(genpd); unsigned int st; st = ioread32(r8a7779_sysc_base + r8a7779_ch->chan_offs + PWRSR_OFFS); if (st & (1 << r8a7779_ch->chan_bit)) return true; return false; } static bool pd_active_wakeup(struct device *dev) { return true; } static void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd) { struct generic_pm_domain *genpd = &r8a7779_pd->genpd; pm_genpd_init(genpd, NULL, false); genpd->dev_ops.stop = pm_clk_suspend; genpd->dev_ops.start = pm_clk_resume; genpd->dev_ops.active_wakeup = pd_active_wakeup; genpd->dev_irq_safe = true; genpd->power_off = pd_power_down; genpd->power_on = pd_power_up; if (pd_is_off(&r8a7779_pd->genpd)) pd_power_up(&r8a7779_pd->genpd); } static struct r8a7779_pm_domain r8a7779_pm_domains[] = { { .genpd.name = "SH4A", .ch = { .chan_offs = 0x80, /* PWRSR1 .. PWRER1 */ .isr_bit = 16, /* SH4A */ }, }, { .genpd.name = "SGX", .ch = { .chan_offs = 0xc0, /* PWRSR2 .. PWRER2 */ .isr_bit = 20, /* SGX */ }, }, { .genpd.name = "VDP1", .ch = { .chan_offs = 0x100, /* PWRSR3 .. PWRER3 */ .isr_bit = 21, /* VDP */ }, }, { .genpd.name = "IMPX3", .ch = { .chan_offs = 0x140, /* PWRSR4 .. PWRER4 */ .isr_bit = 24, /* IMP */ }, }, }; void __init r8a7779_init_pm_domains(void) { int j; for (j = 0; j < ARRAY_SIZE(r8a7779_pm_domains); j++) r8a7779_init_pm_domain(&r8a7779_pm_domains[j]); } #endif /* CONFIG_PM */ void __init r8a7779_pm_init(void) { static int once; if (!once++) r8a7779_sysc_init(); }
gpl-2.0
CyanogenMod/sony-kernel-msm8660
drivers/edac/i3200_edac.c
2979
12645
/* * Intel 3200/3210 Memory Controller kernel module * Copyright (C) 2008-2009 Akamai Technologies, Inc. * Portions by Hitoshi Mitake <h.mitake@gmail.com>. * * This file may be distributed under the terms of the * GNU General Public License. */ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/edac.h> #include <linux/io.h> #include "edac_core.h" #define I3200_REVISION "1.1" #define EDAC_MOD_STR "i3200_edac" #define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0 #define I3200_RANKS 8 #define I3200_RANKS_PER_CHANNEL 4 #define I3200_CHANNELS 2 /* Intel 3200 register addresses - device 0 function 0 - DRAM Controller */ #define I3200_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */ #define I3200_MCHBAR_HIGH 0x4c #define I3200_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */ #define I3200_MMR_WINDOW_SIZE 16384 #define I3200_TOM 0xa0 /* Top of Memory (16b) * * 15:10 reserved * 9:0 total populated physical memory */ #define I3200_TOM_MASK 0x3ff /* bits 9:0 */ #define I3200_TOM_SHIFT 26 /* 64MiB grain */ #define I3200_ERRSTS 0xc8 /* Error Status Register (16b) * * 15 reserved * 14 Isochronous TBWRR Run Behind FIFO Full * (ITCV) * 13 Isochronous TBWRR Run Behind FIFO Put * (ITSTV) * 12 reserved * 11 MCH Thermal Sensor Event * for SMI/SCI/SERR (GTSE) * 10 reserved * 9 LOCK to non-DRAM Memory Flag (LCKF) * 8 reserved * 7 DRAM Throttle Flag (DTF) * 6:2 reserved * 1 Multi-bit DRAM ECC Error Flag (DMERR) * 0 Single-bit DRAM ECC Error Flag (DSERR) */ #define I3200_ERRSTS_UE 0x0002 #define I3200_ERRSTS_CE 0x0001 #define I3200_ERRSTS_BITS (I3200_ERRSTS_UE | I3200_ERRSTS_CE) /* Intel MMIO register space - device 0 function 0 - MMR space */ #define I3200_C0DRB 0x200 /* Channel 0 DRAM Rank Boundary (16b x 4) * * 15:10 reserved * 9:0 Channel 0 DRAM Rank Boundary Address */ #define I3200_C1DRB 0x600 /* Channel 1 DRAM Rank Boundary (16b x 4) */ #define I3200_DRB_MASK 0x3ff /* bits 9:0 */ #define I3200_DRB_SHIFT 26 /* 64MiB grain */ 
#define I3200_C0ECCERRLOG 0x280 /* Channel 0 ECC Error Log (64b) * * 63:48 Error Column Address (ERRCOL) * 47:32 Error Row Address (ERRROW) * 31:29 Error Bank Address (ERRBANK) * 28:27 Error Rank Address (ERRRANK) * 26:24 reserved * 23:16 Error Syndrome (ERRSYND) * 15: 2 reserved * 1 Multiple Bit Error Status (MERRSTS) * 0 Correctable Error Status (CERRSTS) */ #define I3200_C1ECCERRLOG 0x680 /* Chan 1 ECC Error Log (64b) */ #define I3200_ECCERRLOG_CE 0x1 #define I3200_ECCERRLOG_UE 0x2 #define I3200_ECCERRLOG_RANK_BITS 0x18000000 #define I3200_ECCERRLOG_RANK_SHIFT 27 #define I3200_ECCERRLOG_SYNDROME_BITS 0xff0000 #define I3200_ECCERRLOG_SYNDROME_SHIFT 16 #define I3200_CAPID0 0xe0 /* P.95 of spec for details */ struct i3200_priv { void __iomem *window; }; static int nr_channels; #ifndef readq static inline __u64 readq(const volatile void __iomem *addr) { const volatile u32 __iomem *p = addr; u32 low, high; low = readl(p); high = readl(p + 1); return low + ((u64)high << 32); } #endif static int how_many_channels(struct pci_dev *pdev) { unsigned char capid0_8b; /* 8th byte of CAPID0 */ pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b); if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */ debugf0("In single channel mode.\n"); return 1; } else { debugf0("In dual channel mode.\n"); return 2; } } static unsigned long eccerrlog_syndrome(u64 log) { return (log & I3200_ECCERRLOG_SYNDROME_BITS) >> I3200_ECCERRLOG_SYNDROME_SHIFT; } static int eccerrlog_row(int channel, u64 log) { u64 rank = ((log & I3200_ECCERRLOG_RANK_BITS) >> I3200_ECCERRLOG_RANK_SHIFT); return rank | (channel * I3200_RANKS_PER_CHANNEL); } enum i3200_chips { I3200 = 0, }; struct i3200_dev_info { const char *ctl_name; }; struct i3200_error_info { u16 errsts; u16 errsts2; u64 eccerrlog[I3200_CHANNELS]; }; static const struct i3200_dev_info i3200_devs[] = { [I3200] = { .ctl_name = "i3200" }, }; static struct pci_dev *mci_pdev; static int i3200_registered = 1; static void 
i3200_clear_error_info(struct mem_ctl_info *mci) { struct pci_dev *pdev; pdev = to_pci_dev(mci->dev); /* * Clear any error bits. * (Yes, we really clear bits by writing 1 to them.) */ pci_write_bits16(pdev, I3200_ERRSTS, I3200_ERRSTS_BITS, I3200_ERRSTS_BITS); } static void i3200_get_and_clear_error_info(struct mem_ctl_info *mci, struct i3200_error_info *info) { struct pci_dev *pdev; struct i3200_priv *priv = mci->pvt_info; void __iomem *window = priv->window; pdev = to_pci_dev(mci->dev); /* * This is a mess because there is no atomic way to read all the * registers at once and the registers can transition from CE being * overwritten by UE. */ pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts); if (!(info->errsts & I3200_ERRSTS_BITS)) return; info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG); if (nr_channels == 2) info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG); pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts2); /* * If the error is the same for both reads then the first set * of reads is valid. If there is a change then there is a CE * with no info and the second set of reads is valid and * should be UE info. 
*/ if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) { info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG); if (nr_channels == 2) info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG); } i3200_clear_error_info(mci); } static void i3200_process_error_info(struct mem_ctl_info *mci, struct i3200_error_info *info) { int channel; u64 log; if (!(info->errsts & I3200_ERRSTS_BITS)) return; if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) { edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); info->errsts = info->errsts2; } for (channel = 0; channel < nr_channels; channel++) { log = info->eccerrlog[channel]; if (log & I3200_ECCERRLOG_UE) { edac_mc_handle_ue(mci, 0, 0, eccerrlog_row(channel, log), "i3200 UE"); } else if (log & I3200_ECCERRLOG_CE) { edac_mc_handle_ce(mci, 0, 0, eccerrlog_syndrome(log), eccerrlog_row(channel, log), 0, "i3200 CE"); } } } static void i3200_check(struct mem_ctl_info *mci) { struct i3200_error_info info; debugf1("MC%d: %s()\n", mci->mc_idx, __func__); i3200_get_and_clear_error_info(mci, &info); i3200_process_error_info(mci, &info); } void __iomem *i3200_map_mchbar(struct pci_dev *pdev) { union { u64 mchbar; struct { u32 mchbar_low; u32 mchbar_high; }; } u; void __iomem *window; pci_read_config_dword(pdev, I3200_MCHBAR_LOW, &u.mchbar_low); pci_read_config_dword(pdev, I3200_MCHBAR_HIGH, &u.mchbar_high); u.mchbar &= I3200_MCHBAR_MASK; if (u.mchbar != (resource_size_t)u.mchbar) { printk(KERN_ERR "i3200: mmio space beyond accessible range (0x%llx)\n", (unsigned long long)u.mchbar); return NULL; } window = ioremap_nocache(u.mchbar, I3200_MMR_WINDOW_SIZE); if (!window) printk(KERN_ERR "i3200: cannot map mmio space at 0x%llx\n", (unsigned long long)u.mchbar); return window; } static void i3200_get_drbs(void __iomem *window, u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL]) { int i; for (i = 0; i < I3200_RANKS_PER_CHANNEL; i++) { drbs[0][i] = readw(window + I3200_C0DRB + 2*i) & I3200_DRB_MASK; drbs[1][i] = readw(window + I3200_C1DRB + 2*i) 
& I3200_DRB_MASK; } } static bool i3200_is_stacked(struct pci_dev *pdev, u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL]) { u16 tom; pci_read_config_word(pdev, I3200_TOM, &tom); tom &= I3200_TOM_MASK; return drbs[I3200_CHANNELS - 1][I3200_RANKS_PER_CHANNEL - 1] == tom; } static unsigned long drb_to_nr_pages( u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL], bool stacked, int channel, int rank) { int n; n = drbs[channel][rank]; if (rank > 0) n -= drbs[channel][rank - 1]; if (stacked && (channel == 1) && drbs[channel][rank] == drbs[channel][I3200_RANKS_PER_CHANNEL - 1]) n -= drbs[0][I3200_RANKS_PER_CHANNEL - 1]; n <<= (I3200_DRB_SHIFT - PAGE_SHIFT); return n; } static int i3200_probe1(struct pci_dev *pdev, int dev_idx) { int rc; int i; struct mem_ctl_info *mci = NULL; unsigned long last_page; u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL]; bool stacked; void __iomem *window; struct i3200_priv *priv; debugf0("MC: %s()\n", __func__); window = i3200_map_mchbar(pdev); if (!window) return -ENODEV; i3200_get_drbs(window, drbs); nr_channels = how_many_channels(pdev); mci = edac_mc_alloc(sizeof(struct i3200_priv), I3200_RANKS, nr_channels, 0); if (!mci) return -ENOMEM; debugf3("MC: %s(): init mci\n", __func__); mci->dev = &pdev->dev; mci->mtype_cap = MEM_FLAG_DDR2; mci->edac_ctl_cap = EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_SECDED; mci->mod_name = EDAC_MOD_STR; mci->mod_ver = I3200_REVISION; mci->ctl_name = i3200_devs[dev_idx].ctl_name; mci->dev_name = pci_name(pdev); mci->edac_check = i3200_check; mci->ctl_page_to_phys = NULL; priv = mci->pvt_info; priv->window = window; stacked = i3200_is_stacked(pdev, drbs); /* * The dram rank boundary (DRB) reg values are boundary addresses * for each DRAM rank with a granularity of 64MB. DRB regs are * cumulative; the last one will contain the total memory * contained in all ranks. 
*/ last_page = -1UL; for (i = 0; i < mci->nr_csrows; i++) { unsigned long nr_pages; struct csrow_info *csrow = &mci->csrows[i]; nr_pages = drb_to_nr_pages(drbs, stacked, i / I3200_RANKS_PER_CHANNEL, i % I3200_RANKS_PER_CHANNEL); if (nr_pages == 0) { csrow->mtype = MEM_EMPTY; continue; } csrow->first_page = last_page + 1; last_page += nr_pages; csrow->last_page = last_page; csrow->nr_pages = nr_pages; csrow->grain = nr_pages << PAGE_SHIFT; csrow->mtype = MEM_DDR2; csrow->dtype = DEV_UNKNOWN; csrow->edac_mode = EDAC_UNKNOWN; } i3200_clear_error_info(mci); rc = -ENODEV; if (edac_mc_add_mc(mci)) { debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__); goto fail; } /* get this far and it's successful */ debugf3("MC: %s(): success\n", __func__); return 0; fail: iounmap(window); if (mci) edac_mc_free(mci); return rc; } static int __devinit i3200_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int rc; debugf0("MC: %s()\n", __func__); if (pci_enable_device(pdev) < 0) return -EIO; rc = i3200_probe1(pdev, ent->driver_data); if (!mci_pdev) mci_pdev = pci_dev_get(pdev); return rc; } static void __devexit i3200_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; struct i3200_priv *priv; debugf0("%s()\n", __func__); mci = edac_mc_del_mc(&pdev->dev); if (!mci) return; priv = mci->pvt_info; iounmap(priv->window); edac_mc_free(mci); } static const struct pci_device_id i3200_pci_tbl[] __devinitdata = { { PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, I3200}, { 0, } /* 0 terminated list. 
*/ }; MODULE_DEVICE_TABLE(pci, i3200_pci_tbl); static struct pci_driver i3200_driver = { .name = EDAC_MOD_STR, .probe = i3200_init_one, .remove = __devexit_p(i3200_remove_one), .id_table = i3200_pci_tbl, }; static int __init i3200_init(void) { int pci_rc; debugf3("MC: %s()\n", __func__); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); pci_rc = pci_register_driver(&i3200_driver); if (pci_rc < 0) goto fail0; if (!mci_pdev) { i3200_registered = 0; mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_3200_HB, NULL); if (!mci_pdev) { debugf0("i3200 pci_get_device fail\n"); pci_rc = -ENODEV; goto fail1; } pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl); if (pci_rc < 0) { debugf0("i3200 init fail\n"); pci_rc = -ENODEV; goto fail1; } } return 0; fail1: pci_unregister_driver(&i3200_driver); fail0: if (mci_pdev) pci_dev_put(mci_pdev); return pci_rc; } static void __exit i3200_exit(void) { debugf3("MC: %s()\n", __func__); pci_unregister_driver(&i3200_driver); if (!i3200_registered) { i3200_remove_one(mci_pdev); pci_dev_put(mci_pdev); } } module_init(i3200_init); module_exit(i3200_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Akamai Technologies, Inc."); MODULE_DESCRIPTION("MC support for Intel 3200 memory hub controllers"); module_param(edac_op_state, int, 0444); MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
gpl-2.0
himalock/LGL2220d
net/ipv4/fib_rules.c
4515
6900
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * IPv4 Forwarding Information Base: policy rules. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * Thomas Graf <tgraf@suug.ch> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Fixes: * Rani Assaf : local_rule cannot be deleted * Marc Boucher : routing by fwmark */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/netlink.h> #include <linux/inetdevice.h> #include <linux/init.h> #include <linux/list.h> #include <linux/rcupdate.h> #include <linux/export.h> #include <net/ip.h> #include <net/route.h> #include <net/tcp.h> #include <net/ip_fib.h> #include <net/fib_rules.h> struct fib4_rule { struct fib_rule common; u8 dst_len; u8 src_len; u8 tos; __be32 src; __be32 srcmask; __be32 dst; __be32 dstmask; #ifdef CONFIG_IP_ROUTE_CLASSID u32 tclassid; #endif }; #ifdef CONFIG_IP_ROUTE_CLASSID u32 fib_rules_tclass(const struct fib_result *res) { return res->r ? 
((struct fib4_rule *) res->r)->tclassid : 0; } #endif int fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res) { struct fib_lookup_arg arg = { .result = res, .flags = FIB_LOOKUP_NOREF, }; int err; err = fib_rules_lookup(net->ipv4.rules_ops, flowi4_to_flowi(flp), 0, &arg); res->r = arg.rule; return err; } EXPORT_SYMBOL_GPL(fib_lookup); static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp, int flags, struct fib_lookup_arg *arg) { int err = -EAGAIN; struct fib_table *tbl; switch (rule->action) { case FR_ACT_TO_TBL: break; case FR_ACT_UNREACHABLE: err = -ENETUNREACH; goto errout; case FR_ACT_PROHIBIT: err = -EACCES; goto errout; case FR_ACT_BLACKHOLE: default: err = -EINVAL; goto errout; } tbl = fib_get_table(rule->fr_net, rule->table); if (!tbl) goto errout; err = fib_table_lookup(tbl, &flp->u.ip4, (struct fib_result *) arg->result, arg->flags); if (err > 0) err = -EAGAIN; errout: return err; } static int fib4_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) { struct fib4_rule *r = (struct fib4_rule *) rule; struct flowi4 *fl4 = &fl->u.ip4; __be32 daddr = fl4->daddr; __be32 saddr = fl4->saddr; if (((saddr ^ r->src) & r->srcmask) || ((daddr ^ r->dst) & r->dstmask)) return 0; if (r->tos && (r->tos != fl4->flowi4_tos)) return 0; return 1; } static struct fib_table *fib_empty_table(struct net *net) { u32 id; for (id = 1; id <= RT_TABLE_MAX; id++) if (fib_get_table(net, id) == NULL) return fib_new_table(net, id); return NULL; } static const struct nla_policy fib4_rule_policy[FRA_MAX+1] = { FRA_GENERIC_POLICY, [FRA_FLOW] = { .type = NLA_U32 }, }; static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh, struct nlattr **tb) { struct net *net = sock_net(skb->sk); int err = -EINVAL; struct fib4_rule *rule4 = (struct fib4_rule *) rule; if (frh->tos & ~IPTOS_TOS_MASK) goto errout; if (rule->table == RT_TABLE_UNSPEC) { if (rule->action == FR_ACT_TO_TBL) { struct fib_table *table; table = 
fib_empty_table(net); if (table == NULL) { err = -ENOBUFS; goto errout; } rule->table = table->tb_id; } } if (frh->src_len) rule4->src = nla_get_be32(tb[FRA_SRC]); if (frh->dst_len) rule4->dst = nla_get_be32(tb[FRA_DST]); #ifdef CONFIG_IP_ROUTE_CLASSID if (tb[FRA_FLOW]) rule4->tclassid = nla_get_u32(tb[FRA_FLOW]); #endif rule4->src_len = frh->src_len; rule4->srcmask = inet_make_mask(rule4->src_len); rule4->dst_len = frh->dst_len; rule4->dstmask = inet_make_mask(rule4->dst_len); rule4->tos = frh->tos; err = 0; errout: return err; } static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, struct nlattr **tb) { struct fib4_rule *rule4 = (struct fib4_rule *) rule; if (frh->src_len && (rule4->src_len != frh->src_len)) return 0; if (frh->dst_len && (rule4->dst_len != frh->dst_len)) return 0; if (frh->tos && (rule4->tos != frh->tos)) return 0; #ifdef CONFIG_IP_ROUTE_CLASSID if (tb[FRA_FLOW] && (rule4->tclassid != nla_get_u32(tb[FRA_FLOW]))) return 0; #endif if (frh->src_len && (rule4->src != nla_get_be32(tb[FRA_SRC]))) return 0; if (frh->dst_len && (rule4->dst != nla_get_be32(tb[FRA_DST]))) return 0; return 1; } static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh) { struct fib4_rule *rule4 = (struct fib4_rule *) rule; frh->dst_len = rule4->dst_len; frh->src_len = rule4->src_len; frh->tos = rule4->tos; if (rule4->dst_len) NLA_PUT_BE32(skb, FRA_DST, rule4->dst); if (rule4->src_len) NLA_PUT_BE32(skb, FRA_SRC, rule4->src); #ifdef CONFIG_IP_ROUTE_CLASSID if (rule4->tclassid) NLA_PUT_U32(skb, FRA_FLOW, rule4->tclassid); #endif return 0; nla_put_failure: return -ENOBUFS; } static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule) { return nla_total_size(4) /* dst */ + nla_total_size(4) /* src */ + nla_total_size(4); /* flow */ } static void fib4_rule_flush_cache(struct fib_rules_ops *ops) { rt_cache_flush(ops->fro_net, -1); } static const struct fib_rules_ops __net_initdata fib4_rules_ops_template = { .family = 
AF_INET, .rule_size = sizeof(struct fib4_rule), .addr_size = sizeof(u32), .action = fib4_rule_action, .match = fib4_rule_match, .configure = fib4_rule_configure, .compare = fib4_rule_compare, .fill = fib4_rule_fill, .default_pref = fib_default_rule_pref, .nlmsg_payload = fib4_rule_nlmsg_payload, .flush_cache = fib4_rule_flush_cache, .nlgroup = RTNLGRP_IPV4_RULE, .policy = fib4_rule_policy, .owner = THIS_MODULE, }; static int fib_default_rules_init(struct fib_rules_ops *ops) { int err; err = fib_default_rule_add(ops, 0, RT_TABLE_LOCAL, 0); if (err < 0) return err; err = fib_default_rule_add(ops, 0x7FFE, RT_TABLE_MAIN, 0); if (err < 0) return err; err = fib_default_rule_add(ops, 0x7FFF, RT_TABLE_DEFAULT, 0); if (err < 0) return err; return 0; } int __net_init fib4_rules_init(struct net *net) { int err; struct fib_rules_ops *ops; ops = fib_rules_register(&fib4_rules_ops_template, net); if (IS_ERR(ops)) return PTR_ERR(ops); err = fib_default_rules_init(ops); if (err < 0) goto fail; net->ipv4.rules_ops = ops; return 0; fail: /* also cleans all rules already added */ fib_rules_unregister(ops); return err; } void __net_exit fib4_rules_exit(struct net *net) { fib_rules_unregister(net->ipv4.rules_ops); }
gpl-2.0
sub77/kernel_samsung_matisse
arch/arm/mach-at91/irq.c
4771
6288
/* * linux/arch/arm/mach-at91/irq.c * * Copyright (C) 2004 SAN People * Copyright (C) 2004 ATMEL * Copyright (C) Rick Bronson * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/types.h> #include <linux/irq.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/irqdomain.h> #include <linux/err.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/setup.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> #include <asm/mach/map.h> void __iomem *at91_aic_base; static struct irq_domain *at91_aic_domain; static struct device_node *at91_aic_np; static void at91_aic_mask_irq(struct irq_data *d) { /* Disable interrupt on AIC */ at91_aic_write(AT91_AIC_IDCR, 1 << d->hwirq); } static void at91_aic_unmask_irq(struct irq_data *d) { /* Enable interrupt on AIC */ at91_aic_write(AT91_AIC_IECR, 1 << d->hwirq); } unsigned int at91_extern_irq; #define is_extern_irq(hwirq) ((1 << (hwirq)) & at91_extern_irq) static int at91_aic_set_type(struct irq_data *d, unsigned type) { unsigned int smr, srctype; switch (type) { case IRQ_TYPE_LEVEL_HIGH: srctype = AT91_AIC_SRCTYPE_HIGH; break; case IRQ_TYPE_EDGE_RISING: srctype = AT91_AIC_SRCTYPE_RISING; break; case IRQ_TYPE_LEVEL_LOW: if ((d->hwirq == AT91_ID_FIQ) || 
is_extern_irq(d->hwirq)) /* only supported on external interrupts */ srctype = AT91_AIC_SRCTYPE_LOW; else return -EINVAL; break; case IRQ_TYPE_EDGE_FALLING: if ((d->hwirq == AT91_ID_FIQ) || is_extern_irq(d->hwirq)) /* only supported on external interrupts */ srctype = AT91_AIC_SRCTYPE_FALLING; else return -EINVAL; break; default: return -EINVAL; } smr = at91_aic_read(AT91_AIC_SMR(d->hwirq)) & ~AT91_AIC_SRCTYPE; at91_aic_write(AT91_AIC_SMR(d->hwirq), smr | srctype); return 0; } #ifdef CONFIG_PM static u32 wakeups; static u32 backups; static int at91_aic_set_wake(struct irq_data *d, unsigned value) { if (unlikely(d->hwirq >= NR_AIC_IRQS)) return -EINVAL; if (value) wakeups |= (1 << d->hwirq); else wakeups &= ~(1 << d->hwirq); return 0; } void at91_irq_suspend(void) { backups = at91_aic_read(AT91_AIC_IMR); at91_aic_write(AT91_AIC_IDCR, backups); at91_aic_write(AT91_AIC_IECR, wakeups); } void at91_irq_resume(void) { at91_aic_write(AT91_AIC_IDCR, wakeups); at91_aic_write(AT91_AIC_IECR, backups); } #else #define at91_aic_set_wake NULL #endif static struct irq_chip at91_aic_chip = { .name = "AIC", .irq_ack = at91_aic_mask_irq, .irq_mask = at91_aic_mask_irq, .irq_unmask = at91_aic_unmask_irq, .irq_set_type = at91_aic_set_type, .irq_set_wake = at91_aic_set_wake, }; static void __init at91_aic_hw_init(unsigned int spu_vector) { int i; /* * Perform 8 End Of Interrupt Command to make sure AIC * will not Lock out nIRQ */ for (i = 0; i < 8; i++) at91_aic_write(AT91_AIC_EOICR, 0); /* * Spurious Interrupt ID in Spurious Vector Register. 
* When there is no current interrupt, the IRQ Vector Register * reads the value stored in AIC_SPU */ at91_aic_write(AT91_AIC_SPU, spu_vector); /* No debugging in AIC: Debug (Protect) Control Register */ at91_aic_write(AT91_AIC_DCR, 0); /* Disable and clear all interrupts initially */ at91_aic_write(AT91_AIC_IDCR, 0xFFFFFFFF); at91_aic_write(AT91_AIC_ICCR, 0xFFFFFFFF); } #if defined(CONFIG_OF) static int at91_aic_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { /* Put virq number in Source Vector Register */ at91_aic_write(AT91_AIC_SVR(hw), virq); /* Active Low interrupt, without priority */ at91_aic_write(AT91_AIC_SMR(hw), AT91_AIC_SRCTYPE_LOW); irq_set_chip_and_handler(virq, &at91_aic_chip, handle_level_irq); set_irq_flags(virq, IRQF_VALID | IRQF_PROBE); return 0; } static struct irq_domain_ops at91_aic_irq_ops = { .map = at91_aic_irq_map, .xlate = irq_domain_xlate_twocell, }; int __init at91_aic_of_init(struct device_node *node, struct device_node *parent) { at91_aic_base = of_iomap(node, 0); at91_aic_np = node; at91_aic_domain = irq_domain_add_linear(at91_aic_np, NR_AIC_IRQS, &at91_aic_irq_ops, NULL); if (!at91_aic_domain) panic("Unable to add AIC irq domain (DT)\n"); irq_set_default_host(at91_aic_domain); at91_aic_hw_init(NR_AIC_IRQS); return 0; } #endif /* * Initialize the AIC interrupt controller. 
*/ void __init at91_aic_init(unsigned int priority[NR_AIC_IRQS]) { unsigned int i; int irq_base; at91_aic_base = ioremap(AT91_AIC, 512); if (!at91_aic_base) panic("Unable to ioremap AIC registers\n"); /* Add irq domain for AIC */ irq_base = irq_alloc_descs(-1, 0, NR_AIC_IRQS, 0); if (irq_base < 0) { WARN(1, "Cannot allocate irq_descs, assuming pre-allocated\n"); irq_base = 0; } at91_aic_domain = irq_domain_add_legacy(at91_aic_np, NR_AIC_IRQS, irq_base, 0, &irq_domain_simple_ops, NULL); if (!at91_aic_domain) panic("Unable to add AIC irq domain\n"); irq_set_default_host(at91_aic_domain); /* * The IVR is used by macro get_irqnr_and_base to read and verify. * The irq number is NR_AIC_IRQS when a spurious interrupt has occurred. */ for (i = 0; i < NR_AIC_IRQS; i++) { /* Put hardware irq number in Source Vector Register: */ at91_aic_write(AT91_AIC_SVR(i), i); /* Active Low interrupt, with the specified priority */ at91_aic_write(AT91_AIC_SMR(i), AT91_AIC_SRCTYPE_LOW | priority[i]); irq_set_chip_and_handler(i, &at91_aic_chip, handle_level_irq); set_irq_flags(i, IRQF_VALID | IRQF_PROBE); } at91_aic_hw_init(NR_AIC_IRQS); }
gpl-2.0
robcore/machinex_kernel
arch/arm/mach-at91/sam9_smc.c
4771
3562
/* * linux/arch/arm/mach-at91/sam9_smc.c * * Copyright (C) 2008 Andrew Victor * Copyright (C) 2011 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <mach/at91sam9_smc.h> #include "sam9_smc.h" #define AT91_SMC_CS(id, n) (smc_base_addr[id] + ((n) * 0x10)) static void __iomem *smc_base_addr[2]; static void sam9_smc_cs_write_mode(void __iomem *base, struct sam9_smc_config *config) { __raw_writel(config->mode | AT91_SMC_TDF_(config->tdf_cycles), base + AT91_SMC_MODE); } void sam9_smc_write_mode(int id, int cs, struct sam9_smc_config *config) { sam9_smc_cs_write_mode(AT91_SMC_CS(id, cs), config); } static void sam9_smc_cs_configure(void __iomem *base, struct sam9_smc_config *config) { /* Setup register */ __raw_writel(AT91_SMC_NWESETUP_(config->nwe_setup) | AT91_SMC_NCS_WRSETUP_(config->ncs_write_setup) | AT91_SMC_NRDSETUP_(config->nrd_setup) | AT91_SMC_NCS_RDSETUP_(config->ncs_read_setup), base + AT91_SMC_SETUP); /* Pulse register */ __raw_writel(AT91_SMC_NWEPULSE_(config->nwe_pulse) | AT91_SMC_NCS_WRPULSE_(config->ncs_write_pulse) | AT91_SMC_NRDPULSE_(config->nrd_pulse) | AT91_SMC_NCS_RDPULSE_(config->ncs_read_pulse), base + AT91_SMC_PULSE); /* Cycle register */ __raw_writel(AT91_SMC_NWECYCLE_(config->write_cycle) | AT91_SMC_NRDCYCLE_(config->read_cycle), base + AT91_SMC_CYCLE); /* Mode register */ sam9_smc_cs_write_mode(base, config); } void sam9_smc_configure(int id, int cs, struct sam9_smc_config *config) { sam9_smc_cs_configure(AT91_SMC_CS(id, cs), config); } static void sam9_smc_cs_read_mode(void __iomem *base, struct sam9_smc_config *config) { u32 val = __raw_readl(base + AT91_SMC_MODE); config->mode = (val & ~AT91_SMC_NWECYCLE); config->tdf_cycles = (val & 
AT91_SMC_NWECYCLE) >> 16 ; } void sam9_smc_read_mode(int id, int cs, struct sam9_smc_config *config) { sam9_smc_cs_read_mode(AT91_SMC_CS(id, cs), config); } static void sam9_smc_cs_read(void __iomem *base, struct sam9_smc_config *config) { u32 val; /* Setup register */ val = __raw_readl(base + AT91_SMC_SETUP); config->nwe_setup = val & AT91_SMC_NWESETUP; config->ncs_write_setup = (val & AT91_SMC_NCS_WRSETUP) >> 8; config->nrd_setup = (val & AT91_SMC_NRDSETUP) >> 16; config->ncs_read_setup = (val & AT91_SMC_NCS_RDSETUP) >> 24; /* Pulse register */ val = __raw_readl(base + AT91_SMC_PULSE); config->nwe_setup = val & AT91_SMC_NWEPULSE; config->ncs_write_pulse = (val & AT91_SMC_NCS_WRPULSE) >> 8; config->nrd_pulse = (val & AT91_SMC_NRDPULSE) >> 16; config->ncs_read_pulse = (val & AT91_SMC_NCS_RDPULSE) >> 24; /* Cycle register */ val = __raw_readl(base + AT91_SMC_CYCLE); config->write_cycle = val & AT91_SMC_NWECYCLE; config->read_cycle = (val & AT91_SMC_NRDCYCLE) >> 16; /* Mode register */ sam9_smc_cs_read_mode(base, config); } void sam9_smc_read(int id, int cs, struct sam9_smc_config *config) { sam9_smc_cs_read(AT91_SMC_CS(id, cs), config); } void __init at91sam9_ioremap_smc(int id, u32 addr) { if (id > 1) { pr_warn("%s: id > 2\n", __func__); return; } smc_base_addr[id] = ioremap(addr, 512); if (!smc_base_addr[id]) pr_warn("Impossible to ioremap smc.%d 0x%x\n", id, addr); }
gpl-2.0
showp1984/bricked-flo
drivers/gpio/gpio-max7300.c
5795
2048
/* * Copyright (C) 2009 Wolfram Sang, Pengutronix * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Check max730x.c for further details. */ #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/i2c.h> #include <linux/spi/max7301.h> #include <linux/slab.h> static int max7300_i2c_write(struct device *dev, unsigned int reg, unsigned int val) { struct i2c_client *client = to_i2c_client(dev); return i2c_smbus_write_byte_data(client, reg, val); } static int max7300_i2c_read(struct device *dev, unsigned int reg) { struct i2c_client *client = to_i2c_client(dev); return i2c_smbus_read_byte_data(client, reg); } static int __devinit max7300_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct max7301 *ts; int ret; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; ts = kzalloc(sizeof(struct max7301), GFP_KERNEL); if (!ts) return -ENOMEM; ts->read = max7300_i2c_read; ts->write = max7300_i2c_write; ts->dev = &client->dev; ret = __max730x_probe(ts); if (ret) kfree(ts); return ret; } static int __devexit max7300_remove(struct i2c_client *client) { return __max730x_remove(&client->dev); } static const struct i2c_device_id max7300_id[] = { { "max7300", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, max7300_id); static struct i2c_driver max7300_driver = { .driver = { .name = "max7300", .owner = THIS_MODULE, }, .probe = max7300_probe, .remove = __devexit_p(max7300_remove), .id_table = max7300_id, }; static int __init max7300_init(void) { return i2c_add_driver(&max7300_driver); } subsys_initcall(max7300_init); static void __exit max7300_exit(void) { i2c_del_driver(&max7300_driver); } module_exit(max7300_exit); MODULE_AUTHOR("Wolfram Sang"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("MAX7300 GPIO-Expander");
gpl-2.0
jpilet/linux-anemobox
arch/arm/kernel/unwind.c
6819
12662
/* * arch/arm/kernel/unwind.c * * Copyright (C) 2008 ARM Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * Stack unwinding support for ARM * * An ARM EABI version of gcc is required to generate the unwind * tables. For information about the structure of the unwind tables, * see "Exception Handling ABI for the ARM Architecture" at: * * http://infocenter.arm.com/help/topic/com.arm.doc.subset.swdev.abi/index.html */ #ifndef __CHECKER__ #if !defined (__ARM_EABI__) #warning Your compiler does not have EABI support. #warning ARM unwind is known to compile only with EABI compilers. #warning Change compiler or disable ARM_UNWIND option. #elif (__GNUC__ == 4 && __GNUC_MINOR__ <= 2) #warning Your compiler is too buggy; it is known to not compile ARM unwind support. #warning Change compiler or disable ARM_UNWIND option. 
#endif #endif /* __CHECKER__ */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/export.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/list.h> #include <asm/stacktrace.h> #include <asm/traps.h> #include <asm/unwind.h> /* Dummy functions to avoid linker complaints */ void __aeabi_unwind_cpp_pr0(void) { }; EXPORT_SYMBOL(__aeabi_unwind_cpp_pr0); void __aeabi_unwind_cpp_pr1(void) { }; EXPORT_SYMBOL(__aeabi_unwind_cpp_pr1); void __aeabi_unwind_cpp_pr2(void) { }; EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2); struct unwind_ctrl_block { unsigned long vrs[16]; /* virtual register set */ const unsigned long *insn; /* pointer to the current instructions word */ int entries; /* number of entries left to interpret */ int byte; /* current byte number in the instructions word */ }; enum regs { #ifdef CONFIG_THUMB2_KERNEL FP = 7, #else FP = 11, #endif SP = 13, LR = 14, PC = 15 }; extern const struct unwind_idx __start_unwind_idx[]; static const struct unwind_idx *__origin_unwind_idx; extern const struct unwind_idx __stop_unwind_idx[]; static DEFINE_SPINLOCK(unwind_lock); static LIST_HEAD(unwind_tables); /* Convert a prel31 symbol to an absolute address */ #define prel31_to_addr(ptr) \ ({ \ /* sign-extend to 32 bits */ \ long offset = (((long)*(ptr)) << 1) >> 1; \ (unsigned long)(ptr) + offset; \ }) /* * Binary search in the unwind index. The entries are * guaranteed to be sorted in ascending order by the linker. * * start = first entry * origin = first entry with positive offset (or stop if there is no such entry) * stop - 1 = last entry */ static const struct unwind_idx *search_index(unsigned long addr, const struct unwind_idx *start, const struct unwind_idx *origin, const struct unwind_idx *stop) { unsigned long addr_prel31; pr_debug("%s(%08lx, %p, %p, %p)\n", __func__, addr, start, origin, stop); /* * only search in the section with the matching sign. This way the * prel31 numbers can be compared as unsigned longs. 
*/ if (addr < (unsigned long)start) /* negative offsets: [start; origin) */ stop = origin; else /* positive offsets: [origin; stop) */ start = origin; /* prel31 for address relavive to start */ addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff; while (start < stop - 1) { const struct unwind_idx *mid = start + ((stop - start) >> 1); /* * As addr_prel31 is relative to start an offset is needed to * make it relative to mid. */ if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) < mid->addr_offset) stop = mid; else { /* keep addr_prel31 relative to start */ addr_prel31 -= ((unsigned long)mid - (unsigned long)start); start = mid; } } if (likely(start->addr_offset <= addr_prel31)) return start; else { pr_warning("unwind: Unknown symbol address %08lx\n", addr); return NULL; } } static const struct unwind_idx *unwind_find_origin( const struct unwind_idx *start, const struct unwind_idx *stop) { pr_debug("%s(%p, %p)\n", __func__, start, stop); while (start < stop) { const struct unwind_idx *mid = start + ((stop - start) >> 1); if (mid->addr_offset >= 0x40000000) /* negative offset */ start = mid + 1; else /* positive offset */ stop = mid; } pr_debug("%s -> %p\n", __func__, stop); return stop; } static const struct unwind_idx *unwind_find_idx(unsigned long addr) { const struct unwind_idx *idx = NULL; unsigned long flags; pr_debug("%s(%08lx)\n", __func__, addr); if (core_kernel_text(addr)) { if (unlikely(!__origin_unwind_idx)) __origin_unwind_idx = unwind_find_origin(__start_unwind_idx, __stop_unwind_idx); /* main unwind table */ idx = search_index(addr, __start_unwind_idx, __origin_unwind_idx, __stop_unwind_idx); } else { /* module unwind tables */ struct unwind_table *table; spin_lock_irqsave(&unwind_lock, flags); list_for_each_entry(table, &unwind_tables, list) { if (addr >= table->begin_addr && addr < table->end_addr) { idx = search_index(addr, table->start, table->origin, table->stop); /* Move-to-front to exploit common traces */ list_move(&table->list, 
&unwind_tables); break; } } spin_unlock_irqrestore(&unwind_lock, flags); } pr_debug("%s: idx = %p\n", __func__, idx); return idx; } static unsigned long unwind_get_byte(struct unwind_ctrl_block *ctrl) { unsigned long ret; if (ctrl->entries <= 0) { pr_warning("unwind: Corrupt unwind table\n"); return 0; } ret = (*ctrl->insn >> (ctrl->byte * 8)) & 0xff; if (ctrl->byte == 0) { ctrl->insn++; ctrl->entries--; ctrl->byte = 3; } else ctrl->byte--; return ret; } /* * Execute the current unwind instruction. */ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl) { unsigned long insn = unwind_get_byte(ctrl); pr_debug("%s: insn = %08lx\n", __func__, insn); if ((insn & 0xc0) == 0x00) ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4; else if ((insn & 0xc0) == 0x40) ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4; else if ((insn & 0xf0) == 0x80) { unsigned long mask; unsigned long *vsp = (unsigned long *)ctrl->vrs[SP]; int load_sp, reg = 4; insn = (insn << 8) | unwind_get_byte(ctrl); mask = insn & 0x0fff; if (mask == 0) { pr_warning("unwind: 'Refuse to unwind' instruction %04lx\n", insn); return -URC_FAILURE; } /* pop R4-R15 according to mask */ load_sp = mask & (1 << (13 - 4)); while (mask) { if (mask & 1) ctrl->vrs[reg] = *vsp++; mask >>= 1; reg++; } if (!load_sp) ctrl->vrs[SP] = (unsigned long)vsp; } else if ((insn & 0xf0) == 0x90 && (insn & 0x0d) != 0x0d) ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f]; else if ((insn & 0xf0) == 0xa0) { unsigned long *vsp = (unsigned long *)ctrl->vrs[SP]; int reg; /* pop R4-R[4+bbb] */ for (reg = 4; reg <= 4 + (insn & 7); reg++) ctrl->vrs[reg] = *vsp++; if (insn & 0x80) ctrl->vrs[14] = *vsp++; ctrl->vrs[SP] = (unsigned long)vsp; } else if (insn == 0xb0) { if (ctrl->vrs[PC] == 0) ctrl->vrs[PC] = ctrl->vrs[LR]; /* no further processing */ ctrl->entries = 0; } else if (insn == 0xb1) { unsigned long mask = unwind_get_byte(ctrl); unsigned long *vsp = (unsigned long *)ctrl->vrs[SP]; int reg = 0; if (mask == 0 || mask & 0xf0) { pr_warning("unwind: Spare encoding 
%04lx\n", (insn << 8) | mask); return -URC_FAILURE; } /* pop R0-R3 according to mask */ while (mask) { if (mask & 1) ctrl->vrs[reg] = *vsp++; mask >>= 1; reg++; } ctrl->vrs[SP] = (unsigned long)vsp; } else if (insn == 0xb2) { unsigned long uleb128 = unwind_get_byte(ctrl); ctrl->vrs[SP] += 0x204 + (uleb128 << 2); } else { pr_warning("unwind: Unhandled instruction %02lx\n", insn); return -URC_FAILURE; } pr_debug("%s: fp = %08lx sp = %08lx lr = %08lx pc = %08lx\n", __func__, ctrl->vrs[FP], ctrl->vrs[SP], ctrl->vrs[LR], ctrl->vrs[PC]); return URC_OK; } /* * Unwind a single frame starting with *sp for the symbol at *pc. It * updates the *pc and *sp with the new values. */ int unwind_frame(struct stackframe *frame) { unsigned long high, low; const struct unwind_idx *idx; struct unwind_ctrl_block ctrl; /* only go to a higher address on the stack */ low = frame->sp; high = ALIGN(low, THREAD_SIZE); pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__, frame->pc, frame->lr, frame->sp); if (!kernel_text_address(frame->pc)) return -URC_FAILURE; idx = unwind_find_idx(frame->pc); if (!idx) { pr_warning("unwind: Index not found %08lx\n", frame->pc); return -URC_FAILURE; } ctrl.vrs[FP] = frame->fp; ctrl.vrs[SP] = frame->sp; ctrl.vrs[LR] = frame->lr; ctrl.vrs[PC] = 0; if (idx->insn == 1) /* can't unwind */ return -URC_FAILURE; else if ((idx->insn & 0x80000000) == 0) /* prel31 to the unwind table */ ctrl.insn = (unsigned long *)prel31_to_addr(&idx->insn); else if ((idx->insn & 0xff000000) == 0x80000000) /* only personality routine 0 supported in the index */ ctrl.insn = &idx->insn; else { pr_warning("unwind: Unsupported personality routine %08lx in the index at %p\n", idx->insn, idx); return -URC_FAILURE; } /* check the personality routine */ if ((*ctrl.insn & 0xff000000) == 0x80000000) { ctrl.byte = 2; ctrl.entries = 1; } else if ((*ctrl.insn & 0xff000000) == 0x81000000) { ctrl.byte = 1; ctrl.entries = 1 + ((*ctrl.insn & 0x00ff0000) >> 16); } else { pr_warning("unwind: 
Unsupported personality routine %08lx at %p\n", *ctrl.insn, ctrl.insn); return -URC_FAILURE; } while (ctrl.entries > 0) { int urc = unwind_exec_insn(&ctrl); if (urc < 0) return urc; if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high) return -URC_FAILURE; } if (ctrl.vrs[PC] == 0) ctrl.vrs[PC] = ctrl.vrs[LR]; /* check for infinite loop */ if (frame->pc == ctrl.vrs[PC]) return -URC_FAILURE; frame->fp = ctrl.vrs[FP]; frame->sp = ctrl.vrs[SP]; frame->lr = ctrl.vrs[LR]; frame->pc = ctrl.vrs[PC]; return URC_OK; } void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk) { struct stackframe frame; register unsigned long current_sp asm ("sp"); pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); if (!tsk) tsk = current; if (regs) { frame.fp = regs->ARM_fp; frame.sp = regs->ARM_sp; frame.lr = regs->ARM_lr; /* PC might be corrupted, use LR in that case. */ frame.pc = kernel_text_address(regs->ARM_pc) ? regs->ARM_pc : regs->ARM_lr; } else if (tsk == current) { frame.fp = (unsigned long)__builtin_frame_address(0); frame.sp = current_sp; frame.lr = (unsigned long)__builtin_return_address(0); frame.pc = (unsigned long)unwind_backtrace; } else { /* task blocked in __switch_to */ frame.fp = thread_saved_fp(tsk); frame.sp = thread_saved_sp(tsk); /* * The function calling __switch_to cannot be a leaf function * so LR is recovered from the stack. 
*/ frame.lr = 0; frame.pc = thread_saved_pc(tsk); } while (1) { int urc; unsigned long where = frame.pc; urc = unwind_frame(&frame); if (urc < 0) break; dump_backtrace_entry(where, frame.pc, frame.sp - 4); } } struct unwind_table *unwind_table_add(unsigned long start, unsigned long size, unsigned long text_addr, unsigned long text_size) { unsigned long flags; struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL); pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size, text_addr, text_size); if (!tab) return tab; tab->start = (const struct unwind_idx *)start; tab->stop = (const struct unwind_idx *)(start + size); tab->origin = unwind_find_origin(tab->start, tab->stop); tab->begin_addr = text_addr; tab->end_addr = text_addr + text_size; spin_lock_irqsave(&unwind_lock, flags); list_add_tail(&tab->list, &unwind_tables); spin_unlock_irqrestore(&unwind_lock, flags); return tab; } void unwind_table_del(struct unwind_table *tab) { unsigned long flags; if (!tab) return; spin_lock_irqsave(&unwind_lock, flags); list_del(&tab->list); spin_unlock_irqrestore(&unwind_lock, flags); kfree(tab); }
gpl-2.0
eaglerazor/android_kernel_samsung_apollo
arch/s390/oprofile/backtrace.c
9379
1784
/** * arch/s390/oprofile/backtrace.c * * S390 Version * Copyright (C) 2005 IBM Corporation, IBM Deutschland Entwicklung GmbH. * Author(s): Andreas Krebbel <Andreas.Krebbel@de.ibm.com> */ #include <linux/oprofile.h> #include <asm/processor.h> /* for struct stack_frame */ static unsigned long __show_trace(unsigned int *depth, unsigned long sp, unsigned long low, unsigned long high) { struct stack_frame *sf; struct pt_regs *regs; while (*depth) { sp = sp & PSW_ADDR_INSN; if (sp < low || sp > high - sizeof(*sf)) return sp; sf = (struct stack_frame *) sp; (*depth)--; oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN); /* Follow the backchain. */ while (*depth) { low = sp; sp = sf->back_chain & PSW_ADDR_INSN; if (!sp) break; if (sp <= low || sp > high - sizeof(*sf)) return sp; sf = (struct stack_frame *) sp; (*depth)--; oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN); } if (*depth == 0) break; /* Zero backchain detected, check for interrupt frame. */ sp = (unsigned long) (sf + 1); if (sp <= low || sp > high - sizeof(*regs)) return sp; regs = (struct pt_regs *) sp; (*depth)--; oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN); low = sp; sp = regs->gprs[15]; } return sp; } void s390_backtrace(struct pt_regs * const regs, unsigned int depth) { unsigned long head; struct stack_frame* head_sf; if (user_mode (regs)) return; head = regs->gprs[15]; head_sf = (struct stack_frame*)head; if (!head_sf->back_chain) return; head = head_sf->back_chain; head = __show_trace(&depth, head, S390_lowcore.async_stack - ASYNC_SIZE, S390_lowcore.async_stack); __show_trace(&depth, head, S390_lowcore.thread_info, S390_lowcore.thread_info + THREAD_SIZE); }
gpl-2.0
svimes/android_kernel_motorola_msm8960-common
drivers/input/joystick/spaceorb.c
9891
6749
/* * Copyright (c) 1999-2001 Vojtech Pavlik * * Based on the work of: * David Thompson */ /* * SpaceTec SpaceOrb 360 and Avenger 6dof controller driver for Linux */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> #include <linux/input.h> #include <linux/serio.h> #define DRIVER_DESC "SpaceTec SpaceOrb 360 and Avenger 6dof controller driver" MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); /* * Constants. */ #define SPACEORB_MAX_LENGTH 64 static int spaceorb_buttons[] = { BTN_TL, BTN_TR, BTN_Y, BTN_X, BTN_B, BTN_A }; static int spaceorb_axes[] = { ABS_X, ABS_Y, ABS_Z, ABS_RX, ABS_RY, ABS_RZ }; /* * Per-Orb data. 
*/ struct spaceorb { struct input_dev *dev; int idx; unsigned char data[SPACEORB_MAX_LENGTH]; char phys[32]; }; static unsigned char spaceorb_xor[] = "SpaceWare"; static unsigned char *spaceorb_errors[] = { "EEPROM storing 0 failed", "Receive queue overflow", "Transmit queue timeout", "Bad packet", "Power brown-out", "EEPROM checksum error", "Hardware fault" }; /* * spaceorb_process_packet() decodes packets the driver receives from the * SpaceOrb. */ static void spaceorb_process_packet(struct spaceorb *spaceorb) { struct input_dev *dev = spaceorb->dev; unsigned char *data = spaceorb->data; unsigned char c = 0; int axes[6]; int i; if (spaceorb->idx < 2) return; for (i = 0; i < spaceorb->idx; i++) c ^= data[i]; if (c) return; switch (data[0]) { case 'R': /* Reset packet */ spaceorb->data[spaceorb->idx - 1] = 0; for (i = 1; i < spaceorb->idx && spaceorb->data[i] == ' '; i++); printk(KERN_INFO "input: %s [%s] is %s\n", dev->name, spaceorb->data + i, spaceorb->phys); break; case 'D': /* Ball + button data */ if (spaceorb->idx != 12) return; for (i = 0; i < 9; i++) spaceorb->data[i+2] ^= spaceorb_xor[i]; axes[0] = ( data[2] << 3) | (data[ 3] >> 4); axes[1] = ((data[3] & 0x0f) << 6) | (data[ 4] >> 1); axes[2] = ((data[4] & 0x01) << 9) | (data[ 5] << 2) | (data[4] >> 5); axes[3] = ((data[6] & 0x1f) << 5) | (data[ 7] >> 2); axes[4] = ((data[7] & 0x03) << 8) | (data[ 8] << 1) | (data[7] >> 6); axes[5] = ((data[9] & 0x3f) << 4) | (data[10] >> 3); for (i = 0; i < 6; i++) input_report_abs(dev, spaceorb_axes[i], axes[i] - ((axes[i] & 0x200) ? 1024 : 0)); for (i = 0; i < 6; i++) input_report_key(dev, spaceorb_buttons[i], (data[1] >> i) & 1); break; case 'K': /* Button data */ if (spaceorb->idx != 5) return; for (i = 0; i < 6; i++) input_report_key(dev, spaceorb_buttons[i], (data[2] >> i) & 1); break; case 'E': /* Error packet */ if (spaceorb->idx != 4) return; printk(KERN_ERR "spaceorb: Device error. 
[ "); for (i = 0; i < 7; i++) if (data[1] & (1 << i)) printk("%s ", spaceorb_errors[i]); printk("]\n"); break; } input_sync(dev); } static irqreturn_t spaceorb_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct spaceorb* spaceorb = serio_get_drvdata(serio); if (~data & 0x80) { if (spaceorb->idx) spaceorb_process_packet(spaceorb); spaceorb->idx = 0; } if (spaceorb->idx < SPACEORB_MAX_LENGTH) spaceorb->data[spaceorb->idx++] = data & 0x7f; return IRQ_HANDLED; } /* * spaceorb_disconnect() is the opposite of spaceorb_connect() */ static void spaceorb_disconnect(struct serio *serio) { struct spaceorb* spaceorb = serio_get_drvdata(serio); serio_close(serio); serio_set_drvdata(serio, NULL); input_unregister_device(spaceorb->dev); kfree(spaceorb); } /* * spaceorb_connect() is the routine that is called when someone adds a * new serio device that supports SpaceOrb/Avenger protocol and registers * it as an input device. */ static int spaceorb_connect(struct serio *serio, struct serio_driver *drv) { struct spaceorb *spaceorb; struct input_dev *input_dev; int err = -ENOMEM; int i; spaceorb = kzalloc(sizeof(struct spaceorb), GFP_KERNEL); input_dev = input_allocate_device(); if (!spaceorb || !input_dev) goto fail1; spaceorb->dev = input_dev; snprintf(spaceorb->phys, sizeof(spaceorb->phys), "%s/input0", serio->phys); input_dev->name = "SpaceTec SpaceOrb 360 / Avenger"; input_dev->phys = spaceorb->phys; input_dev->id.bustype = BUS_RS232; input_dev->id.vendor = SERIO_SPACEORB; input_dev->id.product = 0x0001; input_dev->id.version = 0x0100; input_dev->dev.parent = &serio->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); for (i = 0; i < 6; i++) set_bit(spaceorb_buttons[i], input_dev->keybit); for (i = 0; i < 6; i++) input_set_abs_params(input_dev, spaceorb_axes[i], -508, 508, 0, 0); serio_set_drvdata(serio, spaceorb); err = serio_open(serio, drv); if (err) goto fail2; err = input_register_device(spaceorb->dev); if (err) goto fail3; return 0; 
fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev); kfree(spaceorb); return err; } /* * The serio driver structure. */ static struct serio_device_id spaceorb_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_SPACEORB, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, spaceorb_serio_ids); static struct serio_driver spaceorb_drv = { .driver = { .name = "spaceorb", }, .description = DRIVER_DESC, .id_table = spaceorb_serio_ids, .interrupt = spaceorb_interrupt, .connect = spaceorb_connect, .disconnect = spaceorb_disconnect, }; /* * The functions for inserting/removing us as a module. */ static int __init spaceorb_init(void) { return serio_register_driver(&spaceorb_drv); } static void __exit spaceorb_exit(void) { serio_unregister_driver(&spaceorb_drv); } module_init(spaceorb_init); module_exit(spaceorb_exit);
gpl-2.0
aduggan/linux
drivers/pci/vpd.c
9891
1192
/* * File: vpd.c * Purpose: Provide PCI VPD support * * Copyright (C) 2010 Broadcom Corporation. */ #include <linux/pci.h> #include <linux/export.h> int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt) { int i; for (i = off; i < len; ) { u8 val = buf[i]; if (val & PCI_VPD_LRDT) { /* Don't return success of the tag isn't complete */ if (i + PCI_VPD_LRDT_TAG_SIZE > len) break; if (val == rdt) return i; i += PCI_VPD_LRDT_TAG_SIZE + pci_vpd_lrdt_size(&buf[i]); } else { u8 tag = val & ~PCI_VPD_SRDT_LEN_MASK; if (tag == rdt) return i; if (tag == PCI_VPD_SRDT_END) break; i += PCI_VPD_SRDT_TAG_SIZE + pci_vpd_srdt_size(&buf[i]); } } return -ENOENT; } EXPORT_SYMBOL_GPL(pci_vpd_find_tag); int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, unsigned int len, const char *kw) { int i; for (i = off; i + PCI_VPD_INFO_FLD_HDR_SIZE <= off + len;) { if (buf[i + 0] == kw[0] && buf[i + 1] == kw[1]) return i; i += PCI_VPD_INFO_FLD_HDR_SIZE + pci_vpd_info_field_size(&buf[i]); } return -ENOENT; } EXPORT_SYMBOL_GPL(pci_vpd_find_info_keyword);
gpl-2.0
sakuraba001/android_kernel_samsung_klteactive
drivers/input/joystick/spaceorb.c
9891
6749
/* * Copyright (c) 1999-2001 Vojtech Pavlik * * Based on the work of: * David Thompson */ /* * SpaceTec SpaceOrb 360 and Avenger 6dof controller driver for Linux */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> #include <linux/input.h> #include <linux/serio.h> #define DRIVER_DESC "SpaceTec SpaceOrb 360 and Avenger 6dof controller driver" MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); /* * Constants. */ #define SPACEORB_MAX_LENGTH 64 static int spaceorb_buttons[] = { BTN_TL, BTN_TR, BTN_Y, BTN_X, BTN_B, BTN_A }; static int spaceorb_axes[] = { ABS_X, ABS_Y, ABS_Z, ABS_RX, ABS_RY, ABS_RZ }; /* * Per-Orb data. 
*/ struct spaceorb { struct input_dev *dev; int idx; unsigned char data[SPACEORB_MAX_LENGTH]; char phys[32]; }; static unsigned char spaceorb_xor[] = "SpaceWare"; static unsigned char *spaceorb_errors[] = { "EEPROM storing 0 failed", "Receive queue overflow", "Transmit queue timeout", "Bad packet", "Power brown-out", "EEPROM checksum error", "Hardware fault" }; /* * spaceorb_process_packet() decodes packets the driver receives from the * SpaceOrb. */ static void spaceorb_process_packet(struct spaceorb *spaceorb) { struct input_dev *dev = spaceorb->dev; unsigned char *data = spaceorb->data; unsigned char c = 0; int axes[6]; int i; if (spaceorb->idx < 2) return; for (i = 0; i < spaceorb->idx; i++) c ^= data[i]; if (c) return; switch (data[0]) { case 'R': /* Reset packet */ spaceorb->data[spaceorb->idx - 1] = 0; for (i = 1; i < spaceorb->idx && spaceorb->data[i] == ' '; i++); printk(KERN_INFO "input: %s [%s] is %s\n", dev->name, spaceorb->data + i, spaceorb->phys); break; case 'D': /* Ball + button data */ if (spaceorb->idx != 12) return; for (i = 0; i < 9; i++) spaceorb->data[i+2] ^= spaceorb_xor[i]; axes[0] = ( data[2] << 3) | (data[ 3] >> 4); axes[1] = ((data[3] & 0x0f) << 6) | (data[ 4] >> 1); axes[2] = ((data[4] & 0x01) << 9) | (data[ 5] << 2) | (data[4] >> 5); axes[3] = ((data[6] & 0x1f) << 5) | (data[ 7] >> 2); axes[4] = ((data[7] & 0x03) << 8) | (data[ 8] << 1) | (data[7] >> 6); axes[5] = ((data[9] & 0x3f) << 4) | (data[10] >> 3); for (i = 0; i < 6; i++) input_report_abs(dev, spaceorb_axes[i], axes[i] - ((axes[i] & 0x200) ? 1024 : 0)); for (i = 0; i < 6; i++) input_report_key(dev, spaceorb_buttons[i], (data[1] >> i) & 1); break; case 'K': /* Button data */ if (spaceorb->idx != 5) return; for (i = 0; i < 6; i++) input_report_key(dev, spaceorb_buttons[i], (data[2] >> i) & 1); break; case 'E': /* Error packet */ if (spaceorb->idx != 4) return; printk(KERN_ERR "spaceorb: Device error. 
[ "); for (i = 0; i < 7; i++) if (data[1] & (1 << i)) printk("%s ", spaceorb_errors[i]); printk("]\n"); break; } input_sync(dev); } static irqreturn_t spaceorb_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct spaceorb* spaceorb = serio_get_drvdata(serio); if (~data & 0x80) { if (spaceorb->idx) spaceorb_process_packet(spaceorb); spaceorb->idx = 0; } if (spaceorb->idx < SPACEORB_MAX_LENGTH) spaceorb->data[spaceorb->idx++] = data & 0x7f; return IRQ_HANDLED; } /* * spaceorb_disconnect() is the opposite of spaceorb_connect() */ static void spaceorb_disconnect(struct serio *serio) { struct spaceorb* spaceorb = serio_get_drvdata(serio); serio_close(serio); serio_set_drvdata(serio, NULL); input_unregister_device(spaceorb->dev); kfree(spaceorb); } /* * spaceorb_connect() is the routine that is called when someone adds a * new serio device that supports SpaceOrb/Avenger protocol and registers * it as an input device. */ static int spaceorb_connect(struct serio *serio, struct serio_driver *drv) { struct spaceorb *spaceorb; struct input_dev *input_dev; int err = -ENOMEM; int i; spaceorb = kzalloc(sizeof(struct spaceorb), GFP_KERNEL); input_dev = input_allocate_device(); if (!spaceorb || !input_dev) goto fail1; spaceorb->dev = input_dev; snprintf(spaceorb->phys, sizeof(spaceorb->phys), "%s/input0", serio->phys); input_dev->name = "SpaceTec SpaceOrb 360 / Avenger"; input_dev->phys = spaceorb->phys; input_dev->id.bustype = BUS_RS232; input_dev->id.vendor = SERIO_SPACEORB; input_dev->id.product = 0x0001; input_dev->id.version = 0x0100; input_dev->dev.parent = &serio->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); for (i = 0; i < 6; i++) set_bit(spaceorb_buttons[i], input_dev->keybit); for (i = 0; i < 6; i++) input_set_abs_params(input_dev, spaceorb_axes[i], -508, 508, 0, 0); serio_set_drvdata(serio, spaceorb); err = serio_open(serio, drv); if (err) goto fail2; err = input_register_device(spaceorb->dev); if (err) goto fail3; return 0; 
fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev); kfree(spaceorb); return err; } /* * The serio driver structure. */ static struct serio_device_id spaceorb_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_SPACEORB, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, spaceorb_serio_ids); static struct serio_driver spaceorb_drv = { .driver = { .name = "spaceorb", }, .description = DRIVER_DESC, .id_table = spaceorb_serio_ids, .interrupt = spaceorb_interrupt, .connect = spaceorb_connect, .disconnect = spaceorb_disconnect, }; /* * The functions for inserting/removing us as a module. */ static int __init spaceorb_init(void) { return serio_register_driver(&spaceorb_drv); } static void __exit spaceorb_exit(void) { serio_unregister_driver(&spaceorb_drv); } module_init(spaceorb_init); module_exit(spaceorb_exit);
gpl-2.0
varunchitre15/android_kernel_xperiaL
kernel/gcov/gcc_3_4.c
11683
11215
/* * This code provides functions to handle gcc's profiling data format * introduced with gcc 3.4. Future versions of gcc may change the gcov * format (as happened before), so all format-specific information needs * to be kept modular and easily exchangeable. * * This file is based on gcc-internal definitions. Functions and data * structures are defined to be compatible with gcc counterparts. * For a better understanding, refer to gcc source: gcc/gcov-io.h. * * Copyright IBM Corp. 2009 * Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com> * * Uses gcc-internal data definitions. */ #include <linux/errno.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/seq_file.h> #include <linux/vmalloc.h> #include "gcov.h" /* Symbolic links to be created for each profiling data file. */ const struct gcov_link gcov_link[] = { { OBJ_TREE, "gcno" }, /* Link to .gcno file in $(objtree). */ { 0, NULL}, }; /* * Determine whether a counter is active. Based on gcc magic. Doesn't change * at run-time. */ static int counter_active(struct gcov_info *info, unsigned int type) { return (1 << type) & info->ctr_mask; } /* Determine number of active counters. Based on gcc magic. */ static unsigned int num_counter_active(struct gcov_info *info) { unsigned int i; unsigned int result = 0; for (i = 0; i < GCOV_COUNTERS; i++) { if (counter_active(info, i)) result++; } return result; } /** * gcov_info_reset - reset profiling data to zero * @info: profiling data set */ void gcov_info_reset(struct gcov_info *info) { unsigned int active = num_counter_active(info); unsigned int i; for (i = 0; i < active; i++) { memset(info->counts[i].values, 0, info->counts[i].num * sizeof(gcov_type)); } } /** * gcov_info_is_compatible - check if profiling data can be added * @info1: first profiling data set * @info2: second profiling data set * * Returns non-zero if profiling data can be added, zero otherwise. 
*/ int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2) { return (info1->stamp == info2->stamp); } /** * gcov_info_add - add up profiling data * @dest: profiling data set to which data is added * @source: profiling data set which is added * * Adds profiling counts of @source to @dest. */ void gcov_info_add(struct gcov_info *dest, struct gcov_info *source) { unsigned int i; unsigned int j; for (i = 0; i < num_counter_active(dest); i++) { for (j = 0; j < dest->counts[i].num; j++) { dest->counts[i].values[j] += source->counts[i].values[j]; } } } /* Get size of function info entry. Based on gcc magic. */ static size_t get_fn_size(struct gcov_info *info) { size_t size; size = sizeof(struct gcov_fn_info) + num_counter_active(info) * sizeof(unsigned int); if (__alignof__(struct gcov_fn_info) > sizeof(unsigned int)) size = ALIGN(size, __alignof__(struct gcov_fn_info)); return size; } /* Get address of function info entry. Based on gcc magic. */ static struct gcov_fn_info *get_fn_info(struct gcov_info *info, unsigned int fn) { return (struct gcov_fn_info *) ((char *) info->functions + fn * get_fn_size(info)); } /** * gcov_info_dup - duplicate profiling data set * @info: profiling data set to duplicate * * Return newly allocated duplicate on success, %NULL on error. */ struct gcov_info *gcov_info_dup(struct gcov_info *info) { struct gcov_info *dup; unsigned int i; unsigned int active; /* Duplicate gcov_info. */ active = num_counter_active(info); dup = kzalloc(sizeof(struct gcov_info) + sizeof(struct gcov_ctr_info) * active, GFP_KERNEL); if (!dup) return NULL; dup->version = info->version; dup->stamp = info->stamp; dup->n_functions = info->n_functions; dup->ctr_mask = info->ctr_mask; /* Duplicate filename. */ dup->filename = kstrdup(info->filename, GFP_KERNEL); if (!dup->filename) goto err_free; /* Duplicate table of functions. 
*/ dup->functions = kmemdup(info->functions, info->n_functions * get_fn_size(info), GFP_KERNEL); if (!dup->functions) goto err_free; /* Duplicate counter arrays. */ for (i = 0; i < active ; i++) { struct gcov_ctr_info *ctr = &info->counts[i]; size_t size = ctr->num * sizeof(gcov_type); dup->counts[i].num = ctr->num; dup->counts[i].merge = ctr->merge; dup->counts[i].values = vmalloc(size); if (!dup->counts[i].values) goto err_free; memcpy(dup->counts[i].values, ctr->values, size); } return dup; err_free: gcov_info_free(dup); return NULL; } /** * gcov_info_free - release memory for profiling data set duplicate * @info: profiling data set duplicate to free */ void gcov_info_free(struct gcov_info *info) { unsigned int active = num_counter_active(info); unsigned int i; for (i = 0; i < active ; i++) vfree(info->counts[i].values); kfree(info->functions); kfree(info->filename); kfree(info); } /** * struct type_info - iterator helper array * @ctr_type: counter type * @offset: index of the first value of the current function for this type * * This array is needed to convert the in-memory data format into the in-file * data format: * * In-memory: * for each counter type * for each function * values * * In-file: * for each function * for each counter type * values * * See gcc source gcc/gcov-io.h for more information on data organization. 
*/ struct type_info { int ctr_type; unsigned int offset; }; /** * struct gcov_iterator - specifies current file position in logical records * @info: associated profiling data * @record: record type * @function: function number * @type: counter type * @count: index into values array * @num_types: number of counter types * @type_info: helper array to get values-array offset for current function */ struct gcov_iterator { struct gcov_info *info; int record; unsigned int function; unsigned int type; unsigned int count; int num_types; struct type_info type_info[0]; }; static struct gcov_fn_info *get_func(struct gcov_iterator *iter) { return get_fn_info(iter->info, iter->function); } static struct type_info *get_type(struct gcov_iterator *iter) { return &iter->type_info[iter->type]; } /** * gcov_iter_new - allocate and initialize profiling data iterator * @info: profiling data set to be iterated * * Return file iterator on success, %NULL otherwise. */ struct gcov_iterator *gcov_iter_new(struct gcov_info *info) { struct gcov_iterator *iter; iter = kzalloc(sizeof(struct gcov_iterator) + num_counter_active(info) * sizeof(struct type_info), GFP_KERNEL); if (iter) iter->info = info; return iter; } /** * gcov_iter_free - release memory for iterator * @iter: file iterator to free */ void gcov_iter_free(struct gcov_iterator *iter) { kfree(iter); } /** * gcov_iter_get_info - return profiling data set for given file iterator * @iter: file iterator */ struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter) { return iter->info; } /** * gcov_iter_start - reset file iterator to starting position * @iter: file iterator */ void gcov_iter_start(struct gcov_iterator *iter) { int i; iter->record = 0; iter->function = 0; iter->type = 0; iter->count = 0; iter->num_types = 0; for (i = 0; i < GCOV_COUNTERS; i++) { if (counter_active(iter->info, i)) { iter->type_info[iter->num_types].ctr_type = i; iter->type_info[iter->num_types++].offset = 0; } } } /* Mapping of logical record number 
to actual file content. */ #define RECORD_FILE_MAGIC 0 #define RECORD_GCOV_VERSION 1 #define RECORD_TIME_STAMP 2 #define RECORD_FUNCTION_TAG 3 #define RECORD_FUNCTON_TAG_LEN 4 #define RECORD_FUNCTION_IDENT 5 #define RECORD_FUNCTION_CHECK 6 #define RECORD_COUNT_TAG 7 #define RECORD_COUNT_LEN 8 #define RECORD_COUNT 9 /** * gcov_iter_next - advance file iterator to next logical record * @iter: file iterator * * Return zero if new position is valid, non-zero if iterator has reached end. */ int gcov_iter_next(struct gcov_iterator *iter) { switch (iter->record) { case RECORD_FILE_MAGIC: case RECORD_GCOV_VERSION: case RECORD_FUNCTION_TAG: case RECORD_FUNCTON_TAG_LEN: case RECORD_FUNCTION_IDENT: case RECORD_COUNT_TAG: /* Advance to next record */ iter->record++; break; case RECORD_COUNT: /* Advance to next count */ iter->count++; /* fall through */ case RECORD_COUNT_LEN: if (iter->count < get_func(iter)->n_ctrs[iter->type]) { iter->record = 9; break; } /* Advance to next counter type */ get_type(iter)->offset += iter->count; iter->count = 0; iter->type++; /* fall through */ case RECORD_FUNCTION_CHECK: if (iter->type < iter->num_types) { iter->record = 7; break; } /* Advance to next function */ iter->type = 0; iter->function++; /* fall through */ case RECORD_TIME_STAMP: if (iter->function < iter->info->n_functions) iter->record = 3; else iter->record = -1; break; } /* Check for EOF. */ if (iter->record == -1) return -EINVAL; else return 0; } /** * seq_write_gcov_u32 - write 32 bit number in gcov format to seq_file * @seq: seq_file handle * @v: value to be stored * * Number format defined by gcc: numbers are recorded in the 32 bit * unsigned binary form of the endianness of the machine generating the * file. 
*/ static int seq_write_gcov_u32(struct seq_file *seq, u32 v) { return seq_write(seq, &v, sizeof(v)); } /** * seq_write_gcov_u64 - write 64 bit number in gcov format to seq_file * @seq: seq_file handle * @v: value to be stored * * Number format defined by gcc: numbers are recorded in the 32 bit * unsigned binary form of the endianness of the machine generating the * file. 64 bit numbers are stored as two 32 bit numbers, the low part * first. */ static int seq_write_gcov_u64(struct seq_file *seq, u64 v) { u32 data[2]; data[0] = (v & 0xffffffffUL); data[1] = (v >> 32); return seq_write(seq, data, sizeof(data)); } /** * gcov_iter_write - write data for current pos to seq_file * @iter: file iterator * @seq: seq_file handle * * Return zero on success, non-zero otherwise. */ int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq) { int rc = -EINVAL; switch (iter->record) { case RECORD_FILE_MAGIC: rc = seq_write_gcov_u32(seq, GCOV_DATA_MAGIC); break; case RECORD_GCOV_VERSION: rc = seq_write_gcov_u32(seq, iter->info->version); break; case RECORD_TIME_STAMP: rc = seq_write_gcov_u32(seq, iter->info->stamp); break; case RECORD_FUNCTION_TAG: rc = seq_write_gcov_u32(seq, GCOV_TAG_FUNCTION); break; case RECORD_FUNCTON_TAG_LEN: rc = seq_write_gcov_u32(seq, 2); break; case RECORD_FUNCTION_IDENT: rc = seq_write_gcov_u32(seq, get_func(iter)->ident); break; case RECORD_FUNCTION_CHECK: rc = seq_write_gcov_u32(seq, get_func(iter)->checksum); break; case RECORD_COUNT_TAG: rc = seq_write_gcov_u32(seq, GCOV_TAG_FOR_COUNTER(get_type(iter)->ctr_type)); break; case RECORD_COUNT_LEN: rc = seq_write_gcov_u32(seq, get_func(iter)->n_ctrs[iter->type] * 2); break; case RECORD_COUNT: rc = seq_write_gcov_u64(seq, iter->info->counts[iter->type]. values[iter->count + get_type(iter)->offset]); break; } return rc; }
gpl-2.0
CyanogenMod/android_kernel_samsung_p1
kernel/gcov/gcc_3_4.c
11683
11215
/* * This code provides functions to handle gcc's profiling data format * introduced with gcc 3.4. Future versions of gcc may change the gcov * format (as happened before), so all format-specific information needs * to be kept modular and easily exchangeable. * * This file is based on gcc-internal definitions. Functions and data * structures are defined to be compatible with gcc counterparts. * For a better understanding, refer to gcc source: gcc/gcov-io.h. * * Copyright IBM Corp. 2009 * Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com> * * Uses gcc-internal data definitions. */ #include <linux/errno.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/seq_file.h> #include <linux/vmalloc.h> #include "gcov.h" /* Symbolic links to be created for each profiling data file. */ const struct gcov_link gcov_link[] = { { OBJ_TREE, "gcno" }, /* Link to .gcno file in $(objtree). */ { 0, NULL}, }; /* * Determine whether a counter is active. Based on gcc magic. Doesn't change * at run-time. */ static int counter_active(struct gcov_info *info, unsigned int type) { return (1 << type) & info->ctr_mask; } /* Determine number of active counters. Based on gcc magic. */ static unsigned int num_counter_active(struct gcov_info *info) { unsigned int i; unsigned int result = 0; for (i = 0; i < GCOV_COUNTERS; i++) { if (counter_active(info, i)) result++; } return result; } /** * gcov_info_reset - reset profiling data to zero * @info: profiling data set */ void gcov_info_reset(struct gcov_info *info) { unsigned int active = num_counter_active(info); unsigned int i; for (i = 0; i < active; i++) { memset(info->counts[i].values, 0, info->counts[i].num * sizeof(gcov_type)); } } /** * gcov_info_is_compatible - check if profiling data can be added * @info1: first profiling data set * @info2: second profiling data set * * Returns non-zero if profiling data can be added, zero otherwise. 
*/ int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2) { return (info1->stamp == info2->stamp); } /** * gcov_info_add - add up profiling data * @dest: profiling data set to which data is added * @source: profiling data set which is added * * Adds profiling counts of @source to @dest. */ void gcov_info_add(struct gcov_info *dest, struct gcov_info *source) { unsigned int i; unsigned int j; for (i = 0; i < num_counter_active(dest); i++) { for (j = 0; j < dest->counts[i].num; j++) { dest->counts[i].values[j] += source->counts[i].values[j]; } } } /* Get size of function info entry. Based on gcc magic. */ static size_t get_fn_size(struct gcov_info *info) { size_t size; size = sizeof(struct gcov_fn_info) + num_counter_active(info) * sizeof(unsigned int); if (__alignof__(struct gcov_fn_info) > sizeof(unsigned int)) size = ALIGN(size, __alignof__(struct gcov_fn_info)); return size; } /* Get address of function info entry. Based on gcc magic. */ static struct gcov_fn_info *get_fn_info(struct gcov_info *info, unsigned int fn) { return (struct gcov_fn_info *) ((char *) info->functions + fn * get_fn_size(info)); } /** * gcov_info_dup - duplicate profiling data set * @info: profiling data set to duplicate * * Return newly allocated duplicate on success, %NULL on error. */ struct gcov_info *gcov_info_dup(struct gcov_info *info) { struct gcov_info *dup; unsigned int i; unsigned int active; /* Duplicate gcov_info. */ active = num_counter_active(info); dup = kzalloc(sizeof(struct gcov_info) + sizeof(struct gcov_ctr_info) * active, GFP_KERNEL); if (!dup) return NULL; dup->version = info->version; dup->stamp = info->stamp; dup->n_functions = info->n_functions; dup->ctr_mask = info->ctr_mask; /* Duplicate filename. */ dup->filename = kstrdup(info->filename, GFP_KERNEL); if (!dup->filename) goto err_free; /* Duplicate table of functions. 
*/ dup->functions = kmemdup(info->functions, info->n_functions * get_fn_size(info), GFP_KERNEL); if (!dup->functions) goto err_free; /* Duplicate counter arrays. */ for (i = 0; i < active ; i++) { struct gcov_ctr_info *ctr = &info->counts[i]; size_t size = ctr->num * sizeof(gcov_type); dup->counts[i].num = ctr->num; dup->counts[i].merge = ctr->merge; dup->counts[i].values = vmalloc(size); if (!dup->counts[i].values) goto err_free; memcpy(dup->counts[i].values, ctr->values, size); } return dup; err_free: gcov_info_free(dup); return NULL; } /** * gcov_info_free - release memory for profiling data set duplicate * @info: profiling data set duplicate to free */ void gcov_info_free(struct gcov_info *info) { unsigned int active = num_counter_active(info); unsigned int i; for (i = 0; i < active ; i++) vfree(info->counts[i].values); kfree(info->functions); kfree(info->filename); kfree(info); } /** * struct type_info - iterator helper array * @ctr_type: counter type * @offset: index of the first value of the current function for this type * * This array is needed to convert the in-memory data format into the in-file * data format: * * In-memory: * for each counter type * for each function * values * * In-file: * for each function * for each counter type * values * * See gcc source gcc/gcov-io.h for more information on data organization. 
*/ struct type_info { int ctr_type; unsigned int offset; }; /** * struct gcov_iterator - specifies current file position in logical records * @info: associated profiling data * @record: record type * @function: function number * @type: counter type * @count: index into values array * @num_types: number of counter types * @type_info: helper array to get values-array offset for current function */ struct gcov_iterator { struct gcov_info *info; int record; unsigned int function; unsigned int type; unsigned int count; int num_types; struct type_info type_info[0]; }; static struct gcov_fn_info *get_func(struct gcov_iterator *iter) { return get_fn_info(iter->info, iter->function); } static struct type_info *get_type(struct gcov_iterator *iter) { return &iter->type_info[iter->type]; } /** * gcov_iter_new - allocate and initialize profiling data iterator * @info: profiling data set to be iterated * * Return file iterator on success, %NULL otherwise. */ struct gcov_iterator *gcov_iter_new(struct gcov_info *info) { struct gcov_iterator *iter; iter = kzalloc(sizeof(struct gcov_iterator) + num_counter_active(info) * sizeof(struct type_info), GFP_KERNEL); if (iter) iter->info = info; return iter; } /** * gcov_iter_free - release memory for iterator * @iter: file iterator to free */ void gcov_iter_free(struct gcov_iterator *iter) { kfree(iter); } /** * gcov_iter_get_info - return profiling data set for given file iterator * @iter: file iterator */ struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter) { return iter->info; } /** * gcov_iter_start - reset file iterator to starting position * @iter: file iterator */ void gcov_iter_start(struct gcov_iterator *iter) { int i; iter->record = 0; iter->function = 0; iter->type = 0; iter->count = 0; iter->num_types = 0; for (i = 0; i < GCOV_COUNTERS; i++) { if (counter_active(iter->info, i)) { iter->type_info[iter->num_types].ctr_type = i; iter->type_info[iter->num_types++].offset = 0; } } } /* Mapping of logical record number 
to actual file content. */ #define RECORD_FILE_MAGIC 0 #define RECORD_GCOV_VERSION 1 #define RECORD_TIME_STAMP 2 #define RECORD_FUNCTION_TAG 3 #define RECORD_FUNCTON_TAG_LEN 4 #define RECORD_FUNCTION_IDENT 5 #define RECORD_FUNCTION_CHECK 6 #define RECORD_COUNT_TAG 7 #define RECORD_COUNT_LEN 8 #define RECORD_COUNT 9 /** * gcov_iter_next - advance file iterator to next logical record * @iter: file iterator * * Return zero if new position is valid, non-zero if iterator has reached end. */ int gcov_iter_next(struct gcov_iterator *iter) { switch (iter->record) { case RECORD_FILE_MAGIC: case RECORD_GCOV_VERSION: case RECORD_FUNCTION_TAG: case RECORD_FUNCTON_TAG_LEN: case RECORD_FUNCTION_IDENT: case RECORD_COUNT_TAG: /* Advance to next record */ iter->record++; break; case RECORD_COUNT: /* Advance to next count */ iter->count++; /* fall through */ case RECORD_COUNT_LEN: if (iter->count < get_func(iter)->n_ctrs[iter->type]) { iter->record = 9; break; } /* Advance to next counter type */ get_type(iter)->offset += iter->count; iter->count = 0; iter->type++; /* fall through */ case RECORD_FUNCTION_CHECK: if (iter->type < iter->num_types) { iter->record = 7; break; } /* Advance to next function */ iter->type = 0; iter->function++; /* fall through */ case RECORD_TIME_STAMP: if (iter->function < iter->info->n_functions) iter->record = 3; else iter->record = -1; break; } /* Check for EOF. */ if (iter->record == -1) return -EINVAL; else return 0; } /** * seq_write_gcov_u32 - write 32 bit number in gcov format to seq_file * @seq: seq_file handle * @v: value to be stored * * Number format defined by gcc: numbers are recorded in the 32 bit * unsigned binary form of the endianness of the machine generating the * file. 
*/ static int seq_write_gcov_u32(struct seq_file *seq, u32 v) { return seq_write(seq, &v, sizeof(v)); } /** * seq_write_gcov_u64 - write 64 bit number in gcov format to seq_file * @seq: seq_file handle * @v: value to be stored * * Number format defined by gcc: numbers are recorded in the 32 bit * unsigned binary form of the endianness of the machine generating the * file. 64 bit numbers are stored as two 32 bit numbers, the low part * first. */ static int seq_write_gcov_u64(struct seq_file *seq, u64 v) { u32 data[2]; data[0] = (v & 0xffffffffUL); data[1] = (v >> 32); return seq_write(seq, data, sizeof(data)); } /** * gcov_iter_write - write data for current pos to seq_file * @iter: file iterator * @seq: seq_file handle * * Return zero on success, non-zero otherwise. */ int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq) { int rc = -EINVAL; switch (iter->record) { case RECORD_FILE_MAGIC: rc = seq_write_gcov_u32(seq, GCOV_DATA_MAGIC); break; case RECORD_GCOV_VERSION: rc = seq_write_gcov_u32(seq, iter->info->version); break; case RECORD_TIME_STAMP: rc = seq_write_gcov_u32(seq, iter->info->stamp); break; case RECORD_FUNCTION_TAG: rc = seq_write_gcov_u32(seq, GCOV_TAG_FUNCTION); break; case RECORD_FUNCTON_TAG_LEN: rc = seq_write_gcov_u32(seq, 2); break; case RECORD_FUNCTION_IDENT: rc = seq_write_gcov_u32(seq, get_func(iter)->ident); break; case RECORD_FUNCTION_CHECK: rc = seq_write_gcov_u32(seq, get_func(iter)->checksum); break; case RECORD_COUNT_TAG: rc = seq_write_gcov_u32(seq, GCOV_TAG_FOR_COUNTER(get_type(iter)->ctr_type)); break; case RECORD_COUNT_LEN: rc = seq_write_gcov_u32(seq, get_func(iter)->n_ctrs[iter->type] * 2); break; case RECORD_COUNT: rc = seq_write_gcov_u64(seq, iter->info->counts[iter->type]. values[iter->count + get_type(iter)->offset]); break; } return rc; }
gpl-2.0
shinkumara/royss_shinkumara_kernel
block/partitions/acorn.c
13219
12563
/*
 *  linux/fs/partitions/acorn.c
 *
 *  Copyright (c) 1996-2000 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Scan ADFS partitions on hard disk drives.  Unfortunately, there
 *  isn't a standard for partitioning drives on Acorn machines, so
 *  every single manufacturer of SCSI and IDE cards created their own
 *  method.
 */
#include <linux/buffer_head.h>
#include <linux/adfs_fs.h>

#include "check.h"
#include "acorn.h"

/*
 * Partition types. (Oh for reusability)
 */
#define PARTITION_RISCIX_MFM	1
#define PARTITION_RISCIX_SCSI	2
#define PARTITION_LINUX		9

#if defined(CONFIG_ACORN_PARTITION_CUMANA) || \
	defined(CONFIG_ACORN_PARTITION_ADFS)
/*
 * Validate the ADFS boot block at @data and, if valid, register one
 * partition spanning the disc in @slot.  Returns the disc record
 * (located at offset 0x1c0 inside the boot block) or NULL when the
 * boot block checksum fails or the disc size fields are zero.
 */
static struct adfs_discrecord *
adfs_partition(struct parsed_partitions *state, char *name, char *data,
	       unsigned long first_sector, int slot)
{
	struct adfs_discrecord *dr;
	unsigned int nr_sects;

	if (adfs_checkbblk(data))
		return NULL;

	dr = (struct adfs_discrecord *)(data + 0x1c0);

	if (dr->disc_size == 0 && dr->disc_size_high == 0)
		return NULL;

	/* Combine the two on-disk size fields into a 512-byte sector count. */
	nr_sects = (le32_to_cpu(dr->disc_size_high) << 23) |
		   (le32_to_cpu(dr->disc_size) >> 9);

	if (name) {
		strlcat(state->pp_buf, " [", PAGE_SIZE);
		strlcat(state->pp_buf, name, PAGE_SIZE);
		strlcat(state->pp_buf, "]", PAGE_SIZE);
	}
	put_partition(state, slot, first_sector, nr_sects);
	return dr;
}
#endif

#ifdef CONFIG_ACORN_PARTITION_RISCIX

/* On-disk RISCiX partition entry. */
struct riscix_part {
	__le32	start;
	__le32	length;
	__le32	one;
	char	name[16];
};

struct riscix_record {
	__le32	magic;
#define RISCIX_MAGIC	cpu_to_le32(0x4a657320)
	__le32	date;
	struct riscix_part part[8];
};

#if defined(CONFIG_ACORN_PARTITION_CUMANA) || \
	defined(CONFIG_ACORN_PARTITION_ADFS)
/*
 * Parse a RISCiX partition record located at @first_sect and register
 * the sub-partitions it describes.  Returns the next free slot, or -1
 * if the sector could not be read.
 */
static int riscix_partition(struct parsed_partitions *state,
			    unsigned long first_sect, int slot,
			    unsigned long nr_sects)
{
	Sector sect;
	struct riscix_record *rr;

	rr = read_part_sector(state, first_sect, &sect);
	if (!rr)
		return -1;

	strlcat(state->pp_buf, " [RISCiX]", PAGE_SIZE);

	if (rr->magic == RISCIX_MAGIC) {
		unsigned long size = nr_sects > 2 ? 2 : nr_sects;
		int part;

		strlcat(state->pp_buf, " <", PAGE_SIZE);

		put_partition(state, slot++, first_sect, size);
		for (part = 0; part < 8; part++) {
			if (rr->part[part].one &&
			    memcmp(rr->part[part].name, "All\0", 4)) {
				put_partition(state, slot++,
					le32_to_cpu(rr->part[part].start),
					le32_to_cpu(rr->part[part].length));
				strlcat(state->pp_buf, "(", PAGE_SIZE);
				/* NOTE(review): name[16] comes straight off
				 * disk and is not guaranteed NUL-terminated
				 * before being handed to strlcat — TODO
				 * confirm against the on-disk format. */
				strlcat(state->pp_buf, rr->part[part].name,
					PAGE_SIZE);
				strlcat(state->pp_buf, ")", PAGE_SIZE);
			}
		}

		strlcat(state->pp_buf, " >\n", PAGE_SIZE);
	} else {
		put_partition(state, slot++, first_sect, nr_sects);
	}

	put_dev_sector(sect);
	return slot;
}
#endif
#endif

#define LINUX_NATIVE_MAGIC 0xdeafa1de
#define LINUX_SWAP_MAGIC   0xdeafab1e

/* On-disk Linux-on-Acorn partition entry. */
struct linux_part {
	__le32 magic;
	__le32 start_sect;
	__le32 nr_sects;
};

#if defined(CONFIG_ACORN_PARTITION_CUMANA) || \
	defined(CONFIG_ACORN_PARTITION_ADFS)
/*
 * Parse a table of Linux sub-partitions starting at @first_sect and
 * register each entry until a non-Linux magic or the slot limit is hit.
 * Returns the next free slot, or -1 if the sector could not be read.
 */
static int linux_partition(struct parsed_partitions *state,
			   unsigned long first_sect, int slot,
			   unsigned long nr_sects)
{
	Sector sect;
	struct linux_part *linuxp;
	unsigned long size = nr_sects > 2 ? 2 : nr_sects;

	strlcat(state->pp_buf, " [Linux]", PAGE_SIZE);

	put_partition(state, slot++, first_sect, size);

	linuxp = read_part_sector(state, first_sect, &sect);
	if (!linuxp)
		return -1;

	strlcat(state->pp_buf, " <", PAGE_SIZE);
	while (linuxp->magic == cpu_to_le32(LINUX_NATIVE_MAGIC) ||
	       linuxp->magic == cpu_to_le32(LINUX_SWAP_MAGIC)) {
		if (slot == state->limit)
			break;
		/* Entry offsets are relative to the table's own sector. */
		put_partition(state, slot++, first_sect +
				 le32_to_cpu(linuxp->start_sect),
				 le32_to_cpu(linuxp->nr_sects));
		linuxp ++;
	}
	strlcat(state->pp_buf, " >", PAGE_SIZE);

	put_dev_sector(sect);
	return slot;
}
#endif

#ifdef CONFIG_ACORN_PARTITION_CUMANA
int adfspart_check_CUMANA(struct parsed_partitions *state)
{
	unsigned long first_sector = 0;
	unsigned int start_blk = 0;
	Sector sect;
	unsigned char *data;
	char *name = "CUMANA/ADFS";
	int first = 1;
	int slot = 1;

	/*
	 * Try Cumana style partitions - sector 6 contains ADFS boot block
	 * with pointer to next 'drive'.
	 *
	 * There are unknowns in this code - is the 'cylinder number' of the
	 * next partition relative to the start of this one - I'm assuming
	 * it is.
	 *
	 * Also, which ID did Cumana use?
	 *
	 * This is totally unfinished, and will require more work to get it
	 * going. Hence it is totally untested.
	 */
	do {
		struct adfs_discrecord *dr;
		unsigned int nr_sects;

		data = read_part_sector(state, start_blk * 2 + 6, &sect);
		if (!data)
			return -1;

		if (slot == state->limit)
			break;

		dr = adfs_partition(state, name, data, first_sector, slot++);
		if (!dr)
			break;

		name = NULL;	/* only label the first partition */

		/* Cylinders * heads (+1 if the double-sided bit 0x40 is
		 * set in lowsector) * sectors-per-track. */
		nr_sects = (data[0x1fd] + (data[0x1fe] << 8)) *
			   (dr->heads + (dr->lowsector & 0x40 ? 1 : 0)) *
			   dr->secspertrack;

		if (!nr_sects)
			break;

		first = 0;
		first_sector += nr_sects;
		start_blk += nr_sects >> (BLOCK_SIZE_BITS - 9);
		nr_sects = 0; /* hmm - should be partition size */

		switch (data[0x1fc] & 15) {
		case 0: /* No partition / ADFS? */
			break;

#ifdef CONFIG_ACORN_PARTITION_RISCIX
		case PARTITION_RISCIX_SCSI:
			/* RISCiX - we don't know how to find the next one. */
			slot = riscix_partition(state, first_sector, slot,
						nr_sects);
			break;
#endif

		case PARTITION_LINUX:
			slot = linux_partition(state, first_sector, slot,
					       nr_sects);
			break;
		}
		put_dev_sector(sect);
		if (slot == -1)
			return -1;
	} while (1);
	/* Release the sector held when a 'break' exited the loop above. */
	put_dev_sector(sect);
	return first ? 0 : 1;
}
#endif

#ifdef CONFIG_ACORN_PARTITION_ADFS
/*
 * Purpose: allocate ADFS partitions.
 *
 * Params : hd		- pointer to gendisk structure to store partition info.
 *	    dev		- device number to access.
 *
 * Returns: -1 on error, 0 for no ADFS boot sector, 1 for ok.
 *
 * Alloc  : hda  = whole drive
 *	    hda1 = ADFS partition on first drive.
 *	    hda2 = non-ADFS partition.
 */
int adfspart_check_ADFS(struct parsed_partitions *state)
{
	unsigned long start_sect, nr_sects, sectscyl, heads;
	Sector sect;
	unsigned char *data;
	struct adfs_discrecord *dr;
	unsigned char id;
	int slot = 1;

	data = read_part_sector(state, 6, &sect);
	if (!data)
		return -1;

	dr = adfs_partition(state, "ADFS", data, 0, slot++);
	if (!dr) {
		put_dev_sector(sect);
		return 0;
	}

	heads = dr->heads + ((dr->lowsector >> 6) & 1);
	sectscyl = dr->secspertrack * heads;
	start_sect = ((data[0x1fe] << 8) + data[0x1fd]) * sectscyl;
	id = data[0x1fc] & 15;
	put_dev_sector(sect);

	/*
	 * Work out start of non-adfs partition.
	 */
	nr_sects = (state->bdev->bd_inode->i_size >> 9) - start_sect;

	if (start_sect) {
		switch (id) {
#ifdef CONFIG_ACORN_PARTITION_RISCIX
		case PARTITION_RISCIX_SCSI:
		case PARTITION_RISCIX_MFM:
			slot = riscix_partition(state, start_sect, slot,
						nr_sects);
			break;
#endif

		case PARTITION_LINUX:
			slot = linux_partition(state, start_sect, slot,
					       nr_sects);
			break;
		}
	}
	strlcat(state->pp_buf, "\n", PAGE_SIZE);
	return 1;
}
#endif

#ifdef CONFIG_ACORN_PARTITION_ICS

/* On-disk ICS partition entry. */
struct ics_part {
	__le32 start;
	__le32 size;
};

/* Returns 1 if the first sector of the partition at @block carries the
 * "LinuxPart" marker, 0 otherwise (including read failure). */
static int adfspart_check_ICSLinux(struct parsed_partitions *state,
				   unsigned long block)
{
	Sector sect;
	unsigned char *data = read_part_sector(state, block, &sect);
	int result = 0;

	if (data) {
		if (memcmp(data, "LinuxPart", 9) == 0)
			result = 1;
		put_dev_sector(sect);
	}

	return result;
}

/*
 * Check for a valid ICS partition using the checksum.
 */
static inline int valid_ics_sector(const unsigned char *data)
{
	unsigned long sum;
	int i;

	/* 0x50617274 is ASCII "Part" — the checksum seed. */
	for (i = 0, sum = 0x50617274; i < 508; i++)
		sum += data[i];

	sum -= le32_to_cpu(*(__le32 *)(&data[508]));

	return sum == 0;
}

/*
 * Purpose: allocate ICS partitions.
 * Params : hd		- pointer to gendisk structure to store partition info.
 *	    dev		- device number to access.
 * Returns: -1 on error, 0 for no ICS table, 1 for partitions ok.
 * Alloc  : hda  = whole drive
 *	    hda1 = ADFS partition 0 on first drive.
 *	    hda2 = ADFS partition 1 on first drive.
 *		..etc..
 */
int adfspart_check_ICS(struct parsed_partitions *state)
{
	const unsigned char *data;
	const struct ics_part *p;
	int slot;
	Sector sect;

	/*
	 * Try ICS style partitions - sector 0 contains partition info.
	 */
	data = read_part_sector(state, 0, &sect);
	if (!data)
		return -1;

	if (!valid_ics_sector(data)) {
		put_dev_sector(sect);
		return 0;
	}

	strlcat(state->pp_buf, " [ICS]", PAGE_SIZE);

	for (slot = 1, p = (const struct ics_part *)data; p->size; p++) {
		u32 start = le32_to_cpu(p->start);
		s32 size = le32_to_cpu(p->size); /* yes, it's signed. */

		if (slot == state->limit)
			break;

		/*
		 * Negative sizes tell the RISC OS ICS driver to ignore
		 * this partition - in effect it says that this does not
		 * contain an ADFS filesystem.
		 */
		if (size < 0) {
			size = -size;

			/*
			 * Our own extension - We use the first sector
			 * of the partition to identify what type this
			 * partition is.  We must not make this visible
			 * to the filesystem.
			 */
			if (size > 1 && adfspart_check_ICSLinux(state, start)) {
				start += 1;
				size -= 1;
			}
		}

		if (size)
			put_partition(state, slot++, start, size);
	}

	put_dev_sector(sect);
	strlcat(state->pp_buf, "\n", PAGE_SIZE);
	return 1;
}
#endif

#ifdef CONFIG_ACORN_PARTITION_POWERTEC

/* On-disk PowerTec partition entry. */
struct ptec_part {
	__le32 unused1;
	__le32 unused2;
	__le32 start;
	__le32 size;
	__le32 unused5;
	char type[8];
};

static inline int valid_ptec_sector(const unsigned char *data)
{
	unsigned char checksum = 0x2a;
	int i;

	/*
	 * If it looks like a PC/BIOS partition, then it
	 * probably isn't PowerTec.
	 */
	if (data[510] == 0x55 && data[511] == 0xaa)
		return 0;

	for (i = 0; i < 511; i++)
		checksum += data[i];

	return checksum == data[511];
}

/*
 * Purpose: allocate PowerTec partitions.  (Comment previously said "ICS";
 *	    it was copy-pasted from adfspart_check_ICS above.)
 * Params : hd		- pointer to gendisk structure to store partition info.
 *	    dev		- device number to access.
 * Returns: -1 on error, 0 for no PowerTec table, 1 for partitions ok.
 * Alloc  : hda  = whole drive
 *	    hda1 = ADFS partition 0 on first drive.
 *	    hda2 = ADFS partition 1 on first drive.
 *		..etc..
 */
int adfspart_check_POWERTEC(struct parsed_partitions *state)
{
	Sector sect;
	const unsigned char *data;
	const struct ptec_part *p;
	int slot = 1;
	int i;

	data = read_part_sector(state, 0, &sect);
	if (!data)
		return -1;

	if (!valid_ptec_sector(data)) {
		put_dev_sector(sect);
		return 0;
	}

	strlcat(state->pp_buf, " [POWERTEC]", PAGE_SIZE);

	for (i = 0, p = (const struct ptec_part *)data; i < 12; i++, p++) {
		u32 start = le32_to_cpu(p->start);
		u32 size = le32_to_cpu(p->size);

		if (size)
			put_partition(state, slot++, start, size);
	}

	put_dev_sector(sect);
	strlcat(state->pp_buf, "\n", PAGE_SIZE);
	return 1;
}
#endif

#ifdef CONFIG_ACORN_PARTITION_EESOX
struct eesox_part {
	char	magic[6];
	char	name[10];
	__le32	start;
	__le32	unused6;
	__le32	unused7;
	__le32	unused8;
};

/*
 * Guess who created this format?
 */
static const char eesox_name[] = {
	'N', 'e', 'i', 'l', ' ',
	'C', 'r', 'i', 't', 'c', 'h', 'e', 'l', 'l', ' ', ' '
};

/*
 * EESOX SCSI partition format.
 *
 * This is a goddamned awful partition format.  We don't seem to store
 * the size of the partition in this table, only the start addresses.
 *
 * There are two possibilities where the size comes from:
 *  1. The individual ADFS boot block entries that are placed on the disk.
 *  2. The start address of the next entry.
 */
int adfspart_check_EESOX(struct parsed_partitions *state)
{
	Sector sect;
	const unsigned char *data;
	unsigned char buffer[256];
	struct eesox_part *p;
	sector_t start = 0;
	int i, slot = 1;

	data = read_part_sector(state, 7, &sect);
	if (!data)
		return -1;

	/*
	 * "Decrypt" the partition table.  God knows why...
	 */
	for (i = 0; i < 256; i++)
		buffer[i] = data[i] ^ eesox_name[i & 15];

	put_dev_sector(sect);

	for (i = 0, p = (struct eesox_part *)buffer; i < 8; i++, p++) {
		sector_t next;

		/* compares 5 chars + NUL of "Eesox" against magic[6] */
		if (memcmp(p->magic, "Eesox", 6))
			break;

		next = le32_to_cpu(p->start);
		/* Each partition's size is the next entry's start minus
		 * its own (possibility 2 from the comment above). */
		if (i)
			put_partition(state, slot++, start, next - start);
		start = next;
	}

	if (i != 0) {
		sector_t size;

		/* Last partition runs to the end of the device. */
		size = get_capacity(state->bdev->bd_disk);
		put_partition(state, slot++, start, size - start);
		strlcat(state->pp_buf, "\n", PAGE_SIZE);
	}

	return i ? 1 : 0;
}
#endif
gpl-2.0
denggww123/IMX6_DB_Kernel_3.0.35
arch/parisc/lib/memset.c
14243
2442
/* Copyright (C) 1991, 1997 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ /* Slight modifications for pa-risc linux - Paul Bame <bame@debian.org> */ #include <linux/types.h> #include <asm/string.h> #define OPSIZ (BITS_PER_LONG/8) typedef unsigned long op_t; void * memset (void *dstpp, int sc, size_t len) { unsigned int c = sc; long int dstp = (long int) dstpp; if (len >= 8) { size_t xlen; op_t cccc; cccc = (unsigned char) c; cccc |= cccc << 8; cccc |= cccc << 16; if (OPSIZ > 4) /* Do the shift in two steps to avoid warning if long has 32 bits. */ cccc |= (cccc << 16) << 16; /* There are at least some bytes to set. No need to test for LEN == 0 in this alignment loop. */ while (dstp % OPSIZ != 0) { ((unsigned char *) dstp)[0] = c; dstp += 1; len -= 1; } /* Write 8 `op_t' per iteration until less than 8 `op_t' remain. */ xlen = len / (OPSIZ * 8); while (xlen > 0) { ((op_t *) dstp)[0] = cccc; ((op_t *) dstp)[1] = cccc; ((op_t *) dstp)[2] = cccc; ((op_t *) dstp)[3] = cccc; ((op_t *) dstp)[4] = cccc; ((op_t *) dstp)[5] = cccc; ((op_t *) dstp)[6] = cccc; ((op_t *) dstp)[7] = cccc; dstp += 8 * OPSIZ; xlen -= 1; } len %= OPSIZ * 8; /* Write 1 `op_t' per iteration until less than OPSIZ bytes remain. 
*/ xlen = len / OPSIZ; while (xlen > 0) { ((op_t *) dstp)[0] = cccc; dstp += OPSIZ; xlen -= 1; } len %= OPSIZ; } /* Write the last few bytes. */ while (len > 0) { ((unsigned char *) dstp)[0] = c; dstp += 1; len -= 1; } return dstpp; }
gpl-2.0
garwynn/L900_3.8_Experiment
arch/parisc/lib/memset.c
14243
2442
/* Copyright (C) 1991, 1997 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ /* Slight modifications for pa-risc linux - Paul Bame <bame@debian.org> */ #include <linux/types.h> #include <asm/string.h> #define OPSIZ (BITS_PER_LONG/8) typedef unsigned long op_t; void * memset (void *dstpp, int sc, size_t len) { unsigned int c = sc; long int dstp = (long int) dstpp; if (len >= 8) { size_t xlen; op_t cccc; cccc = (unsigned char) c; cccc |= cccc << 8; cccc |= cccc << 16; if (OPSIZ > 4) /* Do the shift in two steps to avoid warning if long has 32 bits. */ cccc |= (cccc << 16) << 16; /* There are at least some bytes to set. No need to test for LEN == 0 in this alignment loop. */ while (dstp % OPSIZ != 0) { ((unsigned char *) dstp)[0] = c; dstp += 1; len -= 1; } /* Write 8 `op_t' per iteration until less than 8 `op_t' remain. */ xlen = len / (OPSIZ * 8); while (xlen > 0) { ((op_t *) dstp)[0] = cccc; ((op_t *) dstp)[1] = cccc; ((op_t *) dstp)[2] = cccc; ((op_t *) dstp)[3] = cccc; ((op_t *) dstp)[4] = cccc; ((op_t *) dstp)[5] = cccc; ((op_t *) dstp)[6] = cccc; ((op_t *) dstp)[7] = cccc; dstp += 8 * OPSIZ; xlen -= 1; } len %= OPSIZ * 8; /* Write 1 `op_t' per iteration until less than OPSIZ bytes remain. 
*/ xlen = len / OPSIZ; while (xlen > 0) { ((op_t *) dstp)[0] = cccc; dstp += OPSIZ; xlen -= 1; } len %= OPSIZ; } /* Write the last few bytes. */ while (len > 0) { ((unsigned char *) dstp)[0] = c; dstp += 1; len -= 1; } return dstpp; }
gpl-2.0
VanirAOSP/kernel_lge_g3
arch/arm/mvp/mvpkm/mvpkm_main.c
164
72878
/* * Linux 2.6.32 and later Kernel module for VMware MVP Hypervisor Support * * Copyright (C) 2010-2013 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; see the file COPYING. If not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #line 5 /** * @file * * @brief The kernel level driver. */ #define __KERNEL_SYSCALLS__ #include <linux/version.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/fcntl.h> #include <linux/syscalls.h> #include <linux/kmod.h> #include <linux/socket.h> #include <linux/net.h> #include <linux/skbuff.h> #include <linux/miscdevice.h> #include <linux/poll.h> #include <linux/smp.h> #include <linux/capability.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/sysfs.h> #include <linux/debugfs.h> #include <linux/pid.h> #include <linux/highmem.h> #include <linux/syscalls.h> #include <linux/swap.h> #ifdef CONFIG_HAS_WAKELOCK #include <linux/wakelock.h> #endif #include <net/sock.h> #include <asm/cacheflush.h> #include <asm/memory.h> #include <asm/pgtable.h> #include <asm/system.h> #include <linux/uaccess.h> #include "mvp.h" #include "mvp_version.h" #include "mvpkm_types.h" #include "mvpkm_private.h" #include "mvpkm_kernel.h" #include "actions.h" #include "wscalls.h" #include "arm_inline.h" #include "tsc.h" #include "mksck_kernel.h" 
#include "mmu_types.h" #include "mvp_timer.h" #include "qp.h" #include "qp_host_kernel.h" #include "cpufreq_kernel.h" #include "mvpkm_comm_ev.h" #ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER #include "mvp_balloon.h" #endif /* * Definition of the file operations */ static _Bool LockedListAdd(struct MvpkmVM *vm, __u32 mpn, __u32 order, PhysMem_RegionType forRegion); static _Bool LockedListDel(struct MvpkmVM *vm, __u32 mpn); static void LockedListUnlockAll(struct MvpkmVM *vm); static _Bool LockedListLookup(struct MvpkmVM *vm, __u32 mpn); static int SetupMonitor(struct MvpkmVM *vm); static int RunMonitor(struct MvpkmVM *vm); static MPN AllocZeroedFreePages(struct MvpkmVM *vm, uint32 order, _Bool highmem, PhysMem_RegionType forRegion, HKVA *hkvaRet); static HKVA MapWSPHKVA(struct MvpkmVM *vm, HkvaMapInfo *mapInfo); static void UnmapWSPHKVA(struct MvpkmVM *vm); static int MvpkmWaitForInt(struct MvpkmVM *vm, _Bool suspend); static void ReleaseVM(struct MvpkmVM *vm); /* * Mksck open request must come from this uid. It must be root until * it is set via an ioctl from mvpd. */ uid_t Mvpkm_vmwareUid; EXPORT_SYMBOL(Mvpkm_vmwareUid); gid_t Mvpkm_vmwareGid; EXPORT_SYMBOL(Mvpkm_vmwareGid); /* * Mvpd should copy the content of /sys/module/lowmemorykiller/parameters/adj * here, as we don't have access to these numbers within the kernel itself. * Note: Android uses 6 values, and we rely on this. */ static int lowmemAdjSize; static int lowmemAdj[6]; /* * vCPU cpu affinity to let monitor/guest run on some CPUs only (when possible) */ static DECLARE_BITMAP(vcpuAffinity, NR_CPUS); /* * Which CPUs are running a monitor ? */ struct cpumask inMonitor; /********************************************************************* * * Sysfs nodes * *********************************************************************/ /* * kobject for our sysfs representation, used for global nodes. */ static struct kobject *mvpkmKObj; /* * kobject for the balloon exports. 
*/ static struct kobject *balloonKObj; /** * @brief sysfs show function for global version attribute. * * @param kobj reference to kobj nested in MvpkmVM struct. * @param attr kobj_attribute reference, not used. * @param buf PAGE_SIZEd buffer to write to. * * @return number of characters printed (not including trailing null character). */ static ssize_t version_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, MVP_VERSION_FORMATSTR "\n", MVP_VERSION_FORMATARGS); } static struct kobj_attribute versionAttr = __ATTR_RO(version); /** * @brief sysfs show function for global background_pages attribute. * * Used by vmx balloon policy controller to gauge the amount of freeable * anonymous memory. * * @param kobj reference to kobj nested in MvpkmVM struct. * @param attr kobj_attribute reference, not used. * @param buf PAGE_SIZEd buffer to write to. * * @return number of characters printed (not including trailing null character). */ static ssize_t background_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { #ifndef CONFIG_ANDROID_LOW_MEMORY_KILLER return snprintf(buf, PAGE_SIZE, "0\n"); #else /* The HIDDEN_APP_MIN_ADJ value is the 5th in a list of 6 parameters. */ FATAL_IF(lowmemAdjSize != 6); return snprintf(buf, PAGE_SIZE, "%d\n", Balloon_AndroidBackgroundPages(lowmemAdj[4])); #endif } static struct kobj_attribute backgroundAttr = __ATTR_RO(background); /** * @brief sysfs show function to export the other_file calculation in * lowmemorykiller. * * It's helpful, in the balloon controller, to know what the lowmemorykiller * module is using to know when the system has crossed a minfree threshold. * Since there exists a number of different other_file calculations in various * lowmemorykiller patches (@see{MVP-1674}), and the module itself doesn't * provide a clean export of this figure, we provide it on a case-by-case basis * for the various supported hosts here. 
* * @param kobj reference to kobj nested in MvpkmVM struct. * @param attr kobj_attribute reference, not used. * @param buf PAGE_SIZEd buffer to write to. * * @return number of characters printed (not including trailing null character). */ static ssize_t other_file_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { int32 other_file = 0; #ifndef LOWMEMKILLER_VARIANT #define LOWMEMKILLER_VARIANT 0 #endif #ifndef LOWMEMKILLER_MD5 #define LOWMEMKILLER_MD5 0 #endif #ifndef LOWMEMKILLER_SHRINK_MD5 #define LOWMEMKILLER_SHRINK_MD5 0 #endif /* * The build system hashes the lowmemorykiller section related to the * other_file calculation in the kernel source for us, here we have to * provide the code. */ #if LOWMEMKILLER_VARIANT == 1 /* * This is the same as the non-exported global_reclaimable_pages() * when there is no swap. */ other_file = global_page_state(NR_ACTIVE_FILE) + global_page_state(NR_INACTIVE_FILE); #elif LOWMEMKILLER_VARIANT == 2 other_file = global_page_state(NR_FILE_PAGES); #elif LOWMEMKILLER_VARIANT == 3 other_file = global_page_state(NR_FILE_PAGES) - global_page_state(NR_SHMEM); #elif LOWMEMKILLER_VARIANT == 4 /* * Here free/file pages are fungible and max(free, file) isn't used, * but we can continue to use max(free, file) since * max(free, file) = other_file in this case. */ other_file = global_page_state(NR_FREE_PAGES) + global_page_state(NR_FILE_PAGES); #elif LOWMEMKILLER_VARIANT == 5 /* * other_free and other_file are modified depending on zone index or/and * memory offlining and compared to "lowmem_minfree[i] - zone_adj". */ other_file = global_page_state(NR_FILE_PAGES) - global_page_state(NR_SHMEM); #elif LOWMEMKILLER_VARIANT == 7 /* * other_file depends on total_swapcache_pages AND * other_free and other_file are modified depending on zone index or/and * memory offlining. 
*/ if (global_page_state(NR_SHMEM) + total_swapcache_pages < global_page_state(NR_FILE_PAGES)) other_file = global_page_state(NR_FILE_PAGES) - global_page_state(NR_SHMEM) - total_swapcache_pages; else other_file = 0; #elif defined(NONANDROID) /* * Non-Android host platforms don't have ballooning enabled. */ #else /* * If you get this message, you need to run 'make lowmem-info' and * inspect lowmemorykiller.c. If the "other_file = ..." calculation in * lowmem_shrink appears above, simply add the "Shrink#" to an existing * entry in lowmemkiller-variant.sh, pointing to the variant number * above. Otherwise, provide a new entry above and variant number, * with the appropriate other_file calculation and update * lowmemkiller-variant.sh accordingly. */ #warning "Unknown lowmemorykiller variant in hosted/module/mvpkm_main.c, " \ "falling back on default (see other_file_show for the remedy)" /* * Fall back on default - this may bias strangely for/against the host, * but nothing catastrophic should result. 
*/ other_file = global_page_state(NR_FILE_PAGES); #endif #define _STRINGIFY(x) (#x) #define STRINGIFY(x) _STRINGIFY(x) return snprintf(buf, PAGE_SIZE, "%d %d %s %s\n", other_file, LOWMEMKILLER_VARIANT, STRINGIFY(LOWMEMKILLER_MD5), STRINGIFY(LOWMEMKILLER_SHRINK_MD5)); #undef _STRINGIFY #undef STRINGIFY } static struct kobj_attribute otherFileAttr = __ATTR_RO(other_file); /********************************************************************* * * Debugfs nodes * *********************************************************************/ static struct dentry *mvpDebugDentry; /** * @brief debugfs show function for global inMonitor * @param m seq_file reference * @param private ignored * @return 0 for success */ static int InMonitorShow(struct seq_file *m, void *private) { seq_bitmap_list(m, cpumask_bits(&inMonitor), nr_cpumask_bits); seq_puts(m, "\n"); return 0; } /** * @brief debugfs open function for global inMonitor * @param inode inode * @param file file * @return result of single_open */ static int InMonitorOpen(struct inode *inode, struct file *file) { return single_open(file, InMonitorShow, NULL); } static const struct file_operations inMonitorFops = { .open = InMonitorOpen, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* * kset for our sysfs representation, used for per-VM nodes. */ static struct kset *mvpkmKSet; static ssize_t MvpkmAttrShow(struct kobject *kobj, struct attribute *attr, char *buf); static ssize_t MvpkmAttrStore(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count); static void MvpkmKObjRelease(struct kobject *kobj) __attribute__((optimize("-fomit-frame-pointer"))); /** * @brief Releases the vm structure containing the kobject. * * @param kobj the vm's kobject. */ static void MvpkmKObjRelease(struct kobject *kobj) { struct MvpkmVM *vm = container_of(kobj, struct MvpkmVM, kobj); ReleaseVM(vm); module_put(THIS_MODULE); } /** * @name mvpkm ktype attribute structures for locked_pages. 
* * @{ */ static const struct sysfs_ops mvpkmSysfsOps = { .show = MvpkmAttrShow, .store = MvpkmAttrStore }; static struct attribute mvpkmLockedPagesAttr = { .name = "locked_pages", .mode = 0444, }; static struct attribute mvpkmBalloonWatchdogAttr = { .name = "balloon_watchdog", .mode = 0444 }; static struct attribute mvpkmMonitorAttr = { .name = "monitor", .mode = 0400, }; static struct attribute *mvpkmDefaultAttrs[] = { &mvpkmLockedPagesAttr, &mvpkmBalloonWatchdogAttr, &mvpkmMonitorAttr, NULL, }; static struct kobj_type mvpkmKType = { .sysfs_ops = &mvpkmSysfsOps, .release = MvpkmKObjRelease, .default_attrs = mvpkmDefaultAttrs, }; /*@}*/ /* * As it is not very common for host kernels to have SYS_HYPERVISOR enabled and * you have to "hack" a Kconfig file to enable it, just include the * functionality inline if it is not enabled. */ #ifndef CONFIG_SYS_HYPERVISOR struct kobject *hypervisor_kobj; EXPORT_SYMBOL_GPL(hypervisor_kobj); #endif /* * kobject and kset utilities. */ extern struct kobject *kset_find_obj(struct kset *, const char *) __attribute__((weak)); /** * @brief Finds a kobject in a kset. The actual implementation is copied from * kernel source in lib/kobject.c. Although the symbol is extern-declared, * it is not EXPORT_SYMBOL-ed. We use a weak reference in case the symbol * might be exported in future kernel versions. * * @param kset set to search. * @param name object name. * * @return retained kobject if found, NULL otherwise. */ struct kobject * kset_find_obj(struct kset *kset, const char *name) { struct kobject *k; struct kobject *ret = NULL; spin_lock(&kset->list_lock); list_for_each_entry(k, &kset->list, entry) { if (kobject_name(k) && !strcmp(kobject_name(k), name)) { ret = kobject_get(k); break; } } spin_unlock(&kset->list_lock); return ret; } /** * @brief Finds one of the VM's pre-defined ksets. * * @param vmID a VM ID. * @param name name of one of the VM's pre-defined ksets. * * @return retained kset if found, NULL otherwise. 
*/ struct kset * Mvpkm_FindVMNamedKSet(int vmID, const char *name) { struct MvpkmVM *vm; struct kobject *kobj; char vmName[32] = {}; /* Large enough for externally-formatted int32. */ struct kset *res = NULL; if (!mvpkmKSet) return NULL; snprintf(vmName, sizeof(vmName), "%d", vmID); /* Always null-terminate, no overflow. */ vmName[sizeof(vmName) - 1] = '\0'; kobj = kset_find_obj(mvpkmKSet, vmName); if (!kobj) return NULL; vm = container_of(kobj, struct MvpkmVM, kobj); if (!strcmp(name, "devices")) res = kset_get(vm->devicesKSet); else if (!strcmp(name, "misc")) res = kset_get(vm->miscKSet); kobject_put(kobj); return res; } EXPORT_SYMBOL(Mvpkm_FindVMNamedKSet); /********************************************************************* * * Standard Linux miscellaneous device registration * *********************************************************************/ MODULE_LICENSE("GPL"); /* for kallsyms_lookup_name */ static int MvpkmFault(struct vm_area_struct *vma, struct vm_fault *vmf); /** * @brief Linux vma operations for /dev/mem-like kernel module mmap. We * enforce the restriction that only MPNs that have been allocated * to the opened VM may be mapped and also increment the reference * count (via vm_insert_page), so that even if the memory is later * freed by the VM, host process vma's containing the MPN can't * compromise the system. * * However, only trusted host processes (e.g. the vmx) should be allowed * to use this interface, since you can mmap the monitor's code/data/ * page tables etc. with it. Untrusted host processes are limited to * typed messages for sharing memory with the monitor. Unix file system * access permissions are the intended method of restricting access. * Unfortunately, today _any_ host process utilizing Mksck requires * access to mvpkm to setup its Mksck pages and obtain socket info via * ioctls - we probably should be exporting two devices, one for trusted * and one for arbitrary host processes to avoid this confusion of * concerns. 
*/ static struct vm_operations_struct mvpkmVMOps = { .fault = MvpkmFault }; /* * Generic kernel module file ops. These functions will be registered * at the time the kernel module is loaded. */ static long MvpkmUnlockedIoctl(struct file *filep, unsigned int cmd, unsigned long arg); static int MvpkmOpen(struct inode *inode, struct file *filp); static int MvpkmRelease(struct inode *inode, struct file *filp); static int MvpkmMMap(struct file *filp, struct vm_area_struct *vma); /** * @brief the file_operation structure contains the callback functions * that are registered with Linux to handle file operations on * the mvpkm device. * * The structure contains other members that the mvpkm device * does not use. Those members are auto-initialized to NULL. * * WARNING, this structure has changed after Linux kernel 2.6.19: * readv/writev are changed to aio_read/aio_write (neither is used here). */ static const struct file_operations mvpkmFileOps = { .owner = THIS_MODULE, .unlocked_ioctl = MvpkmUnlockedIoctl, .open = MvpkmOpen, .release = MvpkmRelease, .mmap = MvpkmMMap }; /** * @brief The mvpkm device identifying information to be used to register * the device with the Linux kernel. */ static struct miscdevice mvpkmDev = { .minor = 165, .name = "mvpkm", .fops = &mvpkmFileOps }; /** * Mvpkm is loaded by mvpd and only mvpd will be allowed to open * it. There is a very simple way to verify that: record the process * id (thread group id) at the time the module is loaded and test it * at the time the module is opened. */ static struct pid *initTgid; #ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER /** * @name Slab shrinker for triggering balloon adjustment. * * @note shrinker us used as a trigger for guest balloon. 
* * @{ */ static int MvpkmShrink(struct shrinker *this, struct shrink_control *sc); static struct shrinker mvpkmShrinker = { .shrink = MvpkmShrink, .seeks = DEFAULT_SEEKS }; /*@}*/ #endif module_param_array(vcpuAffinity, ulong, NULL, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(vcpuAffinity, "vCPU affinity"); /** * @brief Initialize the mvpkm device, register it with the Linux kernel. * * @return A zero is returned on success and a negative errno code for failure. * (Same as the return policy of misc_register(9).) */ static int __init MvpkmInit(void) { int err = 0; _Bool mksckInited = false; _Bool cpuFreqInited = false; pr_info("Mvpkm: " MVP_VERSION_FORMATSTR "\n", MVP_VERSION_FORMATARGS); pr_info("Mvpkm: started from process %s tgid=%d, pid=%d\n", current->comm, task_tgid_vnr(current), task_pid_vnr(current)); if (bitmap_empty(vcpuAffinity, nr_cpumask_bits)) bitmap_copy(vcpuAffinity, cpumask_bits(cpu_possible_mask), nr_cpumask_bits); err = misc_register(&mvpkmDev); if (err) return -ENOENT; err = Mksck_Init(); if (err) goto error; else mksckInited = true; mksckInited = true; QP_HostInit(); CpuFreq_Init(); cpuFreqInited = true; /* * Reference mvpd (module loader) tgid struct, so that we can avoid * attacks based on pid number wraparound. 
*/ initTgid = get_pid(task_tgid(current)); #ifndef CONFIG_SYS_HYPERVISOR hypervisor_kobj = kobject_create_and_add("hypervisor", NULL); if (!hypervisor_kobj) { err = -ENOMEM; goto error; } #endif mvpkmKObj = kobject_create_and_add("mvp", hypervisor_kobj); if (!mvpkmKObj) { err = -ENOMEM; goto error; } balloonKObj = kobject_create_and_add("lowmem", mvpkmKObj); if (!balloonKObj) { err = -ENOMEM; goto error; } mvpkmKSet = kset_create_and_add("vm", NULL, mvpkmKObj); if (!mvpkmKSet) { err = -ENOMEM; goto error; } err = sysfs_create_file(mvpkmKObj, &versionAttr.attr); if (err) goto error; err = sysfs_create_file(balloonKObj, &backgroundAttr.attr); if (err) goto error; err = sysfs_create_file(balloonKObj, &otherFileAttr.attr); if (err) goto error; #ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER register_shrinker(&mvpkmShrinker); #endif /* Create /sys/kernel/debug/mvp for debufs nodes */ mvpDebugDentry = debugfs_create_dir("mvp", NULL); if (mvpDebugDentry) { debugfs_create_file("inMonitor", S_IRUGO, mvpDebugDentry, NULL, &inMonitorFops); MksckPageInfo_Init(mvpDebugDentry); } return 0; error: if (mvpkmKSet) kset_unregister(mvpkmKSet); if (balloonKObj) { kobject_del(balloonKObj); kobject_put(balloonKObj); } if (mvpkmKObj) { kobject_del(mvpkmKObj); kobject_put(mvpkmKObj); } #ifndef CONFIG_SYS_HYPERVISOR if (hypervisor_kobj) { kobject_del(hypervisor_kobj); kobject_put(hypervisor_kobj); } #endif if (cpuFreqInited) CpuFreq_Exit(); if (mksckInited) Mksck_Exit(); if (initTgid) put_pid(initTgid); misc_deregister(&mvpkmDev); return err; } /** * @brief De-register the mvpkm device with the Linux kernel. 
*/
void
MvpkmExit(void)
{
	PRINTK("MvpkmExit called !\n");

	if (mvpDebugDentry)
		debugfs_remove_recursive(mvpDebugDentry);

#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER
	unregister_shrinker(&mvpkmShrinker);
#endif

	/*
	 * Tear down the sysfs hierarchy in reverse order of its creation
	 * in MvpkmInit: the per-VM kset first, then the lowmem and mvp
	 * kobjects, then (if we created it ourselves) hypervisor_kobj.
	 */
	kset_unregister(mvpkmKSet);
	kobject_del(balloonKObj);
	kobject_put(balloonKObj);
	kobject_del(mvpkmKObj);
	kobject_put(mvpkmKObj);
#ifndef CONFIG_SYS_HYPERVISOR
	kobject_del(hypervisor_kobj);
	kobject_put(hypervisor_kobj);
#endif

	CpuFreq_Exit();
	Mksck_Exit();
	/* Drop the mvpd tgid reference taken at module load time. */
	put_pid(initTgid);
	misc_deregister(&mvpkmDev);
}

/*
 * The standard module registration macros of Linux.
 */
module_init(MvpkmInit);
module_exit(MvpkmExit);

/* Userspace copy of the low-memory-killer adj thresholds (see lowmemAdj). */
module_param_array_named(lowmemAdj, lowmemAdj, int, &lowmemAdjSize,
			 S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(lowmemAdj,
		 "copy of /sys/module/lowmemorykiller/parameters/adj");

#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER
/**
 * @brief Balloon watchdog timeout callback.
 *
 * Terminate the VM since it's not responsive.
 *
 * @param data vm reference representation (cast back to struct MvpkmVM *).
 */
static void
WatchdogCB(unsigned long data)
{
	struct MvpkmVM *vm = (struct MvpkmVM *)data;

	pr_err("Balloon watchdog expired (%d s)!\n",
	       BALLOON_WATCHDOG_TIMEOUT_SECS);

	/* Record the expiry, then force the guest out with an abort action. */
	vm->watchdogTriggered = true;
	Mvpkm_WakeGuest(vm, ACTION_ABORT);
}

/**
 * @brief Slab shrinker.
 *
 * Called by Linux kernel when we're under memory pressure. We treat all locked
 * pages as a slab for this purpose, similar to the Android low memory killer.
 *
 * @param this     reference to registered shrinker for callback context.
 * @param nrToScan number of entries to scan. If 0 then just return the number
 *                 of present entries. We ignore the value of nrToScan when > 1
 *                 since the shrinker is a trigger to readjust guest balloons,
 *                 where the actual balloon size is determined in conjunction
 *                 with the guest.
 * @param gfpMask  ignored.
 *
 * @return number of locked pages.
*/
static int
MvpkmShrink(struct shrinker *this, struct shrink_control *sc)
{
	uint32 locked = 0;
	struct kobject *k;
	int nrToScan = sc->nr_to_scan;

	/*
	 * Walk every VM kobject registered in the global kset, totalling
	 * their pinned pages.  The kset spinlock keeps the list stable
	 * while we iterate.
	 */
	spin_lock(&mvpkmKSet->list_lock);
	list_for_each_entry(k, &mvpkmKSet->list, entry) {
		struct MvpkmVM *vm = container_of(k, struct MvpkmVM, kobj);

		locked += ATOMIC_GETO(vm->usedPages);

		/*
		 * Try and grab the WSP semaphore - if we fail, we must be
		 * VM setup or teardown, no point trying to wake the guest.
		 */
		if (nrToScan > 0 && down_read_trylock(&vm->wspSem)) {
			if (vm->wsp) {
				/*
				 * Balloon watchdog.
				 * We start the timer before waking up the
				 * guest to avoid races in case of immediate
				 * descheduling.
				 */
				if (vm->balloonWDEnabled) {
					struct timer_list *t =
						&vm->balloonWDTimer;

					/* Don't re-arm if already pending. */
					if (!timer_pending(t)) {
						t->data = (unsigned long)vm;
						t->function = WatchdogCB;
						t->expires = jiffies +
							BALLOON_WATCHDOG_TIMEOUT_SECS
							* HZ;
						add_timer(t);
					}
				}

				Mvpkm_WakeGuest(vm, ACTION_BALLOON);
			}
			up_read(&vm->wspSem);
		}
	}
	spin_unlock(&mvpkmKSet->list_lock);

	return locked;
}
#endif

/**
 * @brief The open file operation. Initializes the vm specific structure.
*/
int
MvpkmOpen(struct inode *inode, struct file *filp)
{
	struct MvpkmVM *vm;

	/*
	 * Only the process that loaded the module (mvpd, recorded in
	 * initTgid at init time) is allowed to open the device.
	 */
	if (initTgid != task_tgid(current)) {
		pr_err("%s: MVPKM can be opened only from MVPD (process %d).\n",
		       __func__, pid_vnr(initTgid));
		return -EPERM;
	}

	pr_debug("%s: Allocating an MvpkmVM structure from process %s tgid=%d, pid=%d\n",
		 __func__, current->comm, task_tgid_vnr(current),
		 task_pid_vnr(current));

	vm = kmalloc(sizeof(struct MvpkmVM), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	/* Zero-fill before initialising the embedded locks and timer. */
	memset(vm, 0, sizeof(*vm));

	init_timer(&vm->balloonWDTimer);
	init_rwsem(&vm->lockedSem);
	init_rwsem(&vm->wspSem);
	init_rwsem(&vm->monThreadTaskSem);
	vm->monThreadTask = NULL;
	vm->isMonitorInited = false;

	/* The per-open VM state travels with the file handle. */
	filp->private_data = vm;

	/*
	 * First opener's credentials become the trusted vmware uid/gid
	 * (only set once; subsequent opens leave them untouched).
	 */
	if (!Mvpkm_vmwareUid)
		current_uid_gid(&Mvpkm_vmwareUid, &Mvpkm_vmwareGid);

	return 0;
}

/**
 * @brief Releases a VMs resources
 *
 * @param vm vm to release
 */
static void
ReleaseVM(struct MvpkmVM *vm)
{
	/*
	 * Delete balloon watchdog timer. We are already out of VM kset,
	 * so there is no race with shrink callback.
	 */
	del_timer_sync(&vm->balloonWDTimer);

	down_write(&vm->wspSem);

	if (vm->isMonitorInited) {
		MonitorTimer_Request(&vm->monTimer, 0);
		Mksck_WspRelease(vm->wsp);
		vm->wsp = NULL;

#ifdef CONFIG_HAS_WAKELOCK
		/*
		 * Destroy wakelock after WSP is released (and MksckPage
		 * detached).
		 */
		wake_lock_destroy(&vm->wakeLock);
#endif
	}

	up_write(&vm->wspSem);

	LockedListUnlockAll(vm);
	UnmapWSPHKVA(vm);

	/*
	 * All sockets potentially connected to sockets of this vm's vmId
	 * will fail at send now. DGRAM sockets are not required to tear
	 * down connection explicitly.
	 */
	kfree(vm);
}

/**
 * @brief The release file operation. Releases the vm specific
 *        structure including all the locked pages.
* * @param inode Unused * @param filp which VM we're dealing with * @return 0 */ int MvpkmRelease(struct inode *inode, struct file *filp) { struct MvpkmVM *vm = filp->private_data; /* * Tear down any queue pairs associated with this VM */ if (vm->isMonitorInited) { ASSERT(vm->wsp); QP_DetachAll(vm->wsp->guestId); } /* * Release the VM's ksets. */ kset_unregister(vm->miscKSet); kset_unregister(vm->devicesKSet); if (vm->haveKObj) { /* * Release the VM's kobject. * 'vm' will be kfree-d in its kobject's release function. */ kobject_del(&vm->kobj); kobject_put(&vm->kobj); } else { ReleaseVM(vm); } filp->private_data = NULL; pr_info("%s: Released MvpkmVM structure from process %s tgid=%d, pid=%d\n", __func__, current->comm, task_tgid_vnr(current), task_pid_vnr(current)); return 0; } /** * @brief Page fault handler for /dev/mem-like regions (see mvpkmVMOps * block comment). */ static int MvpkmFault(struct vm_area_struct *vma, struct vm_fault *vmf) { unsigned long address = (unsigned long)vmf->virtual_address; MPN mpn = vmf->pgoff; struct MvpkmVM *vm = vma->vm_file->private_data; /* * Only insert pages belonging to the VM. The check is slow, O(n) in the * number of MPNs associated with the VM, but it doesn't matter - the * mmap interface should only be used by trusted processes at * initialization time and for debugging. * * The mpn can be either in the memory reserved the monitor or mvpd * through the regular mechanisms or it could be a mksck page. 
*/ if (!pfn_valid(mpn)) { pr_err("MvpkmMMap: Failed to insert %x @ %lx, mpn invalid\n", mpn, address); } else if (LockedListLookup(vm, mpn)) { if (vm_insert_page(vma, address, pfn_to_page(mpn)) == 0) return VM_FAULT_NOPAGE; pr_err("MvpkmMMap: Failed to insert %x @ %lx\n", mpn, address); } else if (MksckPage_LookupAndInsertPage(vma, address, mpn) == 0) { return VM_FAULT_NOPAGE; } if (vm->stubPageMPN) { if (vm_insert_page(vma, address, pfn_to_page(vm->stubPageMPN)) == 0) { pr_info("MvpkmMMap: mapped the stub page at %x @ %lx\n", mpn, address); return VM_FAULT_NOPAGE; } pr_err("MvpkmMMap: Could not insert stub page %x @ %lx\n", mpn, address); } return VM_FAULT_SIGBUS; } /** * @brief sysfs show function for per-VM locked_pages attribute. * * @param kobj reference to kobj nested in MvpkmVM struct. * @param attr attribute reference. * @param buf PAGE_SIZEd buffer to write to. * * @return number of characters printed (not including trailing null character). */ static ssize_t MvpkmAttrShow(struct kobject *kobj, struct attribute *attr, char *buf) { if (attr == &mvpkmLockedPagesAttr) { struct MvpkmVM *vm = container_of(kobj, struct MvpkmVM, kobj); return snprintf(buf, PAGE_SIZE, "%d\n", ATOMIC_GETO(vm->usedPages)); } else if (attr == &mvpkmMonitorAttr) { struct MvpkmVM *vm = container_of(kobj, struct MvpkmVM, kobj); return snprintf(buf, PAGE_SIZE, "hostActions %x callno %d\n", ATOMIC_GETO(vm->wsp->hostActions), WSP_Params(vm->wsp)->callno); } else if (attr == &mvpkmBalloonWatchdogAttr) { struct MvpkmVM *vm = container_of(kobj, struct MvpkmVM, kobj); /* * Enable balloon watchdog on first read. This includes all * ballooning capable guest. */ vm->balloonWDEnabled = true; del_timer_sync(&vm->balloonWDTimer); buf[0] = 1; return 1; } else { return -EPERM; } } /** * @brief sysfs store function for per-VM locked_pages attribute. * * @param kobj reference to kobj nested in MvpkmVM struct. * @param attr attribute reference. * @param buf PAGE_SIZEd buffer to write to. 
* @param buf input buffer. * @param count input buffer length. * * @return number of bytes consumed or negative error code. */ static ssize_t MvpkmAttrStore(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { return -EPERM; } /** * @brief Map machine address space region into host process. * * @param filp file reference (ignored). * @param vma Linux virtual memory area defining the region. * * @return 0 on success, otherwise error code. */ static int MvpkmMMap(struct file *filp, struct vm_area_struct *vma) { vma->vm_ops = &mvpkmVMOps; return 0; } #ifdef CONFIG_ARM_LPAE /** * @brief Determine host cacheability/shareability attributes. * * Used to ensure monitor/guest shared mappings are consistent with * those of host user/kernel. * * @param[out] attribMAN when setting up the HW monitor this provides the * attributes in the generic ARM_MemAttrNormal form, * suitable for configuring the monitor and guest's * [H]MAIR0 and setting the shareability attributes of * the LPAE descriptors. */ static void DetermineMemAttrLPAE(ARM_MemAttrNormal *attribMAN) { /* * We use set_pte_ext to sample what {S,TEX,CB} bits Linux is using for * normal kernel/user L2D mappings. These bits should be consistent both * with each other and what we use in the monitor since we share various * pages with both host processes, the kernel module and monitor, and * the ARM ARM requires that synonyms have the same cacheability * attributes, see end of A3.5.{4,7} ARM DDI 0406A. 
*/ HKVA hkva = __get_free_pages(GFP_KERNEL, 0); ARM_LPAE_L3D *pt = (ARM_LPAE_L3D *)hkva; ARM_LPAE_L3D *kernL3D = &pt[0], *userL3D = &pt[1]; uint32 attr, mair0, mair1; set_pte_ext((pte_t *)kernL3D, pfn_pte(0, PAGE_KERNEL), 0); set_pte_ext((pte_t *)userL3D, pfn_pte(0, PAGE_NONE), 0); pr_info("DetermineMemAttr: Kernel L3D AttrIndx=%x SH=%x\n", kernL3D->blockS1.attrIndx, kernL3D->blockS1.sh); pr_info("DetermineMemAttr: User L3D AttrIndx=%x SH=%x\n", userL3D->blockS1.attrIndx, userL3D->blockS1.sh); ASSERT(kernL3D->blockS1.attrIndx == userL3D->blockS1.attrIndx); ASSERT(kernL3D->blockS1.sh == userL3D->blockS1.sh); switch (kernL3D->blockS1.sh) { case 0: attribMAN->share = ARM_SHARE_ATTR_NONE; break; case 2: attribMAN->share = ARM_SHARE_ATTR_OUTER; break; case 3: attribMAN->share = ARM_SHARE_ATTR_INNER; break; default: FATAL(); } ARM_MRC_CP15(MAIR0, mair0); ARM_MRC_CP15(MAIR1, mair1); attr = MVP_EXTRACT_FIELD(kernL3D->blockS1.attrIndx >= 4 ? mair1 : mair0, 8 * (kernL3D->blockS1.attrIndx % 4), 8); /* * See B4-1615 ARM DDI 0406C-2c for magic. */ #define MAIR_ATTR_2_CACHE_ATTR(x, y) \ do { \ switch (x) { \ case 2: \ (y) = ARM_CACHE_ATTR_NORMAL_WT; \ break; \ case 3: \ (y) = ARM_CACHE_ATTR_NORMAL_WB; \ break; \ default: \ FATAL(); \ } \ } while (0) MAIR_ATTR_2_CACHE_ATTR(MVP_EXTRACT_FIELD(attr, 2, 2), attribMAN->innerCache); MAIR_ATTR_2_CACHE_ATTR(MVP_EXTRACT_FIELD(attr, 6, 2), attribMAN->outerCache); #undef MAIR_ATTR_2_CACHE_ATTR pr_info("DetermineMemAttr: innerCache %x outerCache %x share %x\n", attribMAN->innerCache, attribMAN->outerCache, attribMAN->share); free_pages(hkva, 0); } #else /** * @brief Determine host cacheability/shareability attributes. * * Used to ensure monitor/guest shared mappings are consistent with * those of host user/kernel. * * @param[out] attribL2D when setting up the LPV monitor a template L2D * containing cacheability attributes {S, TEX,CB} used by * host kernel for normal memory mappings. 
These may be * used directly for monitor/guest mappings, since both * worlds share a common {TRE, PRRR, NMRR}. * @param[out] attribMAN when setting up TTBR0 in the LPV monitor and the page * tables for the HW monitor this provides the attributes * in the generic ARM_MemAttrNormal form, suitable for * configuring TTBR0 + the monitor and guest's [H]MAIR0 * and setting the shareability attributes of the LPAE * descriptors. */ static void DetermineMemAttrNonLPAE(ARM_L2D *attribL2D, ARM_MemAttrNormal *attribMAN) { /* * We use set_pte_ext to sample what {S,TEX,CB} bits Linux is using for * normal kernel/user L2D mappings. These bits should be consistent both * with each other and what we use in the monitor since we share various * pages with both host processes, the kernel module and monitor, and * the ARM ARM requires that synonyms have the same cacheability * attributes, see end of A3.5.{4,7} ARM DDI 0406A. */ HKVA hkva = __get_free_pages(GFP_KERNEL, 0); uint32 sctlr; ARM_L2D *pt = (ARM_L2D *)hkva; ARM_L2D *kernL2D = &pt[0], *userL2D = &pt[1]; /* * Linux 2.6.38 switched the order of Linux vs hardware page tables. * See mainline d30e45eeabefadc6039d7f876a59e5f5f6cb11c6. */ const uint32 set_pte_ext_offset = 0; set_pte_ext((pte_t *)(kernL2D + set_pte_ext_offset/sizeof(ARM_L2D)), pfn_pte(0, PAGE_KERNEL), 0); set_pte_ext((pte_t *)(userL2D + set_pte_ext_offset/sizeof(ARM_L2D)), pfn_pte(0, PAGE_NONE), 0); /* * Linux 2.6.38 switched the order of Linux vs hardware page tables. * See mainline d30e45eeabefadc6039d7f876a59e5f5f6cb11c6. 
*/ kernL2D += 2048/sizeof(ARM_L2D); userL2D += 2048/sizeof(ARM_L2D); pr_info("DetermineMemAttr: Kernel L2D TEX=%x CB=%x S=%x\n", kernL2D->small.tex, kernL2D->small.cb, kernL2D->small.s); pr_info("DetermineMemAttr: User L2D TEX=%x CB=%x S=%x\n", userL2D->small.tex, userL2D->small.cb, userL2D->small.s); ASSERT((kernL2D->small.tex & 1) == (userL2D->small.tex & 1)); ASSERT(kernL2D->small.cb == userL2D->small.cb); ASSERT(kernL2D->small.s == userL2D->small.s); *attribL2D = *kernL2D; /* * We now decode TEX remap and obtain the more generic form for use in * the LPV monitor's TTBR0 initialization and the HW monitor. */ ARM_MRC_CP15(CONTROL_REGISTER, sctlr); if (sctlr & ARM_CP15_CNTL_TRE) { uint32 prrr, nmrr, indx, type; uint32 innerCache, outerCache, outerShare, share; pr_info("DetermineMemAttr: TEX remapping enabled\n"); ARM_MRC_CP15(PRIMARY_REGION_REMAP, prrr); ARM_MRC_CP15(NORMAL_MEMORY_REMAP, nmrr); pr_info("DetermineMemAttr: PRRR=%x NMRR=%x\n", prrr, nmrr); /* * Decode PRRR/NMRR below. See B3.7 ARM DDI 0406B for register * encodings, tables and magic numbers. */ indx = (MVP_BIT(kernL2D->small.tex, 0) << 2) | kernL2D->small.cb; /* * Only normal memory makes sense here. */ type = MVP_EXTRACT_FIELD(prrr, 2 * indx, 2); ASSERT(type == 2); innerCache = MVP_EXTRACT_FIELD(nmrr, 2 * indx, 2); outerCache = MVP_EXTRACT_FIELD(nmrr, 16 + 2 * indx, 2); outerShare = !MVP_BIT(prrr, 24 + indx); share = MVP_BIT(prrr, 18 + kernL2D->small.s); pr_info("DetermineMemAttr: type %x innerCache %x outerCache %x"\ " share %x outerShare %x\n", type, innerCache, outerCache, share, outerShare); if (share) { if (outerShare) attribMAN->share = ARM_SHARE_ATTR_OUTER; else attribMAN->share = ARM_SHARE_ATTR_INNER; } else { attribMAN->share = ARM_SHARE_ATTR_NONE; } attribMAN->innerCache = innerCache; attribMAN->outerCache = outerCache; } else { NOT_IMPLEMENTED_JIRA(1849); } free_pages(hkva, 0); } #endif /** * @brief The ioctl file operation. 
* * The ioctl command is the main communication method between the * vmx and the mvpkm kernel module. * * @param filp which VM we're dealing with * @param cmd select which cmd function needs to be performed * @param arg argument for command * @return error code, 0 on success */ long MvpkmUnlockedIoctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct MvpkmVM *vm = filp->private_data; int retval = 0; switch (cmd) { case MVPKM_DISABLE_FAULT: if (!vm->stubPageMPN) { uint32 *ptr; vm->stubPageMPN = AllocZeroedFreePages(vm, 0, false, MEMREGION_MAINMEM, (HKVA *)&ptr); if (!vm->stubPageMPN) break; ptr[0] = MVPKM_STUBPAGE_BEG; ptr[PAGE_SIZE/sizeof(uint32) - 1] = MVPKM_STUBPAGE_END; } break; /* * Allocate some pinned pages from kernel. * Returns -ENOMEM if no host pages available for allocation. */ case MVPKM_LOCK_MPN: { struct MvpkmLockMPN buf; if (copy_from_user(&buf, (void *)arg, sizeof(buf))) return -EFAULT; buf.mpn = AllocZeroedFreePages(vm, buf.order, false, buf.forRegion, NULL); if (buf.mpn == 0) return -ENOMEM; if (copy_to_user((void *)arg, &buf, sizeof(buf))) return -EFAULT; break; } case MVPKM_UNLOCK_MPN: { struct MvpkmLockMPN buf; if (copy_from_user(&buf, (void *)arg, sizeof(buf))) return -EFAULT; if (!LockedListDel(vm, buf.mpn)) return -EINVAL; break; } case MVPKM_MAP_WSPHKVA: { MvpkmMapHKVA mvpkmMapInfo; HkvaMapInfo mapInfo[WSP_PAGE_COUNT]; if (copy_from_user(&mvpkmMapInfo, (void *)arg, sizeof(mvpkmMapInfo))) return -EFAULT; if (copy_from_user(mapInfo, (void *)mvpkmMapInfo.mapInfo, sizeof(mapInfo))) return -EFAULT; mvpkmMapInfo.hkva = MapWSPHKVA(vm, mapInfo); BUG_ON(mvpkmMapInfo.hkva == 0); if (mvpkmMapInfo.forRegion == MEMREGION_WSP) vm->wsp = (WorldSwitchPage *) mvpkmMapInfo.hkva; if (copy_to_user((void *)arg, &mvpkmMapInfo, sizeof(mvpkmMapInfo))) return -EFAULT; break; } case MVPKM_RUN_MONITOR: if (!vm->isMonitorInited) vm->isMonitorInited = ((retval = SetupMonitor(vm)) == 0); if (vm->isMonitorInited) retval = RunMonitor(vm); break; case 
MVPKM_ABORT_MONITOR: if (!vm->isMonitorInited) return -EINVAL; ASSERT(vm->wsp != NULL); pr_err("MvpkmIoctl: Aborting monitor.\n"); Mvpkm_WakeGuest(vm, ACTION_ABORT); break; case MVPKM_CPU_INFO: { struct MvpkmCpuInfo buf; uint32 mpidr; #ifdef CONFIG_ARM_LPAE DetermineMemAttrLPAE(&buf.attribMAN); /** * We need to add support to the LPV monitor for LPAE page * tables if we want to use it on a LPAE host, due to the * costs involved in transitioning between LPAE and non-LPAE * page tables without Hyp assistance. * * @knownjira{MVP-2184} */ buf.attribL2D.u = 0; #else DetermineMemAttrNonLPAE(&buf.attribL2D, &buf.attribMAN); #endif /* * Are MP extensions implemented? * See B4-1618 ARM DDI 0406C-2c for magic. */ ARM_MRC_CP15(MPIDR, mpidr); buf.mpExt = mpidr & ARM_CP15_MPIDR_MP; if (copy_to_user((int *)arg, &buf, sizeof(struct MvpkmCpuInfo))) retval = -EFAULT; break; } default: retval = -EINVAL; break; } PRINTK("Returning from IOCTL(%d) retval = %d %s\n", cmd, retval, signal_pending(current) ? "(pending signal)" : ""); return retval; } /********************************************************************* * * Locked page management * *********************************************************************/ /* * Pages locked by the kernel module are remembered so an unlockAll * operation can be performed when the vmm is closed. The locked page * identifiers are stored in a red-black tree to support O(log n) * removal and search (required for /dev/mem-like mmap). */ /** * @brief Descriptor of a locked page range */ struct LockedPage { struct { __u32 mpn:20; /**< MPN. */ __u32 order:6; /**< Size/alignment exponent for page. */ __u32 forRegion:6; /**< Annotate/identify guest page alloc. */ } page; struct rb_node rb; }; static void FreeLockedPages(struct LockedPage *lp); /** * @brief Search for an mpn inside a RB tree of LockedPages. The mpn * will match a LockedPage as long as it is covered by the * entry, i.e. in a non-zero order entry it doesn't have to be * the base MPN. 
* * This must be called with the relevant vm->lockedSem held. * * @param root RB tree root. * @param mpn MPN to search for. * * @return reference to LockedPage entry if found, otherwise NULL. */ static struct LockedPage * LockedListSearch(struct rb_root *root, __u32 mpn) { struct rb_node *n = root->rb_node; while (n) { struct LockedPage *lp = rb_entry(n, struct LockedPage, rb); if (lp->page.mpn == (mpn & (~0UL << lp->page.order))) return lp; if (mpn < lp->page.mpn) n = n->rb_left; else n = n->rb_right; } return NULL; } /** * @brief Delete an mpn from the list of locked pages. * * @param vm Mvpkm module control structure pointer * @param mpn MPN to be unlocked and freed for reuse * @return true if list contained MPN and it was deleted from list */ static _Bool LockedListDel(struct MvpkmVM *vm, __u32 mpn) { struct LockedPage *lp; down_write(&vm->lockedSem); lp = LockedListSearch(&vm->lockedRoot, mpn); /* * The MPN should be in the locked pages RB tree and it should be the * base of an entry, i.e. we can't fragment existing allocations for * a VM. */ if (lp == NULL || lp->page.mpn != mpn) { up_write(&vm->lockedSem); return false; } FreeLockedPages(lp); if (lp->page.forRegion == MEMREGION_MAINMEM) ATOMIC_SUBV(vm->usedPages, 1U << lp->page.order); rb_erase(&lp->rb, &vm->lockedRoot); kfree(lp); up_write(&vm->lockedSem); return true; } /** * @brief Scan the list of locked pages to see if an MPN matches. * * @param vm Mvpkm module control structure pointer * @param mpn MPN to check * * @return true iff list contains MPN. */ static _Bool LockedListLookup(struct MvpkmVM *vm, __u32 mpn) { struct LockedPage *lp; down_read(&vm->lockedSem); lp = LockedListSearch(&vm->lockedRoot, mpn); up_read(&vm->lockedSem); return lp != NULL; } /** * @brief Add a new mpn to the locked pages RB tree. * * @param vm control structure pointer * * @param mpn mpn of page that was locked with get_user_pages or some sort of * get that is undone by put_page. 
* The mpn is assumed to be non-zero * @param order size/alignment exponent for page * @param forRegion Annotation for Page pool to identify guest page allocations * * @return false: couldn't allocate internal memory to record mpn in<br> * true: successful. */ static _Bool LockedListAdd(struct MvpkmVM *vm, __u32 mpn, __u32 order, PhysMem_RegionType forRegion) { struct rb_node *parent, **p; struct LockedPage *tp, *lp = kmalloc(sizeof(*lp), GFP_KERNEL); if (!lp) return false; lp->page.mpn = mpn; lp->page.order = order; lp->page.forRegion = forRegion; down_write(&vm->lockedSem); if (forRegion == MEMREGION_MAINMEM) ATOMIC_ADDV(vm->usedPages, 1U << order); /* * Insert as a red leaf in the tree (see include/linux/rbtree.h). */ p = &vm->lockedRoot.rb_node; parent = NULL; while (*p) { parent = *p; tp = rb_entry(parent, struct LockedPage, rb); /* * MPN should not already exist in the tree. */ ASSERT(tp->page.mpn != (mpn & (~0UL << tp->page.order))); if (mpn < tp->page.mpn) p = &(*p)->rb_left; else p = &(*p)->rb_right; } rb_link_node(&lp->rb, parent, p); /* * Restructure tree if necessary (see include/linux/rbtree.h). */ rb_insert_color(&lp->rb, &vm->lockedRoot); up_write(&vm->lockedSem); return true; } /** * @brief Traverse RB locked tree, freeing every entry. * * This must be called with the relevant vm->lockedSem held. * * @param node reference to RB node at root of subtree. */ static void LockedListNuke(struct rb_node *node) { while (node) { if (node->rb_left) { node = node->rb_left; } else if (node->rb_right) { node = node->rb_right; } else { /* * We found a leaf, free it and go back to parent. */ struct LockedPage *lp = rb_entry(node, struct LockedPage, rb); node = rb_parent(node); if (node) { if (node->rb_left) node->rb_left = NULL; else node->rb_right = NULL; } FreeLockedPages(lp); kfree(lp); } } } /** * @brief Unlock all pages at vm close time. 
* * @param vm control structure pointer */ static void LockedListUnlockAll(struct MvpkmVM *vm) { down_write(&vm->lockedSem); LockedListNuke(vm->lockedRoot.rb_node); ATOMIC_SETV(vm->usedPages, 0); up_write(&vm->lockedSem); } /** * @brief Allocate zeroed free pages * * @param[in] vm which VM the pages are for so they will be freed when the vm * closes * @param[in] order log2(number of contiguous pages to allocate) * @param[in] highmem is it OK to allocate this page in ZONE_HIGHMEM? This * option should only be specified for pages the host kernel * will not need to address directly. * @param[out] hkvaRet where to return host kernel virtual address of the * allocated pages, if non-NULL, and ONLY IF !highmem. * @param forRegion Annotation for Page pool to identify guest page allocations * @return 0: no host memory available<br> * else: starting MPN<br> * *hkvaRet = filled in */ static MPN AllocZeroedFreePages(struct MvpkmVM *vm, uint32 order, _Bool highmem, PhysMem_RegionType forRegion, HKVA *hkvaRet) { MPN mpn; struct page *page; if (order > PAGE_ALLOC_COSTLY_ORDER) pr_warn("Order %d allocation for region %d exceeds the safe " \ "maximum order %d\n", order, forRegion, PAGE_ALLOC_COSTLY_ORDER); /* * System RAM bank in 0x00000000 workaround. Should only happens once * in host lifetime as memory page is leaked forever. Also leak the * MVP's INVALID_MPN page if it appears. */ do { /* * Get some pages for the requested range. They will be * physically contiguous and have the requested alignment. * They will also have a kernel virtual mapping if !highmem. * * We allocate out of ZONE_MOVABLE even though we can't just * pick up our bags. We do this to support platforms that * explicitly configure ZONE_MOVABLE, such as the Qualcomm * MSM8960, to enable deep power down of memory banks. 
When * the kernel attempts to take a memory bank offline, it will * try and place the pages on the isolate LRU - only pages * already on an LRU, such as anon/file, can get there, so it * will not be able to migrate/move our pages (and hence the * bank will not be offlined). The other alternative is to * live withing ZONE_NORMAL, and only have available a small * fraction of system memory. Long term we plan on hooking the * offlining callback in mvpkm and perform our own migration * with the cooperation of the monitor, but we don't have dev * board to support this today. * * @knownjira{MVP-3477} * * Allocating all memory as MOVABLE is breaking the linux * Contiguous Memory Allocator. It sets up several memory * regions reserved for MOVABLE memory, so that it is able to * move pages from them on request to satifsy a large memory * allocation. But as our pages are not really movable, it * happens that it cannot find enough contiguous memory. * As a workaround, we now only allocate MOVABLE pages when * CONFIG_MEMORY_HOTPLUG is enabled. * * @knownjira{HW-28182} * * In order to fully support linux memory hotplug, we should * implement a mapping with the "migrate_page" callback and * corresponding backend in monitor. * * @knownjira{HW-28658} */ gfp_t gfp = GFP_USER | __GFP_COMP | __GFP_ZERO; if (highmem) { gfp |= __GFP_HIGHMEM; #ifdef CONFIG_MEMORY_HOTPLUG gfp |= __GFP_MOVABLE; #endif } page = alloc_pages(gfp, order); if (page == NULL) return 0; /* * Return the corresponding page number. */ mpn = page_to_pfn(page); } while (mpn == 0 || mpn == INVALID_MPN); /* * Remember to unlock the pages when the FD is closed. */ if (!LockedListAdd(vm, mpn, order, forRegion)) { __free_pages(page, order); return 0; } if (hkvaRet) *hkvaRet = highmem ? 0 : __phys_to_virt(page_to_phys(page)); return mpn; } /** * @brief Map already-pinned WSP memory in host kernel virtual address(HKVA) * space. Assumes 2 world switch pages on an 8k boundary. 
* * @param[in] vm which VM the HKVA Area is to be mapped for * @param[in] mapInfo array of MPNs and execute permission flags to be used in * inserting a new contiguous map in HKVA space * @return 0: HKVA space could not be mapped * else: HKVA where mapping was inserted */ static HKVA MapWSPHKVA(struct MvpkmVM *vm, HkvaMapInfo *mapInfo) { unsigned int i; struct page **pages = NULL; struct page **pagesPtr; pgprot_t prot; int retval; int allocateCount = WSP_PAGE_COUNT + 1; /* extra page for alignment */ int pageIndex = 0; HKVA dummyPage = (HKVA)NULL; HKVA start; HKVA startSegment; HKVA endSegment; /* * Add one page for alignment purposes in case __get_vm_area returns an * unaligned address. */ ASSERT(allocateCount == 3); ASSERT_ON_COMPILE(WSP_PAGE_COUNT == 2); /* * NOT_IMPLEMENTED if MapHKVA is called more than once. */ BUG_ON(vm->wspHkvaArea); /* * Reserve virtual address space. */ vm->wspHkvaArea = __get_vm_area((allocateCount * PAGE_SIZE), VM_ALLOC, MODULES_VADDR, MODULES_END); if (!vm->wspHkvaArea) return 0; pages = kmalloc(allocateCount * sizeof(struct page *), GFP_TEMPORARY); if (!pages) goto err; pagesPtr = pages; /* * Use a dummy page to boundary align the section, if needed. */ dummyPage = __get_free_pages(GFP_KERNEL, 0); if (!dummyPage) goto err; vm->wspHKVADummyPage = dummyPage; /* * Back every entry with the dummy page. */ for (i = 0; i < allocateCount; i++) pages[i] = virt_to_page(dummyPage); /* * World switch pages must not span a 1MB boundary in order to * maintain only a single L2 page table. */ start = (HKVA)vm->wspHkvaArea->addr; startSegment = start & ~(ARM_L1D_SECTION_SIZE - 1); endSegment = (start + PAGE_SIZE) & ~(ARM_L1D_SECTION_SIZE - 1); /* * Insert dummy page at pageIndex, if needed. 
*/ pageIndex = (startSegment != endSegment); /* * Back the rest with the actual world switch pages */ for (i = pageIndex; i < pageIndex + WSP_PAGE_COUNT; i++) pages[i] = pfn_to_page(mapInfo[i - pageIndex].mpn); /* * Given the lack of functionality in the kernel for being able to mark * mappings for a given vm area with different sets of protection bits, * we simply mark the entire vm area as PAGE_KERNEL_EXEC for now * (i.e., union of all the protection bits). Given that the kernel * itself does something similar while loading modules, this should be a * reasonable workaround for now. In the future, we should set the * protection bits to strictly adhere to what has been requested in the * mapInfo parameter. */ prot = PAGE_KERNEL_EXEC; retval = map_vm_area(vm->wspHkvaArea, prot, &pagesPtr); if (retval < 0) goto err; kfree(pages); return (HKVA)(vm->wspHkvaArea->addr) + pageIndex * PAGE_SIZE; err: if (dummyPage) { free_pages(dummyPage, 0); vm->wspHKVADummyPage = (HKVA)NULL; } kfree(pages); free_vm_area(vm->wspHkvaArea); vm->wspHkvaArea = (struct vm_struct *)NULL; return 0; } static void UnmapWSPHKVA(struct MvpkmVM *vm) { if (vm->wspHkvaArea) free_vm_area(vm->wspHkvaArea); if (vm->wspHKVADummyPage) { free_pages(vm->wspHKVADummyPage, 0); vm->wspHKVADummyPage = (HKVA)NULL; } } /** * @brief Clean and release locked pages * * @param lp Reference to the locked pages */ static void FreeLockedPages(struct LockedPage *lp) { struct page *page; int count; page = pfn_to_page(lp->page.mpn); count = page_count(page); if (count == 0) { pr_err("%s: found locked page with 0 reference (mpn %05x)\n", __func__, lp->page.mpn); return; } if (count == 1) { int i; /* * There is no other user for this page, clean it. * * We don't bother checking if the page was highmem or not, * clear_highmem works for both. * We clear the content of the page, and rely on the fact that * the previous worldswitch has cleaned the potential * VIVT I-CACHE. 
*/ for (i = 0; i < (1 << lp->page.order); i++) clear_highpage(page + i); } else if (lp->page.forRegion != MEMREGION_MAINMEM) { pr_warn("%s: mpn 0x%05x for region %d is still in use\n", __func__, lp->page.mpn, lp->page.forRegion); } __free_pages(page, lp->page.order); } /********************************************************************* * * Communicate with monitor * *********************************************************************/ /** * @brief Register a new monitor page. * * @param vm which virtual machine we're running * @return 0: successful<br> * else: -errno */ static int SetupMonitor(struct MvpkmVM *vm) { int retval; WorldSwitchPage *wsp = vm->wsp; #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40501 #define USE_ARCH_EXTENSION_SEC 1 #else #define USE_ARCH_EXTENSION_SEC 0 #endif if (!wsp || wsp->wspHKVA != (HKVA)wsp) return -EINVAL; retval = Mksck_WspInitialize(vm); if (retval) return retval; vm->kobj.kset = mvpkmKSet; retval = kobject_init_and_add(&vm->kobj, &mvpkmKType, NULL, "%d", wsp->guestId); if (retval) goto error; /* * Get a reference to this module such that it cannot be unloaded until * our kobject's release function completes. */ __module_get(THIS_MODULE); vm->haveKObj = true; /* * Caution: From here on, if we fail, we must not call kobject_put() * on vm->kobj since that may / will deallocate 'vm'. Unregistering VM * ksets on failures is fine and should be done for proper ref counting. */ vm->devicesKSet = kset_create_and_add("devices", NULL, &vm->kobj); if (!vm->devicesKSet) { retval = -ENOMEM; goto error; } vm->miscKSet = kset_create_and_add("misc", NULL, &vm->kobj); if (!vm->miscKSet) { kset_unregister(vm->devicesKSet); vm->devicesKSet = NULL; retval = -ENOMEM; goto error; } down_write(&vm->wspSem); /* * The VE monitor needs to issue a SMC to bootstrap Hyp mode. */ if (wsp->monType == MONITOR_TYPE_VE) { /* * Here we assemble the monitor's HMAIR0 based on wsp->memAttr. 
* We map from the inner/outer normal page cacheability * attributes obtained from DetermineCacheabilityAttribs to * the format required in 4.2.8 ARM PRD03-GENC-008469 13.0 * (see this document for the magic numbers). * * * Where a choice is available, we opt for read and/or * write allocation. */ static const uint32 normalCacheAttr2MAIR[4] = { 0x4, 0xf, 0xa, 0xe }; uint32 hmair0 = ((normalCacheAttr2MAIR[wsp->memAttr.innerCache] | (normalCacheAttr2MAIR[wsp->memAttr.outerCache] << 4)) << 8 * MVA_MEMORY) | (0x4 << 8 * MVA_DEVICE); /* * See B4.1.74 ARM DDI 0406C-2c for the HTCR magic. */ uint32 htcr = 0x80000000 | (wsp->memAttr.innerCache << 8) | (wsp->memAttr.outerCache << 10) | (wsp->memAttr.share << 12); /** * @knownjira{MVP-377} * Set HSCTLR to enable MMU and caches. We should really run * the monitor WXN, in non-MVP_DEVEL builds. * See 13.18 ARM PRD03-GENC-008353 11.0 for the magic. */ static const uint32 hsctlr = 0x30c5187d; register uint32 r0 asm("r0") = wsp->monVA.excVec; register uint32 r1 asm("r1") = wsp->regSave.ve.mHTTBR; register uint32 r2 asm("r2") = htcr; register uint32 r3 asm("r3") = hmair0; register uint32 r4 asm("r4") = hsctlr; asm volatile ( #if USE_ARCH_EXTENSION_SEC ".arch_extension sec\n\t" #endif "smc 0" : : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4) : "memory" ); } /* * Initialize guest wait-for-interrupt waitqueue. */ init_waitqueue_head(&vm->wfiWaitQ); MonitorTimer_Setup(vm); #ifdef CONFIG_HAS_WAKELOCK wake_lock_init(&vm->wakeLock, WAKE_LOCK_SUSPEND, "mvpkm"); #endif wsp->mvpkmVersion = MVP_VERSION_CODE; up_write(&vm->wspSem); /* * Ensure coherence of monitor loading and page tables. 
*/ flush_cache_all(); return 0; error: Mksck_WspRelease(wsp); vm->wsp = NULL; return retval; } /** * @brief dummy function to drop the info parameter * @param info ignored */ static void FlushAllCpuCaches(void *info) { flush_cache_all(); } /** * @brief return to where monitor called worldswitch * * @param vm which virtual machine we're running * @return 0: successful, just call back when ready<br> * 1: successful, process code in WSP_Params(wsp)->callno<br> * else: -errno */ static int RunMonitor(struct MvpkmVM *vm) { int ii; unsigned long flags; WorldSwitchPage *wsp = vm->wsp; int retval = 0; unsigned int freq = -1; ASSERT(wsp); #ifdef CONFIG_HAS_WAKELOCK wake_lock(&vm->wakeLock); #endif /* * Set VCPUThread affinity */ if (cpumask_intersects(to_cpumask(vcpuAffinity), cpu_active_mask)) set_cpus_allowed_ptr(current, to_cpumask(vcpuAffinity)); /* * Record the the current task structure, so an ABORT will know, * who to wake. */ down_write(&vm->monThreadTaskSem); vm->monThreadTask = get_current(); up_write(&vm->monThreadTaskSem); /* * Keep going as long as the monitor is in critical section or * there are no pending signals such as SIGINT or SIGKILL. Block * interrupts before checking so any IPI sent will remain pending * if our check just misses detecting the signal. */ local_irq_save(flags); while (wsp->critSecCount > 0 || (!signal_pending(current) && !(ATOMIC_GETO(wsp->hostActions) & ACTION_ABORT))) { cpumask_set_cpu(smp_processor_id(), &inMonitor); /* * ARMv7 Performance counters are per CPU core and might be * disabled over CPU core sleep if there is nothing else in * the system to re-enable them, so now that we have been * allocated a CPU core to run the guest, * enable them and in particular the TSC (CCNT) which is used * for monitor timing between world switches. 
*/ { uint32 pmnc; uint32 pmcnt; /* make sure that Performance Counters are enabled */ ARM_MRC_CP15(PERF_MON_CONTROL_REGISTER, pmnc); if ((pmnc & (ARM_PMNC_E | ARM_PMNC_D)) != (ARM_PMNC_E)) { pmnc |= ARM_PMNC_E; /* Enable TSC */ /* Disable cycle count divider */ pmnc &= ~ARM_PMNC_D; ARM_MCR_CP15(PERF_MON_CONTROL_REGISTER, pmnc); } /* make sure that the CCNT is enabled */ ARM_MRC_CP15(PERF_MON_COUNT_SET, pmcnt); if ((pmcnt & ARM_PMCNT_C) != ARM_PMCNT_C) { pmcnt |= ARM_PMCNT_C; ARM_MCR_CP15(PERF_MON_COUNT_SET, pmcnt); } } /* * Update TSC to RATE64 ratio */ { struct TscToRate64Cb ttr; if (CpuFreqUpdate(&freq, &ttr)) { wsp->tscToRate64Mult = ttr.mult; wsp->tscToRate64Shift = ttr.shift; } } /* * Save the time of day for the monitor's timer facility. * The timing facility in the vmm needs to compute current * time in the host linux's time representation. It uses * the formula: * now = wsp->switchedAt64 + (uint32)(TSC_READ() - * wsp->lowerTSC) * * Read the timestamp counter *immediately after* ktime_get() * as that will give the most consistent offset between * reading the hardware clock register in ktime_get() and * reading the hardware timestamp counter with TSC_READ(). */ ASSERT_ON_COMPILE(MVP_TIMER_RATE64 == NSEC_PER_SEC); { ktime_t now = ktime_get(); TSC_READ(wsp->switchedAtTSC); wsp->switchedAt64 = ktime_to_ns(now); } /* * Save host FPU contents and load monitor contents. */ SWITCH_VFP_TO_MONITOR; /* * Call into the monitor to run guest instructions until it * wants us to do something for it. Note that any hardware * interrupt request will cause it to volunteer. 
*/ switch (wsp->monType) { case MONITOR_TYPE_LPV: { uint32 hostVBAR; ARM_MRC_CP15(VECTOR_BASE, hostVBAR); (*wsp->switchToMonitor)(&wsp->regSave); ARM_MCR_CP15(VECTOR_BASE, hostVBAR); break; } case MONITOR_TYPE_VE: { register uint32 r1 asm("r1") = wsp->regSave.ve.mHTTBR; asm volatile ( ".word " MVP_STRINGIFY(ARM_INSTR_HVC_A1_ENC(0)) : "=r" (r1) : "r" (r1) : "r0", "r2", "memory" ); break; } default: FATAL(); } /* * Save monitor FPU contents and load host contents. */ SWITCH_VFP_TO_HOST; cpumask_clear_cpu(smp_processor_id(), &inMonitor); /* * Re-enable local interrupts now that we are back in the * host world. */ local_irq_restore(flags); /* * Maybe the monitor wrote some messages to monitor->host * sockets. This will wake the corresponding host threads to * receive them. */ /** * @todo This lousy loop is in the critical path. It should * be changed to some faster algorithm to wake blocked host * sockets. */ for (ii = 0; ii < MKSCK_MAX_SHARES; ii++) { if (wsp->isPageMapped[ii]) Mksck_WakeBlockedSockets( MksckPage_GetFromIdx(ii)); } switch (WSP_Params(wsp)->callno) { case WSCALL_ACQUIRE_PAGE: { uint32 i; for (i = 0; i < WSP_Params(wsp)->pages.pages; ++i) { MPN mpn = AllocZeroedFreePages(vm, WSP_Params(wsp)->pages.order, true, WSP_Params(wsp)->pages.forRegion, NULL); if (mpn == 0) { pr_err("WSCALL_ACQUIRE_PAGE: no order "\ "%u pages available\n", WSP_Params(wsp)->pages.order); WSP_Params(wsp)->pages.pages = i; break; } WSP_Params(wsp)->pages.mpns[i] = mpn; } break; } case WSCALL_RELEASE_PAGE: { uint32 i; for (i = 0; i < WSP_Params(wsp)->pages.pages; ++i) { if (!LockedListDel(vm, WSP_Params(wsp)->pages.mpns[i])) { WSP_Params(wsp)->pages.pages = i; break; } } break; } case WSCALL_MUTEXLOCK: retval = Mutex_Lock((void *)WSP_Params(wsp)->mutex.mtxHKVA, WSP_Params(wsp)->mutex.mode); if (retval < 0) { WSP_Params(wsp)->mutex.ok = false; goto monitorExit; } /* * The locking succeeded. From this point on the monitor * is in critical section. 
Even if an interrupt comes * right here, it must return to the monitor to unlock * the mutex. */ wsp->critSecCount++; WSP_Params(wsp)->mutex.ok = true; break; case WSCALL_MUTEXUNLOCK: Mutex_Unlock((void *)WSP_Params(wsp)->mutex.mtxHKVA, WSP_Params(wsp)->mutex.mode); break; case WSCALL_MUTEXUNLSLEEP: /* * The vcpu has just come back from the monitor. During * the transition interrupts were disabled. Above, * however, interrupts were enabled again and it is * possible that a context switch happened into a thread * (serve_vmx) that instructed the vcpu thread to * abort. After returning to this thread the vcpu may * enter a sleep below never to return from it. To avoid * this deadlock we need to test the abort flag in * Mutex_UnlSleepTest. */ retval = Mutex_UnlSleepTest( (void *)WSP_Params(wsp)->mutex.mtxHKVA, WSP_Params(wsp)->mutex.mode, WSP_Params(wsp)->mutex.cvi, &wsp->hostActions, ACTION_ABORT); if (retval < 0) goto monitorExit; break; case WSCALL_MUTEXUNLWAKE: Mutex_UnlWake((void *)WSP_Params(wsp)->mutex.mtxHKVA, WSP_Params(wsp)->mutex.mode, WSP_Params(wsp)->mutex.cvi, WSP_Params(wsp)->mutex.all); break; /* * The monitor wants us to block (allowing other host threads * to run) until an async message is waiting for the monitor * to process. * * If MvpkmWaitForInt() returns an error, it should only be * if there is another signal pending (such as SIGINT). * So we pretend it completed normally, as the monitor is * ready to be called again (it will see no messages to * process and wait again), and return to user mode so the * signals can be processed. 
*/ case WSCALL_WAIT: #ifdef CONFIG_HAS_WAKELOCK if (WSP_Params(wsp)->wait.suspendMode) { /* * Guest has ok'ed suspend mode, so release * SUSPEND wakelock */ wake_unlock(&vm->wakeLock); retval = MvpkmWaitForInt(vm, true); wake_lock(&vm->wakeLock); WSP_Params(wsp)->wait.suspendMode = 0; } else { /* * Guest has asked for WFI not suspend so * keep holding SUSPEND wakelock */ retval = MvpkmWaitForInt(vm, false); } #else retval = MvpkmWaitForInt(vm, WSP_Params(wsp)->wait.suspendMode); #endif if (retval < 0) goto monitorExit; break; /* * The only reason the monitor returned was because there was a * pending hardware interrupt. The host serviced and cleared * that interrupt when we enabled interrupts above. * Now we call the scheduler in case that interrupt woke * another thread, we want to allow that thread to run before * returning to do more guest code. */ case WSCALL_IRQ: break; case WSCALL_GET_PAGE_FROM_VMID: { MksckPage *mksckPage; mksckPage = MksckPage_GetFromVmIdIncRefc( WSP_Params(wsp)->pageMgmnt.vmId); if (mksckPage) { int ii; int pageIndex; WSP_Params(wsp)->pageMgmnt.found = true; for (ii = 0; ii < MKSCKPAGE_TOTAL; ii++) { WSP_Params(wsp)->pageMgmnt.mpn[ii] = vmalloc_to_pfn((void *)(((HKVA)mksckPage) + ii * PAGE_SIZE)); } pageIndex = MKSCK_VMID2IDX(mksckPage->vmId); ASSERT(!wsp->isPageMapped[pageIndex]); wsp->isPageMapped[pageIndex] = true; } else { WSP_Params(wsp)->pageMgmnt.found = false; } break; } case WSCALL_REMOVE_PAGE_FROM_VMID: { MksckPage *mksckPage; int pageIndex; mksckPage = MksckPage_GetFromVmId(WSP_Params(wsp)->pageMgmnt.vmId); pageIndex = MKSCK_VMID2IDX(mksckPage->vmId); ASSERT(wsp->isPageMapped[pageIndex]); wsp->isPageMapped[pageIndex] = false; MksckPage_DecRefc(mksckPage); break; } /* * Read current wallclock time. 
*/ case WSCALL_READTOD: { struct timeval nowTV; do_gettimeofday(&nowTV); WSP_Params(wsp)->tod.now = nowTV.tv_sec; WSP_Params(wsp)->tod.nowusec = nowTV.tv_usec; break; } case WSCALL_LOG: { int len = strlen(WSP_Params(wsp)->log.messg); pr_info("VMM: %s%s", WSP_Params(wsp)->log.messg, (WSP_Params(wsp)->log.messg[len-1] == '\n') ? "" : "\n"); break; } case WSCALL_ABORT: retval = WSP_Params(wsp)->abort.status; goto monitorExit; case WSCALL_QP_GUEST_ATTACH: { int32 rc; QPInitArgs args; uint32 base; uint32 nrPages; args.id = WSP_Params(wsp)->qp.id; args.capacity = WSP_Params(wsp)->qp.capacity; args.type = WSP_Params(wsp)->qp.type; base = WSP_Params(wsp)->qp.base; nrPages = WSP_Params(wsp)->qp.nrPages; rc = QP_GuestAttachRequest(vm, &args, base, nrPages); WSP_Params(wsp)->qp.rc = rc; WSP_Params(wsp)->qp.id = args.id; break; } case WSCALL_QP_NOTIFY: { QPInitArgs args; args.id = WSP_Params(wsp)->qp.id; args.capacity = WSP_Params(wsp)->qp.capacity; args.type = WSP_Params(wsp)->qp.type; WSP_Params(wsp)->qp.rc = QP_NotifyListener(&args); break; } case WSCALL_MONITOR_TIMER: MonitorTimer_Request(&vm->monTimer, WSP_Params(wsp)->timer.when64); break; case WSCALL_COMM_SIGNAL: Mvpkm_CommEvSignal(&WSP_Params(wsp)->commEvent.transpID, WSP_Params(wsp)->commEvent.event); break; case WSCALL_FLUSH_ALL_DCACHES: /* * Broadcast Flush DCache request to all cores. * Block while waiting for all of them to get done. */ on_each_cpu(FlushAllCpuCaches, NULL, 1); break; default: retval = -EPIPE; goto monitorExit; } /* * The params.callno callback was handled in kernel mode and * completed successfully. Repeat for another call without * returning to user mode, unless there are signals pending. * * But first, call the Linux scheduler to switch threads if * there is some other thread Linux wants to run now. */ if (need_resched()) schedule(); /* * Check if cpus allowed mask has to be updated. * Updating it must be done outside of an atomic context. 
 */
		/*
		 * Only apply the requested VCPU affinity if it names at least
		 * one active CPU and actually differs from the current mask.
		 */
		if (cpumask_intersects(to_cpumask(vcpuAffinity), cpu_active_mask) &&
		    !cpumask_equal(to_cpumask(vcpuAffinity), &current->cpus_allowed))
			set_cpus_allowed_ptr(current, to_cpumask(vcpuAffinity));

		local_irq_save(flags);
	}

	/*
	 * There are signals pending so don't try to do any more monitor/guest
	 * stuff. But since we were at the point of just about to run the
	 * monitor, return success status as user mode can simply call us
	 * back to run the monitor again.
	 */
	local_irq_restore(flags);

monitorExit:
	/* The monitor must never exit while holding a critical section. */
	ASSERT(wsp->critSecCount == 0);

	if (ATOMIC_GETO(wsp->hostActions) & ACTION_ABORT) {
		PRINTK("Monitor has ABORT flag set.\n");
		retval = ExitStatusHostRequest;
	}

	/*
	 * A triggered watchdog upgrades a plain host-request exit into a
	 * known-fatal VMM exit status.
	 */
	if (retval == ExitStatusHostRequest && vm->watchdogTriggered)
		retval = ExitStatusVMMFatalKnown;

#ifdef CONFIG_HAS_WAKELOCK
	wake_unlock(&vm->wakeLock);
#endif

	/*
	 * Clear monThreadTask under the write lock so Mvpkm_WakeGuest (which
	 * takes the read side) never sees a stale task pointer.
	 */
	down_write(&vm->monThreadTaskSem);
	vm->monThreadTask = NULL;
	up_write(&vm->monThreadTaskSem);

	return retval;
}

/**
 * @brief Guest is waiting for interrupts, sleep if necessary
 *
 * @param vm which virtual machine we're running
 * @param suspend is the guest entering suspend or just WFI?
 * @return 0: woken up, hostActions should have pending events
 *         -ERESTARTSYS: broke out because other signals are pending
 *
 * This function is called in the VCPU context after the world switch to wait
 * for an incoming message. If any message gets queued to this VCPU, the
 * sender will wake us up.
 */
int
MvpkmWaitForInt(struct MvpkmVM *vm,
		_Bool suspend)
{
	WorldSwitchPage *wsp = vm->wsp;
	wait_queue_head_t *q = &vm->wfiWaitQ;

	if (suspend) {
		/* Suspend: sleep indefinitely until a host action is posted. */
		return wait_event_interruptible(*q,
						ATOMIC_GETO(wsp->hostActions) != 0);
	} else {
		int ret;

		/*
		 * Plain WFI: bound the sleep to 10s so a guest that never gets
		 * woken is at least reported instead of hanging silently.
		 */
		ret = wait_event_interruptible_timeout(*q,
						       ATOMIC_GETO(wsp->hostActions) != 0,
						       10*HZ);
		if (ret == 0)
			pr_warn("MvpkmWaitForInt: guest stuck for 10s in " \
				"WFI! (hostActions %08x)\n",
				ATOMIC_GETO(wsp->hostActions));
		/*
		 * wait_event_interruptible_timeout() returns >0 on wakeup,
		 * 0 on timeout, <0 on signal; collapse success to 0.
		 */
		return ret > 0 ? 0 : ret;
	}
}

/**
 * @brief Force the guest to evaluate its hostActions flag field
 *
 * @param vm which guest needs waking
 * @param why why should be guest be woken up?
 *
 * This function updates the hostAction flag field as and wakes up the guest as
 * required so that it can evaluate it. The guest could be executing guest
 * code in an SMP system, in that case send an IPI; or it could be sleeping, in
 * the case wake it up.
 */
void
Mvpkm_WakeGuest(struct MvpkmVM *vm,
		int why)
{
	ASSERT(why != 0);

	/* set the host action */
	if (ATOMIC_ORO(vm->wsp->hostActions, why) & why)
		/* guest has already been woken up so no need to do it again */
		return;

	/*
	 * VCPU is certainly in 'wait for interrupt' wait. Wake it up!
	 */

#ifdef CONFIG_HAS_WAKELOCK
	/*
	 * To prevent the system to go in suspend mode before the monitor had a
	 * chance on being scheduled, we will hold the VM wakelock from now.
	 * As the wakelocks are not managed as reference counts, this is not an
	 * an issue to take a wake_lock twice in a row.
	 */
	wake_lock(&vm->wakeLock);
#endif

	/*
	 * On a UP system, we ensure the monitor thread isn't blocked.
	 *
	 * On an MP system the other CPU might be running the guest. This
	 * is noop on UP.
	 *
	 * When the guest is running, it is an invariant that monThreadTaskSem
	 * is not held as a write lock, so we should not fail to acquire the
	 * lock.
	 * Mvpkm_WakeGuest may be called from an atomic context, so we can't
	 * sleep here.
	 */
	if (down_read_trylock(&vm->monThreadTaskSem)) {
		if (vm->monThreadTask) {
			wake_up_process(vm->monThreadTask);
			/* kick_process() IPIs the CPU running the task. */
			kick_process(vm->monThreadTask);
		}
		up_read(&vm->monThreadTaskSem);
	} else {
		pr_warn("Unexpected failure to acquire monThreadTaskSem!\n");
	}
}
gpl-2.0
Shkerzy/alcatelOT990-kernel-msm7x27
net/socket.c
164
76423
/* * NET An implementation of the SOCKET network access protocol. * * Version: @(#)socket.c 1.1.93 18/02/95 * * Authors: Orest Zborowski, <obz@Kodak.COM> * Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * * Fixes: * Anonymous : NOTSOCK/BADF cleanup. Error fix in * shutdown() * Alan Cox : verify_area() fixes * Alan Cox : Removed DDI * Jonathan Kamens : SOCK_DGRAM reconnect bug * Alan Cox : Moved a load of checks to the very * top level. * Alan Cox : Move address structures to/from user * mode above the protocol layers. * Rob Janssen : Allow 0 length sends. * Alan Cox : Asynchronous I/O support (cribbed from the * tty drivers). * Niibe Yutaka : Asynchronous I/O for writes (4.4BSD style) * Jeff Uphoff : Made max number of sockets command-line * configurable. * Matti Aarnio : Made the number of sockets dynamic, * to be allocated when needed, and mr. * Uphoff's max is used as max to be * allowed to allocate. * Linus : Argh. removed all the socket allocation * altogether: it's in the inode now. * Alan Cox : Made sock_alloc()/sock_release() public * for NetROM and future kernel nfsd type * stuff. * Alan Cox : sendmsg/recvmsg basics. * Tom Dyas : Export net symbols. * Marcin Dalecki : Fixed problems with CONFIG_NET="n". * Alan Cox : Added thread locking to sys_* calls * for sockets. May have errors at the * moment. * Kevin Buhr : Fixed the dumb errors in the above. * Andi Kleen : Some small cleanups, optimizations, * and fixed a copy_from_user() bug. * Tigran Aivazian : sys_send(args) calls sys_sendto(args, NULL, 0) * Tigran Aivazian : Made listen(2) backlog sanity checks * protocol-independent * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * * This module is effectively the top level interface to the BSD socket * paradigm. 
* * Based upon Swansea University Computer Society NET3.039 */ #include <linux/mm.h> #include <linux/socket.h> #include <linux/file.h> #include <linux/net.h> #include <linux/interrupt.h> #include <linux/thread_info.h> #include <linux/rcupdate.h> #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/mutex.h> #include <linux/wanrouter.h> #include <linux/if_bridge.h> #include <linux/if_frad.h> #include <linux/if_vlan.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/cache.h> #include <linux/module.h> #include <linux/highmem.h> #include <linux/mount.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/compat.h> #include <linux/kmod.h> #include <linux/audit.h> #include <linux/wireless.h> #include <linux/nsproxy.h> #include <linux/magic.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <asm/unistd.h> #include <net/compat.h> #include <net/wext.h> #include <net/cls_cgroup.h> #include <net/sock.h> #include <linux/netfilter.h> #include <linux/if_tun.h> #include <linux/ipv6_route.h> #include <linux/route.h> #include <linux/sockios.h> #include <linux/atalk.h> static int sock_no_open(struct inode *irrelevant, struct file *dontcare); static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); static int sock_mmap(struct file *file, struct vm_area_struct *vma); static int sock_close(struct inode *inode, struct file *file); static unsigned int sock_poll(struct file *file, struct poll_table_struct *wait); static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #ifdef CONFIG_COMPAT static long compat_sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #endif static int sock_fasync(int fd, struct file *filp, int on); static ssize_t sock_sendpage(struct file *file, struct page *page, int 
offset, size_t size, loff_t *ppos, int more); static ssize_t sock_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); /* * Socket files have a set of 'special' operations as well as the generic file ones. These don't appear * in the operation structures but are done directly via the socketcall() multiplexor. */ static const struct file_operations socket_file_ops = { .owner = THIS_MODULE, .llseek = no_llseek, .aio_read = sock_aio_read, .aio_write = sock_aio_write, .poll = sock_poll, .unlocked_ioctl = sock_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_sock_ioctl, #endif .mmap = sock_mmap, .open = sock_no_open, /* special open code to disallow open via /proc */ .release = sock_close, .fasync = sock_fasync, .sendpage = sock_sendpage, .splice_write = generic_splice_sendpage, .splice_read = sock_splice_read, }; /* * The protocol list. Each protocol is registered in here. */ static DEFINE_SPINLOCK(net_family_lock); static const struct net_proto_family *net_families[NPROTO] __read_mostly; /* * Statistics counters of the socket lists */ static DEFINE_PER_CPU(int, sockets_in_use) = 0; /* * Support routines. * Move socket addresses back and forth across the kernel/user * divide and look after the messy bits. */ #define MAX_SOCK_ADDR 128 /* 108 for Unix domain - 16 for IP, 16 for IPX, 24 for IPv6, about 80 for AX.25 must be at least one bigger than the AF_UNIX size (see net/unix/af_unix.c :unix_mkname()). */ /** * move_addr_to_kernel - copy a socket address into kernel space * @uaddr: Address in user space * @kaddr: Address in kernel space * @ulen: Length in user space * * The address is copied into kernel space. If the provided address is * too long an error code of -EINVAL is returned. If the copy gives * invalid addresses -EFAULT is returned. On a success 0 is returned. 
 */
int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr)
{
	/* Reject negative or oversized lengths before touching user memory. */
	if (ulen < 0 || ulen > sizeof(struct sockaddr_storage))
		return -EINVAL;
	if (ulen == 0)
		return 0;
	if (copy_from_user(kaddr, uaddr, ulen))
		return -EFAULT;
	/* Give the audit subsystem a chance to record/veto the address. */
	return audit_sockaddr(ulen, kaddr);
}

/**
 *	move_addr_to_user	-	copy an address to user space
 *	@kaddr: kernel space address
 *	@klen: length of address in kernel
 *	@uaddr: user space address
 *	@ulen: pointer to user length field
 *
 *	The value pointed to by ulen on entry is the buffer length available.
 *	This is overwritten with the buffer space used. -EINVAL is returned
 *	if an overlong buffer is specified or a negative buffer size. -EFAULT
 *	is returned if either the buffer or the length field are not
 *	accessible.
 *	After copying the data up to the limit the user specifies, the true
 *	length of the data is written over the length limit the user
 *	specified. Zero is returned for a success.
 */
int move_addr_to_user(struct sockaddr *kaddr, int klen, void __user *uaddr,
		      int __user *ulen)
{
	int err;
	int len;

	err = get_user(len, ulen);
	if (err)
		return err;
	/* Copy at most klen bytes; truncation is reported below, not here. */
	if (len > klen)
		len = klen;
	if (len < 0 || len > sizeof(struct sockaddr_storage))
		return -EINVAL;
	if (len) {
		if (audit_sockaddr(klen, kaddr))
			return -ENOMEM;
		if (copy_to_user(uaddr, kaddr, len))
			return -EFAULT;
	}
	/*
	 *      "fromlen shall refer to the value before truncation.."
	 *                      1003.1g
	 */
	return __put_user(klen, ulen);
}

/* Slab cache for combined socket+inode (struct socket_alloc) objects. */
static struct kmem_cache *sock_inode_cachep __read_mostly;

/* Allocate a sockfs inode together with its socket and wait queue. */
static struct inode *sock_alloc_inode(struct super_block *sb)
{
	struct socket_alloc *ei;

	ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	ei->socket.wq = kmalloc(sizeof(struct socket_wq), GFP_KERNEL);
	if (!ei->socket.wq) {
		/* Roll back the cache allocation on wq failure. */
		kmem_cache_free(sock_inode_cachep, ei);
		return NULL;
	}
	init_waitqueue_head(&ei->socket.wq->wait);
	ei->socket.wq->fasync_list = NULL;

	ei->socket.state = SS_UNCONNECTED;
	ei->socket.flags = 0;
	ei->socket.ops = NULL;
	ei->socket.sk = NULL;
	ei->socket.file = NULL;

	return &ei->vfs_inode;
}

/* RCU callback: free the wait queue once all readers are done with it. */
static void wq_free_rcu(struct rcu_head *head)
{
	struct socket_wq *wq = container_of(head, struct socket_wq, rcu);

	kfree(wq);
}

static void sock_destroy_inode(struct inode *inode)
{
	struct socket_alloc *ei;

	ei = container_of(inode, struct socket_alloc, vfs_inode);
	/* wq may still be referenced under RCU, so defer its free. */
	call_rcu(&ei->socket.wq->rcu, wq_free_rcu);
	kmem_cache_free(sock_inode_cachep, ei);
}

/* Slab constructor: runs once per object when a slab page is populated. */
static void init_once(void *foo)
{
	struct socket_alloc *ei = (struct socket_alloc *)foo;

	inode_init_once(&ei->vfs_inode);
}

static int init_inodecache(void)
{
	sock_inode_cachep = kmem_cache_create("sock_inode_cache",
					      sizeof(struct socket_alloc),
					      0,
					      (SLAB_HWCACHE_ALIGN |
					       SLAB_RECLAIM_ACCOUNT |
					       SLAB_MEM_SPREAD),
					      init_once);
	if (sock_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static const struct super_operations sockfs_ops = {
	.alloc_inode	= sock_alloc_inode,
	.destroy_inode	= sock_destroy_inode,
	.statfs		= simple_statfs,
};

/* Mount callback for the internal, never-user-visible sockfs filesystem. */
static int sockfs_get_sb(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data,
			 struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC,
			     mnt);
}

static struct vfsmount *sock_mnt __read_mostly;

static struct file_system_type sock_fs_type = {
	.name =		"sockfs",
	.get_sb =	sockfs_get_sb,
	.kill_sb =	kill_anon_super,
};

/*
 * sockfs_dname() is called from d_path().
*/ static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen) { return dynamic_dname(dentry, buffer, buflen, "socket:[%lu]", dentry->d_inode->i_ino); } static const struct dentry_operations sockfs_dentry_operations = { .d_dname = sockfs_dname, }; /* * Obtains the first available file descriptor and sets it up for use. * * These functions create file structures and maps them to fd space * of the current process. On success it returns file descriptor * and file struct implicitly stored in sock->file. * Note that another thread may close file descriptor before we return * from this function. We use the fact that now we do not refer * to socket after mapping. If one day we will need it, this * function will increment ref. count on file by 1. * * In any case returned fd MAY BE not valid! * This race condition is unavoidable * with shared fd spaces, we cannot solve it inside kernel, * but we take care of internal coherence yet. */ static int sock_alloc_file(struct socket *sock, struct file **f, int flags) { struct qstr name = { .name = "" }; struct path path; struct file *file; int fd; fd = get_unused_fd_flags(flags); if (unlikely(fd < 0)) return fd; path.dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name); if (unlikely(!path.dentry)) { put_unused_fd(fd); return -ENOMEM; } path.mnt = mntget(sock_mnt); path.dentry->d_op = &sockfs_dentry_operations; d_instantiate(path.dentry, SOCK_INODE(sock)); SOCK_INODE(sock)->i_fop = &socket_file_ops; file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &socket_file_ops); if (unlikely(!file)) { /* drop dentry, keep inode */ atomic_inc(&path.dentry->d_inode->i_count); path_put(&path); put_unused_fd(fd); return -ENFILE; } sock->file = file; file->f_flags = O_RDWR | (flags & O_NONBLOCK); file->f_pos = 0; file->private_data = sock; *f = file; return fd; } int sock_map_fd(struct socket *sock, int flags) { struct file *newfile; int fd = sock_alloc_file(sock, &newfile, flags); if (likely(fd >= 0)) fd_install(fd, newfile); return 
fd; } static struct socket *sock_from_file(struct file *file, int *err) { if (file->f_op == &socket_file_ops) return file->private_data; /* set in sock_map_fd */ *err = -ENOTSOCK; return NULL; } /** * sockfd_lookup - Go from a file number to its socket slot * @fd: file handle * @err: pointer to an error code return * * The file handle passed in is locked and the socket it is bound * too is returned. If an error occurs the err pointer is overwritten * with a negative errno code and NULL is returned. The function checks * for both invalid handles and passing a handle which is not a socket. * * On a success the socket object pointer is returned. */ struct socket *sockfd_lookup(int fd, int *err) { struct file *file; struct socket *sock; file = fget(fd); if (!file) { *err = -EBADF; return NULL; } sock = sock_from_file(file, err); if (!sock) fput(file); return sock; } static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) { struct file *file; struct socket *sock; *err = -EBADF; file = fget_light(fd, fput_needed); if (file) { sock = sock_from_file(file, err); if (sock) return sock; fput_light(file, *fput_needed); } return NULL; } /** * sock_alloc - allocate a socket * * Allocate a new inode and socket object. The two are bound together * and initialised. The socket is then returned. If we are out of inodes * NULL is returned. */ static struct socket *sock_alloc(void) { struct inode *inode; struct socket *sock; inode = new_inode(sock_mnt->mnt_sb); if (!inode) return NULL; sock = SOCKET_I(inode); kmemcheck_annotate_bitfield(sock, type); inode->i_mode = S_IFSOCK | S_IRWXUGO; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); percpu_add(sockets_in_use, 1); return sock; } /* * In theory you can't get an open on this inode, but /proc provides * a back door. Remember to keep it shut otherwise you'll let the * creepy crawlies in. 
*/ static int sock_no_open(struct inode *irrelevant, struct file *dontcare) { return -ENXIO; } const struct file_operations bad_sock_fops = { .owner = THIS_MODULE, .open = sock_no_open, }; /** * sock_release - close a socket * @sock: socket to close * * The socket is released from the protocol stack if it has a release * callback, and the inode is then released if the socket is bound to * an inode not a file. */ void sock_release(struct socket *sock) { if (sock->ops) { struct module *owner = sock->ops->owner; sock->ops->release(sock); sock->ops = NULL; module_put(owner); } if (sock->wq->fasync_list) printk(KERN_ERR "sock_release: fasync list not empty!\n"); percpu_sub(sockets_in_use, 1); if (!sock->file) { iput(SOCK_INODE(sock)); return; } sock->file = NULL; } int sock_tx_timestamp(struct msghdr *msg, struct sock *sk, union skb_shared_tx *shtx) { shtx->flags = 0; if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE)) shtx->hardware = 1; if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE)) shtx->software = 1; return 0; } EXPORT_SYMBOL(sock_tx_timestamp); static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size) { struct sock_iocb *si = kiocb_to_siocb(iocb); int err; sock_update_classid(sock->sk); si->sock = sock; si->scm = NULL; si->msg = msg; si->size = size; err = security_socket_sendmsg(sock, msg, size); if (err) return err; err = sock->ops->sendmsg(iocb, sock, msg, size); return err; } int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { struct kiocb iocb; struct sock_iocb siocb; int ret; init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_sendmsg(&iocb, sock, msg, size); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t size) { mm_segment_t oldfs = get_fs(); int result; set_fs(KERNEL_DS); /* * the following is safe, since for compiler definitions of kvec and * iovec are 
identical, yielding the same in-core layout and alignment */ msg->msg_iov = (struct iovec *)vec; msg->msg_iovlen = num; result = sock_sendmsg(sock, msg, size); set_fs(oldfs); return result; } static int ktime2ts(ktime_t kt, struct timespec *ts) { if (kt.tv64) { *ts = ktime_to_timespec(kt); return 1; } else { return 0; } } /* * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP) */ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP); struct timespec ts[3]; int empty = 1; struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); /* Race occurred between timestamp enabling and packet receiving. Fill in the current time for now. */ if (need_software_tstamp && skb->tstamp.tv64 == 0) __net_timestamp(skb); if (need_software_tstamp) { if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) { struct timeval tv; skb_get_timestamp(skb, &tv); put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, sizeof(tv), &tv); } else { skb_get_timestampns(skb, &ts[0]); put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, sizeof(ts[0]), &ts[0]); } } memset(ts, 0, sizeof(ts)); if (skb->tstamp.tv64 && sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) { skb_get_timestampns(skb, ts + 0); empty = 0; } if (shhwtstamps) { if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE) && ktime2ts(shhwtstamps->syststamp, ts + 1)) empty = 0; if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE) && ktime2ts(shhwtstamps->hwtstamp, ts + 2)) empty = 0; } if (!empty) put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING, sizeof(ts), &ts); } EXPORT_SYMBOL_GPL(__sock_recv_timestamp); inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && skb->dropcount) put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL, sizeof(__u32), &skb->dropcount); } void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { sock_recv_timestamp(msg, sk, skb); sock_recv_drops(msg, sk, skb); } 
EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops); static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { int err; struct sock_iocb *si = kiocb_to_siocb(iocb); sock_update_classid(sock->sk); si->sock = sock; si->scm = NULL; si->msg = msg; si->size = size; si->flags = flags; err = sock->ops->recvmsg(iocb, sock, msg, size, flags); return err; } static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { int err = security_socket_recvmsg(sock, msg, size, flags); return err ?: __sock_recvmsg_nosec(iocb, sock, msg, size, flags); } int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct kiocb iocb; struct sock_iocb siocb; int ret; init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_recvmsg(&iocb, sock, msg, size, flags); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct kiocb iocb; struct sock_iocb siocb; int ret; init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_recvmsg_nosec(&iocb, sock, msg, size, flags); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t size, int flags) { mm_segment_t oldfs = get_fs(); int result; set_fs(KERNEL_DS); /* * the following is safe, since for compiler definitions of kvec and * iovec are identical, yielding the same in-core layout and alignment */ msg->msg_iov = (struct iovec *)vec, msg->msg_iovlen = num; result = sock_recvmsg(sock, msg, size, flags); set_fs(oldfs); return result; } static void sock_aio_dtor(struct kiocb *iocb) { kfree(iocb->private); } static ssize_t sock_sendpage(struct file *file, struct page *page, int offset, size_t size, loff_t *ppos, int more) { struct socket *sock; int flags; sock = 
file->private_data; flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT; if (more) flags |= MSG_MORE; return kernel_sendpage(sock, page, offset, size, flags); } static ssize_t sock_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct socket *sock = file->private_data; if (unlikely(!sock->ops->splice_read)) return -EINVAL; sock_update_classid(sock->sk); return sock->ops->splice_read(sock, ppos, pipe, len, flags); } static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb, struct sock_iocb *siocb) { if (!is_sync_kiocb(iocb)) { siocb = kmalloc(sizeof(*siocb), GFP_KERNEL); if (!siocb) return NULL; iocb->ki_dtor = sock_aio_dtor; } siocb->kiocb = iocb; iocb->private = siocb; return siocb; } static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb, struct file *file, const struct iovec *iov, unsigned long nr_segs) { struct socket *sock = file->private_data; size_t size = 0; int i; for (i = 0; i < nr_segs; i++) size += iov[i].iov_len; msg->msg_name = NULL; msg->msg_namelen = 0; msg->msg_control = NULL; msg->msg_controllen = 0; msg->msg_iov = (struct iovec *)iov; msg->msg_iovlen = nr_segs; msg->msg_flags = (file->f_flags & O_NONBLOCK) ? 
MSG_DONTWAIT : 0; return __sock_recvmsg(iocb, sock, msg, size, msg->msg_flags); } static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct sock_iocb siocb, *x; if (pos != 0) return -ESPIPE; if (iocb->ki_left == 0) /* Match SYS5 behaviour */ return 0; x = alloc_sock_iocb(iocb, &siocb); if (!x) return -ENOMEM; return do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs); } static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb, struct file *file, const struct iovec *iov, unsigned long nr_segs) { struct socket *sock = file->private_data; size_t size = 0; int i; for (i = 0; i < nr_segs; i++) size += iov[i].iov_len; msg->msg_name = NULL; msg->msg_namelen = 0; msg->msg_control = NULL; msg->msg_controllen = 0; msg->msg_iov = (struct iovec *)iov; msg->msg_iovlen = nr_segs; msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0; if (sock->type == SOCK_SEQPACKET) msg->msg_flags |= MSG_EOR; return __sock_sendmsg(iocb, sock, msg, size); } static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct sock_iocb siocb, *x; if (pos != 0) return -ESPIPE; x = alloc_sock_iocb(iocb, &siocb); if (!x) return -ENOMEM; return do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs); } /* * Atomic setting of ioctl hooks to avoid race * with module unload. 
 */

/*
 * Each hook below belongs to an optional module (bridge, 8021q, dlci);
 * the mutex serializes hook installation against callers so the module
 * cannot be unloaded while its hook is executing.
 */
static DEFINE_MUTEX(br_ioctl_mutex);
static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg) = NULL;

/* Install (or clear, with NULL) the bridge module's ioctl handler. */
void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *))
{
	mutex_lock(&br_ioctl_mutex);
	br_ioctl_hook = hook;
	mutex_unlock(&br_ioctl_mutex);
}
EXPORT_SYMBOL(brioctl_set);

static DEFINE_MUTEX(vlan_ioctl_mutex);
static int (*vlan_ioctl_hook) (struct net *, void __user *arg);

/* Install (or clear, with NULL) the 802.1q VLAN module's ioctl handler. */
void vlan_ioctl_set(int (*hook) (struct net *, void __user *))
{
	mutex_lock(&vlan_ioctl_mutex);
	vlan_ioctl_hook = hook;
	mutex_unlock(&vlan_ioctl_mutex);
}
EXPORT_SYMBOL(vlan_ioctl_set);

static DEFINE_MUTEX(dlci_ioctl_mutex);
static int (*dlci_ioctl_hook) (unsigned int, void __user *);

/* Install (or clear, with NULL) the frame-relay DLCI ioctl handler. */
void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
{
	mutex_lock(&dlci_ioctl_mutex);
	dlci_ioctl_hook = hook;
	mutex_unlock(&dlci_ioctl_mutex);
}
EXPORT_SYMBOL(dlci_ioctl_set);

/*
 * Dispatch an ioctl to the socket's protocol handler, falling back to
 * the network device layer for commands the protocol does not know.
 */
static long sock_do_ioctl(struct net *net, struct socket *sock,
			  unsigned int cmd, unsigned long arg)
{
	int err;
	void __user *argp = (void __user *)arg;

	err = sock->ops->ioctl(sock, cmd, arg);

	/*
	 * If this ioctl is unknown try to hand it down
	 * to the NIC driver.
	 */
	if (err == -ENOIOCTLCMD)
		err = dev_ioctl(net, cmd, argp);

	return err;
}

/*
 *	With an ioctl, arg may well be a user mode pointer, but we don't know
 *	what to do with it - that's up to the protocol still.
*/ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) { struct socket *sock; struct sock *sk; void __user *argp = (void __user *)arg; int pid, err; struct net *net; sock = file->private_data; sk = sock->sk; net = sock_net(sk); if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) { err = dev_ioctl(net, cmd, argp); } else #ifdef CONFIG_WEXT_CORE if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { err = dev_ioctl(net, cmd, argp); } else #endif switch (cmd) { case FIOSETOWN: case SIOCSPGRP: err = -EFAULT; if (get_user(pid, (int __user *)argp)) break; err = f_setown(sock->file, pid, 1); break; case FIOGETOWN: case SIOCGPGRP: err = put_user(f_getown(sock->file), (int __user *)argp); break; case SIOCGIFBR: case SIOCSIFBR: case SIOCBRADDBR: case SIOCBRDELBR: err = -ENOPKG; if (!br_ioctl_hook) request_module("bridge"); mutex_lock(&br_ioctl_mutex); if (br_ioctl_hook) err = br_ioctl_hook(net, cmd, argp); mutex_unlock(&br_ioctl_mutex); break; case SIOCGIFVLAN: case SIOCSIFVLAN: err = -ENOPKG; if (!vlan_ioctl_hook) request_module("8021q"); mutex_lock(&vlan_ioctl_mutex); if (vlan_ioctl_hook) err = vlan_ioctl_hook(net, argp); mutex_unlock(&vlan_ioctl_mutex); break; case SIOCADDDLCI: case SIOCDELDLCI: err = -ENOPKG; if (!dlci_ioctl_hook) request_module("dlci"); mutex_lock(&dlci_ioctl_mutex); if (dlci_ioctl_hook) err = dlci_ioctl_hook(cmd, argp); mutex_unlock(&dlci_ioctl_mutex); break; default: err = sock_do_ioctl(net, sock, cmd, arg); break; } return err; } int sock_create_lite(int family, int type, int protocol, struct socket **res) { int err; struct socket *sock = NULL; err = security_socket_create(family, type, protocol, 1); if (err) goto out; sock = sock_alloc(); if (!sock) { err = -ENOMEM; goto out; } sock->type = type; err = security_socket_post_create(sock, family, type, protocol, 1); if (err) goto out_release; out: *res = sock; return err; out_release: sock_release(sock); sock = NULL; goto out; } /* No kernel lock held - perfect */ static unsigned 
int sock_poll(struct file *file, poll_table *wait)
{
	struct socket *sock;

	/*
	 *      We can't return errors to poll, so it's either yes or no.
	 */
	sock = file->private_data;
	/* Delegate readiness reporting to the protocol's poll op. */
	return sock->ops->poll(file, sock, wait);
}

/* mmap on a socket file: only meaningful if the protocol provides it. */
static int sock_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct socket *sock = file->private_data;

	return sock->ops->mmap(file, sock, vma);
}

static int sock_close(struct inode *inode, struct file *filp)
{
	/*
	 *      It was possible the inode is NULL we were
	 *      closing an unfinished socket.
	 */

	if (!inode) {
		printk(KERN_DEBUG "sock_close: NULL inode\n");
		return 0;
	}
	sock_release(SOCKET_I(inode));
	return 0;
}

/*
 *	Update the socket async list
 *
 *	Fasync_list locking strategy.
 *
 *	1. fasync_list is modified only under process context socket lock
 *	   i.e. under semaphore.
 *	2. fasync_list is used under read_lock(&sk->sk_callback_lock)
 *	   or under socket lock
 */

static int sock_fasync(int fd, struct file *filp, int on)
{
	struct socket *sock = filp->private_data;
	struct sock *sk = sock->sk;

	if (sk == NULL)
		return -EINVAL;

	lock_sock(sk);

	fasync_helper(fd, filp, on, &sock->wq->fasync_list);

	/* Mirror the (non-)emptiness of the fasync list into the sock
	 * flag so the wakeup path can test it cheaply. */
	if (!sock->wq->fasync_list)
		sock_reset_flag(sk, SOCK_FASYNC);
	else
		sock_set_flag(sk, SOCK_FASYNC);

	release_sock(sk);
	return 0;
}

/* This function may be called only under socket lock or callback_lock or rcu_lock */

int sock_wake_async(struct socket *sock, int how, int band)
{
	struct socket_wq *wq;

	if (!sock)
		return -1;
	rcu_read_lock();
	wq = rcu_dereference(sock->wq);
	if (!wq || !wq->fasync_list) {
		rcu_read_unlock();
		return -1;
	}
	switch (how) {
	case SOCK_WAKE_WAITD:
		if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
			break;
		goto call_kill;
	case SOCK_WAKE_SPACE:
		if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags))
			break;
		/* fall through */
	case SOCK_WAKE_IO:
call_kill:
		kill_fasync(&wq->fasync_list, SIGIO, band);
		break;
	case SOCK_WAKE_URG:
		kill_fasync(&wq->fasync_list, SIGURG, band);
	}
	rcu_read_unlock();
	return 0;
}

static int __sock_create(struct net *net, int
type, int protocol,
			 struct socket **res, int kern)
{
	int err;
	struct socket *sock;
	const struct net_proto_family *pf;

	/*
	 *      Check protocol is in range
	 */
	if (family < 0 || family >= NPROTO)
		return -EAFNOSUPPORT;
	if (type < 0 || type >= SOCK_MAX)
		return -EINVAL;

	/* Compatibility.

	   This uglymoron is moved from INET layer to here to avoid
	   deadlock in module load.
	 */
	if (family == PF_INET && type == SOCK_PACKET) {
		static int warned;
		if (!warned) {
			warned = 1;
			printk(KERN_INFO "%s uses obsolete (PF_INET,SOCK_PACKET)\n",
			       current->comm);
		}
		family = PF_PACKET;
	}

	err = security_socket_create(family, type, protocol, kern);
	if (err)
		return err;

	/*
	 *	Allocate the socket and allow the family to set things up. if
	 *	the protocol is 0, the family is instructed to select an appropriate
	 *	default.
	 */
	sock = sock_alloc();
	if (!sock) {
		if (net_ratelimit())
			printk(KERN_WARNING "socket: no more sockets\n");
		return -ENFILE;	/* Not exactly a match, but it's the
				   closest posix thing */
	}

	sock->type = type;

#ifdef CONFIG_MODULES
	/* Attempt to load a protocol module if the find failed.
	 *
	 * 12/09/1996 Marcin: But! this makes REALLY only sense, if the user
	 * requested real, full-featured networking support upon configuration.
	 * Otherwise module support will break!
	 */
	if (net_families[family] == NULL)
		request_module("net-pf-%d", family);
#endif

	rcu_read_lock();
	pf = rcu_dereference(net_families[family]);
	err = -EAFNOSUPPORT;
	if (!pf)
		goto out_release;

	/*
	 * We will call the ->create function, that possibly is in a loadable
	 * module, so we have to bump that loadable module refcnt first.
	 */
	if (!try_module_get(pf->owner))
		goto out_release;

	/* Now protected by module ref count */
	rcu_read_unlock();

	err = pf->create(net, sock, protocol, kern);
	if (err < 0)
		goto out_module_put;

	/*
	 * Now to bump the refcnt of the [loadable] module that owns this
	 * socket at sock_release time we decrement its refcnt.
	 */
	if (!try_module_get(sock->ops->owner))
		goto out_module_busy;

	/*
	 * Now that we're done with the ->create function, the [loadable]
	 * module can have its refcnt decremented
	 */
	module_put(pf->owner);
	err = security_socket_post_create(sock, family, type, protocol, kern);
	if (err)
		goto out_sock_release;
	*res = sock;

	return 0;

out_module_busy:
	err = -EAFNOSUPPORT;
out_module_put:
	sock->ops = NULL;
	module_put(pf->owner);
out_sock_release:
	sock_release(sock);
	return err;

out_release:
	rcu_read_unlock();
	goto out_sock_release;
}

/* Create a socket in the caller's network namespace. */
int sock_create(int family, int type, int protocol, struct socket **res)
{
	return __sock_create(current->nsproxy->net_ns, family, type, protocol,
			     res, 0);
}

/* Create a kernel-internal socket (always in init_net, kern=1). */
int sock_create_kern(int family, int type, int protocol, struct socket **res)
{
	return __sock_create(&init_net, family, type, protocol, res, 1);
}

SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
{
	int retval;
	struct socket *sock;
	int flags;

	/* Check the SOCK_* constants for consistency.  */
	BUILD_BUG_ON(SOCK_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON((SOCK_MAX | SOCK_TYPE_MASK) != SOCK_TYPE_MASK);
	BUILD_BUG_ON(SOCK_CLOEXEC & SOCK_TYPE_MASK);
	BUILD_BUG_ON(SOCK_NONBLOCK & SOCK_TYPE_MASK);

	/* The type argument multiplexes SOCK_CLOEXEC/SOCK_NONBLOCK flags
	 * on top of the socket type. */
	flags = type & ~SOCK_TYPE_MASK;
	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	type &= SOCK_TYPE_MASK;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	retval = sock_create(family, type, protocol, &sock);
	if (retval < 0)
		goto out;

	retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
	if (retval < 0)
		goto out_release;

out:
	/* It may be already another descriptor 8) Not kernel problem. */
	return retval;

out_release:
	sock_release(sock);
	return retval;
}

/*
 *	Create a pair of connected sockets.
 */

SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
		int __user *, usockvec)
{
	struct socket *sock1, *sock2;
	int fd1, fd2, err;
	struct file *newfile1, *newfile2;
	int flags;

	flags = type & ~SOCK_TYPE_MASK;
	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	type &= SOCK_TYPE_MASK;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	/*
	 * Obtain the first socket and check if the underlying protocol
	 * supports the socketpair call.
	 */

	err = sock_create(family, type, protocol, &sock1);
	if (err < 0)
		goto out;

	err = sock_create(family, type, protocol, &sock2);
	if (err < 0)
		goto out_release_1;

	err = sock1->ops->socketpair(sock1, sock2);
	if (err < 0)
		goto out_release_both;

	fd1 = sock_alloc_file(sock1, &newfile1, flags);
	if (unlikely(fd1 < 0)) {
		err = fd1;
		goto out_release_both;
	}

	fd2 = sock_alloc_file(sock2, &newfile2, flags);
	if (unlikely(fd2 < 0)) {
		err = fd2;
		/* sock1 is now owned by newfile1; dropping the file also
		 * releases the socket, so only sock2 is freed directly. */
		fput(newfile1);
		put_unused_fd(fd1);
		sock_release(sock2);
		goto out;
	}

	audit_fd_pair(fd1, fd2);
	fd_install(fd1, newfile1);
	fd_install(fd2, newfile2);
	/* fd1 and fd2 may be already another descriptors.
	 * Not kernel problem.
	 */

	err = put_user(fd1, &usockvec[0]);
	if (!err)
		err = put_user(fd2, &usockvec[1]);
	if (!err)
		return 0;

	/* Copy-out failed: tear down both descriptors via close so the
	 * files (and their sockets) are released properly. */
	sys_close(fd2);
	sys_close(fd1);
	return err;

out_release_both:
	sock_release(sock2);
out_release_1:
	sock_release(sock1);

out:
	return err;
}

/*
 *	Bind a name to a socket. Nothing much to do here since it's
 *	the protocol's responsibility to handle the local address.
 *
 *	We move the socket address to kernel space before we call
 *	the protocol layer (having also checked the address is ok).
 */

SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock) {
		err = move_addr_to_kernel(umyaddr, addrlen,
					  (struct sockaddr *)&address);
		if (err >= 0) {
			err = security_socket_bind(sock,
						   (struct sockaddr *)&address,
						   addrlen);
			if (!err)
				err = sock->ops->bind(sock,
						      (struct sockaddr *)
						      &address, addrlen);
		}
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	Perform a listen. Basically, we allow the protocol to do anything
 *	necessary for a listen, and if that works, we mark the socket as
 *	ready for listening.
 */

SYSCALL_DEFINE2(listen, int, fd, int, backlog)
{
	struct socket *sock;
	int err, fput_needed;
	int somaxconn;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock) {
		/* Clamp the requested backlog to the per-namespace
		 * somaxconn sysctl. */
		somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
		if ((unsigned)backlog > somaxconn)
			backlog = somaxconn;

		err = security_socket_listen(sock, backlog);
		if (!err)
			err = sock->ops->listen(sock, backlog);

		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	For accept, we attempt to create a new socket, set up the link
 *	with the client, wake up the client, then return the new
 *	connected fd. We collect the address of the connector in kernel
 *	space and move it to user at the very end. This is unclean because
 *	we open the socket then return an error.
 *
 *	1003.1g adds the ability to recvmsg() to query connection pending
 *	status to recvmsg. We need to add that support in a way that's
 *	clean when we restructure accept also.
 */

SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
		int __user *, upeer_addrlen, int, flags)
{
	struct socket *sock, *newsock;
	struct file *newfile;
	int err, len, newfd, fput_needed;
	struct sockaddr_storage address;

	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	err = -ENFILE;
	if (!(newsock = sock_alloc()))
		goto out_put;

	newsock->type = sock->type;
	newsock->ops = sock->ops;

	/*
	 * We don't need try_module_get here, as the listening socket (sock)
	 * has the protocol module (sock->ops->owner) held.
	 */
	__module_get(newsock->ops->owner);

	newfd = sock_alloc_file(newsock, &newfile, flags);
	if (unlikely(newfd < 0)) {
		err = newfd;
		sock_release(newsock);
		goto out_put;
	}

	err = security_socket_accept(sock, newsock);
	if (err)
		goto out_fd;

	err = sock->ops->accept(sock, newsock, sock->file->f_flags);
	if (err < 0)
		goto out_fd;

	if (upeer_sockaddr) {
		if (newsock->ops->getname(newsock, (struct sockaddr *)&address,
					  &len, 2) < 0) {
			err = -ECONNABORTED;
			goto out_fd;
		}
		err = move_addr_to_user((struct sockaddr *)&address,
					len, upeer_sockaddr, upeer_addrlen);
		if (err < 0)
			goto out_fd;
	}

	/* File flags are not inherited via accept() unlike another OSes. */

	fd_install(newfd, newfile);
	err = newfd;

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
out_fd:
	/* Dropping newfile also releases newsock. */
	fput(newfile);
	put_unused_fd(newfd);
	goto out_put;
}

/* Plain accept() is accept4() with no flags. */
SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr,
		int __user *, upeer_addrlen)
{
	return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0);
}

/*
 *	Attempt to connect to a socket with the server address.  The address
 *	is in user space so we verify it is OK and move it to kernel space.
 *
 *	For 1003.1g we need to add clean support for a bind to AF_UNSPEC to
 *	break bindings
 *
 *	NOTE: 1003.1g draft 6.3 is broken with respect to AX.25/NetROM and
 *	other SEQPACKET protocols that take time to connect() as it doesn't
 *	include the -EINPROGRESS status for such sockets.
 */

SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
		int, addrlen)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;
	err = move_addr_to_kernel(uservaddr, addrlen,
				  (struct sockaddr *)&address);
	if (err < 0)
		goto out_put;

	err =
	    security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
	if (err)
		goto out_put;

	err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen,
				 sock->file->f_flags);
out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Get the local address ('name') of a socket object. Move the obtained
 *	name to user space.
 */

SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
		int __user *, usockaddr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int len, err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	err = security_socket_getsockname(sock);
	if (err)
		goto out_put;

	/* getname(..., 0) fills in the local address and its length. */
	err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0);
	if (err)
		goto out_put;
	err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr,
				usockaddr_len);

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Get the remote address ('name') of a socket object. Move the obtained
 *	name to user space.
 */

SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
		int __user *, usockaddr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int len, err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_getpeername(sock);
		if (err) {
			fput_light(sock->file, fput_needed);
			return err;
		}

		/* getname(..., 1) fills in the peer address. */
		err =
		    sock->ops->getname(sock, (struct sockaddr *)&address, &len,
				       1);
		if (!err)
			err = move_addr_to_user((struct sockaddr *)&address, len,
						usockaddr, usockaddr_len);
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	Send a datagram to a given address. We move the address into kernel
 *	space and check the user space data area is readable before invoking
 *	the protocol.
 */

SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
		unsigned, flags, struct sockaddr __user *, addr,
		int, addr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err;
	struct msghdr msg;
	struct iovec iov;
	int fput_needed;

	/* sendmsg() paths work with int lengths; clamp oversized sizes. */
	if (len > INT_MAX)
		len = INT_MAX;
	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	/* Build a single-element msghdr around the user buffer. */
	iov.iov_base = buff;
	iov.iov_len = len;
	msg.msg_name = NULL;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	if (addr) {
		err = move_addr_to_kernel(addr, addr_len,
					  (struct sockaddr *)&address);
		if (err < 0)
			goto out_put;
		msg.msg_name = (struct sockaddr *)&address;
		msg.msg_namelen = addr_len;
	}
	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;
	msg.msg_flags = flags;
	err = sock_sendmsg(sock, &msg, len);

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Send a datagram down a socket.
 */

SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
		unsigned, flags)
{
	return sys_sendto(fd, buff, len, flags, NULL, 0);
}

/*
 *	Receive a frame from the socket and optionally record the address of the
 *	sender.
We verify the buffers are writable and if needed move the
 *	sender address from kernel to user space.
 */

SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
		unsigned, flags, struct sockaddr __user *, addr,
		int __user *, addr_len)
{
	struct socket *sock;
	struct iovec iov;
	struct msghdr msg;
	struct sockaddr_storage address;
	int err, err2;
	int fput_needed;

	if (size > INT_MAX)
		size = INT_MAX;
	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_iovlen = 1;
	msg.msg_iov = &iov;
	iov.iov_len = size;
	iov.iov_base = ubuf;
	msg.msg_name = (struct sockaddr *)&address;
	msg.msg_namelen = sizeof(address);
	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;
	err = sock_recvmsg(sock, &msg, size, flags);

	/* Copy the sender address out only on successful receive; an
	 * address copy-out failure overrides the byte count. */
	if (err >= 0 && addr != NULL) {
		err2 = move_addr_to_user((struct sockaddr *)&address,
					 msg.msg_namelen, addr, addr_len);
		if (err2 < 0)
			err = err2;
	}

	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Receive a datagram from a socket.
 */

asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size,
			 unsigned flags)
{
	return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL);
}

/*
 *	Set a socket option. Because we don't know the option lengths we have
 *	to pass the user mode parameter for the protocols to sort out.
 */

SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
		char __user *, optval, int, optlen)
{
	int err, fput_needed;
	struct socket *sock;

	if (optlen < 0)
		return -EINVAL;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_setsockopt(sock, level, optname);
		if (err)
			goto out_put;

		/* SOL_SOCKET options are generic; others go to the
		 * protocol's handler. */
		if (level == SOL_SOCKET)
			err =
			    sock_setsockopt(sock, level, optname, optval,
					    optlen);
		else
			err =
			    sock->ops->setsockopt(sock, level, optname, optval,
						  optlen);
out_put:
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	Get a socket option.
Because we don't know the option lengths we have
 *	to pass a user mode parameter for the protocols to sort out.
 */

SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
		char __user *, optval, int __user *, optlen)
{
	int err, fput_needed;
	struct socket *sock;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_getsockopt(sock, level, optname);
		if (err)
			goto out_put;

		if (level == SOL_SOCKET)
			err =
			    sock_getsockopt(sock, level, optname, optval,
					    optlen);
		else
			err =
			    sock->ops->getsockopt(sock, level, optname, optval,
						  optlen);
out_put:
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	Shutdown a socket.
 */

SYSCALL_DEFINE2(shutdown, int, fd, int, how)
{
	int err, fput_needed;
	struct socket *sock;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_shutdown(sock, how);
		if (!err)
			err = sock->ops->shutdown(sock, how);
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/* A couple of helpful macros for getting the address of the 32/64 bit
 * fields which are the same type (int / unsigned) on our platforms.
 * NOTE: they expect a local `flags` variable to be in scope at the
 * point of expansion. */
#define COMPAT_MSG(msg, member)	((MSG_CMSG_COMPAT & flags) ? \
	&msg##_compat->member : &msg->member)
#define COMPAT_NAMELEN(msg)	COMPAT_MSG(msg, msg_namelen)
#define COMPAT_FLAGS(msg)	COMPAT_MSG(msg, msg_flags)

/*
 *	BSD sendmsg interface
 */

SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
{
	struct compat_msghdr __user *msg_compat =
	    (struct compat_msghdr __user *)msg;
	struct socket *sock;
	struct sockaddr_storage address;
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	unsigned char ctl[sizeof(struct cmsghdr) + 20]
	    __attribute__ ((aligned(sizeof(__kernel_size_t))));
	/* 20 is size of ipv6_pktinfo */
	unsigned char *ctl_buf = ctl;
	struct msghdr msg_sys;
	int err, ctl_len, iov_size, total_len;
	int fput_needed;

	err = -EFAULT;
	if (MSG_CMSG_COMPAT & flags) {
		if (get_compat_msghdr(&msg_sys, msg_compat))
			return -EFAULT;
	} else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr)))
		return -EFAULT;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	/* do not move before msg_sys is valid */
	err = -EMSGSIZE;
	if (msg_sys.msg_iovlen > UIO_MAXIOV)
		goto out_put;

	/* Check whether to allocate the iovec area */
	err = -ENOMEM;
	iov_size = msg_sys.msg_iovlen * sizeof(struct iovec);
	if (msg_sys.msg_iovlen > UIO_FASTIOV) {
		iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
		if (!iov)
			goto out_put;
	}

	/* This will also move the address data into kernel space */
	if (MSG_CMSG_COMPAT & flags) {
		err = verify_compat_iovec(&msg_sys, iov,
					  (struct sockaddr *)&address,
					  VERIFY_READ);
	} else
		err = verify_iovec(&msg_sys, iov,
				   (struct sockaddr *)&address,
				   VERIFY_READ);
	if (err < 0)
		goto out_freeiov;
	total_len = err;

	err = -ENOBUFS;

	if (msg_sys.msg_controllen > INT_MAX)
		goto out_freeiov;
	ctl_len = msg_sys.msg_controllen;
	if ((MSG_CMSG_COMPAT & flags) && ctl_len) {
		err =
		    cmsghdr_from_user_compat_to_kern(&msg_sys, sock->sk, ctl,
						     sizeof(ctl));
		if (err)
			goto out_freeiov;
		ctl_buf = msg_sys.msg_control;
		ctl_len = msg_sys.msg_controllen;
	} else if (ctl_len) {
		/* Control data larger than the on-stack buffer needs a
		 * kernel allocation. */
		if (ctl_len > sizeof(ctl)) {
			ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL);
			if (ctl_buf == NULL)
				goto out_freeiov;
		}
		err = -EFAULT;
		/*
		 * Careful! Before this, msg_sys.msg_control contains a user pointer.
		 * Afterwards, it will be a kernel pointer. Thus the compiler-assisted
		 * checking falls down on this.
		 */
		if (copy_from_user(ctl_buf,
				   (void __user *)msg_sys.msg_control,
				   ctl_len))
			goto out_freectl;
		msg_sys.msg_control = ctl_buf;
	}
	msg_sys.msg_flags = flags;

	if (sock->file->f_flags & O_NONBLOCK)
		msg_sys.msg_flags |= MSG_DONTWAIT;
	err = sock_sendmsg(sock, &msg_sys, total_len);

out_freectl:
	if (ctl_buf != ctl)
		sock_kfree_s(sock->sk, ctl_buf, ctl_len);
out_freeiov:
	if (iov != iovstack)
		sock_kfree_s(sock->sk, iov, iov_size);
out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 * Common receive path for recvmsg()/recvmmsg(). `nosec` skips the LSM
 * hook for datagrams after the first one in an mmsg batch.
 */
static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
			 struct msghdr *msg_sys, unsigned flags, int nosec)
{
	struct compat_msghdr __user *msg_compat =
	    (struct compat_msghdr __user *)msg;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	unsigned long cmsg_ptr;
	int err, iov_size, total_len, len;

	/* kernel mode address */
	struct sockaddr_storage addr;

	/* user mode address pointers */
	struct sockaddr __user *uaddr;
	int __user *uaddr_len;

	if (MSG_CMSG_COMPAT & flags) {
		if (get_compat_msghdr(msg_sys, msg_compat))
			return -EFAULT;
	} else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
		return -EFAULT;

	err = -EMSGSIZE;
	if (msg_sys->msg_iovlen > UIO_MAXIOV)
		goto out;

	/* Check whether to allocate the iovec area */
	err = -ENOMEM;
	iov_size = msg_sys->msg_iovlen * sizeof(struct iovec);
	if (msg_sys->msg_iovlen > UIO_FASTIOV) {
		iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
		if (!iov)
			goto out;
	}

	/*
	 *	Save the user-mode address (verify_iovec will change the
	 *	kernel msghdr to use the kernel address space)
	 */

	uaddr = (__force void __user *)msg_sys->msg_name;
	uaddr_len = COMPAT_NAMELEN(msg);
	if (MSG_CMSG_COMPAT & flags) {
		err = verify_compat_iovec(msg_sys, iov,
					  (struct sockaddr *)&addr,
VERIFY_WRITE);
	} else
		err = verify_iovec(msg_sys, iov,
				   (struct sockaddr *)&addr,
				   VERIFY_WRITE);
	if (err < 0)
		goto out_freeiov;
	total_len = err;

	/* Remember the original control pointer so we can report how much
	 * cmsg data was actually written. */
	cmsg_ptr = (unsigned long)msg_sys->msg_control;
	msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;
	err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys,
							  total_len, flags);
	if (err < 0)
		goto out_freeiov;
	len = err;

	if (uaddr != NULL) {
		err = move_addr_to_user((struct sockaddr *)&addr,
					msg_sys->msg_namelen, uaddr,
					uaddr_len);
		if (err < 0)
			goto out_freeiov;
	}
	err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT),
			 COMPAT_FLAGS(msg));
	if (err)
		goto out_freeiov;
	if (MSG_CMSG_COMPAT & flags)
		err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr,
				 &msg_compat->msg_controllen);
	else
		err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr,
				 &msg->msg_controllen);
	if (err)
		goto out_freeiov;
	err = len;

out_freeiov:
	if (iov != iovstack)
		sock_kfree_s(sock->sk, iov, iov_size);
out:
	return err;
}

/*
 *	BSD recvmsg interface
 */

SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
		unsigned int, flags)
{
	int fput_needed, err;
	struct msghdr msg_sys;
	struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);

	if (!sock)
		goto out;

	err = __sys_recvmsg(sock, msg, &msg_sys, flags, 0);

	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *     Linux recvmmsg interface
 */

int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
		   unsigned int flags, struct timespec *timeout)
{
	int fput_needed, err, datagrams;
	struct socket *sock;
	struct mmsghdr __user *entry;
	struct compat_mmsghdr __user *compat_entry;
	struct msghdr msg_sys;
	struct timespec end_time;

	if (timeout &&
	    poll_select_set_timeout(&end_time, timeout->tv_sec,
				    timeout->tv_nsec))
		return -EINVAL;

	datagrams = 0;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		return err;

	err = sock_error(sock->sk);
	if (err)
		goto out_put;

	entry = mmsg;
compat_entry = (struct compat_mmsghdr __user *)mmsg;

	while (datagrams < vlen) {
		/*
		 * No need to ask LSM for more than the first datagram.
		 */
		if (MSG_CMSG_COMPAT & flags) {
			err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
					    &msg_sys, flags, datagrams);
			if (err < 0)
				break;
			err = __put_user(err, &compat_entry->msg_len);
			++compat_entry;
		} else {
			err = __sys_recvmsg(sock, (struct msghdr __user *)entry,
					    &msg_sys, flags, datagrams);
			if (err < 0)
				break;
			err = put_user(err, &entry->msg_len);
			++entry;
		}

		if (err)
			break;
		++datagrams;

		/* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
		if (flags & MSG_WAITFORONE)
			flags |= MSG_DONTWAIT;

		if (timeout) {
			/* timeout is rewritten in place to hold the time
			 * remaining for the caller. */
			ktime_get_ts(timeout);
			*timeout = timespec_sub(end_time, *timeout);
			if (timeout->tv_sec < 0) {
				timeout->tv_sec = timeout->tv_nsec = 0;
				break;
			}

			/* Timeout, return less than vlen datagrams */
			if (timeout->tv_nsec == 0 && timeout->tv_sec == 0)
				break;
		}

		/* Out of band data, return right away */
		if (msg_sys.msg_flags & MSG_OOB)
			break;
	}

out_put:
	fput_light(sock->file, fput_needed);

	if (err == 0)
		return datagrams;

	if (datagrams != 0) {
		/*
		 * We may return less entries than requested (vlen) if the
		 * sock is non block and there aren't enough datagrams...
		 */
		if (err != -EAGAIN) {
			/*
			 * ... or if recvmsg returns an error after we
			 * received some datagrams, where we record the
			 * error to return on the next call or if the
			 * app asks about it using getsockopt(SO_ERROR).
			 */
			sock->sk->sk_err = -err;
		}

		return datagrams;
	}

	return err;
}

SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
		unsigned int, vlen, unsigned int, flags,
		struct timespec __user *, timeout)
{
	int datagrams;
	struct timespec timeout_sys;

	if (!timeout)
		return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL);

	if (copy_from_user(&timeout_sys, timeout, sizeof(timeout_sys)))
		return -EFAULT;

	datagrams = __sys_recvmmsg(fd, mmsg, vlen, flags, &timeout_sys);

	/* Copy the remaining timeout back; a copy failure turns a
	 * successful receive into -EFAULT. */
	if (datagrams > 0 &&
	    copy_to_user(timeout, &timeout_sys, sizeof(timeout_sys)))
		datagrams = -EFAULT;

	return datagrams;
}

#ifdef __ARCH_WANT_SYS_SOCKETCALL
/* Argument list sizes for sys_socketcall */
#define AL(x) ((x) * sizeof(unsigned long))
static const unsigned char nargs[20] = {
	AL(0), AL(3), AL(3), AL(3), AL(2), AL(3),
	AL(3), AL(3), AL(4), AL(4), AL(4), AL(6),
	AL(6), AL(2), AL(5), AL(5), AL(3), AL(3),
	AL(4), AL(5)
};

#undef AL

/*
 *	System call vectors.
 *
 *	Argument checking cleaned up. Saved 20% in size.
 *  This function doesn't need to set the kernel lock because
 *  it is set by the callees.
 */

SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
{
	unsigned long a[6];
	unsigned long a0, a1;
	int err;
	unsigned int len;

	if (call < 1 || call > SYS_RECVMMSG)
		return -EINVAL;

	len = nargs[call];
	if (len > sizeof(a))
		return -EINVAL;

	/* copy_from_user should be SMP safe.
 */
	if (copy_from_user(a, args, len))
		return -EFAULT;

	audit_socketcall(nargs[call] / sizeof(unsigned long), a);

	a0 = a[0];
	a1 = a[1];

	/* Demultiplex to the individual socket syscalls. */
	switch (call) {
	case SYS_SOCKET:
		err = sys_socket(a0, a1, a[2]);
		break;
	case SYS_BIND:
		err = sys_bind(a0, (struct sockaddr __user *)a1, a[2]);
		break;
	case SYS_CONNECT:
		err = sys_connect(a0, (struct sockaddr __user *)a1, a[2]);
		break;
	case SYS_LISTEN:
		err = sys_listen(a0, a1);
		break;
	case SYS_ACCEPT:
		err = sys_accept4(a0, (struct sockaddr __user *)a1,
				  (int __user *)a[2], 0);
		break;
	case SYS_GETSOCKNAME:
		err =
		    sys_getsockname(a0, (struct sockaddr __user *)a1,
				    (int __user *)a[2]);
		break;
	case SYS_GETPEERNAME:
		err =
		    sys_getpeername(a0, (struct sockaddr __user *)a1,
				    (int __user *)a[2]);
		break;
	case SYS_SOCKETPAIR:
		err = sys_socketpair(a0, a1, a[2], (int __user *)a[3]);
		break;
	case SYS_SEND:
		err = sys_send(a0, (void __user *)a1, a[2], a[3]);
		break;
	case SYS_SENDTO:
		err = sys_sendto(a0, (void __user *)a1, a[2], a[3],
				 (struct sockaddr __user *)a[4], a[5]);
		break;
	case SYS_RECV:
		err = sys_recv(a0, (void __user *)a1, a[2], a[3]);
		break;
	case SYS_RECVFROM:
		err = sys_recvfrom(a0, (void __user *)a1, a[2], a[3],
				   (struct sockaddr __user *)a[4],
				   (int __user *)a[5]);
		break;
	case SYS_SHUTDOWN:
		err = sys_shutdown(a0, a1);
		break;
	case SYS_SETSOCKOPT:
		err = sys_setsockopt(a0, a1, a[2], (char __user *)a[3], a[4]);
		break;
	case SYS_GETSOCKOPT:
		err =
		    sys_getsockopt(a0, a1, a[2], (char __user *)a[3],
				   (int __user *)a[4]);
		break;
	case SYS_SENDMSG:
		err = sys_sendmsg(a0, (struct msghdr __user *)a1, a[2]);
		break;
	case SYS_RECVMSG:
		err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]);
		break;
	case SYS_RECVMMSG:
		err = sys_recvmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3],
				   (struct timespec __user *)a[4]);
		break;
	case SYS_ACCEPT4:
		err = sys_accept4(a0, (struct sockaddr __user *)a1,
				  (int __user *)a[2], a[3]);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

#endif				/* __ARCH_WANT_SYS_SOCKETCALL */

/**
 *	sock_register - add a socket protocol handler
 *	@ops:
description of protocol
 *
 *	This function is called by a protocol handler that wants to
 *	advertise its address family, and have it linked into the
 *	socket interface. The value ops->family corresponds to the
 *	socket system call protocol family.
 */
int sock_register(const struct net_proto_family *ops)
{
	int err;

	if (ops->family >= NPROTO) {
		printk(KERN_CRIT "protocol %d >= NPROTO(%d)\n", ops->family,
		       NPROTO);
		return -ENOBUFS;
	}

	spin_lock(&net_family_lock);
	if (net_families[ops->family])
		err = -EEXIST;
	else {
		net_families[ops->family] = ops;
		err = 0;
	}
	spin_unlock(&net_family_lock);

	printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family);
	return err;
}

/**
 *	sock_unregister - remove a protocol handler
 *	@family: protocol family to remove
 *
 *	This function is called by a protocol handler that wants to
 *	remove its address family, and have it unlinked from the
 *	new socket creation.
 *
 *	If protocol handler is a module, then it can use module reference
 *	counts to protect against new references. If protocol handler is not
 *	a module then it needs to provide its own protection in
 *	the ops->create routine.
 */
void sock_unregister(int family)
{
	BUG_ON(family < 0 || family >= NPROTO);

	spin_lock(&net_family_lock);
	net_families[family] = NULL;
	spin_unlock(&net_family_lock);

	/* Wait for in-flight RCU readers of net_families[] to drain
	 * before the caller may free the ops structure. */
	synchronize_rcu();

	printk(KERN_INFO "NET: Unregistered protocol family %d\n", family);
}

static int __init sock_init(void)
{
	/*
	 *      Initialize sock SLAB cache.
	 */

	sk_init();

	/*
	 *      Initialize skbuff SLAB cache
	 */
	skb_init();

	/*
	 *      Initialize the protocols module.
	 */

	init_inodecache();
	register_filesystem(&sock_fs_type);
	sock_mnt = kern_mount(&sock_fs_type);

	/* The real protocol initialization is performed in later initcalls.
	 */

#ifdef CONFIG_NETFILTER
	netfilter_init();
#endif

	return 0;
}

/*
 *	This call is saved as the address family initializer runs before
 *	protocol modules — keep it early.
 */
core_initcall(sock_init);	/* early initcall */

#ifdef CONFIG_PROC_FS
/* Report the number of sockets in use, summed over all CPUs. */
void socket_seq_show(struct seq_file *seq)
{
	int cpu;
	int counter = 0;

	for_each_possible_cpu(cpu)
	    counter += per_cpu(sockets_in_use, cpu);

	/* It can be negative, by the way. 8) */
	if (counter < 0)
		counter = 0;

	seq_printf(seq, "sockets: used %d\n", counter);
}
#endif				/* CONFIG_PROC_FS */

#ifdef CONFIG_COMPAT
/* 32-bit SIOCGSTAMP: run the 64-bit ioctl into a kernel timeval, then
 * copy out in compat layout. */
static int do_siocgstamp(struct net *net, struct socket *sock,
			 unsigned int cmd, struct compat_timeval __user *up)
{
	mm_segment_t old_fs = get_fs();
	struct timeval ktv;
	int err;

	set_fs(KERNEL_DS);
	err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
	set_fs(old_fs);
	if (!err) {
		err = put_user(ktv.tv_sec, &up->tv_sec);
		err |= __put_user(ktv.tv_usec, &up->tv_usec);
	}
	return err;
}

/* 32-bit SIOCGSTAMPNS: same as above for a timespec. */
static int do_siocgstampns(struct net *net, struct socket *sock,
			   unsigned int cmd, struct compat_timespec __user *up)
{
	mm_segment_t old_fs = get_fs();
	struct timespec kts;
	int err;

	set_fs(KERNEL_DS);
	err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
	set_fs(old_fs);
	if (!err) {
		err = put_user(kts.tv_sec, &up->tv_sec);
		err |= __put_user(kts.tv_nsec, &up->tv_nsec);
	}
	return err;
}

/* Compat SIOCGIFNAME: bounce the request through a native ifreq on the
 * compat user stack. */
static int dev_ifname32(struct net *net, struct compat_ifreq __user *uifr32)
{
	struct ifreq __user *uifr;
	int err;

	uifr = compat_alloc_user_space(sizeof(struct ifreq));
	if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
		return -EFAULT;

	err = dev_ioctl(net, SIOCGIFNAME, uifr);
	if (err)
		return err;

	if (copy_in_user(uifr32, uifr, sizeof(struct compat_ifreq)))
		return -EFAULT;

	return 0;
}

/* Compat SIOCGIFCONF: translate a 32-bit ifconf/ifreq array to native
 * layout, do the ioctl, and translate the results back. */
static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
{
	struct compat_ifconf ifc32;
	struct ifconf ifc;
	struct ifconf __user *uifc;
	struct compat_ifreq __user *ifr32;
	struct ifreq __user *ifr;
	unsigned int i, j;
	int err;

	if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf)))
		return -EFAULT;

	if (ifc32.ifcbuf == 0) {
		/* NULL buffer: the caller only wants the needed length. */
		ifc32.ifc_len = 0;
		ifc.ifc_len = 0;
		ifc.ifc_req = NULL;
		uifc = compat_alloc_user_space(sizeof(struct ifconf));
	} else {
		size_t len = ((ifc32.ifc_len / sizeof(struct compat_ifreq)) + 1) *
			sizeof(struct ifreq);
		uifc = compat_alloc_user_space(sizeof(struct ifconf) + len);
		ifc.ifc_len = len;
		ifr = ifc.ifc_req = (void __user *)(uifc + 1);
		ifr32 = compat_ptr(ifc32.ifcbuf);
		for (i = 0; i < ifc32.ifc_len; i += sizeof(struct compat_ifreq)) {
			if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq)))
				return -EFAULT;
			ifr++;
			ifr32++;
		}
	}
	if (copy_to_user(uifc, &ifc, sizeof(struct ifconf)))
		return -EFAULT;

	err = dev_ioctl(net, SIOCGIFCONF, uifc);
	if (err)
		return err;

	if (copy_from_user(&ifc, uifc, sizeof(struct ifconf)))
		return -EFAULT;

	ifr = ifc.ifc_req;
	ifr32 = compat_ptr(ifc32.ifcbuf);
	for (i = 0, j = 0;
	     i + sizeof(struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len;
	     i += sizeof(struct compat_ifreq), j += sizeof(struct ifreq)) {
		if (copy_in_user(ifr32, ifr, sizeof(struct compat_ifreq)))
			return -EFAULT;
		ifr32++;
		ifr++;
	}

	if (ifc32.ifcbuf == 0) {
		/* Translate from 64-bit structure multiple to
		 * a 32-bit one.
*/ i = ifc.ifc_len; i = ((i / sizeof(struct ifreq)) * sizeof(struct compat_ifreq)); ifc32.ifc_len = i; } else { ifc32.ifc_len = i; } if (copy_to_user(uifc32, &ifc32, sizeof(struct compat_ifconf))) return -EFAULT; return 0; } static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) { struct ifreq __user *ifr; u32 data; void __user *datap; ifr = compat_alloc_user_space(sizeof(*ifr)); if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; if (get_user(data, &ifr32->ifr_ifru.ifru_data)) return -EFAULT; datap = compat_ptr(data); if (put_user(datap, &ifr->ifr_ifru.ifru_data)) return -EFAULT; return dev_ioctl(net, SIOCETHTOOL, ifr); } static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32) { void __user *uptr; compat_uptr_t uptr32; struct ifreq __user *uifr; uifr = compat_alloc_user_space(sizeof (*uifr)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; if (get_user(uptr32, &uifr32->ifr_settings.ifs_ifsu)) return -EFAULT; uptr = compat_ptr(uptr32); if (put_user(uptr, &uifr->ifr_settings.ifs_ifsu.raw_hdlc)) return -EFAULT; return dev_ioctl(net, SIOCWANDEV, uifr); } static int bond_ioctl(struct net *net, unsigned int cmd, struct compat_ifreq __user *ifr32) { struct ifreq kifr; struct ifreq __user *uifr; mm_segment_t old_fs; int err; u32 data; void __user *datap; switch (cmd) { case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDCHANGEACTIVE: if (copy_from_user(&kifr, ifr32, sizeof(struct compat_ifreq))) return -EFAULT; old_fs = get_fs(); set_fs (KERNEL_DS); err = dev_ioctl(net, cmd, &kifr); set_fs (old_fs); return err; case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(&uifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; if (get_user(data, &ifr32->ifr_ifru.ifru_data)) return -EFAULT; datap = compat_ptr(data); if (put_user(datap, &uifr->ifr_ifru.ifru_data)) return -EFAULT; 
return dev_ioctl(net, cmd, uifr); default: return -EINVAL; } } static int siocdevprivate_ioctl(struct net *net, unsigned int cmd, struct compat_ifreq __user *u_ifreq32) { struct ifreq __user *u_ifreq64; char tmp_buf[IFNAMSIZ]; void __user *data64; u32 data32; if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]), IFNAMSIZ)) return -EFAULT; if (__get_user(data32, &u_ifreq32->ifr_ifru.ifru_data)) return -EFAULT; data64 = compat_ptr(data32); u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64)); /* Don't check these user accesses, just let that get trapped * in the ioctl handler instead. */ if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0], IFNAMSIZ)) return -EFAULT; if (__put_user(data64, &u_ifreq64->ifr_ifru.ifru_data)) return -EFAULT; return dev_ioctl(net, cmd, u_ifreq64); } static int dev_ifsioc(struct net *net, struct socket *sock, unsigned int cmd, struct compat_ifreq __user *uifr32) { struct ifreq __user *uifr; int err; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(*uifr32))) return -EFAULT; err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr); if (!err) { switch (cmd) { case SIOCGIFFLAGS: case SIOCGIFMETRIC: case SIOCGIFMTU: case SIOCGIFMEM: case SIOCGIFHWADDR: case SIOCGIFINDEX: case SIOCGIFADDR: case SIOCGIFBRDADDR: case SIOCGIFDSTADDR: case SIOCGIFNETMASK: case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCGMIIPHY: case SIOCGMIIREG: if (copy_in_user(uifr32, uifr, sizeof(*uifr32))) err = -EFAULT; break; } } return err; } static int compat_sioc_ifmap(struct net *net, unsigned int cmd, struct compat_ifreq __user *uifr32) { struct ifreq ifr; struct compat_ifmap __user *uifmap32; mm_segment_t old_fs; int err; uifmap32 = &uifr32->ifr_ifru.ifru_map; err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name)); err |= __get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); err |= __get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); err |= __get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); err 
|= __get_user(ifr.ifr_map.irq, &uifmap32->irq); err |= __get_user(ifr.ifr_map.dma, &uifmap32->dma); err |= __get_user(ifr.ifr_map.port, &uifmap32->port); if (err) return -EFAULT; old_fs = get_fs(); set_fs (KERNEL_DS); err = dev_ioctl(net, cmd, (void __user *)&ifr); set_fs (old_fs); if (cmd == SIOCGIFMAP && !err) { err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name)); err |= __put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); err |= __put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); err |= __put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); err |= __put_user(ifr.ifr_map.irq, &uifmap32->irq); err |= __put_user(ifr.ifr_map.dma, &uifmap32->dma); err |= __put_user(ifr.ifr_map.port, &uifmap32->port); if (err) err = -EFAULT; } return err; } static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uifr32) { void __user *uptr; compat_uptr_t uptr32; struct ifreq __user *uifr; uifr = compat_alloc_user_space(sizeof (*uifr)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; if (get_user(uptr32, &uifr32->ifr_data)) return -EFAULT; uptr = compat_ptr(uptr32); if (put_user(uptr, &uifr->ifr_data)) return -EFAULT; return dev_ioctl(net, SIOCSHWTSTAMP, uifr); } struct rtentry32 { u32 rt_pad1; struct sockaddr rt_dst; /* target address */ struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */ struct sockaddr rt_genmask; /* target network mask (IP) */ unsigned short rt_flags; short rt_pad2; u32 rt_pad3; unsigned char rt_tos; unsigned char rt_class; short rt_pad4; short rt_metric; /* +1 for binary compatibility! 
*/ /* char * */ u32 rt_dev; /* forcing the device at add */ u32 rt_mtu; /* per route MTU/Window */ u32 rt_window; /* Window clamping */ unsigned short rt_irtt; /* Initial RTT */ }; struct in6_rtmsg32 { struct in6_addr rtmsg_dst; struct in6_addr rtmsg_src; struct in6_addr rtmsg_gateway; u32 rtmsg_type; u16 rtmsg_dst_len; u16 rtmsg_src_len; u32 rtmsg_metric; u32 rtmsg_info; u32 rtmsg_flags; s32 rtmsg_ifindex; }; static int routing_ioctl(struct net *net, struct socket *sock, unsigned int cmd, void __user *argp) { int ret; void *r = NULL; struct in6_rtmsg r6; struct rtentry r4; char devname[16]; u32 rtdev; mm_segment_t old_fs = get_fs(); if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */ struct in6_rtmsg32 __user *ur6 = argp; ret = copy_from_user (&r6.rtmsg_dst, &(ur6->rtmsg_dst), 3 * sizeof(struct in6_addr)); ret |= __get_user (r6.rtmsg_type, &(ur6->rtmsg_type)); ret |= __get_user (r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len)); ret |= __get_user (r6.rtmsg_src_len, &(ur6->rtmsg_src_len)); ret |= __get_user (r6.rtmsg_metric, &(ur6->rtmsg_metric)); ret |= __get_user (r6.rtmsg_info, &(ur6->rtmsg_info)); ret |= __get_user (r6.rtmsg_flags, &(ur6->rtmsg_flags)); ret |= __get_user (r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex)); r = (void *) &r6; } else { /* ipv4 */ struct rtentry32 __user *ur4 = argp; ret = copy_from_user (&r4.rt_dst, &(ur4->rt_dst), 3 * sizeof(struct sockaddr)); ret |= __get_user (r4.rt_flags, &(ur4->rt_flags)); ret |= __get_user (r4.rt_metric, &(ur4->rt_metric)); ret |= __get_user (r4.rt_mtu, &(ur4->rt_mtu)); ret |= __get_user (r4.rt_window, &(ur4->rt_window)); ret |= __get_user (r4.rt_irtt, &(ur4->rt_irtt)); ret |= __get_user (rtdev, &(ur4->rt_dev)); if (rtdev) { ret |= copy_from_user (devname, compat_ptr(rtdev), 15); r4.rt_dev = devname; devname[15] = 0; } else r4.rt_dev = NULL; r = (void *) &r4; } if (ret) { ret = -EFAULT; goto out; } set_fs (KERNEL_DS); ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); set_fs (old_fs); out: return ret; } 
/* Since old style bridge ioctls end up using SIOCDEVPRIVATE
 * for some operations; this forces use of the newer bridge-utils that
 * use compatible ioctls
 */
static int old_bridge_ioctl(compat_ulong_t __user *argp)
{
	compat_ulong_t tmp;

	if (get_user(tmp, argp))
		return -EFAULT;
	if (tmp == BRCTL_GET_VERSION)
		return BRCTL_VERSION + 1;
	return -EINVAL;
}

/*
 * Dispatch a 32-bit socket ioctl to the matching translation helper.
 * Commands whose argument layout is identical in both ABIs are passed
 * straight through to sock_ioctl()/sock_do_ioctl().
 */
static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
			 unsigned int cmd, unsigned long arg)
{
	void __user *argp = compat_ptr(arg);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);

	if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))
		return siocdevprivate_ioctl(net, cmd, argp);

	switch (cmd) {
	case SIOCSIFBR:
	case SIOCGIFBR:
		return old_bridge_ioctl(argp);
	case SIOCGIFNAME:
		return dev_ifname32(net, argp);
	case SIOCGIFCONF:
		return dev_ifconf(net, argp);
	case SIOCETHTOOL:
		return ethtool_ioctl(net, argp);
	case SIOCWANDEV:
		return compat_siocwandev(net, argp);
	case SIOCGIFMAP:
	case SIOCSIFMAP:
		return compat_sioc_ifmap(net, cmd, argp);
	case SIOCBONDENSLAVE:
	case SIOCBONDRELEASE:
	case SIOCBONDSETHWADDR:
	case SIOCBONDSLAVEINFOQUERY:
	case SIOCBONDINFOQUERY:
	case SIOCBONDCHANGEACTIVE:
		return bond_ioctl(net, cmd, argp);
	case SIOCADDRT:
	case SIOCDELRT:
		return routing_ioctl(net, sock, cmd, argp);
	case SIOCGSTAMP:
		return do_siocgstamp(net, sock, cmd, argp);
	case SIOCGSTAMPNS:
		return do_siocgstampns(net, sock, cmd, argp);
	case SIOCSHWTSTAMP:
		return compat_siocshwtstamp(net, argp);

	/* argument layout identical in compat: native handler is fine */
	case FIOSETOWN:
	case SIOCSPGRP:
	case FIOGETOWN:
	case SIOCGPGRP:
	case SIOCBRADDBR:
	case SIOCBRDELBR:
	case SIOCGIFVLAN:
	case SIOCSIFVLAN:
	case SIOCADDDLCI:
	case SIOCDELDLCI:
		return sock_ioctl(file, cmd, arg);

	case SIOCGIFFLAGS:
	case SIOCSIFFLAGS:
	case SIOCGIFMETRIC:
	case SIOCSIFMETRIC:
	case SIOCGIFMTU:
	case SIOCSIFMTU:
	case SIOCGIFMEM:
	case SIOCSIFMEM:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCGIFINDEX:
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCSIFHWBROADCAST:
	case SIOCDIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCSIFPFLAGS:
	case SIOCGIFPFLAGS:
	case SIOCGIFTXQLEN:
	case SIOCSIFTXQLEN:
	case SIOCBRADDIF:
	case SIOCBRDELIF:
	case SIOCSIFNAME:
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return dev_ifsioc(net, sock, cmd, argp);

	case SIOCSARP:
	case SIOCGARP:
	case SIOCDARP:
	case SIOCATMARK:
		return sock_do_ioctl(net, sock, cmd, arg);
	}

	/* Prevent warning from compat_sys_ioctl, these always
	 * result in -EINVAL in the native case anyway. */
	switch (cmd) {
	case SIOCRTMSG:
	case SIOCGIFCOUNT:
	case SIOCSRARP:
	case SIOCGRARP:
	case SIOCDRARP:
	case SIOCSIFLINK:
	case SIOCGIFSLAVE:
	case SIOCSIFSLAVE:
		return -EINVAL;
	}

	return -ENOIOCTLCMD;
}

/*
 * compat_ioctl entry point for sockets: protocol handler first, then the
 * wireless-extension range, then the generic translation table above.
 */
static long compat_sock_ioctl(struct file *file, unsigned cmd,
			      unsigned long arg)
{
	struct socket *sock = file->private_data;
	int ret = -ENOIOCTLCMD;
	struct sock *sk;
	struct net *net;

	sk = sock->sk;
	net = sock_net(sk);

	if (sock->ops->compat_ioctl)
		ret = sock->ops->compat_ioctl(sock, cmd, arg);

	if (ret == -ENOIOCTLCMD &&
	    (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST))
		ret = compat_wext_handle_ioctl(net, cmd, arg);

	if (ret == -ENOIOCTLCMD)
		ret = compat_sock_ioctl_trans(file, sock, cmd, arg);

	return ret;
}
#endif

/* Thin in-kernel wrappers over the socket ops, for kernel-space callers. */

int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
{
	return sock->ops->bind(sock, addr, addrlen);
}

int kernel_listen(struct socket *sock, int backlog)
{
	return sock->ops->listen(sock, backlog);
}

/*
 * Accept a connection on @sock into a freshly created @newsock.
 * On failure the new socket is released and *newsock is NULL'ed; on
 * success the new socket inherits the listener's ops (with a module ref).
 */
int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
{
	struct sock *sk = sock->sk;
	int err;

	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto done;

	err = sock->ops->accept(sock, *newsock, flags);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto done;
	}

	(*newsock)->ops = sock->ops;
	__module_get((*newsock)->ops->owner);

done:
	return err;
}

int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
		   int flags)
{
	return sock->ops->connect(sock, addr, addrlen, flags);
}

int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
			 int *addrlen)
{
	return sock->ops->getname(sock, addr, addrlen, 0);
}

int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
			 int *addrlen)
{
	return sock->ops->getname(sock, addr, addrlen, 1);
}

/*
 * getsockopt with kernel-space buffers: temporarily lift the user-pointer
 * check with KERNEL_DS around the normal option path.
 */
int kernel_getsockopt(struct socket *sock, int level, int optname,
			char *optval, int *optlen)
{
	mm_segment_t oldfs = get_fs();
	int err;

	set_fs(KERNEL_DS);
	if (level == SOL_SOCKET)
		err = sock_getsockopt(sock, level, optname, optval, optlen);
	else
		err = sock->ops->getsockopt(sock, level, optname, optval,
					    optlen);
	set_fs(oldfs);
	return err;
}

/* setsockopt with kernel-space buffers; see kernel_getsockopt(). */
int kernel_setsockopt(struct socket *sock, int level, int optname,
			char *optval, unsigned int optlen)
{
	mm_segment_t oldfs = get_fs();
	int err;

	set_fs(KERNEL_DS);
	if (level == SOL_SOCKET)
		err = sock_setsockopt(sock, level, optname, optval, optlen);
	else
		err = sock->ops->setsockopt(sock, level, optname, optval,
					    optlen);
	set_fs(oldfs);
	return err;
}

/* sendpage, falling back to the generic no-sendpage path if unsupported */
int kernel_sendpage(struct socket *sock, struct page *page, int offset,
		    size_t size, int flags)
{
	sock_update_classid(sock->sk);

	if (sock->ops->sendpage)
		return sock->ops->sendpage(sock, page, offset, size, flags);

	return sock_no_sendpage(sock, page, offset, size, flags);
}

/* ioctl with a kernel-space argument, under KERNEL_DS */
int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
{
	mm_segment_t oldfs = get_fs();
	int err;

	set_fs(KERNEL_DS);
	err = sock->ops->ioctl(sock, cmd, arg);
	set_fs(oldfs);

	return err;
}

int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
{
	return sock->ops->shutdown(sock, how);
}

EXPORT_SYMBOL(sock_create);
EXPORT_SYMBOL(sock_create_kern);
EXPORT_SYMBOL(sock_create_lite);
EXPORT_SYMBOL(sock_map_fd);
EXPORT_SYMBOL(sock_recvmsg);
EXPORT_SYMBOL(sock_register);
EXPORT_SYMBOL(sock_release);
EXPORT_SYMBOL(sock_sendmsg);
EXPORT_SYMBOL(sock_unregister);
EXPORT_SYMBOL(sock_wake_async);
EXPORT_SYMBOL(sockfd_lookup);
EXPORT_SYMBOL(kernel_sendmsg);
EXPORT_SYMBOL(kernel_recvmsg);
EXPORT_SYMBOL(kernel_bind);
EXPORT_SYMBOL(kernel_listen);
EXPORT_SYMBOL(kernel_accept);
EXPORT_SYMBOL(kernel_connect);
EXPORT_SYMBOL(kernel_getsockname);
EXPORT_SYMBOL(kernel_getpeername);
EXPORT_SYMBOL(kernel_getsockopt);
EXPORT_SYMBOL(kernel_setsockopt);
EXPORT_SYMBOL(kernel_sendpage);
EXPORT_SYMBOL(kernel_sock_ioctl);
EXPORT_SYMBOL(kernel_sock_shutdown);
gpl-2.0
ronsaldo/linux-amdgpu-si
arch/arm/mach-shmobile/pm-rcar.c
420
4188
/*
 * R-Car SYSC Power management support
 *
 * Copyright (C) 2014  Magnus Damm
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include "pm-rcar.h"

/* SYSC Common */
#define SYSCSR			0x00	/* SYSC Status Register */
#define SYSCISR			0x04	/* Interrupt Status Register */
#define SYSCISCR		0x08	/* Interrupt Status Clear Register */
#define SYSCIER			0x0c	/* Interrupt Enable Register */
#define SYSCIMR			0x10	/* Interrupt Mask Register */

/* SYSC Status Register */
#define SYSCSR_PONENB		1	/* Ready for power resume requests */
#define SYSCSR_POFFENB		0	/* Ready for power shutoff requests */

/*
 * Power Control Register Offsets inside the register block for each domain
 * Note: The "CR" registers for ARM cores exist on H1 only
 *	 Use WFI to power off, CPG/APMU to resume ARM cores on R-Car Gen2
 */
#define PWRSR_OFFS		0x00	/* Power Status Register */
#define PWROFFCR_OFFS		0x04	/* Power Shutoff Control Register */
#define PWROFFSR_OFFS		0x08	/* Power Shutoff Status Register */
#define PWRONCR_OFFS		0x0c	/* Power Resume Control Register */
#define PWRONSR_OFFS		0x10	/* Power Resume Status Register */
#define PWRER_OFFS		0x14	/* Power Shutoff/Resume Error */

/* polling budgets: retries x delay bounds the total busy-wait time */
#define SYSCSR_RETRIES		100
#define SYSCSR_DELAY_US		1

#define PWRER_RETRIES		100
#define PWRER_DELAY_US		1

#define SYSCISR_RETRIES		1000
#define SYSCISR_DELAY_US	1

static void __iomem *rcar_sysc_base;
static DEFINE_SPINLOCK(rcar_sysc_lock); /* SMP CPUs + I/O devices */

/*
 * Submit a single power-on or power-off request for @sysc_ch.
 * Waits (bounded) for SYSC to signal readiness in SYSCSR first;
 * returns -EAGAIN if it never becomes ready, so the caller may retry.
 */
static int rcar_sysc_pwr_on_off(const struct rcar_sysc_ch *sysc_ch, bool on)
{
	unsigned int sr_bit, reg_offs;
	int k;

	if (on) {
		sr_bit = SYSCSR_PONENB;
		reg_offs = PWRONCR_OFFS;
	} else {
		sr_bit = SYSCSR_POFFENB;
		reg_offs = PWROFFCR_OFFS;
	}

	/* Wait until SYSC is ready to accept a power request */
	for (k = 0; k < SYSCSR_RETRIES; k++) {
		if (ioread32(rcar_sysc_base + SYSCSR) & BIT(sr_bit))
			break;
		udelay(SYSCSR_DELAY_US);
	}

	if (k == SYSCSR_RETRIES)
		return -EAGAIN;

	/* Submit power shutoff or power resume request */
	iowrite32(BIT(sysc_ch->chan_bit),
		  rcar_sysc_base + sysc_ch->chan_offs + reg_offs);

	return 0;
}

/*
 * Power a domain on or off, serialized against all other requesters.
 * Retries submission while the hardware reports an error in PWRER, then
 * polls SYSCISR for completion; returns 0 on success or -EIO on timeout.
 */
static int rcar_sysc_power(const struct rcar_sysc_ch *sysc_ch, bool on)
{
	unsigned int isr_mask = BIT(sysc_ch->isr_bit);
	unsigned int chan_mask = BIT(sysc_ch->chan_bit);
	unsigned int status;
	unsigned long flags;
	int ret = 0;
	int k;

	spin_lock_irqsave(&rcar_sysc_lock, flags);

	/* clear any stale completion interrupt before starting */
	iowrite32(isr_mask, rcar_sysc_base + SYSCISCR);

	/* Submit power shutoff or resume request until it was accepted */
	for (k = 0; k < PWRER_RETRIES; k++) {
		ret = rcar_sysc_pwr_on_off(sysc_ch, on);
		if (ret)
			goto out;

		status = ioread32(rcar_sysc_base +
				  sysc_ch->chan_offs + PWRER_OFFS);
		if (!(status & chan_mask))
			break;

		udelay(PWRER_DELAY_US);
	}

	if (k == PWRER_RETRIES) {
		ret = -EIO;
		goto out;
	}

	/* Wait until the power shutoff or resume request has completed */
	for (k = 0; k < SYSCISR_RETRIES; k++) {
		if (ioread32(rcar_sysc_base + SYSCISR) & isr_mask)
			break;
		udelay(SYSCISR_DELAY_US);
	}

	if (k == SYSCISR_RETRIES)
		ret = -EIO;

	iowrite32(isr_mask, rcar_sysc_base + SYSCISCR);

out:
	spin_unlock_irqrestore(&rcar_sysc_lock, flags);

	pr_debug("sysc power domain %d: %08x -> %d\n",
		 sysc_ch->isr_bit, ioread32(rcar_sysc_base + SYSCISR), ret);
	return ret;
}

/* Power a SYSC domain down. */
int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch)
{
	return rcar_sysc_power(sysc_ch, false);
}

/* Power a SYSC domain up. */
int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch)
{
	return rcar_sysc_power(sysc_ch, true);
}

/* Read the domain's power status bit; true when powered off. */
bool rcar_sysc_power_is_off(const struct rcar_sysc_ch *sysc_ch)
{
	unsigned int st;

	st = ioread32(rcar_sysc_base + sysc_ch->chan_offs + PWRSR_OFFS);
	if (st & BIT(sysc_ch->chan_bit))
		return true;

	return false;
}

/*
 * Map the SYSC register block at @base and cache the mapping.
 * Panics on failure: power management cannot work without it.
 */
void __iomem *rcar_sysc_init(phys_addr_t base)
{
	rcar_sysc_base = ioremap_nocache(base, PAGE_SIZE);
	if (!rcar_sysc_base)
		panic("unable to ioremap R-Car SYSC hardware block\n");

	return rcar_sysc_base;
}
gpl-2.0
sgs3/SGH-T999V_Kernel
net/sched/sch_netem.c
932
23125
/*
 * net/sched/sch_netem.c	Network emulator
 *
 * 		This program is free software; you can redistribute it and/or
 * 		modify it under the terms of the GNU General Public License
 * 		as published by the Free Software Foundation; either version
 * 		2 of the License.
 *
 *  		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification that can be handled in
	 layering other disciplines.  It does not need to do bandwidth
	 control either since that can be handled by using token
	 bucket or other rate control.

     Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/

/* Per-qdisc private state for netem. */
struct netem_sched_data {
	struct Qdisc	*qdisc;		/* inner (tfifo) child qdisc */
	struct qdisc_watchdog watchdog;	/* timer for delayed dequeue */

	psched_tdiff_t latency;		/* base added delay */
	psched_tdiff_t jitter;		/* random delay variation */

	u32 loss;			/* loss probability (0..~0) */
	u32 limit;
	u32 counter;			/* packets since last reorder */
	u32 gap;			/* reorder every Nth packet */
	u32 duplicate;			/* duplication probability */
	u32 reorder;			/* reorder probability */
	u32 corrupt;			/* corruption probability */

	/* correlated PRNG state, one per emulated effect */
	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	/* optional user-loaded delay distribution table */
	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum  {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

/* netem's private cb area lives after the generic qdisc cb */
static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->cb) <
		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	/* weighted blend of fresh randomness and the previous value */
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = net_random();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   1 => successfully transmitted packets within a gap period
	 *   4 => isolated losses within a gap period
	 *   3 => lost packets within a burst period
	 *   2 => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case 1:
		if (rnd < clg->a4) {
			clg->state = 4;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1) {
			clg->state = 3;
			return true;
		} else if (clg->a1 < rnd)
			clg->state = 1;

		break;
	case 2:
		if (rnd < clg->a5) {
			clg->state = 3;
			return true;
		} else
			clg->state = 2;

		break;
	case 3:
		if (rnd < clg->a3)
			clg->state = 2;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = 1;
			return true;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = 3;
			return true;
		}
		break;
	case 4:
		clg->state = 1;
		break;
	}

	return false;
}

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases  (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state.
A second random number is extracted and the comparison * with the loss probability of the current state decides if the next * packet will be transmitted or lost. */ static bool loss_gilb_ell(struct netem_sched_data *q) { struct clgstate *clg = &q->clg; switch (clg->state) { case 1: if (net_random() < clg->a1) clg->state = 2; if (net_random() < clg->a4) return true; case 2: if (net_random() < clg->a2) clg->state = 1; if (clg->a3 > net_random()) return true; } return false; } static bool loss_event(struct netem_sched_data *q) { switch (q->loss_model) { case CLG_RANDOM: /* Random packet drop 0 => none, ~0 => all */ return q->loss && q->loss >= get_crandom(&q->loss_cor); case CLG_4_STATES: /* 4state loss model algorithm (used also for GI model) * Extracts a value from the markov 4 state loss generator, * if it is 1 drops a packet and if needed writes the event in * the kernel logs */ return loss_4state(q); case CLG_GILB_ELL: /* Gilbert-Elliot loss model algorithm * Extracts a value from the Gilbert-Elliot loss generator, * if it is 1 drops a packet and if needed writes the event in * the kernel logs */ return loss_gilb_ell(q); } return false; /* not reached */ } /* tabledist - return a pseudo-randomly distributed value with mean mu and * std deviation sigma. Uses table lookup to approximate the desired * distribution, and a uniformly-distributed pseudo-random source. */ static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma, struct crndstate *state, const struct disttable *dist) { psched_tdiff_t x; long t; u32 rnd; if (sigma == 0) return mu; rnd = get_crandom(state); /* default uniform distribution */ if (dist == NULL) return (rnd % (2*sigma)) - sigma + mu; t = dist->table[rnd % dist->size]; x = (sigma % NETEM_DIST_SCALE) * t; if (x >= 0) x += NETEM_DIST_SCALE/2; else x -= NETEM_DIST_SCALE/2; return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu; } /* * Insert one skb into qdisc. 
 * Note: parent depends on return value to account for queue length.
 * 	NET_XMIT_DROP: queue length didn't change.
 *      NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q))
		--count;

	/* duplication and loss cancelled out, or plain loss */
	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		/* flip one random bit somewhere in the linear header */
		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = qdisc_enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&q->qdisc->q, skb);
		q->qdisc->qstats.backlog += qdisc_pkt_len(skb);
		q->qdisc->qstats.requeues++;
		ret = NET_XMIT_SUCCESS;
	}

	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			sch->qstats.drops++;
			return ret;
		}
	}

	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

/* Drop one packet from the inner qdisc, if it supports dropping. */
static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

/*
 * Dequeue the head packet once its scheduled send time has arrived;
 * otherwise arm the watchdog for that time and return NULL.
 */
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (qdisc_is_throttled(sch))
		return NULL;

	skb = q->qdisc->ops->peek(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);
		psched_time_t now = psched_get_time();

		/* if more time remaining? */
		if (cb->time_to_send <= now) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			sch->q.qlen--;
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}

/* Flush the inner qdisc and cancel any pending watchdog timer. */
static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/* Free a distribution table allocated by kmalloc or vmalloc. */
static void dist_free(struct disttable *d)
{
	if (d) {
		if (is_vmalloc_addr(d))
			vfree(d);
		else
			kfree(d);
	}
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;
	size_t s;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	s = sizeof(struct disttable) + n * sizeof(s16);
	d = kmalloc(s, GFP_KERNEL);
	if (!d)
		d = vmalloc(s);	/* fall back for large tables */
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	/* swap in the new table under the qdisc tree lock */
	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	dist_free(q->delay_dist);
	q->delay_dist = d;
	spin_unlock_bh(root_lock);
	return 0;
}

/* Load delay/loss/duplication correlation parameters from netlink. */
static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

/* Load reorder probability and correlation from netlink. */
static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

/* Load corruption probability and correlation from netlink. */
static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

/* Parse the nested TCA_NETEM_LOSS attribute selecting a CLG loss model. */
static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch(type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) != sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = 1;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) != sizeof(struct tc_netem_gemodel)) {
				/* NOTE(review): message says "gi" but this is
				 * the GE branch - looks like a copy/paste slip
				 * in the original; kept as-is here.
				 */
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = 1;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
};

/*
 * Parse the nested attributes that follow the fixed-size tc_netem_qopt
 * header (of length @len) inside @nla; clears @tb if there are none.
 */
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	ret = fifo_set_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_info("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(sch, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER])
		get_reorder(sch, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);

	/* default to uncorrelated random loss unless a CLG model follows */
	q->loss_model = CLG_RANDOM;
	if (tb[TCA_NETEM_LOSS])
		ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);

	return ret;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues in order based on timestamps in skb's
 */
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;	/* latest time_to_send seen at the tail */
};

/*
 * Enqueue @nskb keeping the queue sorted by time_to_send; the common
 * append-at-tail case is detected via the cached "oldest" timestamp.
 */
static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		/* walk backwards to find the insertion point */
		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb = netem_skb_cb(skb);

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += qdisc_pkt_len(nskb);

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}

/* Init/change handler for the tfifo: just (re)set the queue limit. */
static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = nla_data(opt);
		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}

/* Dump the tfifo's limit as a tc_fifo_qopt netlink attribute. */
static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

/* Create the inner tfifo child and apply initial options.
 * (definition continues beyond this excerpt)
 */
static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	q->qdisc = qdisc_create_dflt(sch->dev_queue, &tfifo_qdisc_ops,
TC_H_MAKE(sch->handle, 1)); if (!q->qdisc) { pr_notice("netem: qdisc create tfifo qdisc failed\n"); return -ENOMEM; } ret = netem_change(sch, opt); if (ret) { pr_info("netem: change failed\n"); qdisc_destroy(q->qdisc); } return ret; } static void netem_destroy(struct Qdisc *sch) { struct netem_sched_data *q = qdisc_priv(sch); qdisc_watchdog_cancel(&q->watchdog); qdisc_destroy(q->qdisc); dist_free(q->delay_dist); } static int dump_loss_model(const struct netem_sched_data *q, struct sk_buff *skb) { struct nlattr *nest; nest = nla_nest_start(skb, TCA_NETEM_LOSS); if (nest == NULL) goto nla_put_failure; switch (q->loss_model) { case CLG_RANDOM: /* legacy loss model */ nla_nest_cancel(skb, nest); return 0; /* no data */ case CLG_4_STATES: { struct tc_netem_gimodel gi = { .p13 = q->clg.a1, .p31 = q->clg.a2, .p32 = q->clg.a3, .p14 = q->clg.a4, .p23 = q->clg.a5, }; NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi); break; } case CLG_GILB_ELL: { struct tc_netem_gemodel ge = { .p = q->clg.a1, .r = q->clg.a2, .h = q->clg.a3, .k1 = q->clg.a4, }; NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge); break; } } nla_nest_end(skb, nest); return 0; nla_put_failure: nla_nest_cancel(skb, nest); return -1; } static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) { const struct netem_sched_data *q = qdisc_priv(sch); struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb); struct tc_netem_qopt qopt; struct tc_netem_corr cor; struct tc_netem_reorder reorder; struct tc_netem_corrupt corrupt; qopt.latency = q->latency; qopt.jitter = q->jitter; qopt.limit = q->limit; qopt.loss = q->loss; qopt.gap = q->gap; qopt.duplicate = q->duplicate; NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); cor.delay_corr = q->delay_cor.rho; cor.loss_corr = q->loss_cor.rho; cor.dup_corr = q->dup_cor.rho; NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor); reorder.probability = q->reorder; reorder.correlation = q->reorder_cor.rho; NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder); corrupt.probability = 
q->corrupt; corrupt.correlation = q->corrupt_cor.rho; NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt); if (dump_loss_model(q, skb) != 0) goto nla_put_failure; return nla_nest_end(skb, nla); nla_put_failure: nlmsg_trim(skb, nla); return -1; } static int netem_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { struct netem_sched_data *q = qdisc_priv(sch); if (cl != 1) /* only one class */ return -ENOENT; tcm->tcm_handle |= TC_H_MIN(1); tcm->tcm_info = q->qdisc->handle; return 0; } static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old) { struct netem_sched_data *q = qdisc_priv(sch); if (new == NULL) new = &noop_qdisc; sch_tree_lock(sch); *old = q->qdisc; q->qdisc = new; qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); qdisc_reset(*old); sch_tree_unlock(sch); return 0; } static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg) { struct netem_sched_data *q = qdisc_priv(sch); return q->qdisc; } static unsigned long netem_get(struct Qdisc *sch, u32 classid) { return 1; } static void netem_put(struct Qdisc *sch, unsigned long arg) { } static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker) { if (!walker->stop) { if (walker->count >= walker->skip) if (walker->fn(sch, 1, walker) < 0) { walker->stop = 1; return; } walker->count++; } } static const struct Qdisc_class_ops netem_class_ops = { .graft = netem_graft, .leaf = netem_leaf, .get = netem_get, .put = netem_put, .walk = netem_walk, .dump = netem_dump_class, }; static struct Qdisc_ops netem_qdisc_ops __read_mostly = { .id = "netem", .cl_ops = &netem_class_ops, .priv_size = sizeof(struct netem_sched_data), .enqueue = netem_enqueue, .dequeue = netem_dequeue, .peek = qdisc_peek_dequeued, .drop = netem_drop, .init = netem_init, .reset = netem_reset, .destroy = netem_destroy, .change = netem_change, .dump = netem_dump, .owner = THIS_MODULE, }; static int __init netem_module_init(void) { pr_info("netem: 
version " VERSION "\n"); return register_qdisc(&netem_qdisc_ops); } static void __exit netem_module_exit(void) { unregister_qdisc(&netem_qdisc_ops); } module_init(netem_module_init) module_exit(netem_module_exit) MODULE_LICENSE("GPL");
gpl-2.0
Fevax/android_kernel_samsung_universal8890-N
sound/pci/echoaudio/indigoiox.c
1956
3016
/* * ALSA driver for Echoaudio soundcards. * Copyright (C) 2009 Giuliano Pochini <pochini@shiny.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define INDIGO_FAMILY #define ECHOCARD_INDIGO_IOX #define ECHOCARD_NAME "Indigo IOx" #define ECHOCARD_HAS_MONITOR #define ECHOCARD_HAS_SUPER_INTERLEAVE #define ECHOCARD_HAS_VMIXER #define ECHOCARD_HAS_STEREO_BIG_ENDIAN32 /* Pipe indexes */ #define PX_ANALOG_OUT 0 /* 8 */ #define PX_DIGITAL_OUT 8 /* 0 */ #define PX_ANALOG_IN 8 /* 2 */ #define PX_DIGITAL_IN 10 /* 0 */ #define PX_NUM 10 /* Bus indexes */ #define BX_ANALOG_OUT 0 /* 2 */ #define BX_DIGITAL_OUT 2 /* 0 */ #define BX_ANALOG_IN 2 /* 2 */ #define BX_DIGITAL_IN 4 /* 0 */ #define BX_NUM 4 #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/firmware.h> #include <linux/io.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/info.h> #include <sound/control.h> #include <sound/tlv.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/asoundef.h> #include <sound/initval.h> #include <linux/atomic.h> #include "echoaudio.h" MODULE_FIRMWARE("ea/loader_dsp.fw"); MODULE_FIRMWARE("ea/indigo_iox_dsp.fw"); #define FW_361_LOADER 0 #define FW_INDIGO_IOX_DSP 1 static const struct firmware card_fw[] = { {0, "loader_dsp.fw"}, {0, "indigo_iox_dsp.fw"} }; static const 
struct pci_device_id snd_echo_ids[] = { {0x1057, 0x3410, 0xECC0, 0x00D0, 0, 0, 0}, /* Indigo IOx */ {0,} }; static struct snd_pcm_hardware pcm_hardware_skel = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START, .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE, .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000, .rate_min = 32000, .rate_max = 96000, .channels_min = 1, .channels_max = 8, .buffer_bytes_max = 262144, .period_bytes_min = 32, .period_bytes_max = 131072, .periods_min = 2, .periods_max = 220, }; #include "indigoiox_dsp.c" #include "indigo_express_dsp.c" #include "echoaudio_dsp.c" #include "echoaudio.c"
gpl-2.0
Tim1928/DBK-3.0_4.1
drivers/net/wireless/b43legacy/dma.c
2724
44525
/* Broadcom B43legacy wireless driver DMA ringbuffer and descriptor allocation/management Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de> Some code in this file is derived from the b44.c driver Copyright (C) 2002 David S. Miller Copyright (C) Pekka Pietikainen This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "b43legacy.h" #include "dma.h" #include "main.h" #include "debugfs.h" #include "xmit.h" #include <linux/dma-mapping.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/dst.h> /* 32bit DMA ops. 
*/ static struct b43legacy_dmadesc_generic *op32_idx2desc( struct b43legacy_dmaring *ring, int slot, struct b43legacy_dmadesc_meta **meta) { struct b43legacy_dmadesc32 *desc; *meta = &(ring->meta[slot]); desc = ring->descbase; desc = &(desc[slot]); return (struct b43legacy_dmadesc_generic *)desc; } static void op32_fill_descriptor(struct b43legacy_dmaring *ring, struct b43legacy_dmadesc_generic *desc, dma_addr_t dmaaddr, u16 bufsize, int start, int end, int irq) { struct b43legacy_dmadesc32 *descbase = ring->descbase; int slot; u32 ctl; u32 addr; u32 addrext; slot = (int)(&(desc->dma32) - descbase); B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK); addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; addr |= ssb_dma_translation(ring->dev->dev); ctl = (bufsize - ring->frameoffset) & B43legacy_DMA32_DCTL_BYTECNT; if (slot == ring->nr_slots - 1) ctl |= B43legacy_DMA32_DCTL_DTABLEEND; if (start) ctl |= B43legacy_DMA32_DCTL_FRAMESTART; if (end) ctl |= B43legacy_DMA32_DCTL_FRAMEEND; if (irq) ctl |= B43legacy_DMA32_DCTL_IRQ; ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT) & B43legacy_DMA32_DCTL_ADDREXT_MASK; desc->dma32.control = cpu_to_le32(ctl); desc->dma32.address = cpu_to_le32(addr); } static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot) { b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX, (u32)(slot * sizeof(struct b43legacy_dmadesc32))); } static void op32_tx_suspend(struct b43legacy_dmaring *ring) { b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL) | B43legacy_DMA32_TXSUSPEND); } static void op32_tx_resume(struct b43legacy_dmaring *ring) { b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL) & ~B43legacy_DMA32_TXSUSPEND); } static int op32_get_current_rxslot(struct b43legacy_dmaring *ring) { u32 val; val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS); val &= 
B43legacy_DMA32_RXDPTR; return (val / sizeof(struct b43legacy_dmadesc32)); } static void op32_set_current_rxslot(struct b43legacy_dmaring *ring, int slot) { b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, (u32)(slot * sizeof(struct b43legacy_dmadesc32))); } static const struct b43legacy_dma_ops dma32_ops = { .idx2desc = op32_idx2desc, .fill_descriptor = op32_fill_descriptor, .poke_tx = op32_poke_tx, .tx_suspend = op32_tx_suspend, .tx_resume = op32_tx_resume, .get_current_rxslot = op32_get_current_rxslot, .set_current_rxslot = op32_set_current_rxslot, }; /* 64bit DMA ops. */ static struct b43legacy_dmadesc_generic *op64_idx2desc( struct b43legacy_dmaring *ring, int slot, struct b43legacy_dmadesc_meta **meta) { struct b43legacy_dmadesc64 *desc; *meta = &(ring->meta[slot]); desc = ring->descbase; desc = &(desc[slot]); return (struct b43legacy_dmadesc_generic *)desc; } static void op64_fill_descriptor(struct b43legacy_dmaring *ring, struct b43legacy_dmadesc_generic *desc, dma_addr_t dmaaddr, u16 bufsize, int start, int end, int irq) { struct b43legacy_dmadesc64 *descbase = ring->descbase; int slot; u32 ctl0 = 0; u32 ctl1 = 0; u32 addrlo; u32 addrhi; u32 addrext; slot = (int)(&(desc->dma64) - descbase); B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); addrlo = (u32)(dmaaddr & 0xFFFFFFFF); addrhi = (((u64)dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK); addrext = (((u64)dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; addrhi |= ssb_dma_translation(ring->dev->dev); if (slot == ring->nr_slots - 1) ctl0 |= B43legacy_DMA64_DCTL0_DTABLEEND; if (start) ctl0 |= B43legacy_DMA64_DCTL0_FRAMESTART; if (end) ctl0 |= B43legacy_DMA64_DCTL0_FRAMEEND; if (irq) ctl0 |= B43legacy_DMA64_DCTL0_IRQ; ctl1 |= (bufsize - ring->frameoffset) & B43legacy_DMA64_DCTL1_BYTECNT; ctl1 |= (addrext << B43legacy_DMA64_DCTL1_ADDREXT_SHIFT) & B43legacy_DMA64_DCTL1_ADDREXT_MASK; desc->dma64.control0 = cpu_to_le32(ctl0); desc->dma64.control1 = cpu_to_le32(ctl1); 
desc->dma64.address_low = cpu_to_le32(addrlo); desc->dma64.address_high = cpu_to_le32(addrhi); } static void op64_poke_tx(struct b43legacy_dmaring *ring, int slot) { b43legacy_dma_write(ring, B43legacy_DMA64_TXINDEX, (u32)(slot * sizeof(struct b43legacy_dmadesc64))); } static void op64_tx_suspend(struct b43legacy_dmaring *ring) { b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL, b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL) | B43legacy_DMA64_TXSUSPEND); } static void op64_tx_resume(struct b43legacy_dmaring *ring) { b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL, b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL) & ~B43legacy_DMA64_TXSUSPEND); } static int op64_get_current_rxslot(struct b43legacy_dmaring *ring) { u32 val; val = b43legacy_dma_read(ring, B43legacy_DMA64_RXSTATUS); val &= B43legacy_DMA64_RXSTATDPTR; return (val / sizeof(struct b43legacy_dmadesc64)); } static void op64_set_current_rxslot(struct b43legacy_dmaring *ring, int slot) { b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX, (u32)(slot * sizeof(struct b43legacy_dmadesc64))); } static const struct b43legacy_dma_ops dma64_ops = { .idx2desc = op64_idx2desc, .fill_descriptor = op64_fill_descriptor, .poke_tx = op64_poke_tx, .tx_suspend = op64_tx_suspend, .tx_resume = op64_tx_resume, .get_current_rxslot = op64_get_current_rxslot, .set_current_rxslot = op64_set_current_rxslot, }; static inline int free_slots(struct b43legacy_dmaring *ring) { return (ring->nr_slots - ring->used_slots); } static inline int next_slot(struct b43legacy_dmaring *ring, int slot) { B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1)); if (slot == ring->nr_slots - 1) return 0; return slot + 1; } static inline int prev_slot(struct b43legacy_dmaring *ring, int slot) { B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1)); if (slot == 0) return ring->nr_slots - 1; return slot - 1; } #ifdef CONFIG_B43LEGACY_DEBUG static void update_max_used_slots(struct b43legacy_dmaring *ring, int current_used_slots) { if 
(current_used_slots <= ring->max_used_slots) return; ring->max_used_slots = current_used_slots; if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE)) b43legacydbg(ring->dev->wl, "max_used_slots increased to %d on %s ring %d\n", ring->max_used_slots, ring->tx ? "TX" : "RX", ring->index); } #else static inline void update_max_used_slots(struct b43legacy_dmaring *ring, int current_used_slots) { } #endif /* DEBUG */ /* Request a slot for usage. */ static inline int request_slot(struct b43legacy_dmaring *ring) { int slot; B43legacy_WARN_ON(!ring->tx); B43legacy_WARN_ON(ring->stopped); B43legacy_WARN_ON(free_slots(ring) == 0); slot = next_slot(ring, ring->current_slot); ring->current_slot = slot; ring->used_slots++; update_max_used_slots(ring, ring->used_slots); return slot; } /* Mac80211-queue to b43legacy-ring mapping */ static struct b43legacy_dmaring *priority_to_txring( struct b43legacy_wldev *dev, int queue_priority) { struct b43legacy_dmaring *ring; /*FIXME: For now we always run on TX-ring-1 */ return dev->dma.tx_ring1; /* 0 = highest priority */ switch (queue_priority) { default: B43legacy_WARN_ON(1); /* fallthrough */ case 0: ring = dev->dma.tx_ring3; break; case 1: ring = dev->dma.tx_ring2; break; case 2: ring = dev->dma.tx_ring1; break; case 3: ring = dev->dma.tx_ring0; break; case 4: ring = dev->dma.tx_ring4; break; case 5: ring = dev->dma.tx_ring5; break; } return ring; } /* Bcm4301-ring to mac80211-queue mapping */ static inline int txring_to_priority(struct b43legacy_dmaring *ring) { static const u8 idx_to_prio[] = { 3, 2, 1, 0, 4, 5, }; /*FIXME: have only one queue, for now */ return 0; return idx_to_prio[ring->index]; } static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type, int controller_idx) { static const u16 map64[] = { B43legacy_MMIO_DMA64_BASE0, B43legacy_MMIO_DMA64_BASE1, B43legacy_MMIO_DMA64_BASE2, B43legacy_MMIO_DMA64_BASE3, B43legacy_MMIO_DMA64_BASE4, B43legacy_MMIO_DMA64_BASE5, }; static const u16 map32[] = { 
B43legacy_MMIO_DMA32_BASE0, B43legacy_MMIO_DMA32_BASE1, B43legacy_MMIO_DMA32_BASE2, B43legacy_MMIO_DMA32_BASE3, B43legacy_MMIO_DMA32_BASE4, B43legacy_MMIO_DMA32_BASE5, }; if (type == B43legacy_DMA_64BIT) { B43legacy_WARN_ON(!(controller_idx >= 0 && controller_idx < ARRAY_SIZE(map64))); return map64[controller_idx]; } B43legacy_WARN_ON(!(controller_idx >= 0 && controller_idx < ARRAY_SIZE(map32))); return map32[controller_idx]; } static inline dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring, unsigned char *buf, size_t len, int tx) { dma_addr_t dmaaddr; if (tx) dmaaddr = dma_map_single(ring->dev->dev->dma_dev, buf, len, DMA_TO_DEVICE); else dmaaddr = dma_map_single(ring->dev->dev->dma_dev, buf, len, DMA_FROM_DEVICE); return dmaaddr; } static inline void unmap_descbuffer(struct b43legacy_dmaring *ring, dma_addr_t addr, size_t len, int tx) { if (tx) dma_unmap_single(ring->dev->dev->dma_dev, addr, len, DMA_TO_DEVICE); else dma_unmap_single(ring->dev->dev->dma_dev, addr, len, DMA_FROM_DEVICE); } static inline void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring, dma_addr_t addr, size_t len) { B43legacy_WARN_ON(ring->tx); dma_sync_single_for_cpu(ring->dev->dev->dma_dev, addr, len, DMA_FROM_DEVICE); } static inline void sync_descbuffer_for_device(struct b43legacy_dmaring *ring, dma_addr_t addr, size_t len) { B43legacy_WARN_ON(ring->tx); dma_sync_single_for_device(ring->dev->dev->dma_dev, addr, len, DMA_FROM_DEVICE); } static inline void free_descriptor_buffer(struct b43legacy_dmaring *ring, struct b43legacy_dmadesc_meta *meta, int irq_context) { if (meta->skb) { if (irq_context) dev_kfree_skb_irq(meta->skb); else dev_kfree_skb(meta->skb); meta->skb = NULL; } } static int alloc_ringmemory(struct b43legacy_dmaring *ring) { /* GFP flags must match the flags in free_ringmemory()! 
*/ ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE, &(ring->dmabase), GFP_KERNEL); if (!ring->descbase) { b43legacyerr(ring->dev->wl, "DMA ringmemory allocation" " failed\n"); return -ENOMEM; } memset(ring->descbase, 0, B43legacy_DMA_RINGMEMSIZE); return 0; } static void free_ringmemory(struct b43legacy_dmaring *ring) { dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE, ring->descbase, ring->dmabase); } /* Reset the RX DMA channel */ static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev, u16 mmio_base, enum b43legacy_dmatype type) { int i; u32 value; u16 offset; might_sleep(); offset = (type == B43legacy_DMA_64BIT) ? B43legacy_DMA64_RXCTL : B43legacy_DMA32_RXCTL; b43legacy_write32(dev, mmio_base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == B43legacy_DMA_64BIT) ? B43legacy_DMA64_RXSTATUS : B43legacy_DMA32_RXSTATUS; value = b43legacy_read32(dev, mmio_base + offset); if (type == B43legacy_DMA_64BIT) { value &= B43legacy_DMA64_RXSTAT; if (value == B43legacy_DMA64_RXSTAT_DISABLED) { i = -1; break; } } else { value &= B43legacy_DMA32_RXSTATE; if (value == B43legacy_DMA32_RXSTAT_DISABLED) { i = -1; break; } } msleep(1); } if (i != -1) { b43legacyerr(dev->wl, "DMA RX reset timed out\n"); return -ENODEV; } return 0; } /* Reset the RX DMA channel */ static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev, u16 mmio_base, enum b43legacy_dmatype type) { int i; u32 value; u16 offset; might_sleep(); for (i = 0; i < 10; i++) { offset = (type == B43legacy_DMA_64BIT) ? 
B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS; value = b43legacy_read32(dev, mmio_base + offset); if (type == B43legacy_DMA_64BIT) { value &= B43legacy_DMA64_TXSTAT; if (value == B43legacy_DMA64_TXSTAT_DISABLED || value == B43legacy_DMA64_TXSTAT_IDLEWAIT || value == B43legacy_DMA64_TXSTAT_STOPPED) break; } else { value &= B43legacy_DMA32_TXSTATE; if (value == B43legacy_DMA32_TXSTAT_DISABLED || value == B43legacy_DMA32_TXSTAT_IDLEWAIT || value == B43legacy_DMA32_TXSTAT_STOPPED) break; } msleep(1); } offset = (type == B43legacy_DMA_64BIT) ? B43legacy_DMA64_TXCTL : B43legacy_DMA32_TXCTL; b43legacy_write32(dev, mmio_base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == B43legacy_DMA_64BIT) ? B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS; value = b43legacy_read32(dev, mmio_base + offset); if (type == B43legacy_DMA_64BIT) { value &= B43legacy_DMA64_TXSTAT; if (value == B43legacy_DMA64_TXSTAT_DISABLED) { i = -1; break; } } else { value &= B43legacy_DMA32_TXSTATE; if (value == B43legacy_DMA32_TXSTAT_DISABLED) { i = -1; break; } } msleep(1); } if (i != -1) { b43legacyerr(dev->wl, "DMA TX reset timed out\n"); return -ENODEV; } /* ensure the reset is completed. */ msleep(1); return 0; } /* Check if a DMA mapping address is invalid. */ static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring, dma_addr_t addr, size_t buffersize, bool dma_to_device) { if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr))) return 1; switch (ring->type) { case B43legacy_DMA_30BIT: if ((u64)addr + buffersize > (1ULL << 30)) goto address_error; break; case B43legacy_DMA_32BIT: if ((u64)addr + buffersize > (1ULL << 32)) goto address_error; break; case B43legacy_DMA_64BIT: /* Currently we can't have addresses beyond 64 bits in the kernel. */ break; } /* The address is OK. */ return 0; address_error: /* We can't support this address. Unmap it again. 
*/ unmap_descbuffer(ring, addr, buffersize, dma_to_device); return 1; } static int setup_rx_descbuffer(struct b43legacy_dmaring *ring, struct b43legacy_dmadesc_generic *desc, struct b43legacy_dmadesc_meta *meta, gfp_t gfp_flags) { struct b43legacy_rxhdr_fw3 *rxhdr; struct b43legacy_hwtxstatus *txstat; dma_addr_t dmaaddr; struct sk_buff *skb; B43legacy_WARN_ON(ring->tx); skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); if (unlikely(!skb)) return -ENOMEM; dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { /* ugh. try to realloc in zone_dma */ gfp_flags |= GFP_DMA; dev_kfree_skb_any(skb); skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); if (unlikely(!skb)) return -ENOMEM; dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); } if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { dev_kfree_skb_any(skb); return -EIO; } meta->skb = skb; meta->dmaaddr = dmaaddr; ring->ops->fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0); rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data); rxhdr->frame_len = 0; txstat = (struct b43legacy_hwtxstatus *)(skb->data); txstat->cookie = 0; return 0; } /* Allocate the initial descbuffers. * This is used for an RX ring only. 
*/ static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring) { int i; int err = -ENOMEM; struct b43legacy_dmadesc_generic *desc; struct b43legacy_dmadesc_meta *meta; for (i = 0; i < ring->nr_slots; i++) { desc = ring->ops->idx2desc(ring, i, &meta); err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL); if (err) { b43legacyerr(ring->dev->wl, "Failed to allocate initial descbuffers\n"); goto err_unwind; } } mb(); /* all descbuffer setup before next line */ ring->used_slots = ring->nr_slots; err = 0; out: return err; err_unwind: for (i--; i >= 0; i--) { desc = ring->ops->idx2desc(ring, i, &meta); unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); dev_kfree_skb(meta->skb); } goto out; } /* Do initial setup of the DMA controller. * Reset the controller, write the ring busaddress * and switch the "enable" bit on. */ static int dmacontroller_setup(struct b43legacy_dmaring *ring) { int err = 0; u32 value; u32 addrext; u32 trans = ssb_dma_translation(ring->dev->dev); if (ring->tx) { if (ring->type == B43legacy_DMA_64BIT) { u64 ringbase = (u64)(ring->dmabase); addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; value = B43legacy_DMA64_TXENABLE; value |= (addrext << B43legacy_DMA64_TXADDREXT_SHIFT) & B43legacy_DMA64_TXADDREXT_MASK; b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL, value); b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, (ringbase & 0xFFFFFFFF)); b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, ((ringbase >> 32) & ~SSB_DMA_TRANSLATION_MASK) | trans); } else { u32 ringbase = (u32)(ring->dmabase); addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; value = B43legacy_DMA32_TXENABLE; value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT) & B43legacy_DMA32_TXADDREXT_MASK; b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value); b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, (ringbase & ~SSB_DMA_TRANSLATION_MASK) | trans); } } else { err = alloc_initial_descbuffers(ring); if 
(err) goto out; if (ring->type == B43legacy_DMA_64BIT) { u64 ringbase = (u64)(ring->dmabase); addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; value = (ring->frameoffset << B43legacy_DMA64_RXFROFF_SHIFT); value |= B43legacy_DMA64_RXENABLE; value |= (addrext << B43legacy_DMA64_RXADDREXT_SHIFT) & B43legacy_DMA64_RXADDREXT_MASK; b43legacy_dma_write(ring, B43legacy_DMA64_RXCTL, value); b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, (ringbase & 0xFFFFFFFF)); b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, ((ringbase >> 32) & ~SSB_DMA_TRANSLATION_MASK) | trans); b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX, 200); } else { u32 ringbase = (u32)(ring->dmabase); addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; value = (ring->frameoffset << B43legacy_DMA32_RXFROFF_SHIFT); value |= B43legacy_DMA32_RXENABLE; value |= (addrext << B43legacy_DMA32_RXADDREXT_SHIFT) & B43legacy_DMA32_RXADDREXT_MASK; b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value); b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, (ringbase & ~SSB_DMA_TRANSLATION_MASK) | trans); b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200); } } out: return err; } /* Shutdown the DMA controller. 
*/ static void dmacontroller_cleanup(struct b43legacy_dmaring *ring) { if (ring->tx) { b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base, ring->type); if (ring->type == B43legacy_DMA_64BIT) { b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, 0); b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, 0); } else b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0); } else { b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base, ring->type); if (ring->type == B43legacy_DMA_64BIT) { b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, 0); b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, 0); } else b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0); } } static void free_all_descbuffers(struct b43legacy_dmaring *ring) { struct b43legacy_dmadesc_generic *desc; struct b43legacy_dmadesc_meta *meta; int i; if (!ring->used_slots) return; for (i = 0; i < ring->nr_slots; i++) { desc = ring->ops->idx2desc(ring, i, &meta); if (!meta->skb) { B43legacy_WARN_ON(!ring->tx); continue; } if (ring->tx) unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); else unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); free_descriptor_buffer(ring, meta, 0); } } static u64 supported_dma_mask(struct b43legacy_wldev *dev) { u32 tmp; u16 mmio_base; tmp = b43legacy_read32(dev, SSB_TMSHIGH); if (tmp & SSB_TMSHIGH_DMA64) return DMA_BIT_MASK(64); mmio_base = b43legacy_dmacontroller_base(0, 0); b43legacy_write32(dev, mmio_base + B43legacy_DMA32_TXCTL, B43legacy_DMA32_TXADDREXT_MASK); tmp = b43legacy_read32(dev, mmio_base + B43legacy_DMA32_TXCTL); if (tmp & B43legacy_DMA32_TXADDREXT_MASK) return DMA_BIT_MASK(32); return DMA_BIT_MASK(30); } static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask) { if (dmamask == DMA_BIT_MASK(30)) return B43legacy_DMA_30BIT; if (dmamask == DMA_BIT_MASK(32)) return B43legacy_DMA_32BIT; if (dmamask == DMA_BIT_MASK(64)) return B43legacy_DMA_64BIT; B43legacy_WARN_ON(1); return B43legacy_DMA_30BIT; } /* Main initialization function. 
*/ static struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev, int controller_index, int for_tx, enum b43legacy_dmatype type) { struct b43legacy_dmaring *ring; int err; int nr_slots; dma_addr_t dma_test; ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) goto out; ring->type = type; ring->dev = dev; nr_slots = B43legacy_RXRING_SLOTS; if (for_tx) nr_slots = B43legacy_TXRING_SLOTS; ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta), GFP_KERNEL); if (!ring->meta) goto err_kfree_ring; if (for_tx) { ring->txhdr_cache = kcalloc(nr_slots, sizeof(struct b43legacy_txhdr_fw3), GFP_KERNEL); if (!ring->txhdr_cache) goto err_kfree_meta; /* test for ability to dma to txhdr_cache */ dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache, sizeof(struct b43legacy_txhdr_fw3), DMA_TO_DEVICE); if (b43legacy_dma_mapping_error(ring, dma_test, sizeof(struct b43legacy_txhdr_fw3), 1)) { /* ugh realloc */ kfree(ring->txhdr_cache); ring->txhdr_cache = kcalloc(nr_slots, sizeof(struct b43legacy_txhdr_fw3), GFP_KERNEL | GFP_DMA); if (!ring->txhdr_cache) goto err_kfree_meta; dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache, sizeof(struct b43legacy_txhdr_fw3), DMA_TO_DEVICE); if (b43legacy_dma_mapping_error(ring, dma_test, sizeof(struct b43legacy_txhdr_fw3), 1)) goto err_kfree_txhdr_cache; } dma_unmap_single(dev->dev->dma_dev, dma_test, sizeof(struct b43legacy_txhdr_fw3), DMA_TO_DEVICE); } ring->nr_slots = nr_slots; ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index); ring->index = controller_index; if (type == B43legacy_DMA_64BIT) ring->ops = &dma64_ops; else ring->ops = &dma32_ops; if (for_tx) { ring->tx = 1; ring->current_slot = -1; } else { if (ring->index == 0) { ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE; ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET; } else if (ring->index == 3) { ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE; ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET; } else 
B43legacy_WARN_ON(1); } spin_lock_init(&ring->lock); #ifdef CONFIG_B43LEGACY_DEBUG ring->last_injected_overflow = jiffies; #endif err = alloc_ringmemory(ring); if (err) goto err_kfree_txhdr_cache; err = dmacontroller_setup(ring); if (err) goto err_free_ringmemory; out: return ring; err_free_ringmemory: free_ringmemory(ring); err_kfree_txhdr_cache: kfree(ring->txhdr_cache); err_kfree_meta: kfree(ring->meta); err_kfree_ring: kfree(ring); ring = NULL; goto out; } /* Main cleanup function. */ static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring) { if (!ring) return; b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:" " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base, (ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots); /* Device IRQs are disabled prior entering this function, * so no need to take care of concurrency with rx handler stuff. */ dmacontroller_cleanup(ring); free_all_descbuffers(ring); free_ringmemory(ring); kfree(ring->txhdr_cache); kfree(ring->meta); kfree(ring); } void b43legacy_dma_free(struct b43legacy_wldev *dev) { struct b43legacy_dma *dma; if (b43legacy_using_pio(dev)) return; dma = &dev->dma; b43legacy_destroy_dmaring(dma->rx_ring3); dma->rx_ring3 = NULL; b43legacy_destroy_dmaring(dma->rx_ring0); dma->rx_ring0 = NULL; b43legacy_destroy_dmaring(dma->tx_ring5); dma->tx_ring5 = NULL; b43legacy_destroy_dmaring(dma->tx_ring4); dma->tx_ring4 = NULL; b43legacy_destroy_dmaring(dma->tx_ring3); dma->tx_ring3 = NULL; b43legacy_destroy_dmaring(dma->tx_ring2); dma->tx_ring2 = NULL; b43legacy_destroy_dmaring(dma->tx_ring1); dma->tx_ring1 = NULL; b43legacy_destroy_dmaring(dma->tx_ring0); dma->tx_ring0 = NULL; } static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask) { u64 orig_mask = mask; bool fallback = 0; int err; /* Try to set the DMA mask. If it fails, try falling back to a * lower mask, as we can always also support a lower one. 
*/ while (1) { err = dma_set_mask(dev->dev->dma_dev, mask); if (!err) { err = dma_set_coherent_mask(dev->dev->dma_dev, mask); if (!err) break; } if (mask == DMA_BIT_MASK(64)) { mask = DMA_BIT_MASK(32); fallback = 1; continue; } if (mask == DMA_BIT_MASK(32)) { mask = DMA_BIT_MASK(30); fallback = 1; continue; } b43legacyerr(dev->wl, "The machine/kernel does not support " "the required %u-bit DMA mask\n", (unsigned int)dma_mask_to_engine_type(orig_mask)); return -EOPNOTSUPP; } if (fallback) { b43legacyinfo(dev->wl, "DMA mask fallback from %u-bit to %u-" "bit\n", (unsigned int)dma_mask_to_engine_type(orig_mask), (unsigned int)dma_mask_to_engine_type(mask)); } return 0; } int b43legacy_dma_init(struct b43legacy_wldev *dev) { struct b43legacy_dma *dma = &dev->dma; struct b43legacy_dmaring *ring; int err; u64 dmamask; enum b43legacy_dmatype type; dmamask = supported_dma_mask(dev); type = dma_mask_to_engine_type(dmamask); err = b43legacy_dma_set_mask(dev, dmamask); if (err) { #ifdef CONFIG_B43LEGACY_PIO b43legacywarn(dev->wl, "DMA for this device not supported. " "Falling back to PIO\n"); dev->__using_pio = 1; return -EAGAIN; #else b43legacyerr(dev->wl, "DMA for this device not supported and " "no PIO support compiled in\n"); return -EOPNOTSUPP; #endif } err = -ENOMEM; /* setup TX DMA channels. */ ring = b43legacy_setup_dmaring(dev, 0, 1, type); if (!ring) goto out; dma->tx_ring0 = ring; ring = b43legacy_setup_dmaring(dev, 1, 1, type); if (!ring) goto err_destroy_tx0; dma->tx_ring1 = ring; ring = b43legacy_setup_dmaring(dev, 2, 1, type); if (!ring) goto err_destroy_tx1; dma->tx_ring2 = ring; ring = b43legacy_setup_dmaring(dev, 3, 1, type); if (!ring) goto err_destroy_tx2; dma->tx_ring3 = ring; ring = b43legacy_setup_dmaring(dev, 4, 1, type); if (!ring) goto err_destroy_tx3; dma->tx_ring4 = ring; ring = b43legacy_setup_dmaring(dev, 5, 1, type); if (!ring) goto err_destroy_tx4; dma->tx_ring5 = ring; /* setup RX DMA channels. 
*/ ring = b43legacy_setup_dmaring(dev, 0, 0, type); if (!ring) goto err_destroy_tx5; dma->rx_ring0 = ring; if (dev->dev->id.revision < 5) { ring = b43legacy_setup_dmaring(dev, 3, 0, type); if (!ring) goto err_destroy_rx0; dma->rx_ring3 = ring; } b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type); err = 0; out: return err; err_destroy_rx0: b43legacy_destroy_dmaring(dma->rx_ring0); dma->rx_ring0 = NULL; err_destroy_tx5: b43legacy_destroy_dmaring(dma->tx_ring5); dma->tx_ring5 = NULL; err_destroy_tx4: b43legacy_destroy_dmaring(dma->tx_ring4); dma->tx_ring4 = NULL; err_destroy_tx3: b43legacy_destroy_dmaring(dma->tx_ring3); dma->tx_ring3 = NULL; err_destroy_tx2: b43legacy_destroy_dmaring(dma->tx_ring2); dma->tx_ring2 = NULL; err_destroy_tx1: b43legacy_destroy_dmaring(dma->tx_ring1); dma->tx_ring1 = NULL; err_destroy_tx0: b43legacy_destroy_dmaring(dma->tx_ring0); dma->tx_ring0 = NULL; goto out; } /* Generate a cookie for the TX header. */ static u16 generate_cookie(struct b43legacy_dmaring *ring, int slot) { u16 cookie = 0x1000; /* Use the upper 4 bits of the cookie as * DMA controller ID and store the slot number * in the lower 12 bits. * Note that the cookie must never be 0, as this * is a special value used in RX path. */ switch (ring->index) { case 0: cookie = 0xA000; break; case 1: cookie = 0xB000; break; case 2: cookie = 0xC000; break; case 3: cookie = 0xD000; break; case 4: cookie = 0xE000; break; case 5: cookie = 0xF000; break; } B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000)); cookie |= (u16)slot; return cookie; } /* Inspect a cookie and find out to which controller/slot it belongs. 
*/ static struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev, u16 cookie, int *slot) { struct b43legacy_dma *dma = &dev->dma; struct b43legacy_dmaring *ring = NULL; switch (cookie & 0xF000) { case 0xA000: ring = dma->tx_ring0; break; case 0xB000: ring = dma->tx_ring1; break; case 0xC000: ring = dma->tx_ring2; break; case 0xD000: ring = dma->tx_ring3; break; case 0xE000: ring = dma->tx_ring4; break; case 0xF000: ring = dma->tx_ring5; break; default: B43legacy_WARN_ON(1); } *slot = (cookie & 0x0FFF); B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots)); return ring; } static int dma_tx_fragment(struct b43legacy_dmaring *ring, struct sk_buff **in_skb) { struct sk_buff *skb = *in_skb; const struct b43legacy_dma_ops *ops = ring->ops; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); u8 *header; int slot, old_top_slot, old_used_slots; int err; struct b43legacy_dmadesc_generic *desc; struct b43legacy_dmadesc_meta *meta; struct b43legacy_dmadesc_meta *meta_hdr; struct sk_buff *bounce_skb; #define SLOTS_PER_PACKET 2 B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0); old_top_slot = ring->current_slot; old_used_slots = ring->used_slots; /* Get a slot for the header. 
*/ slot = request_slot(ring); desc = ops->idx2desc(ring, slot, &meta_hdr); memset(meta_hdr, 0, sizeof(*meta_hdr)); header = &(ring->txhdr_cache[slot * sizeof( struct b43legacy_txhdr_fw3)]); err = b43legacy_generate_txhdr(ring->dev, header, skb->data, skb->len, info, generate_cookie(ring, slot)); if (unlikely(err)) { ring->current_slot = old_top_slot; ring->used_slots = old_used_slots; return err; } meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, sizeof(struct b43legacy_txhdr_fw3), 1); if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr, sizeof(struct b43legacy_txhdr_fw3), 1)) { ring->current_slot = old_top_slot; ring->used_slots = old_used_slots; return -EIO; } ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr, sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0); /* Get a slot for the payload. */ slot = request_slot(ring); desc = ops->idx2desc(ring, slot, &meta); memset(meta, 0, sizeof(*meta)); meta->skb = skb; meta->is_last_fragment = 1; meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); /* create a bounce buffer in zone_dma on mapping failure. 
*/ if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); if (!bounce_skb) { ring->current_slot = old_top_slot; ring->used_slots = old_used_slots; err = -ENOMEM; goto out_unmap_hdr; } memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len); memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb)); bounce_skb->dev = skb->dev; skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb)); info = IEEE80211_SKB_CB(bounce_skb); dev_kfree_skb_any(skb); skb = bounce_skb; *in_skb = bounce_skb; meta->skb = skb; meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { ring->current_slot = old_top_slot; ring->used_slots = old_used_slots; err = -EIO; goto out_free_bounce; } } ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1); wmb(); /* previous stuff MUST be done */ /* Now transfer the whole frame. */ ops->poke_tx(ring, next_slot(ring, slot)); return 0; out_free_bounce: dev_kfree_skb_any(skb); out_unmap_hdr: unmap_descbuffer(ring, meta_hdr->dmaaddr, sizeof(struct b43legacy_txhdr_fw3), 1); return err; } static inline int should_inject_overflow(struct b43legacy_dmaring *ring) { #ifdef CONFIG_B43LEGACY_DEBUG if (unlikely(b43legacy_debug(ring->dev, B43legacy_DBG_DMAOVERFLOW))) { /* Check if we should inject another ringbuffer overflow * to test handling of this situation in the stack. 
*/ unsigned long next_overflow; next_overflow = ring->last_injected_overflow + HZ; if (time_after(jiffies, next_overflow)) { ring->last_injected_overflow = jiffies; b43legacydbg(ring->dev->wl, "Injecting TX ring overflow on " "DMA controller %d\n", ring->index); return 1; } } #endif /* CONFIG_B43LEGACY_DEBUG */ return 0; } int b43legacy_dma_tx(struct b43legacy_wldev *dev, struct sk_buff *skb) { struct b43legacy_dmaring *ring; struct ieee80211_hdr *hdr; int err = 0; unsigned long flags; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); ring = priority_to_txring(dev, skb_get_queue_mapping(skb)); spin_lock_irqsave(&ring->lock, flags); B43legacy_WARN_ON(!ring->tx); if (unlikely(ring->stopped)) { /* We get here only because of a bug in mac80211. * Because of a race, one packet may be queued after * the queue is stopped, thus we got called when we shouldn't. * For now, just refuse the transmit. */ if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE)) b43legacyerr(dev->wl, "Packet after queue stopped\n"); err = -ENOSPC; goto out_unlock; } if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) { /* If we get here, we have a real error with the queue * full, but queues not stopped. */ b43legacyerr(dev->wl, "DMA queue overflow\n"); err = -ENOSPC; goto out_unlock; } /* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing * into the skb data or cb now. */ hdr = NULL; info = NULL; err = dma_tx_fragment(ring, &skb); if (unlikely(err == -ENOKEY)) { /* Drop this packet, as we don't have the encryption key * anymore and must not transmit it unencrypted. */ dev_kfree_skb_any(skb); err = 0; goto out_unlock; } if (unlikely(err)) { b43legacyerr(dev->wl, "DMA tx mapping failure\n"); goto out_unlock; } if ((free_slots(ring) < SLOTS_PER_PACKET) || should_inject_overflow(ring)) { /* This TX ring is full. 
*/ ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring)); ring->stopped = 1; if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE)) b43legacydbg(dev->wl, "Stopped TX ring %d\n", ring->index); } out_unlock: spin_unlock_irqrestore(&ring->lock, flags); return err; } void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev, const struct b43legacy_txstatus *status) { const struct b43legacy_dma_ops *ops; struct b43legacy_dmaring *ring; struct b43legacy_dmadesc_generic *desc; struct b43legacy_dmadesc_meta *meta; int retry_limit; int slot; ring = parse_cookie(dev, status->cookie, &slot); if (unlikely(!ring)) return; B43legacy_WARN_ON(!irqs_disabled()); spin_lock(&ring->lock); B43legacy_WARN_ON(!ring->tx); ops = ring->ops; while (1) { B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); desc = ops->idx2desc(ring, slot, &meta); if (meta->skb) unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); else unmap_descbuffer(ring, meta->dmaaddr, sizeof(struct b43legacy_txhdr_fw3), 1); if (meta->is_last_fragment) { struct ieee80211_tx_info *info; BUG_ON(!meta->skb); info = IEEE80211_SKB_CB(meta->skb); /* preserve the confiured retry limit before clearing the status * The xmit function has overwritten the rc's value with the actual * retry limit done by the hardware */ retry_limit = info->status.rates[0].count; ieee80211_tx_info_clear_status(info); if (status->acked) info->flags |= IEEE80211_TX_STAT_ACK; if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) { /* * If the short retries (RTS, not data frame) have exceeded * the limit, the hw will not have tried the selected rate, * but will have used the fallback rate instead. * Don't let the rate control count attempts for the selected * rate in this case, otherwise the statistics will be off. 
*/ info->status.rates[0].count = 0; info->status.rates[1].count = status->frame_count; } else { if (status->frame_count > retry_limit) { info->status.rates[0].count = retry_limit; info->status.rates[1].count = status->frame_count - retry_limit; } else { info->status.rates[0].count = status->frame_count; info->status.rates[1].idx = -1; } } /* Call back to inform the ieee80211 subsystem about the * status of the transmission. * Some fields of txstat are already filled in dma_tx(). */ ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb); /* skb is freed by ieee80211_tx_status_irqsafe() */ meta->skb = NULL; } else { /* No need to call free_descriptor_buffer here, as * this is only the txhdr, which is not allocated. */ B43legacy_WARN_ON(meta->skb != NULL); } /* Everything unmapped and free'd. So it's not used anymore. */ ring->used_slots--; if (meta->is_last_fragment) break; slot = next_slot(ring, slot); } dev->stats.last_tx = jiffies; if (ring->stopped) { B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET); ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring)); ring->stopped = 0; if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE)) b43legacydbg(dev->wl, "Woke up TX ring %d\n", ring->index); } spin_unlock(&ring->lock); } static void dma_rx(struct b43legacy_dmaring *ring, int *slot) { const struct b43legacy_dma_ops *ops = ring->ops; struct b43legacy_dmadesc_generic *desc; struct b43legacy_dmadesc_meta *meta; struct b43legacy_rxhdr_fw3 *rxhdr; struct sk_buff *skb; u16 len; int err; dma_addr_t dmaaddr; desc = ops->idx2desc(ring, *slot, &meta); sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize); skb = meta->skb; if (ring->index == 3) { /* We received an xmit status. */ struct b43legacy_hwtxstatus *hw = (struct b43legacy_hwtxstatus *)skb->data; int i = 0; while (hw->cookie == 0) { if (i > 100) break; i++; udelay(2); barrier(); } b43legacy_handle_hwtxstatus(ring->dev, hw); /* recycle the descriptor buffer. 
*/ sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize); return; } rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data; len = le16_to_cpu(rxhdr->frame_len); if (len == 0) { int i = 0; do { udelay(2); barrier(); len = le16_to_cpu(rxhdr->frame_len); } while (len == 0 && i++ < 5); if (unlikely(len == 0)) { /* recycle the descriptor buffer. */ sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize); goto drop; } } if (unlikely(len > ring->rx_buffersize)) { /* The data did not fit into one descriptor buffer * and is split over multiple buffers. * This should never happen, as we try to allocate buffers * big enough. So simply ignore this packet. */ int cnt = 0; s32 tmp = len; while (1) { desc = ops->idx2desc(ring, *slot, &meta); /* recycle the descriptor buffer. */ sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize); *slot = next_slot(ring, *slot); cnt++; tmp -= ring->rx_buffersize; if (tmp <= 0) break; } b43legacyerr(ring->dev->wl, "DMA RX buffer too small " "(len: %u, buffer: %u, nr-dropped: %d)\n", len, ring->rx_buffersize, cnt); goto drop; } dmaaddr = meta->dmaaddr; err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC); if (unlikely(err)) { b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()" " failed\n"); sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize); goto drop; } unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0); skb_put(skb, len + ring->frameoffset); skb_pull(skb, ring->frameoffset); b43legacy_rx(ring->dev, skb, rxhdr); drop: return; } void b43legacy_dma_rx(struct b43legacy_dmaring *ring) { const struct b43legacy_dma_ops *ops = ring->ops; int slot; int current_slot; int used_slots = 0; B43legacy_WARN_ON(ring->tx); current_slot = ops->get_current_rxslot(ring); B43legacy_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots)); slot = ring->current_slot; for (; slot != current_slot; slot = next_slot(ring, slot)) { dma_rx(ring, &slot); update_max_used_slots(ring, ++used_slots); } 
ops->set_current_rxslot(ring, slot); ring->current_slot = slot; } static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring) { unsigned long flags; spin_lock_irqsave(&ring->lock, flags); B43legacy_WARN_ON(!ring->tx); ring->ops->tx_suspend(ring); spin_unlock_irqrestore(&ring->lock, flags); } static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring) { unsigned long flags; spin_lock_irqsave(&ring->lock, flags); B43legacy_WARN_ON(!ring->tx); ring->ops->tx_resume(ring); spin_unlock_irqrestore(&ring->lock, flags); } void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev) { b43legacy_power_saving_ctl_bits(dev, -1, 1); b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0); b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1); b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2); b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3); b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4); b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5); } void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev) { b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5); b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4); b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3); b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2); b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1); b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0); b43legacy_power_saving_ctl_bits(dev, -1, -1); }
gpl-2.0
AdrielVelazquez/Moto_XT1058
arch/mips/netlogic/xlp/setup.c
4516
3124
/* * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights * reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the NetLogic * license below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/kernel.h> #include <linux/serial_8250.h> #include <linux/pm.h> #include <asm/reboot.h> #include <asm/time.h> #include <asm/bootinfo.h> #include <linux/of_fdt.h> #include <asm/netlogic/haldefs.h> #include <asm/netlogic/common.h> #include <asm/netlogic/xlp-hal/iomap.h> #include <asm/netlogic/xlp-hal/xlp.h> #include <asm/netlogic/xlp-hal/sys.h> unsigned long nlm_common_ebase = 0x0; /* default to uniprocessor */ uint32_t nlm_coremask = 1, nlm_cpumask = 1; int nlm_threads_per_core = 1; static void nlm_linux_exit(void) { nlm_write_sys_reg(nlm_sys_base, SYS_CHIP_RESET, 1); for ( ; ; ) cpu_wait(); } void __init plat_mem_setup(void) { panic_timeout = 5; _machine_restart = (void (*)(char *))nlm_linux_exit; _machine_halt = nlm_linux_exit; pm_power_off = nlm_linux_exit; } const char *get_system_type(void) { return "Netlogic XLP Series"; } void __init prom_free_prom_memory(void) { /* Nothing yet */ } void xlp_mmu_init(void) { write_c0_config6(read_c0_config6() | 0x24); current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1; write_c0_config7(PM_DEFAULT_MASK >> (13 + (ffz(PM_DEFAULT_MASK >> 13) / 2))); } void __init prom_init(void) { void *fdtp; fdtp = (void *)(long)fw_arg0; xlp_mmu_init(); nlm_hal_init(); early_init_devtree(fdtp); nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1)); #ifdef CONFIG_SMP nlm_wakeup_secondary_cpus(0xffffffff); register_smp_ops(&nlm_smp_ops); #endif }
gpl-2.0
chrnueve/udooImaxdi
kernel_oficial_source/arch/ia64/sn/kernel/huberror.c
4772
6371
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1992 - 1997, 2000,2002-2007 Silicon Graphics, Inc. All rights reserved. */ #include <linux/types.h> #include <linux/interrupt.h> #include <asm/delay.h> #include <asm/sn/sn_sal.h> #include "ioerror.h" #include <asm/sn/addrs.h> #include <asm/sn/shubio.h> #include <asm/sn/geo.h> #include "xtalk/xwidgetdev.h" #include "xtalk/hubdev.h" #include <asm/sn/bte.h> void hubiio_crb_error_handler(struct hubdev_info *hubdev_info); extern void bte_crb_error_handler(cnodeid_t, int, int, ioerror_t *, int); static irqreturn_t hub_eint_handler(int irq, void *arg) { struct hubdev_info *hubdev_info; struct ia64_sal_retval ret_stuff; nasid_t nasid; ret_stuff.status = 0; ret_stuff.v0 = 0; hubdev_info = (struct hubdev_info *)arg; nasid = hubdev_info->hdi_nasid; if (is_shub1()) { SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT, (u64) nasid, 0, 0, 0, 0, 0, 0); if ((int)ret_stuff.v0) panic("%s: Fatal %s Error", __func__, ((nasid & 1) ? "TIO" : "HUBII")); if (!(nasid & 1)) /* Not a TIO, handle CRB errors */ (void)hubiio_crb_error_handler(hubdev_info); } else if (nasid & 1) { /* TIO errors */ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT, (u64) nasid, 0, 0, 0, 0, 0, 0); if ((int)ret_stuff.v0) panic("%s: Fatal TIO Error", __func__); } else bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid))); return IRQ_HANDLED; } /* * Free the hub CRB "crbnum" which encountered an error. * Assumption is, error handling was successfully done, * and we now want to return the CRB back to Hub for normal usage. * * In order to free the CRB, all that's needed is to de-allocate it * * Assumption: * No other processor is mucking around with the hub control register. * So, upper layer has to single thread this. 
*/ void hubiio_crb_free(struct hubdev_info *hubdev_info, int crbnum) { ii_icrb0_b_u_t icrbb; /* * The hardware does NOT clear the mark bit, so it must get cleared * here to be sure the error is not processed twice. */ icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(hubdev_info->hdi_nasid, IIO_ICRB_B(crbnum)); icrbb.b_mark = 0; REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICRB_B(crbnum), icrbb.ii_icrb0_b_regval); /* * Deallocate the register wait till hub indicates it's done. */ REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICDR, (IIO_ICDR_PND | crbnum)); while (REMOTE_HUB_L(hubdev_info->hdi_nasid, IIO_ICDR) & IIO_ICDR_PND) cpu_relax(); } /* * hubiio_crb_error_handler * * This routine gets invoked when a hub gets an error * interrupt. So, the routine is running in interrupt context * at error interrupt level. * Action: * It's responsible for identifying ALL the CRBs that are marked * with error, and process them. * * If you find the CRB that's marked with error, map this to the * reason it caused error, and invoke appropriate error handler. * * XXX Be aware of the information in the context register. * * NOTE: * Use REMOTE_HUB_* macro instead of LOCAL_HUB_* so that the interrupt * handler can be run on any node. (not necessarily the node * corresponding to the hub that encountered error). */ void hubiio_crb_error_handler(struct hubdev_info *hubdev_info) { nasid_t nasid; ii_icrb0_a_u_t icrba; /* II CRB Register A */ ii_icrb0_b_u_t icrbb; /* II CRB Register B */ ii_icrb0_c_u_t icrbc; /* II CRB Register C */ ii_icrb0_d_u_t icrbd; /* II CRB Register D */ ii_icrb0_e_u_t icrbe; /* II CRB Register D */ int i; int num_errors = 0; /* Num of errors handled */ ioerror_t ioerror; nasid = hubdev_info->hdi_nasid; /* * XXX - Add locking for any recovery actions */ /* * Scan through all CRBs in the Hub, and handle the errors * in any of the CRBs marked. */ for (i = 0; i < IIO_NUM_CRBS; i++) { /* Check this crb entry to see if it is in error. 
*/ icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(nasid, IIO_ICRB_B(i)); if (icrbb.b_mark == 0) { continue; } icrba.ii_icrb0_a_regval = REMOTE_HUB_L(nasid, IIO_ICRB_A(i)); IOERROR_INIT(&ioerror); /* read other CRB error registers. */ icrbc.ii_icrb0_c_regval = REMOTE_HUB_L(nasid, IIO_ICRB_C(i)); icrbd.ii_icrb0_d_regval = REMOTE_HUB_L(nasid, IIO_ICRB_D(i)); icrbe.ii_icrb0_e_regval = REMOTE_HUB_L(nasid, IIO_ICRB_E(i)); IOERROR_SETVALUE(&ioerror, errortype, icrbb.b_ecode); /* Check if this error is due to BTE operation, * and handle it separately. */ if (icrbd.d_bteop || ((icrbb.b_initiator == IIO_ICRB_INIT_BTE0 || icrbb.b_initiator == IIO_ICRB_INIT_BTE1) && (icrbb.b_imsgtype == IIO_ICRB_IMSGT_BTE || icrbb.b_imsgtype == IIO_ICRB_IMSGT_SN1NET))) { int bte_num; if (icrbd.d_bteop) bte_num = icrbc.c_btenum; else /* b_initiator bit 2 gives BTE number */ bte_num = (icrbb.b_initiator & 0x4) >> 2; hubiio_crb_free(hubdev_info, i); bte_crb_error_handler(nasid_to_cnodeid(nasid), bte_num, i, &ioerror, icrbd.d_bteop); num_errors++; continue; } } } /* * Function : hub_error_init * Purpose : initialize the error handling requirements for a given hub. * Parameters : cnode, the compact nodeid. * Assumptions : Called only once per hub, either by a local cpu. Or by a * remote cpu, when this hub is headless.(cpuless) * Returns : None */ void hub_error_init(struct hubdev_info *hubdev_info) { if (request_irq(SGI_II_ERROR, hub_eint_handler, IRQF_SHARED, "SN_hub_error", hubdev_info)) { printk(KERN_ERR "hub_error_init: Failed to request_irq for 0x%p\n", hubdev_info); return; } sn_set_err_irq_affinity(SGI_II_ERROR); } /* * Function : ice_error_init * Purpose : initialize the error handling requirements for a given tio. * Parameters : cnode, the compact nodeid. * Assumptions : Called only once per tio. 
* Returns : None */ void ice_error_init(struct hubdev_info *hubdev_info) { if (request_irq (SGI_TIO_ERROR, (void *)hub_eint_handler, IRQF_SHARED, "SN_TIO_error", (void *)hubdev_info)) { printk("ice_error_init: request_irq() error hubdev_info 0x%p\n", hubdev_info); return; } sn_set_err_irq_affinity(SGI_TIO_ERROR); }
gpl-2.0
MyAOSP/kernel_asus_tf300t
arch/powerpc/platforms/embedded6xx/prpmc2800.c
4772
4045
/* * Board setup routines for the Motorola PrPMC2800 * * Author: Dale Farnsworth <dale@farnsworth.org> * * 2007 (c) MontaVista, Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/seq_file.h> #include <asm/machdep.h> #include <asm/prom.h> #include <asm/system.h> #include <asm/time.h> #include <mm/mmu_decl.h> #include <sysdev/mv64x60.h> #define MV64x60_MPP_CNTL_0 0x0000 #define MV64x60_MPP_CNTL_2 0x0008 #define MV64x60_GPP_IO_CNTL 0x0000 #define MV64x60_GPP_LEVEL_CNTL 0x0010 #define MV64x60_GPP_VALUE_SET 0x0018 #define PLATFORM_NAME_MAX 32 static char prpmc2800_platform_name[PLATFORM_NAME_MAX]; static void __iomem *mv64x60_mpp_reg_base; static void __iomem *mv64x60_gpp_reg_base; static void __init prpmc2800_setup_arch(void) { struct device_node *np; phys_addr_t paddr; const unsigned int *reg; /* * ioremap mpp and gpp registers in case they are later * needed by prpmc2800_reset_board(). 
*/ np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-mpp"); reg = of_get_property(np, "reg", NULL); paddr = of_translate_address(np, reg); of_node_put(np); mv64x60_mpp_reg_base = ioremap(paddr, reg[1]); np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-gpp"); reg = of_get_property(np, "reg", NULL); paddr = of_translate_address(np, reg); of_node_put(np); mv64x60_gpp_reg_base = ioremap(paddr, reg[1]); #ifdef CONFIG_PCI mv64x60_pci_init(); #endif printk("Motorola %s\n", prpmc2800_platform_name); } static void prpmc2800_reset_board(void) { u32 temp; local_irq_disable(); temp = in_le32(mv64x60_mpp_reg_base + MV64x60_MPP_CNTL_0); temp &= 0xFFFF0FFF; out_le32(mv64x60_mpp_reg_base + MV64x60_MPP_CNTL_0, temp); temp = in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_LEVEL_CNTL); temp |= 0x00000004; out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_LEVEL_CNTL, temp); temp = in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_IO_CNTL); temp |= 0x00000004; out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_IO_CNTL, temp); temp = in_le32(mv64x60_mpp_reg_base + MV64x60_MPP_CNTL_2); temp &= 0xFFFF0FFF; out_le32(mv64x60_mpp_reg_base + MV64x60_MPP_CNTL_2, temp); temp = in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_LEVEL_CNTL); temp |= 0x00080000; out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_LEVEL_CNTL, temp); temp = in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_IO_CNTL); temp |= 0x00080000; out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_IO_CNTL, temp); out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_VALUE_SET, 0x00080004); } static void prpmc2800_restart(char *cmd) { volatile ulong i = 10000000; prpmc2800_reset_board(); while (i-- > 0); panic("restart failed\n"); } #ifdef CONFIG_NOT_COHERENT_CACHE #define PPRPM2800_COHERENCY_SETTING "off" #else #define PPRPM2800_COHERENCY_SETTING "on" #endif void prpmc2800_show_cpuinfo(struct seq_file *m) { seq_printf(m, "Vendor\t\t: Motorola\n"); seq_printf(m, "coherency\t: %s\n", PPRPM2800_COHERENCY_SETTING); } /* * Called very early, device-tree isn't unflattened */ 
static int __init prpmc2800_probe(void) { unsigned long root = of_get_flat_dt_root(); unsigned long len = PLATFORM_NAME_MAX; void *m; if (!of_flat_dt_is_compatible(root, "motorola,PrPMC2800")) return 0; /* Update ppc_md.name with name from dt */ m = of_get_flat_dt_prop(root, "model", &len); if (m) strncpy(prpmc2800_platform_name, m, min((int)len, PLATFORM_NAME_MAX - 1)); _set_L2CR(_get_L2CR() | L2CR_L2E); return 1; } define_machine(prpmc2800){ .name = prpmc2800_platform_name, .probe = prpmc2800_probe, .setup_arch = prpmc2800_setup_arch, .init_early = mv64x60_init_early, .show_cpuinfo = prpmc2800_show_cpuinfo, .init_IRQ = mv64x60_init_irq, .get_irq = mv64x60_get_irq, .restart = prpmc2800_restart, .calibrate_decr = generic_calibrate_decr, };
gpl-2.0
Quarx2k/android_kernel_lge_msm8226
drivers/ptp/ptp_pch.c
4772
16597
/* * PTP 1588 clock using the EG20T PCH * * Copyright (C) 2010 OMICRON electronics GmbH * Copyright (C) 2011-2012 LAPIS SEMICONDUCTOR Co., LTD. * * This code was derived from the IXP46X driver. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/device.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/ptp_clock_kernel.h> #include <linux/slab.h> #define STATION_ADDR_LEN 20 #define PCI_DEVICE_ID_PCH_1588 0x8819 #define IO_MEM_BAR 1 #define DEFAULT_ADDEND 0xA0000000 #define TICKS_NS_SHIFT 5 #define N_EXT_TS 2 enum pch_status { PCH_SUCCESS, PCH_INVALIDPARAM, PCH_NOTIMESTAMP, PCH_INTERRUPTMODEINUSE, PCH_FAILED, PCH_UNSUPPORTED, }; /** * struct pch_ts_regs - IEEE 1588 registers */ struct pch_ts_regs { u32 control; u32 event; u32 addend; u32 accum; u32 test; u32 ts_compare; u32 rsystime_lo; u32 rsystime_hi; u32 systime_lo; u32 systime_hi; u32 trgt_lo; u32 trgt_hi; u32 asms_lo; u32 asms_hi; u32 amms_lo; u32 amms_hi; u32 ch_control; u32 ch_event; u32 tx_snap_lo; u32 tx_snap_hi; u32 rx_snap_lo; u32 rx_snap_hi; u32 src_uuid_lo; u32 src_uuid_hi; u32 can_status; u32 can_snap_lo; u32 can_snap_hi; u32 ts_sel; u32 ts_st[6]; u32 reserve1[14]; u32 stl_max_set_en; u32 stl_max_set; u32 reserve2[13]; u32 srst; }; 
#define PCH_TSC_RESET (1 << 0) #define PCH_TSC_TTM_MASK (1 << 1) #define PCH_TSC_ASMS_MASK (1 << 2) #define PCH_TSC_AMMS_MASK (1 << 3) #define PCH_TSC_PPSM_MASK (1 << 4) #define PCH_TSE_TTIPEND (1 << 1) #define PCH_TSE_SNS (1 << 2) #define PCH_TSE_SNM (1 << 3) #define PCH_TSE_PPS (1 << 4) #define PCH_CC_MM (1 << 0) #define PCH_CC_TA (1 << 1) #define PCH_CC_MODE_SHIFT 16 #define PCH_CC_MODE_MASK 0x001F0000 #define PCH_CC_VERSION (1 << 31) #define PCH_CE_TXS (1 << 0) #define PCH_CE_RXS (1 << 1) #define PCH_CE_OVR (1 << 0) #define PCH_CE_VAL (1 << 1) #define PCH_ECS_ETH (1 << 0) #define PCH_ECS_CAN (1 << 1) #define PCH_STATION_BYTES 6 #define PCH_IEEE1588_ETH (1 << 0) #define PCH_IEEE1588_CAN (1 << 1) /** * struct pch_dev - Driver private data */ struct pch_dev { struct pch_ts_regs *regs; struct ptp_clock *ptp_clock; struct ptp_clock_info caps; int exts0_enabled; int exts1_enabled; u32 mem_base; u32 mem_size; u32 irq; struct pci_dev *pdev; spinlock_t register_lock; }; /** * struct pch_params - 1588 module parameter */ struct pch_params { u8 station[STATION_ADDR_LEN]; }; /* structure to hold the module parameters */ static struct pch_params pch_param = { "00:00:00:00:00:00" }; /* * Register access functions */ static inline void pch_eth_enable_set(struct pch_dev *chip) { u32 val; /* SET the eth_enable bit */ val = ioread32(&chip->regs->ts_sel) | (PCH_ECS_ETH); iowrite32(val, (&chip->regs->ts_sel)); } static u64 pch_systime_read(struct pch_ts_regs *regs) { u64 ns; u32 lo, hi; lo = ioread32(&regs->systime_lo); hi = ioread32(&regs->systime_hi); ns = ((u64) hi) << 32; ns |= lo; ns <<= TICKS_NS_SHIFT; return ns; } static void pch_systime_write(struct pch_ts_regs *regs, u64 ns) { u32 hi, lo; ns >>= TICKS_NS_SHIFT; hi = ns >> 32; lo = ns & 0xffffffff; iowrite32(lo, &regs->systime_lo); iowrite32(hi, &regs->systime_hi); } static inline void pch_block_reset(struct pch_dev *chip) { u32 val; /* Reset Hardware Assist block */ val = ioread32(&chip->regs->control) | PCH_TSC_RESET; 
iowrite32(val, (&chip->regs->control)); val = val & ~PCH_TSC_RESET; iowrite32(val, (&chip->regs->control)); } u32 pch_ch_control_read(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); u32 val; val = ioread32(&chip->regs->ch_control); return val; } EXPORT_SYMBOL(pch_ch_control_read); void pch_ch_control_write(struct pci_dev *pdev, u32 val) { struct pch_dev *chip = pci_get_drvdata(pdev); iowrite32(val, (&chip->regs->ch_control)); } EXPORT_SYMBOL(pch_ch_control_write); u32 pch_ch_event_read(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); u32 val; val = ioread32(&chip->regs->ch_event); return val; } EXPORT_SYMBOL(pch_ch_event_read); void pch_ch_event_write(struct pci_dev *pdev, u32 val) { struct pch_dev *chip = pci_get_drvdata(pdev); iowrite32(val, (&chip->regs->ch_event)); } EXPORT_SYMBOL(pch_ch_event_write); u32 pch_src_uuid_lo_read(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); u32 val; val = ioread32(&chip->regs->src_uuid_lo); return val; } EXPORT_SYMBOL(pch_src_uuid_lo_read); u32 pch_src_uuid_hi_read(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); u32 val; val = ioread32(&chip->regs->src_uuid_hi); return val; } EXPORT_SYMBOL(pch_src_uuid_hi_read); u64 pch_rx_snap_read(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); u64 ns; u32 lo, hi; lo = ioread32(&chip->regs->rx_snap_lo); hi = ioread32(&chip->regs->rx_snap_hi); ns = ((u64) hi) << 32; ns |= lo; return ns; } EXPORT_SYMBOL(pch_rx_snap_read); u64 pch_tx_snap_read(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); u64 ns; u32 lo, hi; lo = ioread32(&chip->regs->tx_snap_lo); hi = ioread32(&chip->regs->tx_snap_hi); ns = ((u64) hi) << 32; ns |= lo; return ns; } EXPORT_SYMBOL(pch_tx_snap_read); /* This function enables all 64 bits in system time registers [high & low]. 
This is a work-around for non continuous value in the SystemTime Register*/ static void pch_set_system_time_count(struct pch_dev *chip) { iowrite32(0x01, &chip->regs->stl_max_set_en); iowrite32(0xFFFFFFFF, &chip->regs->stl_max_set); iowrite32(0x00, &chip->regs->stl_max_set_en); } static void pch_reset(struct pch_dev *chip) { /* Reset Hardware Assist */ pch_block_reset(chip); /* enable all 32 bits in system time registers */ pch_set_system_time_count(chip); } /** * pch_set_station_address() - This API sets the station address used by * IEEE 1588 hardware when looking at PTP * traffic on the ethernet interface * @addr: dress which contain the column separated address to be used. */ static int pch_set_station_address(u8 *addr, struct pci_dev *pdev) { s32 i; struct pch_dev *chip = pci_get_drvdata(pdev); /* Verify the parameter */ if ((chip->regs == 0) || addr == (u8 *)NULL) { dev_err(&pdev->dev, "invalid params returning PCH_INVALIDPARAM\n"); return PCH_INVALIDPARAM; } /* For all station address bytes */ for (i = 0; i < PCH_STATION_BYTES; i++) { u32 val; s32 tmp; tmp = hex_to_bin(addr[i * 3]); if (tmp < 0) { dev_err(&pdev->dev, "invalid params returning PCH_INVALIDPARAM\n"); return PCH_INVALIDPARAM; } val = tmp * 16; tmp = hex_to_bin(addr[(i * 3) + 1]); if (tmp < 0) { dev_err(&pdev->dev, "invalid params returning PCH_INVALIDPARAM\n"); return PCH_INVALIDPARAM; } val += tmp; /* Expects ':' separated addresses */ if ((i < 5) && (addr[(i * 3) + 2] != ':')) { dev_err(&pdev->dev, "invalid params returning PCH_INVALIDPARAM\n"); return PCH_INVALIDPARAM; } /* Ideally we should set the address only after validating entire string */ dev_dbg(&pdev->dev, "invoking pch_station_set\n"); iowrite32(val, &chip->regs->ts_st[i]); } return 0; } /* * Interrupt service routine */ static irqreturn_t isr(int irq, void *priv) { struct pch_dev *pch_dev = priv; struct pch_ts_regs *regs = pch_dev->regs; struct ptp_clock_event event; u32 ack = 0, lo, hi, val; val = ioread32(&regs->event); if (val & 
PCH_TSE_SNS) { ack |= PCH_TSE_SNS; if (pch_dev->exts0_enabled) { hi = ioread32(&regs->asms_hi); lo = ioread32(&regs->asms_lo); event.type = PTP_CLOCK_EXTTS; event.index = 0; event.timestamp = ((u64) hi) << 32; event.timestamp |= lo; event.timestamp <<= TICKS_NS_SHIFT; ptp_clock_event(pch_dev->ptp_clock, &event); } } if (val & PCH_TSE_SNM) { ack |= PCH_TSE_SNM; if (pch_dev->exts1_enabled) { hi = ioread32(&regs->amms_hi); lo = ioread32(&regs->amms_lo); event.type = PTP_CLOCK_EXTTS; event.index = 1; event.timestamp = ((u64) hi) << 32; event.timestamp |= lo; event.timestamp <<= TICKS_NS_SHIFT; ptp_clock_event(pch_dev->ptp_clock, &event); } } if (val & PCH_TSE_TTIPEND) ack |= PCH_TSE_TTIPEND; /* this bit seems to be always set */ if (ack) { iowrite32(ack, &regs->event); return IRQ_HANDLED; } else return IRQ_NONE; } /* * PTP clock operations */ static int ptp_pch_adjfreq(struct ptp_clock_info *ptp, s32 ppb) { u64 adj; u32 diff, addend; int neg_adj = 0; struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); struct pch_ts_regs *regs = pch_dev->regs; if (ppb < 0) { neg_adj = 1; ppb = -ppb; } addend = DEFAULT_ADDEND; adj = addend; adj *= ppb; diff = div_u64(adj, 1000000000ULL); addend = neg_adj ? 
addend - diff : addend + diff; iowrite32(addend, &regs->addend); return 0; } static int ptp_pch_adjtime(struct ptp_clock_info *ptp, s64 delta) { s64 now; unsigned long flags; struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); struct pch_ts_regs *regs = pch_dev->regs; spin_lock_irqsave(&pch_dev->register_lock, flags); now = pch_systime_read(regs); now += delta; pch_systime_write(regs, now); spin_unlock_irqrestore(&pch_dev->register_lock, flags); return 0; } static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec *ts) { u64 ns; u32 remainder; unsigned long flags; struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); struct pch_ts_regs *regs = pch_dev->regs; spin_lock_irqsave(&pch_dev->register_lock, flags); ns = pch_systime_read(regs); spin_unlock_irqrestore(&pch_dev->register_lock, flags); ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); ts->tv_nsec = remainder; return 0; } static int ptp_pch_settime(struct ptp_clock_info *ptp, const struct timespec *ts) { u64 ns; unsigned long flags; struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); struct pch_ts_regs *regs = pch_dev->regs; ns = ts->tv_sec * 1000000000ULL; ns += ts->tv_nsec; spin_lock_irqsave(&pch_dev->register_lock, flags); pch_systime_write(regs, ns); spin_unlock_irqrestore(&pch_dev->register_lock, flags); return 0; } static int ptp_pch_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); switch (rq->type) { case PTP_CLK_REQ_EXTTS: switch (rq->extts.index) { case 0: pch_dev->exts0_enabled = on ? 1 : 0; break; case 1: pch_dev->exts1_enabled = on ? 
1 : 0; break; default: return -EINVAL; } return 0; default: break; } return -EOPNOTSUPP; } static struct ptp_clock_info ptp_pch_caps = { .owner = THIS_MODULE, .name = "PCH timer", .max_adj = 50000000, .n_ext_ts = N_EXT_TS, .pps = 0, .adjfreq = ptp_pch_adjfreq, .adjtime = ptp_pch_adjtime, .gettime = ptp_pch_gettime, .settime = ptp_pch_settime, .enable = ptp_pch_enable, }; #ifdef CONFIG_PM static s32 pch_suspend(struct pci_dev *pdev, pm_message_t state) { pci_disable_device(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); if (pci_save_state(pdev) != 0) { dev_err(&pdev->dev, "could not save PCI config state\n"); return -ENOMEM; } pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } static s32 pch_resume(struct pci_dev *pdev) { s32 ret; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "pci_enable_device failed\n"); return ret; } pci_enable_wake(pdev, PCI_D3hot, 0); return 0; } #else #define pch_suspend NULL #define pch_resume NULL #endif static void __devexit pch_remove(struct pci_dev *pdev) { struct pch_dev *chip = pci_get_drvdata(pdev); ptp_clock_unregister(chip->ptp_clock); /* free the interrupt */ if (pdev->irq != 0) free_irq(pdev->irq, chip); /* unmap the virtual IO memory space */ if (chip->regs != 0) { iounmap(chip->regs); chip->regs = 0; } /* release the reserved IO memory space */ if (chip->mem_base != 0) { release_mem_region(chip->mem_base, chip->mem_size); chip->mem_base = 0; } pci_disable_device(pdev); kfree(chip); dev_info(&pdev->dev, "complete\n"); } static s32 __devinit pch_probe(struct pci_dev *pdev, const struct pci_device_id *id) { s32 ret; unsigned long flags; struct pch_dev *chip; chip = kzalloc(sizeof(struct pch_dev), GFP_KERNEL); if (chip == NULL) return -ENOMEM; /* enable the 1588 pci device */ ret = pci_enable_device(pdev); if (ret != 0) { dev_err(&pdev->dev, "could not enable the pci device\n"); goto err_pci_en; } chip->mem_base = pci_resource_start(pdev, 
IO_MEM_BAR); if (!chip->mem_base) { dev_err(&pdev->dev, "could not locate IO memory address\n"); ret = -ENODEV; goto err_pci_start; } /* retrieve the available length of the IO memory space */ chip->mem_size = pci_resource_len(pdev, IO_MEM_BAR); /* allocate the memory for the device registers */ if (!request_mem_region(chip->mem_base, chip->mem_size, "1588_regs")) { dev_err(&pdev->dev, "could not allocate register memory space\n"); ret = -EBUSY; goto err_req_mem_region; } /* get the virtual address to the 1588 registers */ chip->regs = ioremap(chip->mem_base, chip->mem_size); if (!chip->regs) { dev_err(&pdev->dev, "Could not get virtual address\n"); ret = -ENOMEM; goto err_ioremap; } chip->caps = ptp_pch_caps; chip->ptp_clock = ptp_clock_register(&chip->caps); if (IS_ERR(chip->ptp_clock)) return PTR_ERR(chip->ptp_clock); spin_lock_init(&chip->register_lock); ret = request_irq(pdev->irq, &isr, IRQF_SHARED, KBUILD_MODNAME, chip); if (ret != 0) { dev_err(&pdev->dev, "failed to get irq %d\n", pdev->irq); goto err_req_irq; } /* indicate success */ chip->irq = pdev->irq; chip->pdev = pdev; pci_set_drvdata(pdev, chip); spin_lock_irqsave(&chip->register_lock, flags); /* reset the ieee1588 h/w */ pch_reset(chip); iowrite32(DEFAULT_ADDEND, &chip->regs->addend); iowrite32(1, &chip->regs->trgt_lo); iowrite32(0, &chip->regs->trgt_hi); iowrite32(PCH_TSE_TTIPEND, &chip->regs->event); /* Version: IEEE1588 v1 and IEEE1588-2008, Mode: All Evwnt, Locked */ iowrite32(0x80020000, &chip->regs->ch_control); pch_eth_enable_set(chip); if (strcmp(pch_param.station, "00:00:00:00:00:00") != 0) { if (pch_set_station_address(pch_param.station, pdev) != 0) { dev_err(&pdev->dev, "Invalid station address parameter\n" "Module loaded but station address not set correctly\n" ); } } spin_unlock_irqrestore(&chip->register_lock, flags); return 0; err_req_irq: ptp_clock_unregister(chip->ptp_clock); iounmap(chip->regs); chip->regs = 0; err_ioremap: release_mem_region(chip->mem_base, chip->mem_size); 
err_req_mem_region: chip->mem_base = 0; err_pci_start: pci_disable_device(pdev); err_pci_en: kfree(chip); dev_err(&pdev->dev, "probe failed(ret=0x%x)\n", ret); return ret; } static DEFINE_PCI_DEVICE_TABLE(pch_ieee1588_pcidev_id) = { { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_PCH_1588 }, {0} }; static struct pci_driver pch_driver = { .name = KBUILD_MODNAME, .id_table = pch_ieee1588_pcidev_id, .probe = pch_probe, .remove = pch_remove, .suspend = pch_suspend, .resume = pch_resume, }; static void __exit ptp_pch_exit(void) { pci_unregister_driver(&pch_driver); } static s32 __init ptp_pch_init(void) { s32 ret; /* register the driver with the pci core */ ret = pci_register_driver(&pch_driver); return ret; } module_init(ptp_pch_init); module_exit(ptp_pch_exit); module_param_string(station, pch_param.station, sizeof pch_param.station, 0444); MODULE_PARM_DESC(station, "IEEE 1588 station address to use - column separated hex values"); MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>"); MODULE_DESCRIPTION("PTP clock using the EG20T timer"); MODULE_LICENSE("GPL");
gpl-2.0
n3ocort3x/kernel_oppo_msm8974
sound/arm/pxa2xx-pcm.c
7588
3217
/*
 * linux/sound/arm/pxa2xx-pcm.c -- ALSA PCM interface for the Intel PXA2xx chip
 *
 * Author:	Nicolas Pitre
 * Created:	Nov 30, 2004
 * Copyright:	(C) 2004 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <sound/core.h>
#include <sound/pxa2xx-lib.h>

#include "pxa2xx-pcm.h"

/*
 * Prepare callback: run the shared pxa2xx-lib preparation first,
 * then hand off to the board-specific client hook.
 */
static int pxa2xx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct pxa2xx_pcm_client *client = substream->private_data;

	__pxa2xx_pcm_prepare(substream);

	return client->prepare(substream);
}

/*
 * Open callback: acquire the generic runtime data, pick the DMA
 * parameters for the stream direction, grab a DMA channel, and let
 * the client perform its own startup.  Resources are unwound in
 * reverse order on any failure.
 */
static int pxa2xx_pcm_open(struct snd_pcm_substream *substream)
{
	struct pxa2xx_pcm_client *client = substream->private_data;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct pxa2xx_runtime_data *rtd;
	int ret;

	ret = __pxa2xx_pcm_open(substream);
	if (ret)
		goto out;

	rtd = runtime->private_data;

	/* direction-specific DMA descriptor parameters */
	rtd->params = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
		      client->playback_params : client->capture_params;
	ret = pxa_request_dma(rtd->params->name, DMA_PRIO_LOW,
			      pxa2xx_pcm_dma_irq, substream);
	if (ret < 0)
		goto err2;
	/* pxa_request_dma() returns the channel number on success */
	rtd->dma_ch = ret;

	ret = client->startup(substream);
	if (!ret)
		goto out;

	/* client startup failed: release the DMA channel ... */
	pxa_free_dma(rtd->dma_ch);
 err2:
	/* ... and the generic runtime state */
	__pxa2xx_pcm_close(substream);
 out:
	return ret;
}

/*
 * Close callback: mirror image of open — free the DMA channel, shut
 * down the client, then release the generic runtime state.
 */
static int pxa2xx_pcm_close(struct snd_pcm_substream *substream)
{
	struct pxa2xx_pcm_client *client = substream->private_data;
	struct pxa2xx_runtime_data *rtd = substream->runtime->private_data;

	pxa_free_dma(rtd->dma_ch);
	client->shutdown(substream);

	return __pxa2xx_pcm_close(substream);
}

/* PCM ops: everything except open/close/prepare comes straight from
 * the shared pxa2xx-lib helpers. */
static struct snd_pcm_ops pxa2xx_pcm_ops = {
	.open		= pxa2xx_pcm_open,
	.close		= pxa2xx_pcm_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= __pxa2xx_pcm_hw_params,
	.hw_free	= __pxa2xx_pcm_hw_free,
	.prepare	= pxa2xx_pcm_prepare,
	.trigger	= pxa2xx_pcm_trigger,
	.pointer	= pxa2xx_pcm_pointer,
	.mmap		= pxa2xx_pcm_mmap,
};

/* 32-bit DMA mask used when the card's device has none set. */
static u64 pxa2xx_pcm_dmamask = 0xffffffff;

/*
 * Create a new PCM device for @card.  Playback/capture substreams are
 * created only when the client supplies the corresponding DMA params.
 * On success the new PCM is optionally returned through @rpcm.
 */
int pxa2xx_pcm_new(struct snd_card *card, struct pxa2xx_pcm_client *client,
		   struct snd_pcm **rpcm)
{
	struct snd_pcm *pcm;
	int play = client->playback_params ? 1 : 0;
	int capt = client->capture_params ? 1 : 0;
	int ret;

	ret = snd_pcm_new(card, "PXA2xx-PCM", 0, play, capt, &pcm);
	if (ret)
		goto out;

	pcm->private_data = client;
	pcm->private_free = pxa2xx_pcm_free_dma_buffers;

	/* fall back to a full 32-bit DMA mask if the platform set none */
	if (!card->dev->dma_mask)
		card->dev->dma_mask = &pxa2xx_pcm_dmamask;
	if (!card->dev->coherent_dma_mask)
		card->dev->coherent_dma_mask = 0xffffffff;

	if (play) {
		int stream = SNDRV_PCM_STREAM_PLAYBACK;
		snd_pcm_set_ops(pcm, stream, &pxa2xx_pcm_ops);
		ret = pxa2xx_pcm_preallocate_dma_buffer(pcm, stream);
		if (ret)
			goto out;
	}
	if (capt) {
		int stream = SNDRV_PCM_STREAM_CAPTURE;
		snd_pcm_set_ops(pcm, stream, &pxa2xx_pcm_ops);
		ret = pxa2xx_pcm_preallocate_dma_buffer(pcm, stream);
		if (ret)
			goto out;
	}

	if (rpcm)
		*rpcm = pcm;
	ret = 0;

 out:
	return ret;
}
EXPORT_SYMBOL(pxa2xx_pcm_new);

MODULE_AUTHOR("Nicolas Pitre");
MODULE_DESCRIPTION("Intel PXA2xx PCM DMA module");
MODULE_LICENSE("GPL");
gpl-2.0
D2005-devs/android_kernel_sony_msm8610
arch/powerpc/sysdev/of_rtc.c
10916
1597
/* * Instantiate mmio-mapped RTC chips based on device tree information * * Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/of.h> #include <linux/init.h> #include <linux/of_platform.h> #include <linux/slab.h> static __initdata struct { const char *compatible; char *plat_name; } of_rtc_table[] = { { "ds1743-nvram", "rtc-ds1742" }, }; void __init of_instantiate_rtc(void) { struct device_node *node; int err; int i; for (i = 0; i < ARRAY_SIZE(of_rtc_table); i++) { char *plat_name = of_rtc_table[i].plat_name; for_each_compatible_node(node, NULL, of_rtc_table[i].compatible) { struct resource *res; res = kmalloc(sizeof(*res), GFP_KERNEL); if (!res) { printk(KERN_ERR "OF RTC: Out of memory " "allocating resource structure for %s\n", node->full_name); continue; } err = of_address_to_resource(node, 0, res); if (err) { printk(KERN_ERR "OF RTC: Error " "translating resources for %s\n", node->full_name); continue; } printk(KERN_INFO "OF_RTC: %s is a %s @ 0x%llx-0x%llx\n", node->full_name, plat_name, (unsigned long long)res->start, (unsigned long long)res->end); platform_device_register_simple(plat_name, -1, res, 1); } } }
gpl-2.0
HtcLegacy/android_kernel_htc_protou
arch/alpha/kernel/err_titan.c
11940
23439
/*
 * linux/arch/alpha/kernel/err_titan.c
 *
 * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
 *
 * Error handling code supporting TITAN systems
 */

#include <linux/init.h>
#include <linux/pci.h>
#include <linux/sched.h>

#include <asm/io.h>
#include <asm/core_titan.h>
#include <asm/hwrpb.h>
#include <asm/smp.h>
#include <asm/err_common.h>
#include <asm/err_ev6.h>
#include <asm/irq_regs.h>

#include "err_impl.h"
#include "proto.h"

/*
 * Decode the CChip MISC register for non-existent-memory (NXM) errors.
 * Returns MCHK_DISPOSITION_REPORT if an NXM error is latched, otherwise
 * MCHK_DISPOSITION_UNKNOWN_ERROR; verbose output only when @print is set.
 */
static int
titan_parse_c_misc(u64 c_misc, int print)
{
#ifdef CONFIG_VERBOSE_MCHECK
	char *src;
	int nxs = 0;
#endif
	int status = MCHK_DISPOSITION_REPORT;

#define TITAN__CCHIP_MISC__NXM		(1UL << 28)
#define TITAN__CCHIP_MISC__NXS__S	(29)
#define TITAN__CCHIP_MISC__NXS__M	(0x7)

	if (!(c_misc & TITAN__CCHIP_MISC__NXM))
		return MCHK_DISPOSITION_UNKNOWN_ERROR;

#ifdef CONFIG_VERBOSE_MCHECK
	if (!print)
		return status;

	/* NXS field identifies the agent that issued the bad access */
	nxs = EXTRACT(c_misc, TITAN__CCHIP_MISC__NXS);
	switch(nxs) {
	case 0:	/* CPU 0 */
	case 1:	/* CPU 1 */
	case 2:	/* CPU 2 */
	case 3:	/* CPU 3 */
		src = "CPU";
		/* num is already the CPU number */
		break;
	case 4:	/* Pchip 0 */
	case 5:	/* Pchip 1 */
		src = "Pchip";
		nxs -= 4;
		break;
	default:/* reserved */
		src = "Unknown, NXS =";
		/* leave num untouched */
		break;
	}

	printk("%s Non-existent memory access from: %s %d\n",
	       err_print_prefix, src, nxs);
#endif /* CONFIG_VERBOSE_MCHECK */

	return status;
}

/*
 * Decode a PChip SERROR (system-bus error) register: ECC errors,
 * non-existent I/O, and lost-error indications.
 */
static int
titan_parse_p_serror(int which, u64 serror, int print)
{
	int status = MCHK_DISPOSITION_REPORT;

#ifdef CONFIG_VERBOSE_MCHECK
	static const char * const serror_src[] = {
		"GPCI", "APCI", "AGP HP", "AGP LP"
	};
	static const char * const serror_cmd[] = {
		"DMA Read", "DMA RMW", "SGTE Read", "Reserved"
	};
#endif /* CONFIG_VERBOSE_MCHECK */

#define TITAN__PCHIP_SERROR__LOST_UECC	(1UL << 0)
#define TITAN__PCHIP_SERROR__UECC	(1UL << 1)
#define TITAN__PCHIP_SERROR__CRE	(1UL << 2)
#define TITAN__PCHIP_SERROR__NXIO	(1UL << 3)
#define TITAN__PCHIP_SERROR__LOST_CRE	(1UL << 4)
#define TITAN__PCHIP_SERROR__ECCMASK	(TITAN__PCHIP_SERROR__UECC | \
					 TITAN__PCHIP_SERROR__CRE)
#define TITAN__PCHIP_SERROR__ERRMASK	(TITAN__PCHIP_SERROR__LOST_UECC | \
					 TITAN__PCHIP_SERROR__UECC | \
					 TITAN__PCHIP_SERROR__CRE | \
					 TITAN__PCHIP_SERROR__NXIO | \
					 TITAN__PCHIP_SERROR__LOST_CRE)
#define TITAN__PCHIP_SERROR__SRC__S	(52)
#define TITAN__PCHIP_SERROR__SRC__M	(0x3)
#define TITAN__PCHIP_SERROR__CMD__S	(54)
#define TITAN__PCHIP_SERROR__CMD__M	(0x3)
#define TITAN__PCHIP_SERROR__SYN__S	(56)
#define TITAN__PCHIP_SERROR__SYN__M	(0xff)
#define TITAN__PCHIP_SERROR__ADDR__S	(15)
#define TITAN__PCHIP_SERROR__ADDR__M	(0xffffffffUL)

	if (!(serror & TITAN__PCHIP_SERROR__ERRMASK))
		return MCHK_DISPOSITION_UNKNOWN_ERROR;

#ifdef CONFIG_VERBOSE_MCHECK
	if (!print)
		return status;

	printk("%s PChip %d SERROR: %016llx\n",
	       err_print_prefix, which, serror);
	if (serror & TITAN__PCHIP_SERROR__ECCMASK) {
		printk("%s %sorrectable ECC Error:\n"
		       " Source: %-6s Command: %-8s Syndrome: 0x%08x\n"
		       " Address: 0x%llx\n",
		       err_print_prefix,
		       (serror & TITAN__PCHIP_SERROR__UECC) ? "Unc" : "C",
		       serror_src[EXTRACT(serror, TITAN__PCHIP_SERROR__SRC)],
		       serror_cmd[EXTRACT(serror, TITAN__PCHIP_SERROR__CMD)],
		       (unsigned)EXTRACT(serror, TITAN__PCHIP_SERROR__SYN),
		       EXTRACT(serror, TITAN__PCHIP_SERROR__ADDR));
	}
	if (serror & TITAN__PCHIP_SERROR__NXIO)
		printk("%s Non Existent I/O Error\n", err_print_prefix);
	if (serror & TITAN__PCHIP_SERROR__LOST_UECC)
		printk("%s Lost Uncorrectable ECC Error\n",
		       err_print_prefix);
	if (serror & TITAN__PCHIP_SERROR__LOST_CRE)
		printk("%s Lost Correctable ECC Error\n", err_print_prefix);
#endif /* CONFIG_VERBOSE_MCHECK */

	return status;
}

/*
 * Decode a PChip PERROR (PCI-bus error) register for the G-port
 * (port==0) or A-port (port==1).  Master aborts caused by benign VGA
 * BIOS probing are downgraded to MCHK_DISPOSITION_DISMISS.
 */
static int
titan_parse_p_perror(int which, int port, u64 perror, int print)
{
	int cmd;
	unsigned long addr;
	int status = MCHK_DISPOSITION_REPORT;

#ifdef CONFIG_VERBOSE_MCHECK
	static const char * const perror_cmd[] = {
		"Interrupt Acknowledge", "Special Cycle",
		"I/O Read", "I/O Write",
		"Reserved", "Reserved",
		"Memory Read", "Memory Write",
		"Reserved", "Reserved",
		"Configuration Read", "Configuration Write",
		"Memory Read Multiple", "Dual Address Cycle",
		"Memory Read Line", "Memory Write and Invalidate"
	};
#endif /* CONFIG_VERBOSE_MCHECK */

#define TITAN__PCHIP_PERROR__LOST	(1UL << 0)
#define TITAN__PCHIP_PERROR__SERR	(1UL << 1)
#define TITAN__PCHIP_PERROR__PERR	(1UL << 2)
#define TITAN__PCHIP_PERROR__DCRTO	(1UL << 3)
#define TITAN__PCHIP_PERROR__SGE	(1UL << 4)
#define TITAN__PCHIP_PERROR__APE	(1UL << 5)
#define TITAN__PCHIP_PERROR__TA		(1UL << 6)
#define TITAN__PCHIP_PERROR__DPE	(1UL << 7)
#define TITAN__PCHIP_PERROR__NDS	(1UL << 8)
#define TITAN__PCHIP_PERROR__IPTPR	(1UL << 9)
#define TITAN__PCHIP_PERROR__IPTPW	(1UL << 10)
#define TITAN__PCHIP_PERROR__ERRMASK	(TITAN__PCHIP_PERROR__LOST | \
					 TITAN__PCHIP_PERROR__SERR | \
					 TITAN__PCHIP_PERROR__PERR | \
					 TITAN__PCHIP_PERROR__DCRTO | \
					 TITAN__PCHIP_PERROR__SGE | \
					 TITAN__PCHIP_PERROR__APE | \
					 TITAN__PCHIP_PERROR__TA | \
					 TITAN__PCHIP_PERROR__DPE | \
					 TITAN__PCHIP_PERROR__NDS | \
					 TITAN__PCHIP_PERROR__IPTPR | \
					 TITAN__PCHIP_PERROR__IPTPW)
#define TITAN__PCHIP_PERROR__DAC	(1UL << 47)
#define TITAN__PCHIP_PERROR__MWIN	(1UL << 48)
#define TITAN__PCHIP_PERROR__CMD__S	(52)
#define TITAN__PCHIP_PERROR__CMD__M	(0x0f)
#define TITAN__PCHIP_PERROR__ADDR__S	(14)
#define TITAN__PCHIP_PERROR__ADDR__M	(0x1fffffffful)

	if (!(perror & TITAN__PCHIP_PERROR__ERRMASK))
		return MCHK_DISPOSITION_UNKNOWN_ERROR;

	cmd = EXTRACT(perror, TITAN__PCHIP_PERROR__CMD);
	addr = EXTRACT(perror, TITAN__PCHIP_PERROR__ADDR) << 2;

	/*
	 * Initializing the BIOS on a video card on a bus without
	 * a south bridge (subtractive decode agent) can result in
	 * master aborts as the BIOS probes the capabilities of the
	 * card. XFree86 does such initialization. If the error
	 * is a master abort (No DevSel as PCI Master) and the command
	 * is an I/O read or write below the address where we start
	 * assigning PCI I/O spaces (SRM uses 0x1000), then mark the
	 * error as dismissable so starting XFree86 doesn't result
	 * in a series of uncorrectable errors being reported. Also
	 * dismiss master aborts to VGA frame buffer space
	 * (0xA0000 - 0xC0000) and legacy BIOS space (0xC0000 - 0x100000)
	 * for the same reason.
	 *
	 * Also mark the error dismissible if it looks like the right
	 * error but only the Lost bit is set. Since the BIOS initialization
	 * can cause multiple master aborts and the error interrupt can
	 * be handled on a different CPU than the BIOS code is run on,
	 * it is possible for a second master abort to occur between the
	 * time the PALcode reads PERROR and the time it writes PERROR
	 * to acknowledge the error. If this timing happens, a second
	 * error will be signalled after the first, and if no additional
	 * errors occur, will look like a Lost error with no additional
	 * errors on the same transaction as the previous error.
	 */
	if (((perror & TITAN__PCHIP_PERROR__NDS) ||
	     ((perror & TITAN__PCHIP_PERROR__ERRMASK) ==
	      TITAN__PCHIP_PERROR__LOST)) &&
	    ((((cmd & 0xE) == 2) && (addr < 0x1000)) ||
	     (((cmd & 0xE) == 6) && (addr >= 0xA0000) && (addr < 0x100000)))) {
		status = MCHK_DISPOSITION_DISMISS;
	}

#ifdef CONFIG_VERBOSE_MCHECK
	if (!print)
		return status;

	printk("%s PChip %d %cPERROR: %016llx\n",
	       err_print_prefix, which, port ? 'A' : 'G', perror);
	if (perror & TITAN__PCHIP_PERROR__IPTPW)
		printk("%s Invalid Peer-to-Peer Write\n", err_print_prefix);
	if (perror & TITAN__PCHIP_PERROR__IPTPR)
		printk("%s Invalid Peer-to-Peer Read\n", err_print_prefix);
	if (perror & TITAN__PCHIP_PERROR__NDS)
		printk("%s No DEVSEL as PCI Master [Master Abort]\n",
		       err_print_prefix);
	if (perror & TITAN__PCHIP_PERROR__DPE)
		printk("%s Data Parity Error\n", err_print_prefix);
	if (perror & TITAN__PCHIP_PERROR__TA)
		printk("%s Target Abort\n", err_print_prefix);
	if (perror & TITAN__PCHIP_PERROR__APE)
		printk("%s Address Parity Error\n", err_print_prefix);
	if (perror & TITAN__PCHIP_PERROR__SGE)
		printk("%s Scatter-Gather Error, Invalid PTE\n",
		       err_print_prefix);
	if (perror & TITAN__PCHIP_PERROR__DCRTO)
		printk("%s Delayed-Completion Retry Timeout\n",
		       err_print_prefix);
	if (perror & TITAN__PCHIP_PERROR__PERR)
		printk("%s PERR Asserted\n", err_print_prefix);
	if (perror & TITAN__PCHIP_PERROR__SERR)
		printk("%s SERR Asserted\n", err_print_prefix);
	if (perror & TITAN__PCHIP_PERROR__LOST)
		printk("%s Lost Error\n", err_print_prefix);
	printk("%s Command: 0x%x - %s\n"
	       " Address: 0x%lx\n",
	       err_print_prefix, cmd, perror_cmd[cmd], addr);
	if (perror & TITAN__PCHIP_PERROR__DAC)
		printk("%s Dual Address Cycle\n", err_print_prefix);
	if (perror & TITAN__PCHIP_PERROR__MWIN)
		printk("%s Hit in Monster Window\n", err_print_prefix);
#endif /* CONFIG_VERBOSE_MCHECK */

	return status;
}

/*
 * Decode a PChip AGPERROR (AGP-bus error) register.
 */
static int
titan_parse_p_agperror(int which, u64 agperror, int print)
{
	int status = MCHK_DISPOSITION_REPORT;
#ifdef CONFIG_VERBOSE_MCHECK
	int cmd, len;
	unsigned long addr;

	static const char * const agperror_cmd[] = {
		"Read (low-priority)", "Read (high-priority)",
		"Write (low-priority)", "Write (high-priority)",
		"Reserved", "Reserved",
		"Flush", "Fence"
	};
#endif /* CONFIG_VERBOSE_MCHECK */

#define TITAN__PCHIP_AGPERROR__LOST	(1UL << 0)
#define TITAN__PCHIP_AGPERROR__LPQFULL	(1UL << 1)
#define TITAN__PCHIP_AGPERROR__HPQFULL	(1UL << 2)
#define TITAN__PCHIP_AGPERROR__RESCMD	(1UL << 3)
#define TITAN__PCHIP_AGPERROR__IPTE	(1UL << 4)
#define TITAN__PCHIP_AGPERROR__PTP	(1UL << 5)
#define TITAN__PCHIP_AGPERROR__NOWINDOW	(1UL << 6)
#define TITAN__PCHIP_AGPERROR__ERRMASK	(TITAN__PCHIP_AGPERROR__LOST | \
					 TITAN__PCHIP_AGPERROR__LPQFULL | \
					 TITAN__PCHIP_AGPERROR__HPQFULL | \
					 TITAN__PCHIP_AGPERROR__RESCMD | \
					 TITAN__PCHIP_AGPERROR__IPTE | \
					 TITAN__PCHIP_AGPERROR__PTP | \
					 TITAN__PCHIP_AGPERROR__NOWINDOW)
#define TITAN__PCHIP_AGPERROR__DAC	(1UL << 48)
#define TITAN__PCHIP_AGPERROR__MWIN	(1UL << 49)
#define TITAN__PCHIP_AGPERROR__FENCE	(1UL << 59)
#define TITAN__PCHIP_AGPERROR__CMD__S	(50)
#define TITAN__PCHIP_AGPERROR__CMD__M	(0x07)
#define TITAN__PCHIP_AGPERROR__ADDR__S	(15)
#define TITAN__PCHIP_AGPERROR__ADDR__M	(0xffffffffUL)
#define TITAN__PCHIP_AGPERROR__LEN__S	(53)
#define TITAN__PCHIP_AGPERROR__LEN__M	(0x3f)

	if (!(agperror & TITAN__PCHIP_AGPERROR__ERRMASK))
		return MCHK_DISPOSITION_UNKNOWN_ERROR;

#ifdef CONFIG_VERBOSE_MCHECK
	if (!print)
		return status;

	cmd = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__CMD);
	addr = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__ADDR) << 3;
	len = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__LEN);

	printk("%s PChip %d AGPERROR: %016llx\n",
	       err_print_prefix, which, agperror);
	if (agperror & TITAN__PCHIP_AGPERROR__NOWINDOW)
		printk("%s No Window\n", err_print_prefix);
	if (agperror & TITAN__PCHIP_AGPERROR__PTP)
		printk("%s Peer-to-Peer set\n", err_print_prefix);
	if (agperror & TITAN__PCHIP_AGPERROR__IPTE)
		printk("%s Invalid PTE\n", err_print_prefix);
	if (agperror & TITAN__PCHIP_AGPERROR__RESCMD)
		printk("%s Reserved Command\n", err_print_prefix);
	if (agperror & TITAN__PCHIP_AGPERROR__HPQFULL)
		printk("%s HP Transaction Received while Queue Full\n",
		       err_print_prefix);
	if (agperror & TITAN__PCHIP_AGPERROR__LPQFULL)
		printk("%s LP Transaction Received while Queue Full\n",
		       err_print_prefix);
	if (agperror & TITAN__PCHIP_AGPERROR__LOST)
		printk("%s Lost Error\n", err_print_prefix);
	printk("%s Command: 0x%x - %s, %d Quadwords%s\n"
	       " Address: 0x%lx\n",
	       err_print_prefix, cmd, agperror_cmd[cmd], len,
	       (agperror & TITAN__PCHIP_AGPERROR__FENCE) ? ", FENCE" : "",
	       addr);
	if (agperror & TITAN__PCHIP_AGPERROR__DAC)
		printk("%s Dual Address Cycle\n", err_print_prefix);
	if (agperror & TITAN__PCHIP_AGPERROR__MWIN)
		printk("%s Hit in Monster Window\n", err_print_prefix);
#endif /* CONFIG_VERBOSE_MCHECK */

	return status;
}

/*
 * Parse all error registers belonging to one PChip; the individual
 * dispositions are OR-ed together.
 */
static int
titan_parse_p_chip(int which, u64 serror, u64 gperror,
		   u64 aperror, u64 agperror, int print)
{
	int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
	status |= titan_parse_p_serror(which, serror, print);
	status |= titan_parse_p_perror(which, 0, gperror, print);
	status |= titan_parse_p_perror(which, 1, aperror, print);
	status |= titan_parse_p_agperror(which, agperror, print);
	return status;
}

/*
 * Walk an entire TITAN logout frame (CChip + both PChips) and return
 * the combined disposition.
 */
int
titan_process_logout_frame(struct el_common *mchk_header, int print)
{
	struct el_TITAN_sysdata_mcheck *tmchk =
		(struct el_TITAN_sysdata_mcheck *)
		((unsigned long)mchk_header + mchk_header->sys_offset);
	int status = MCHK_DISPOSITION_UNKNOWN_ERROR;

	status |= titan_parse_c_misc(tmchk->c_misc, print);
	status |= titan_parse_p_chip(0, tmchk->p0_serror, tmchk->p0_gperror,
				     tmchk->p0_aperror, tmchk->p0_agperror,
				     print);
	status |= titan_parse_p_chip(1, tmchk->p1_serror, tmchk->p1_gperror,
				     tmchk->p1_aperror, tmchk->p1_agperror,
				     print);

	return status;
}

/*
 * Top-level TITAN machine-check handler: CPU-local checks go to the
 * generic EV6 handler; system errors are parsed (and possibly
 * dismissed) here, then the logout frame is released.
 */
void
titan_machine_check(unsigned long vector, unsigned long la_ptr)
{
	struct el_common *mchk_header = (struct el_common *)la_ptr;
	struct el_TITAN_sysdata_mcheck *tmchk =
		(struct el_TITAN_sysdata_mcheck *)
		((unsigned long)mchk_header + mchk_header->sys_offset);
	u64 irqmask;

	/*
	 * Mask of Titan interrupt sources which are reported as machine checks
	 *
	 * 63 - CChip Error
	 * 62 - PChip 0 H_Error
	 * 61 - PChip 1 H_Error
	 * 60 - PChip 0 C_Error
	 * 59 - PChip 1 C_Error
	 */
#define TITAN_MCHECK_INTERRUPT_MASK	0xF800000000000000UL

	/*
	 * Sync the processor
	 */
	mb();
	draina();

	/*
	 * Only handle system errors here
	 */
	if ((vector != SCB_Q_SYSMCHK) && (vector != SCB_Q_SYSERR)) {
		ev6_machine_check(vector, la_ptr);
		return;
	}

	/*
	 * It's a system error, handle it here
	 *
	 * The PALcode has already cleared the error, so just parse it
	 */

	/*
	 * Parse the logout frame without printing first. If the only error(s)
	 * found are classified as "dismissable", then just dismiss them and
	 * don't print any message
	 */
	if (titan_process_logout_frame(mchk_header, 0) !=
	    MCHK_DISPOSITION_DISMISS) {
		char *saved_err_prefix = err_print_prefix;
		err_print_prefix = KERN_CRIT;

		/*
		 * Either a nondismissable error was detected or no
		 * recognized error was detected in the logout frame
		 * -- report the error in either case
		 */
		printk("%s"
		       "*System %s Error (Vector 0x%x) reported on CPU %d:\n",
		       err_print_prefix,
		       (vector == SCB_Q_SYSERR)?"Correctable":"Uncorrectable",
		       (unsigned int)vector, (int)smp_processor_id());

#ifdef CONFIG_VERBOSE_MCHECK
		titan_process_logout_frame(mchk_header, alpha_verbose_mcheck);
		if (alpha_verbose_mcheck)
			dik_show_regs(get_irq_regs(), NULL);
#endif /* CONFIG_VERBOSE_MCHECK */

		err_print_prefix = saved_err_prefix;

		/*
		 * Convert any pending interrupts which report as system
		 * machine checks to interrupts
		 */
		irqmask = tmchk->c_dirx & TITAN_MCHECK_INTERRUPT_MASK;
		titan_dispatch_irqs(irqmask);
	}

	/*
	 * Release the logout frame
	 */
	wrmces(0x7);
	mb();
}

/*
 * Subpacket Annotations
 */
static char *el_titan_pchip0_extended_annotation[] = {
	"Subpacket Header", "P0_SCTL", "P0_SERREN",
	"P0_APCTL", "P0_APERREN", "P0_AGPERREN",
	"P0_ASPRST", "P0_AWSBA0", "P0_AWSBA1",
	"P0_AWSBA2", "P0_AWSBA3", "P0_AWSM0",
	"P0_AWSM1", "P0_AWSM2", "P0_AWSM3",
	"P0_ATBA0", "P0_ATBA1", "P0_ATBA2",
	"P0_ATBA3", "P0_GPCTL", "P0_GPERREN",
	"P0_GSPRST", "P0_GWSBA0", "P0_GWSBA1",
	"P0_GWSBA2", "P0_GWSBA3", "P0_GWSM0",
	"P0_GWSM1", "P0_GWSM2", "P0_GWSM3",
	"P0_GTBA0", "P0_GTBA1", "P0_GTBA2",
	"P0_GTBA3", NULL
};
static char *el_titan_pchip1_extended_annotation[] = {
	"Subpacket Header", "P1_SCTL", "P1_SERREN",
	"P1_APCTL", "P1_APERREN", "P1_AGPERREN",
	"P1_ASPRST", "P1_AWSBA0", "P1_AWSBA1",
	"P1_AWSBA2", "P1_AWSBA3", "P1_AWSM0",
	"P1_AWSM1", "P1_AWSM2", "P1_AWSM3",
	"P1_ATBA0", "P1_ATBA1", "P1_ATBA2",
	"P1_ATBA3", "P1_GPCTL", "P1_GPERREN",
	"P1_GSPRST", "P1_GWSBA0", "P1_GWSBA1",
	"P1_GWSBA2", "P1_GWSBA3", "P1_GWSM0",
	"P1_GWSM1", "P1_GWSM2", "P1_GWSM3",
	"P1_GTBA0", "P1_GTBA1", "P1_GTBA2",
	"P1_GTBA3", NULL
};
/* NOTE(review): "P1_SCTL" appears twice below; the second occurrence
 * looks like it should be "P1_APCTL" by symmetry with P0 — verify
 * against the TITAN logout frame specification before changing. */
static char *el_titan_memory_extended_annotation[] = {
	"Subpacket Header", "AAR0", "AAR1",
	"AAR2", "AAR3", "P0_SCTL",
	"P0_GPCTL", "P0_APCTL", "P1_SCTL",
	"P1_GPCTL", "P1_SCTL", NULL
};

static struct el_subpacket_annotation el_titan_annotations[] = {
	SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY,
			     EL_TYPE__REGATTA__TITAN_PCHIP0_EXTENDED,
			     1,
			     "Titan PChip 0 Extended Frame",
			     el_titan_pchip0_extended_annotation),
	SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY,
			     EL_TYPE__REGATTA__TITAN_PCHIP1_EXTENDED,
			     1,
			     "Titan PChip 1 Extended Frame",
			     el_titan_pchip1_extended_annotation),
	SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY,
			     EL_TYPE__REGATTA__TITAN_MEMORY_EXTENDED,
			     1,
			     "Titan Memory Extended Frame",
			     el_titan_memory_extended_annotation),
	SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY,
			     EL_TYPE__TERMINATION__TERMINATION,
			     1,
			     "Termination Subpacket",
			     NULL)
};

/*
 * Dispatch a Regatta-family error subpacket to the appropriate
 * handler/annotator.  (Function continues beyond this excerpt.)
 */
static struct el_subpacket *
el_process_regatta_subpacket(struct el_subpacket *header)
{
	if (header->class != EL_CLASS__REGATTA_FAMILY) {
		printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n",
		       err_print_prefix,
		       header->class, header->type);
		return NULL;
	}

	switch(header->type) {
	case EL_TYPE__REGATTA__PROCESSOR_ERROR_FRAME:
	case EL_TYPE__REGATTA__SYSTEM_ERROR_FRAME:
	case EL_TYPE__REGATTA__ENVIRONMENTAL_FRAME:
	case EL_TYPE__REGATTA__PROCESSOR_DBL_ERROR_HALT:
	case EL_TYPE__REGATTA__SYSTEM_DBL_ERROR_HALT:
		printk("%s ** Occurred on CPU %d:\n",
		       err_print_prefix,
		       (int)header->by_type.regatta_frame.cpuid);
		privateer_process_logout_frame((struct el_common *)
			header->by_type.regatta_frame.data_start, 1);
		break;
	default:
		printk("%s ** REGATTA TYPE %d SUBPACKET\n",
		       err_print_prefix, header->type);
		el_annotate_subpacket(header);
		break;
	}
return (struct el_subpacket *)((unsigned long)header + header->length); } static struct el_subpacket_handler titan_subpacket_handler = SUBPACKET_HANDLER_INIT(EL_CLASS__REGATTA_FAMILY, el_process_regatta_subpacket); void __init titan_register_error_handlers(void) { size_t i; for (i = 0; i < ARRAY_SIZE (el_titan_annotations); i++) cdl_register_subpacket_annotation(&el_titan_annotations[i]); cdl_register_subpacket_handler(&titan_subpacket_handler); ev6_register_error_handlers(); } /* * Privateer */ static int privateer_process_680_frame(struct el_common *mchk_header, int print) { int status = MCHK_DISPOSITION_UNKNOWN_ERROR; #ifdef CONFIG_VERBOSE_MCHECK struct el_PRIVATEER_envdata_mcheck *emchk = (struct el_PRIVATEER_envdata_mcheck *) ((unsigned long)mchk_header + mchk_header->sys_offset); /* TODO - categorize errors, for now, no error */ if (!print) return status; /* TODO - decode instead of just dumping... */ printk("%s Summary Flags: %016llx\n" " CChip DIRx: %016llx\n" " System Management IR: %016llx\n" " CPU IR: %016llx\n" " Power Supply IR: %016llx\n" " LM78 Fault Status: %016llx\n" " System Doors: %016llx\n" " Temperature Warning: %016llx\n" " Fan Control: %016llx\n" " Fatal Power Down Code: %016llx\n", err_print_prefix, emchk->summary, emchk->c_dirx, emchk->smir, emchk->cpuir, emchk->psir, emchk->fault, emchk->sys_doors, emchk->temp_warn, emchk->fan_ctrl, emchk->code); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } int privateer_process_logout_frame(struct el_common *mchk_header, int print) { struct el_common_EV6_mcheck *ev6mchk = (struct el_common_EV6_mcheck *)mchk_header; int status = MCHK_DISPOSITION_UNKNOWN_ERROR; /* * Machine check codes */ #define PRIVATEER_MCHK__CORR_ECC 0x86 /* 630 */ #define PRIVATEER_MCHK__DC_TAG_PERR 0x9E /* 630 */ #define PRIVATEER_MCHK__PAL_BUGCHECK 0x8E /* 670 */ #define PRIVATEER_MCHK__OS_BUGCHECK 0x90 /* 670 */ #define PRIVATEER_MCHK__PROC_HRD_ERR 0x98 /* 670 */ #define PRIVATEER_MCHK__ISTREAM_CMOV_PRX 0xA0 /* 670 */ #define 
PRIVATEER_MCHK__ISTREAM_CMOV_FLT 0xA2 /* 670 */ #define PRIVATEER_MCHK__SYS_HRD_ERR 0x202 /* 660 */ #define PRIVATEER_MCHK__SYS_CORR_ERR 0x204 /* 620 */ #define PRIVATEER_MCHK__SYS_ENVIRON 0x206 /* 680 */ switch(ev6mchk->MCHK_Code) { /* * Vector 630 - Processor, Correctable */ case PRIVATEER_MCHK__CORR_ECC: case PRIVATEER_MCHK__DC_TAG_PERR: /* * Fall through to vector 670 for processing... */ /* * Vector 670 - Processor, Uncorrectable */ case PRIVATEER_MCHK__PAL_BUGCHECK: case PRIVATEER_MCHK__OS_BUGCHECK: case PRIVATEER_MCHK__PROC_HRD_ERR: case PRIVATEER_MCHK__ISTREAM_CMOV_PRX: case PRIVATEER_MCHK__ISTREAM_CMOV_FLT: status |= ev6_process_logout_frame(mchk_header, print); break; /* * Vector 620 - System, Correctable */ case PRIVATEER_MCHK__SYS_CORR_ERR: /* * Fall through to vector 660 for processing... */ /* * Vector 660 - System, Uncorrectable */ case PRIVATEER_MCHK__SYS_HRD_ERR: status |= titan_process_logout_frame(mchk_header, print); break; /* * Vector 680 - System, Environmental */ case PRIVATEER_MCHK__SYS_ENVIRON: /* System, Environmental */ status |= privateer_process_680_frame(mchk_header, print); break; /* * Unknown */ default: status |= MCHK_DISPOSITION_REPORT; if (print) { printk("%s** Unknown Error, frame follows\n", err_print_prefix); mchk_dump_logout_frame(mchk_header); } } return status; } void privateer_machine_check(unsigned long vector, unsigned long la_ptr) { struct el_common *mchk_header = (struct el_common *)la_ptr; struct el_TITAN_sysdata_mcheck *tmchk = (struct el_TITAN_sysdata_mcheck *) (la_ptr + mchk_header->sys_offset); u64 irqmask; char *saved_err_prefix = err_print_prefix; #define PRIVATEER_680_INTERRUPT_MASK (0xE00UL) #define PRIVATEER_HOTPLUG_INTERRUPT_MASK (0xE00UL) /* * Sync the processor. */ mb(); draina(); /* * Only handle system events here. 
*/ if (vector != SCB_Q_SYSEVENT) return titan_machine_check(vector, la_ptr); /* * Report the event - System Events should be reported even if no * error is indicated since the event could indicate the return * to normal status. */ err_print_prefix = KERN_CRIT; printk("%s*System Event (Vector 0x%x) reported on CPU %d:\n", err_print_prefix, (unsigned int)vector, (int)smp_processor_id()); privateer_process_680_frame(mchk_header, 1); err_print_prefix = saved_err_prefix; /* * Convert any pending interrupts which report as 680 machine * checks to interrupts. */ irqmask = tmchk->c_dirx & PRIVATEER_680_INTERRUPT_MASK; /* * Dispatch the interrupt(s). */ titan_dispatch_irqs(irqmask); /* * Release the logout frame. */ wrmces(0x7); mb(); }
gpl-2.0
bbelos/rk3188-kernel
arch/alpha/kernel/err_titan.c
11940
23439
/* * linux/arch/alpha/kernel/err_titan.c * * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation) * * Error handling code supporting TITAN systems */ #include <linux/init.h> #include <linux/pci.h> #include <linux/sched.h> #include <asm/io.h> #include <asm/core_titan.h> #include <asm/hwrpb.h> #include <asm/smp.h> #include <asm/err_common.h> #include <asm/err_ev6.h> #include <asm/irq_regs.h> #include "err_impl.h" #include "proto.h" static int titan_parse_c_misc(u64 c_misc, int print) { #ifdef CONFIG_VERBOSE_MCHECK char *src; int nxs = 0; #endif int status = MCHK_DISPOSITION_REPORT; #define TITAN__CCHIP_MISC__NXM (1UL << 28) #define TITAN__CCHIP_MISC__NXS__S (29) #define TITAN__CCHIP_MISC__NXS__M (0x7) if (!(c_misc & TITAN__CCHIP_MISC__NXM)) return MCHK_DISPOSITION_UNKNOWN_ERROR; #ifdef CONFIG_VERBOSE_MCHECK if (!print) return status; nxs = EXTRACT(c_misc, TITAN__CCHIP_MISC__NXS); switch(nxs) { case 0: /* CPU 0 */ case 1: /* CPU 1 */ case 2: /* CPU 2 */ case 3: /* CPU 3 */ src = "CPU"; /* num is already the CPU number */ break; case 4: /* Pchip 0 */ case 5: /* Pchip 1 */ src = "Pchip"; nxs -= 4; break; default:/* reserved */ src = "Unknown, NXS ="; /* leave num untouched */ break; } printk("%s Non-existent memory access from: %s %d\n", err_print_prefix, src, nxs); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } static int titan_parse_p_serror(int which, u64 serror, int print) { int status = MCHK_DISPOSITION_REPORT; #ifdef CONFIG_VERBOSE_MCHECK static const char * const serror_src[] = { "GPCI", "APCI", "AGP HP", "AGP LP" }; static const char * const serror_cmd[] = { "DMA Read", "DMA RMW", "SGTE Read", "Reserved" }; #endif /* CONFIG_VERBOSE_MCHECK */ #define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0) #define TITAN__PCHIP_SERROR__UECC (1UL << 1) #define TITAN__PCHIP_SERROR__CRE (1UL << 2) #define TITAN__PCHIP_SERROR__NXIO (1UL << 3) #define TITAN__PCHIP_SERROR__LOST_CRE (1UL << 4) #define TITAN__PCHIP_SERROR__ECCMASK (TITAN__PCHIP_SERROR__UECC | \ 
TITAN__PCHIP_SERROR__CRE) #define TITAN__PCHIP_SERROR__ERRMASK (TITAN__PCHIP_SERROR__LOST_UECC | \ TITAN__PCHIP_SERROR__UECC | \ TITAN__PCHIP_SERROR__CRE | \ TITAN__PCHIP_SERROR__NXIO | \ TITAN__PCHIP_SERROR__LOST_CRE) #define TITAN__PCHIP_SERROR__SRC__S (52) #define TITAN__PCHIP_SERROR__SRC__M (0x3) #define TITAN__PCHIP_SERROR__CMD__S (54) #define TITAN__PCHIP_SERROR__CMD__M (0x3) #define TITAN__PCHIP_SERROR__SYN__S (56) #define TITAN__PCHIP_SERROR__SYN__M (0xff) #define TITAN__PCHIP_SERROR__ADDR__S (15) #define TITAN__PCHIP_SERROR__ADDR__M (0xffffffffUL) if (!(serror & TITAN__PCHIP_SERROR__ERRMASK)) return MCHK_DISPOSITION_UNKNOWN_ERROR; #ifdef CONFIG_VERBOSE_MCHECK if (!print) return status; printk("%s PChip %d SERROR: %016llx\n", err_print_prefix, which, serror); if (serror & TITAN__PCHIP_SERROR__ECCMASK) { printk("%s %sorrectable ECC Error:\n" " Source: %-6s Command: %-8s Syndrome: 0x%08x\n" " Address: 0x%llx\n", err_print_prefix, (serror & TITAN__PCHIP_SERROR__UECC) ? "Unc" : "C", serror_src[EXTRACT(serror, TITAN__PCHIP_SERROR__SRC)], serror_cmd[EXTRACT(serror, TITAN__PCHIP_SERROR__CMD)], (unsigned)EXTRACT(serror, TITAN__PCHIP_SERROR__SYN), EXTRACT(serror, TITAN__PCHIP_SERROR__ADDR)); } if (serror & TITAN__PCHIP_SERROR__NXIO) printk("%s Non Existent I/O Error\n", err_print_prefix); if (serror & TITAN__PCHIP_SERROR__LOST_UECC) printk("%s Lost Uncorrectable ECC Error\n", err_print_prefix); if (serror & TITAN__PCHIP_SERROR__LOST_CRE) printk("%s Lost Correctable ECC Error\n", err_print_prefix); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } static int titan_parse_p_perror(int which, int port, u64 perror, int print) { int cmd; unsigned long addr; int status = MCHK_DISPOSITION_REPORT; #ifdef CONFIG_VERBOSE_MCHECK static const char * const perror_cmd[] = { "Interrupt Acknowledge", "Special Cycle", "I/O Read", "I/O Write", "Reserved", "Reserved", "Memory Read", "Memory Write", "Reserved", "Reserved", "Configuration Read", "Configuration Write", "Memory Read 
Multiple", "Dual Address Cycle", "Memory Read Line", "Memory Write and Invalidate" }; #endif /* CONFIG_VERBOSE_MCHECK */ #define TITAN__PCHIP_PERROR__LOST (1UL << 0) #define TITAN__PCHIP_PERROR__SERR (1UL << 1) #define TITAN__PCHIP_PERROR__PERR (1UL << 2) #define TITAN__PCHIP_PERROR__DCRTO (1UL << 3) #define TITAN__PCHIP_PERROR__SGE (1UL << 4) #define TITAN__PCHIP_PERROR__APE (1UL << 5) #define TITAN__PCHIP_PERROR__TA (1UL << 6) #define TITAN__PCHIP_PERROR__DPE (1UL << 7) #define TITAN__PCHIP_PERROR__NDS (1UL << 8) #define TITAN__PCHIP_PERROR__IPTPR (1UL << 9) #define TITAN__PCHIP_PERROR__IPTPW (1UL << 10) #define TITAN__PCHIP_PERROR__ERRMASK (TITAN__PCHIP_PERROR__LOST | \ TITAN__PCHIP_PERROR__SERR | \ TITAN__PCHIP_PERROR__PERR | \ TITAN__PCHIP_PERROR__DCRTO | \ TITAN__PCHIP_PERROR__SGE | \ TITAN__PCHIP_PERROR__APE | \ TITAN__PCHIP_PERROR__TA | \ TITAN__PCHIP_PERROR__DPE | \ TITAN__PCHIP_PERROR__NDS | \ TITAN__PCHIP_PERROR__IPTPR | \ TITAN__PCHIP_PERROR__IPTPW) #define TITAN__PCHIP_PERROR__DAC (1UL << 47) #define TITAN__PCHIP_PERROR__MWIN (1UL << 48) #define TITAN__PCHIP_PERROR__CMD__S (52) #define TITAN__PCHIP_PERROR__CMD__M (0x0f) #define TITAN__PCHIP_PERROR__ADDR__S (14) #define TITAN__PCHIP_PERROR__ADDR__M (0x1fffffffful) if (!(perror & TITAN__PCHIP_PERROR__ERRMASK)) return MCHK_DISPOSITION_UNKNOWN_ERROR; cmd = EXTRACT(perror, TITAN__PCHIP_PERROR__CMD); addr = EXTRACT(perror, TITAN__PCHIP_PERROR__ADDR) << 2; /* * Initializing the BIOS on a video card on a bus without * a south bridge (subtractive decode agent) can result in * master aborts as the BIOS probes the capabilities of the * card. XFree86 does such initialization. If the error * is a master abort (No DevSel as PCI Master) and the command * is an I/O read or write below the address where we start * assigning PCI I/O spaces (SRM uses 0x1000), then mark the * error as dismissable so starting XFree86 doesn't result * in a series of uncorrectable errors being reported. 
Also * dismiss master aborts to VGA frame buffer space * (0xA0000 - 0xC0000) and legacy BIOS space (0xC0000 - 0x100000) * for the same reason. * * Also mark the error dismissible if it looks like the right * error but only the Lost bit is set. Since the BIOS initialization * can cause multiple master aborts and the error interrupt can * be handled on a different CPU than the BIOS code is run on, * it is possible for a second master abort to occur between the * time the PALcode reads PERROR and the time it writes PERROR * to acknowledge the error. If this timing happens, a second * error will be signalled after the first, and if no additional * errors occur, will look like a Lost error with no additional * errors on the same transaction as the previous error. */ if (((perror & TITAN__PCHIP_PERROR__NDS) || ((perror & TITAN__PCHIP_PERROR__ERRMASK) == TITAN__PCHIP_PERROR__LOST)) && ((((cmd & 0xE) == 2) && (addr < 0x1000)) || (((cmd & 0xE) == 6) && (addr >= 0xA0000) && (addr < 0x100000)))) { status = MCHK_DISPOSITION_DISMISS; } #ifdef CONFIG_VERBOSE_MCHECK if (!print) return status; printk("%s PChip %d %cPERROR: %016llx\n", err_print_prefix, which, port ? 
'A' : 'G', perror); if (perror & TITAN__PCHIP_PERROR__IPTPW) printk("%s Invalid Peer-to-Peer Write\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__IPTPR) printk("%s Invalid Peer-to-Peer Read\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__NDS) printk("%s No DEVSEL as PCI Master [Master Abort]\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__DPE) printk("%s Data Parity Error\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__TA) printk("%s Target Abort\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__APE) printk("%s Address Parity Error\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__SGE) printk("%s Scatter-Gather Error, Invalid PTE\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__DCRTO) printk("%s Delayed-Completion Retry Timeout\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__PERR) printk("%s PERR Asserted\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__SERR) printk("%s SERR Asserted\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__LOST) printk("%s Lost Error\n", err_print_prefix); printk("%s Command: 0x%x - %s\n" " Address: 0x%lx\n", err_print_prefix, cmd, perror_cmd[cmd], addr); if (perror & TITAN__PCHIP_PERROR__DAC) printk("%s Dual Address Cycle\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__MWIN) printk("%s Hit in Monster Window\n", err_print_prefix); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } static int titan_parse_p_agperror(int which, u64 agperror, int print) { int status = MCHK_DISPOSITION_REPORT; #ifdef CONFIG_VERBOSE_MCHECK int cmd, len; unsigned long addr; static const char * const agperror_cmd[] = { "Read (low-priority)", "Read (high-priority)", "Write (low-priority)", "Write (high-priority)", "Reserved", "Reserved", "Flush", "Fence" }; #endif /* CONFIG_VERBOSE_MCHECK */ #define TITAN__PCHIP_AGPERROR__LOST (1UL << 0) #define TITAN__PCHIP_AGPERROR__LPQFULL (1UL << 1) #define TITAN__PCHIP_AGPERROR__HPQFULL (1UL << 2) #define TITAN__PCHIP_AGPERROR__RESCMD 
(1UL << 3) #define TITAN__PCHIP_AGPERROR__IPTE (1UL << 4) #define TITAN__PCHIP_AGPERROR__PTP (1UL << 5) #define TITAN__PCHIP_AGPERROR__NOWINDOW (1UL << 6) #define TITAN__PCHIP_AGPERROR__ERRMASK (TITAN__PCHIP_AGPERROR__LOST | \ TITAN__PCHIP_AGPERROR__LPQFULL | \ TITAN__PCHIP_AGPERROR__HPQFULL | \ TITAN__PCHIP_AGPERROR__RESCMD | \ TITAN__PCHIP_AGPERROR__IPTE | \ TITAN__PCHIP_AGPERROR__PTP | \ TITAN__PCHIP_AGPERROR__NOWINDOW) #define TITAN__PCHIP_AGPERROR__DAC (1UL << 48) #define TITAN__PCHIP_AGPERROR__MWIN (1UL << 49) #define TITAN__PCHIP_AGPERROR__FENCE (1UL << 59) #define TITAN__PCHIP_AGPERROR__CMD__S (50) #define TITAN__PCHIP_AGPERROR__CMD__M (0x07) #define TITAN__PCHIP_AGPERROR__ADDR__S (15) #define TITAN__PCHIP_AGPERROR__ADDR__M (0xffffffffUL) #define TITAN__PCHIP_AGPERROR__LEN__S (53) #define TITAN__PCHIP_AGPERROR__LEN__M (0x3f) if (!(agperror & TITAN__PCHIP_AGPERROR__ERRMASK)) return MCHK_DISPOSITION_UNKNOWN_ERROR; #ifdef CONFIG_VERBOSE_MCHECK if (!print) return status; cmd = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__CMD); addr = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__ADDR) << 3; len = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__LEN); printk("%s PChip %d AGPERROR: %016llx\n", err_print_prefix, which, agperror); if (agperror & TITAN__PCHIP_AGPERROR__NOWINDOW) printk("%s No Window\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__PTP) printk("%s Peer-to-Peer set\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__IPTE) printk("%s Invalid PTE\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__RESCMD) printk("%s Reserved Command\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__HPQFULL) printk("%s HP Transaction Received while Queue Full\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__LPQFULL) printk("%s LP Transaction Received while Queue Full\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__LOST) printk("%s Lost Error\n", err_print_prefix); printk("%s Command: 0x%x - %s, %d Quadwords%s\n" " Address: 
0x%lx\n", err_print_prefix, cmd, agperror_cmd[cmd], len, (agperror & TITAN__PCHIP_AGPERROR__FENCE) ? ", FENCE" : "", addr); if (agperror & TITAN__PCHIP_AGPERROR__DAC) printk("%s Dual Address Cycle\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__MWIN) printk("%s Hit in Monster Window\n", err_print_prefix); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } static int titan_parse_p_chip(int which, u64 serror, u64 gperror, u64 aperror, u64 agperror, int print) { int status = MCHK_DISPOSITION_UNKNOWN_ERROR; status |= titan_parse_p_serror(which, serror, print); status |= titan_parse_p_perror(which, 0, gperror, print); status |= titan_parse_p_perror(which, 1, aperror, print); status |= titan_parse_p_agperror(which, agperror, print); return status; } int titan_process_logout_frame(struct el_common *mchk_header, int print) { struct el_TITAN_sysdata_mcheck *tmchk = (struct el_TITAN_sysdata_mcheck *) ((unsigned long)mchk_header + mchk_header->sys_offset); int status = MCHK_DISPOSITION_UNKNOWN_ERROR; status |= titan_parse_c_misc(tmchk->c_misc, print); status |= titan_parse_p_chip(0, tmchk->p0_serror, tmchk->p0_gperror, tmchk->p0_aperror, tmchk->p0_agperror, print); status |= titan_parse_p_chip(1, tmchk->p1_serror, tmchk->p1_gperror, tmchk->p1_aperror, tmchk->p1_agperror, print); return status; } void titan_machine_check(unsigned long vector, unsigned long la_ptr) { struct el_common *mchk_header = (struct el_common *)la_ptr; struct el_TITAN_sysdata_mcheck *tmchk = (struct el_TITAN_sysdata_mcheck *) ((unsigned long)mchk_header + mchk_header->sys_offset); u64 irqmask; /* * Mask of Titan interrupt sources which are reported as machine checks * * 63 - CChip Error * 62 - PChip 0 H_Error * 61 - PChip 1 H_Error * 60 - PChip 0 C_Error * 59 - PChip 1 C_Error */ #define TITAN_MCHECK_INTERRUPT_MASK 0xF800000000000000UL /* * Sync the processor */ mb(); draina(); /* * Only handle system errors here */ if ((vector != SCB_Q_SYSMCHK) && (vector != SCB_Q_SYSERR)) { 
ev6_machine_check(vector, la_ptr); return; } /* * It's a system error, handle it here * * The PALcode has already cleared the error, so just parse it */ /* * Parse the logout frame without printing first. If the only error(s) * found are classified as "dismissable", then just dismiss them and * don't print any message */ if (titan_process_logout_frame(mchk_header, 0) != MCHK_DISPOSITION_DISMISS) { char *saved_err_prefix = err_print_prefix; err_print_prefix = KERN_CRIT; /* * Either a nondismissable error was detected or no * recognized error was detected in the logout frame * -- report the error in either case */ printk("%s" "*System %s Error (Vector 0x%x) reported on CPU %d:\n", err_print_prefix, (vector == SCB_Q_SYSERR)?"Correctable":"Uncorrectable", (unsigned int)vector, (int)smp_processor_id()); #ifdef CONFIG_VERBOSE_MCHECK titan_process_logout_frame(mchk_header, alpha_verbose_mcheck); if (alpha_verbose_mcheck) dik_show_regs(get_irq_regs(), NULL); #endif /* CONFIG_VERBOSE_MCHECK */ err_print_prefix = saved_err_prefix; /* * Convert any pending interrupts which report as system * machine checks to interrupts */ irqmask = tmchk->c_dirx & TITAN_MCHECK_INTERRUPT_MASK; titan_dispatch_irqs(irqmask); } /* * Release the logout frame */ wrmces(0x7); mb(); } /* * Subpacket Annotations */ static char *el_titan_pchip0_extended_annotation[] = { "Subpacket Header", "P0_SCTL", "P0_SERREN", "P0_APCTL", "P0_APERREN", "P0_AGPERREN", "P0_ASPRST", "P0_AWSBA0", "P0_AWSBA1", "P0_AWSBA2", "P0_AWSBA3", "P0_AWSM0", "P0_AWSM1", "P0_AWSM2", "P0_AWSM3", "P0_ATBA0", "P0_ATBA1", "P0_ATBA2", "P0_ATBA3", "P0_GPCTL", "P0_GPERREN", "P0_GSPRST", "P0_GWSBA0", "P0_GWSBA1", "P0_GWSBA2", "P0_GWSBA3", "P0_GWSM0", "P0_GWSM1", "P0_GWSM2", "P0_GWSM3", "P0_GTBA0", "P0_GTBA1", "P0_GTBA2", "P0_GTBA3", NULL }; static char *el_titan_pchip1_extended_annotation[] = { "Subpacket Header", "P1_SCTL", "P1_SERREN", "P1_APCTL", "P1_APERREN", "P1_AGPERREN", "P1_ASPRST", "P1_AWSBA0", "P1_AWSBA1", "P1_AWSBA2", 
"P1_AWSBA3", "P1_AWSM0", "P1_AWSM1", "P1_AWSM2", "P1_AWSM3", "P1_ATBA0", "P1_ATBA1", "P1_ATBA2", "P1_ATBA3", "P1_GPCTL", "P1_GPERREN", "P1_GSPRST", "P1_GWSBA0", "P1_GWSBA1", "P1_GWSBA2", "P1_GWSBA3", "P1_GWSM0", "P1_GWSM1", "P1_GWSM2", "P1_GWSM3", "P1_GTBA0", "P1_GTBA1", "P1_GTBA2", "P1_GTBA3", NULL }; static char *el_titan_memory_extended_annotation[] = { "Subpacket Header", "AAR0", "AAR1", "AAR2", "AAR3", "P0_SCTL", "P0_GPCTL", "P0_APCTL", "P1_SCTL", "P1_GPCTL", "P1_SCTL", NULL }; static struct el_subpacket_annotation el_titan_annotations[] = { SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY, EL_TYPE__REGATTA__TITAN_PCHIP0_EXTENDED, 1, "Titan PChip 0 Extended Frame", el_titan_pchip0_extended_annotation), SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY, EL_TYPE__REGATTA__TITAN_PCHIP1_EXTENDED, 1, "Titan PChip 1 Extended Frame", el_titan_pchip1_extended_annotation), SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY, EL_TYPE__REGATTA__TITAN_MEMORY_EXTENDED, 1, "Titan Memory Extended Frame", el_titan_memory_extended_annotation), SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY, EL_TYPE__TERMINATION__TERMINATION, 1, "Termination Subpacket", NULL) }; static struct el_subpacket * el_process_regatta_subpacket(struct el_subpacket *header) { if (header->class != EL_CLASS__REGATTA_FAMILY) { printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n", err_print_prefix, header->class, header->type); return NULL; } switch(header->type) { case EL_TYPE__REGATTA__PROCESSOR_ERROR_FRAME: case EL_TYPE__REGATTA__SYSTEM_ERROR_FRAME: case EL_TYPE__REGATTA__ENVIRONMENTAL_FRAME: case EL_TYPE__REGATTA__PROCESSOR_DBL_ERROR_HALT: case EL_TYPE__REGATTA__SYSTEM_DBL_ERROR_HALT: printk("%s ** Occurred on CPU %d:\n", err_print_prefix, (int)header->by_type.regatta_frame.cpuid); privateer_process_logout_frame((struct el_common *) header->by_type.regatta_frame.data_start, 1); break; default: printk("%s ** REGATTA TYPE %d SUBPACKET\n", err_print_prefix, header->type); el_annotate_subpacket(header); break; } 
return (struct el_subpacket *)((unsigned long)header + header->length); } static struct el_subpacket_handler titan_subpacket_handler = SUBPACKET_HANDLER_INIT(EL_CLASS__REGATTA_FAMILY, el_process_regatta_subpacket); void __init titan_register_error_handlers(void) { size_t i; for (i = 0; i < ARRAY_SIZE (el_titan_annotations); i++) cdl_register_subpacket_annotation(&el_titan_annotations[i]); cdl_register_subpacket_handler(&titan_subpacket_handler); ev6_register_error_handlers(); } /* * Privateer */ static int privateer_process_680_frame(struct el_common *mchk_header, int print) { int status = MCHK_DISPOSITION_UNKNOWN_ERROR; #ifdef CONFIG_VERBOSE_MCHECK struct el_PRIVATEER_envdata_mcheck *emchk = (struct el_PRIVATEER_envdata_mcheck *) ((unsigned long)mchk_header + mchk_header->sys_offset); /* TODO - categorize errors, for now, no error */ if (!print) return status; /* TODO - decode instead of just dumping... */ printk("%s Summary Flags: %016llx\n" " CChip DIRx: %016llx\n" " System Management IR: %016llx\n" " CPU IR: %016llx\n" " Power Supply IR: %016llx\n" " LM78 Fault Status: %016llx\n" " System Doors: %016llx\n" " Temperature Warning: %016llx\n" " Fan Control: %016llx\n" " Fatal Power Down Code: %016llx\n", err_print_prefix, emchk->summary, emchk->c_dirx, emchk->smir, emchk->cpuir, emchk->psir, emchk->fault, emchk->sys_doors, emchk->temp_warn, emchk->fan_ctrl, emchk->code); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } int privateer_process_logout_frame(struct el_common *mchk_header, int print) { struct el_common_EV6_mcheck *ev6mchk = (struct el_common_EV6_mcheck *)mchk_header; int status = MCHK_DISPOSITION_UNKNOWN_ERROR; /* * Machine check codes */ #define PRIVATEER_MCHK__CORR_ECC 0x86 /* 630 */ #define PRIVATEER_MCHK__DC_TAG_PERR 0x9E /* 630 */ #define PRIVATEER_MCHK__PAL_BUGCHECK 0x8E /* 670 */ #define PRIVATEER_MCHK__OS_BUGCHECK 0x90 /* 670 */ #define PRIVATEER_MCHK__PROC_HRD_ERR 0x98 /* 670 */ #define PRIVATEER_MCHK__ISTREAM_CMOV_PRX 0xA0 /* 670 */ #define 
PRIVATEER_MCHK__ISTREAM_CMOV_FLT 0xA2 /* 670 */ #define PRIVATEER_MCHK__SYS_HRD_ERR 0x202 /* 660 */ #define PRIVATEER_MCHK__SYS_CORR_ERR 0x204 /* 620 */ #define PRIVATEER_MCHK__SYS_ENVIRON 0x206 /* 680 */ switch(ev6mchk->MCHK_Code) { /* * Vector 630 - Processor, Correctable */ case PRIVATEER_MCHK__CORR_ECC: case PRIVATEER_MCHK__DC_TAG_PERR: /* * Fall through to vector 670 for processing... */ /* * Vector 670 - Processor, Uncorrectable */ case PRIVATEER_MCHK__PAL_BUGCHECK: case PRIVATEER_MCHK__OS_BUGCHECK: case PRIVATEER_MCHK__PROC_HRD_ERR: case PRIVATEER_MCHK__ISTREAM_CMOV_PRX: case PRIVATEER_MCHK__ISTREAM_CMOV_FLT: status |= ev6_process_logout_frame(mchk_header, print); break; /* * Vector 620 - System, Correctable */ case PRIVATEER_MCHK__SYS_CORR_ERR: /* * Fall through to vector 660 for processing... */ /* * Vector 660 - System, Uncorrectable */ case PRIVATEER_MCHK__SYS_HRD_ERR: status |= titan_process_logout_frame(mchk_header, print); break; /* * Vector 680 - System, Environmental */ case PRIVATEER_MCHK__SYS_ENVIRON: /* System, Environmental */ status |= privateer_process_680_frame(mchk_header, print); break; /* * Unknown */ default: status |= MCHK_DISPOSITION_REPORT; if (print) { printk("%s** Unknown Error, frame follows\n", err_print_prefix); mchk_dump_logout_frame(mchk_header); } } return status; } void privateer_machine_check(unsigned long vector, unsigned long la_ptr) { struct el_common *mchk_header = (struct el_common *)la_ptr; struct el_TITAN_sysdata_mcheck *tmchk = (struct el_TITAN_sysdata_mcheck *) (la_ptr + mchk_header->sys_offset); u64 irqmask; char *saved_err_prefix = err_print_prefix; #define PRIVATEER_680_INTERRUPT_MASK (0xE00UL) #define PRIVATEER_HOTPLUG_INTERRUPT_MASK (0xE00UL) /* * Sync the processor. */ mb(); draina(); /* * Only handle system events here. 
*/ if (vector != SCB_Q_SYSEVENT) return titan_machine_check(vector, la_ptr); /* * Report the event - System Events should be reported even if no * error is indicated since the event could indicate the return * to normal status. */ err_print_prefix = KERN_CRIT; printk("%s*System Event (Vector 0x%x) reported on CPU %d:\n", err_print_prefix, (unsigned int)vector, (int)smp_processor_id()); privateer_process_680_frame(mchk_header, 1); err_print_prefix = saved_err_prefix; /* * Convert any pending interrupts which report as 680 machine * checks to interrupts. */ irqmask = tmchk->c_dirx & PRIVATEER_680_INTERRUPT_MASK; /* * Dispatch the interrupt(s). */ titan_dispatch_irqs(irqmask); /* * Release the logout frame. */ wrmces(0x7); mb(); }
gpl-2.0
WildfireDEV/s6
drivers/video/kyro/STG4000InitDevice.c
15012
9799
/* * linux/drivers/video/kyro/STG4000InitDevice.c * * Copyright (C) 2000 Imagination Technologies Ltd * Copyright (C) 2002 STMicroelectronics * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/pci.h> #include "STG4000Reg.h" #include "STG4000Interface.h" /* SDRAM fixed settings */ #define SDRAM_CFG_0 0x49A1 #define SDRAM_CFG_1 0xA732 #define SDRAM_CFG_2 0x31 #define SDRAM_ARB_CFG 0xA0 #define SDRAM_REFRESH 0x20 /* Reset values */ #define PMX2_SOFTRESET_DAC_RST 0x0001 #define PMX2_SOFTRESET_C1_RST 0x0004 #define PMX2_SOFTRESET_C2_RST 0x0008 #define PMX2_SOFTRESET_3D_RST 0x0010 #define PMX2_SOFTRESET_VIDIN_RST 0x0020 #define PMX2_SOFTRESET_TLB_RST 0x0040 #define PMX2_SOFTRESET_SD_RST 0x0080 #define PMX2_SOFTRESET_VGA_RST 0x0100 #define PMX2_SOFTRESET_ROM_RST 0x0200 /* reserved bit, do not reset */ #define PMX2_SOFTRESET_TA_RST 0x0400 #define PMX2_SOFTRESET_REG_RST 0x4000 #define PMX2_SOFTRESET_ALL 0x7fff /* Core clock freq */ #define CORE_PLL_FREQ 1000000 /* Reference Clock freq */ #define REF_FREQ 14318 /* PCI Registers */ static u16 CorePllControl = 0x70; #define PCI_CONFIG_SUBSYS_ID 0x2e /* Misc */ #define CORE_PLL_MODE_REG_0_7 3 #define CORE_PLL_MODE_REG_8_15 2 #define CORE_PLL_MODE_CONFIG_REG 1 #define DAC_PLL_CONFIG_REG 0 #define STG_MAX_VCO 500000 #define STG_MIN_VCO 100000 /* PLL Clock */ #define STG4K3_PLL_SCALER 8 /* scale numbers by 2^8 for fixed point calc */ #define STG4K3_PLL_MIN_R 2 /* Minimum multiplier */ #define STG4K3_PLL_MAX_R 33 /* Max */ #define STG4K3_PLL_MIN_F 2 /* Minimum divisor */ #define STG4K3_PLL_MAX_F 513 /* Max */ #define STG4K3_PLL_MIN_OD 0 /* Min output divider (shift) */ #define STG4K3_PLL_MAX_OD 2 /* Max */ #define STG4K3_PLL_MIN_VCO_SC (100000000 >> STG4K3_PLL_SCALER) /* Min VCO rate */ #define STG4K3_PLL_MAX_VCO_SC 
(500000000 >> STG4K3_PLL_SCALER) /* Max VCO rate */
#define STG4K3_PLL_MINR_VCO_SC (100000000 >> STG4K3_PLL_SCALER) /* Min VCO rate (restricted) */
#define STG4K3_PLL_MAXR_VCO_SC (500000000 >> STG4K3_PLL_SCALER) /* Max VCO rate (restricted) */
#define STG4K3_PLL_MINR_VCO 100000000	/* Min VCO rate (restricted) */
#define STG4K3_PLL_MAX_VCO  500000000	/* Max VCO rate */
#define STG4K3_PLL_MAXR_VCO 500000000	/* Max VCO rate (restricted) */

/*
 * Crude calibrated busy-wait delay.  The loop counter is volatile so the
 * compiler cannot optimise the loop away (the comment in SetCoreClockPLL
 * notes the delay is needed between PCI config writes at -O3).
 */
#define OS_DELAY(X) \
{ \
	volatile u32 i,count=0; \
	for(i=0;i<X;i++) count++; \
}

/*
 * Program the SDRAM interface registers from the subsystem/revision IDs.
 * Memory type index comes from SubSysID bits [6:4], chip speed index from
 * bits [8:7].  Returns the chip speed in units of 100 Hz-ish scale
 * (table value * 10000), or 0 if either index is out of range.
 */
static u32 InitSDRAMRegisters(volatile STG4000REG __iomem *pSTGReg,
			      u32 dwSubSysID, u32 dwRevID)
{
	u32 adwSDRAMArgCfg0[] = { 0xa0, 0x80, 0xa0, 0xa0, 0xa0 };
	u32 adwSDRAMCfg1[] = { 0x8732, 0x8732, 0xa732, 0xa732, 0x8732 };
	u32 adwSDRAMCfg2[] = { 0x87d2, 0x87d2, 0xa7d2, 0x87d2, 0xa7d2 };
	u32 adwSDRAMRsh[] = { 36, 39, 40 };
	u32 adwChipSpeed[] = { 110, 120, 125 };
	u32 dwMemTypeIdx;
	u32 dwChipSpeedIdx;

	/* Get memory type and chip speed indexes from the SubSysDevID */
	dwMemTypeIdx = (dwSubSysID & 0x70) >> 4;
	dwChipSpeedIdx = (dwSubSysID & 0x180) >> 7;

	/* 0 signals "unknown board" to the caller (SetCoreClockPLL) */
	if (dwMemTypeIdx > 4 || dwChipSpeedIdx > 2)
		return 0;

	/* Program SD-RAM interface */
	STG_WRITE_REG(SDRAMArbiterConf, adwSDRAMArgCfg0[dwMemTypeIdx]);
	if (dwRevID < 5) {
		STG_WRITE_REG(SDRAMConf0, 0x49A1);
		STG_WRITE_REG(SDRAMConf1, adwSDRAMCfg1[dwMemTypeIdx]);
	} else {
		STG_WRITE_REG(SDRAMConf0, 0x4DF1);
		STG_WRITE_REG(SDRAMConf1, adwSDRAMCfg2[dwMemTypeIdx]);
	}
	STG_WRITE_REG(SDRAMConf2, 0x31);
	STG_WRITE_REG(SDRAMRefresh, adwSDRAMRsh[dwChipSpeedIdx]);

	return adwChipSpeed[dwChipSpeedIdx] * 10000;
}

/*
 * Exhaustive PLL parameter search: find pre-divider R, feedback F and
 * post-divider OD such that  Fout = Fref * F / (R * 2^OD)  lands within
 * ~0.4% (1/256) of the requested core clock while keeping the VCO inside
 * its (restricted) operating range.  Results are returned through
 * FOut/ROut/POut; the return value is the achieved clock in Hz (scaled),
 * or 0 if no combination matched.
 */
u32 ProgramClock(u32 refClock, u32 coreClock,
		 u32 *FOut, u32 *ROut, u32 *POut)
{
	u32 R = 0, F = 0, OD = 0, ODIndex = 0;
	u32 ulBestR = 0, ulBestF = 0, ulBestOD = 0;
	u32 ulBestVCO = 0, ulBestClk = 0, ulBestScore = 0;
	u32 ulScore, ulPhaseScore, ulVcoScore;
	u32 ulTmp = 0, ulVCO;
	u32 ulScaleClockReq, ulMinClock, ulMaxClock;
	u32 ODValues[] = { 1, 2, 0 };

	/* Translate clock in Hz */
	coreClock *= 100;	/* in Hz */
	refClock *= 1000;	/* in Hz */

	/* Work out acceptable clock
	 * The method calculates ~ +- 0.4% (1/256)
	 */
	ulMinClock = coreClock - (coreClock >> 8);
	ulMaxClock = coreClock + (coreClock >> 8);

	/* Scale clock required for use in calculations */
	ulScaleClockReq = coreClock >> STG4K3_PLL_SCALER;

	/* Iterate through post divider values */
	for (ODIndex = 0; ODIndex < 3; ODIndex++) {
		OD = ODValues[ODIndex];
		R = STG4K3_PLL_MIN_R;

		/* loop for pre-divider from min to max */
		while (R <= STG4K3_PLL_MAX_R) {
			/* estimate required feedback multiplier */
			ulTmp = R * (ulScaleClockReq << OD);

			/* F = ClkRequired * R * (2^OD) / Fref */
			F = (u32)(ulTmp / (refClock >> STG4K3_PLL_SCALER));

			/* compensate for accuracy */
			if (F > STG4K3_PLL_MIN_F)
				F--;

			/*
			 * We should be close to our target frequency (if it's
			 * achievable with current OD & R) let's iterate
			 * through F for best fit
			 */
			while ((F >= STG4K3_PLL_MIN_F) &&
			       (F <= STG4K3_PLL_MAX_F)) {
				/* Calc VCO at full accuracy */
				ulVCO = refClock / R;
				ulVCO = F * ulVCO;

				/*
				 * Check it's within restricted VCO range
				 * unless of course the desired frequency is
				 * above the restricted range, then test
				 * against VCO limit
				 */
				if ((ulVCO >= STG4K3_PLL_MINR_VCO) &&
				    ((ulVCO <= STG4K3_PLL_MAXR_VCO) ||
				     ((coreClock > STG4K3_PLL_MAXR_VCO)
				      && (ulVCO <= STG4K3_PLL_MAX_VCO)))) {
					ulTmp = (ulVCO >> OD); /* Clock = VCO / (2^OD) */

					/* Is this clock good enough? */
					if ((ulTmp >= ulMinClock)
					    && (ulTmp <= ulMaxClock)) {
						ulPhaseScore = (((refClock / R) - (refClock / STG4K3_PLL_MAX_R))) / ((refClock - (refClock / STG4K3_PLL_MAX_R)) >> 10);

						ulVcoScore = ((ulVCO - STG4K3_PLL_MINR_VCO)) / ((STG4K3_PLL_MAXR_VCO - STG4K3_PLL_MINR_VCO) >> 10);
						ulScore = ulPhaseScore + ulVcoScore;

						if (!ulBestScore) {
							ulBestVCO = ulVCO;
							ulBestOD = OD;
							ulBestF = F;
							ulBestR = R;
							ulBestClk = ulTmp;
							ulBestScore =
							    ulScore;
						}

						/* is this better, ( aim for highest Score) */
	/*--------------------------------------------------------------------------
	 Here we want to use a scoring system which will take account of both the
	 value at the phase comparater and the VCO output
	 to do this we will use a cumulative score between the two
	 The way this ends up is that we choose the first value in the loop anyway
	 but we shall keep this code in case new restrictions come into play
	--------------------------------------------------------------------------*/
						if ((ulScore >= ulBestScore)
						    && (OD > 0)) {
							ulBestVCO = ulVCO;
							ulBestOD = OD;
							ulBestF = F;
							ulBestR = R;
							ulBestClk = ulTmp;
							ulBestScore =
							    ulScore;
						}
					}
				}
				F++;
			}
			R++;
		}
	}

	/* did we find anything? Then return RFOD */
	if (ulBestScore) {
		*ROut = ulBestR;
		*FOut = ulBestF;

		/* OD values 2 and 3 both map to register encoding 3 */
		if ((ulBestOD == 2) || (ulBestOD == 3)) {
			*POut = 3;
		} else
			*POut = ulBestOD;
	}

	return (ulBestClk);
}

/*
 * Bring the core PLL up: mask interrupts, disable both core threads,
 * soft-reset the TA block, program the SDRAM interface, then compute the
 * PLL parameters and write them into the CorePllControl PCI config word
 * in two 8-bit halves (each latched via bit 14).  Returns 0 on success
 * or -EINVAL if the board's SubSysID is not recognised.
 */
int SetCoreClockPLL(volatile STG4000REG __iomem *pSTGReg, struct pci_dev *pDev)
{
	u32 F, R, P;
	u16 core_pll = 0, sub;
	u32 ulCoreClock;
	u32 tmp;
	u32 ulChipSpeed;

	STG_WRITE_REG(IntMask, 0xFFFF);

	/* Disable Primary Core Thread0 */
	tmp = STG_READ_REG(Thread0Enable);
	CLEAR_BIT(0);
	STG_WRITE_REG(Thread0Enable, tmp);

	/* Disable Primary Core Thread1 */
	tmp = STG_READ_REG(Thread1Enable);
	CLEAR_BIT(0);
	STG_WRITE_REG(Thread1Enable, tmp);

	STG_WRITE_REG(SoftwareReset,
		      PMX2_SOFTRESET_REG_RST | PMX2_SOFTRESET_ROM_RST);
	STG_WRITE_REG(SoftwareReset,
		      PMX2_SOFTRESET_REG_RST | PMX2_SOFTRESET_TA_RST |
		      PMX2_SOFTRESET_ROM_RST);

	/* Need to play around to reset TA */
	STG_WRITE_REG(TAConfiguration, 0);
	STG_WRITE_REG(SoftwareReset,
		      PMX2_SOFTRESET_REG_RST | PMX2_SOFTRESET_ROM_RST);
	STG_WRITE_REG(SoftwareReset,
		      PMX2_SOFTRESET_REG_RST | PMX2_SOFTRESET_TA_RST |
		      PMX2_SOFTRESET_ROM_RST);

	pci_read_config_word(pDev, PCI_CONFIG_SUBSYS_ID, &sub);

	ulChipSpeed = InitSDRAMRegisters(pSTGReg, (u32)sub,
					 (u32)pDev->revision);

	if (ulChipSpeed == 0)
		return -EINVAL;

	ulCoreClock = ProgramClock(REF_FREQ, CORE_PLL_FREQ, &F, &R, &P);

	/* Pack P | (F-2) | (R-2) into the PLL control word encoding */
	core_pll |= ((P) | ((F - 2) << 2) | ((R - 2) << 11));

	/* Set Core PLL Control to Core PLL Mode */

	/* Send bits 0:7 of the Core PLL Mode register */
	tmp = ((CORE_PLL_MODE_REG_0_7 << 8) | (core_pll & 0x00FF));
	pci_write_config_word(pDev, CorePllControl, tmp);
	/* Without some delay between the PCI config writes the clock does
	   not reliably set when the code is compiled -O3 */
	OS_DELAY(1000000);

	tmp |= SET_BIT(14);
	pci_write_config_word(pDev, CorePllControl, tmp);
	OS_DELAY(1000000);

	/* Send bits 8:15 of the Core PLL Mode register */
	tmp = ((CORE_PLL_MODE_REG_8_15 << 8) | ((core_pll & 0xFF00) >> 8));
	pci_write_config_word(pDev, CorePllControl, tmp);
	OS_DELAY(1000000);

	tmp |= SET_BIT(14);
	pci_write_config_word(pDev, CorePllControl, tmp);
	OS_DELAY(1000000);

	STG_WRITE_REG(SoftwareReset, PMX2_SOFTRESET_ALL);

#if 0
	/* Enable Primary Core Thread0 */
	tmp = ((STG_READ_REG(Thread0Enable)) | SET_BIT(0));
	STG_WRITE_REG(Thread0Enable, tmp);

	/* Enable Primary Core Thread1 */
	tmp = ((STG_READ_REG(Thread1Enable)) | SET_BIT(0));
	STG_WRITE_REG(Thread1Enable, tmp);
#endif

	return 0;
}
gpl-2.0
surengrig/Milstone-XT720-kernel-upgrade
arch/arm/mach-ep93xx/edb9302a.c
165
1826
/* * arch/arm/mach-ep93xx/edb9302a.c * Cirrus Logic EDB9302A support. * * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/mtd/physmap.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/i2c.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> static struct physmap_flash_data edb9302a_flash_data = { .width = 2, }; static struct resource edb9302a_flash_resource = { .start = EP93XX_CS6_PHYS_BASE, .end = EP93XX_CS6_PHYS_BASE + SZ_16M - 1, .flags = IORESOURCE_MEM, }; static struct platform_device edb9302a_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &edb9302a_flash_data, }, .num_resources = 1, .resource = &edb9302a_flash_resource, }; static struct ep93xx_eth_data edb9302a_eth_data = { .phy_id = 1, }; static void __init edb9302a_init_machine(void) { ep93xx_init_devices(); platform_device_register(&edb9302a_flash); ep93xx_register_eth(&edb9302a_eth_data, 1); } MACHINE_START(EDB9302A, "Cirrus Logic EDB9302A Evaluation Board") /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */ .phys_io = EP93XX_APB_PHYS_BASE, .io_pg_offst = ((EP93XX_APB_VIRT_BASE) >> 18) & 0xfffc, .boot_params = EP93XX_SDCE0_PHYS_BASE + 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .timer = &ep93xx_timer, .init_machine = edb9302a_init_machine, MACHINE_END
gpl-2.0
walac/linux
drivers/staging/comedi/drivers/8255_pci.c
165
7641
// SPDX-License-Identifier: GPL-2.0+
/*
 * COMEDI driver for generic PCI based 8255 digital i/o boards
 * Copyright (C) 2012 H Hartley Sweeten <hsweeten@visionengravers.com>
 *
 * Based on the tested adl_pci7296 driver written by:
 *	Jon Grierson <jd@renko.co.uk>
 * and the experimental cb_pcidio driver written by:
 *	Yoshiya Matsuzaka
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
 */

/*
 * Driver: 8255_pci
 * Description: Generic PCI based 8255 Digital I/O boards
 * Devices: [ADLink] PCI-7224 (adl_pci-7224), PCI-7248 (adl_pci-7248),
 *   PCI-7296 (adl_pci-7296),
 *   [Measurement Computing] PCI-DIO24 (cb_pci-dio24),
 *   PCI-DIO24H (cb_pci-dio24h), PCI-DIO48H (cb_pci-dio48h),
 *   PCI-DIO96H (cb_pci-dio96h),
 *   [National Instruments] PCI-DIO-96 (ni_pci-dio-96),
 *   PCI-DIO-96B (ni_pci-dio-96b), PXI-6508 (ni_pxi-6508),
 *   PCI-6503 (ni_pci-6503), PCI-6503B (ni_pci-6503b),
 *   PCI-6503X (ni_pci-6503x), PXI-6503 (ni_pxi-6503)
 * Author: H Hartley Sweeten <hsweeten@visionengravers.com>
 * Updated: Wed, 12 Sep 2012 11:52:01 -0700
 * Status: untested
 *
 * These boards have one or more 8255 digital I/O chips, each of which
 * is supported as a separate 24-channel DIO subdevice.
 *
 * Boards with 24 DIO channels (1 DIO subdevice):
 *
 *   PCI-7224, PCI-DIO24, PCI-DIO24H, PCI-6503, PCI-6503B, PCI-6503X,
 *   PXI-6503
 *
 * Boards with 48 DIO channels (2 DIO subdevices):
 *
 *   PCI-7248, PCI-DIO48H
 *
 * Boards with 96 DIO channels (4 DIO subdevices):
 *
 *   PCI-7296, PCI-DIO96H, PCI-DIO-96, PCI-DIO-96B, PXI-6508
 *
 * Some of these boards also have an 8254 programmable timer/counter
 * chip. This chip is not currently supported by this driver.
 *
 * Interrupt support for these boards is also not currently supported.
 *
 * Configuration Options: not applicable, uses PCI auto config.
 */

#include <linux/module.h>

#include "../comedi_pci.h"

#include "8255.h"

enum pci_8255_boardid {
	BOARD_ADLINK_PCI7224,
	BOARD_ADLINK_PCI7248,
	BOARD_ADLINK_PCI7296,
	BOARD_CB_PCIDIO24,
	BOARD_CB_PCIDIO24H,
	BOARD_CB_PCIDIO48H_OLD,
	BOARD_CB_PCIDIO48H_NEW,
	BOARD_CB_PCIDIO96H,
	BOARD_NI_PCIDIO96,
	BOARD_NI_PCIDIO96B,
	BOARD_NI_PXI6508,
	BOARD_NI_PCI6503,
	BOARD_NI_PCI6503B,
	BOARD_NI_PCI6503X,
	BOARD_NI_PXI_6503,
};

/*
 * Per-board description:
 * @dio_badr:  PCI BAR holding the 8255 registers
 * @n_8255:    number of 8255 chips (one DIO subdevice each)
 * @has_mite:  NI boards route register access through a MITE chip that
 *             must be set up first
 */
struct pci_8255_boardinfo {
	const char *name;
	int dio_badr;
	int n_8255;
	unsigned int has_mite:1;
};

static const struct pci_8255_boardinfo pci_8255_boards[] = {
	[BOARD_ADLINK_PCI7224] = {
		.name		= "adl_pci-7224",
		.dio_badr	= 2,
		.n_8255		= 1,
	},
	[BOARD_ADLINK_PCI7248] = {
		.name		= "adl_pci-7248",
		.dio_badr	= 2,
		.n_8255		= 2,
	},
	[BOARD_ADLINK_PCI7296] = {
		.name		= "adl_pci-7296",
		.dio_badr	= 2,
		.n_8255		= 4,
	},
	[BOARD_CB_PCIDIO24] = {
		.name		= "cb_pci-dio24",
		.dio_badr	= 2,
		.n_8255		= 1,
	},
	[BOARD_CB_PCIDIO24H] = {
		.name		= "cb_pci-dio24h",
		.dio_badr	= 2,
		.n_8255		= 1,
	},
	[BOARD_CB_PCIDIO48H_OLD] = {
		.name		= "cb_pci-dio48h",
		.dio_badr	= 1,
		.n_8255		= 2,
	},
	[BOARD_CB_PCIDIO48H_NEW] = {
		.name		= "cb_pci-dio48h",
		.dio_badr	= 2,
		.n_8255		= 2,
	},
	[BOARD_CB_PCIDIO96H] = {
		.name		= "cb_pci-dio96h",
		.dio_badr	= 2,
		.n_8255		= 4,
	},
	[BOARD_NI_PCIDIO96] = {
		.name		= "ni_pci-dio-96",
		.dio_badr	= 1,
		.n_8255		= 4,
		.has_mite	= 1,
	},
	[BOARD_NI_PCIDIO96B] = {
		.name		= "ni_pci-dio-96b",
		.dio_badr	= 1,
		.n_8255		= 4,
		.has_mite	= 1,
	},
	[BOARD_NI_PXI6508] = {
		.name		= "ni_pxi-6508",
		.dio_badr	= 1,
		.n_8255		= 4,
		.has_mite	= 1,
	},
	[BOARD_NI_PCI6503] = {
		.name		= "ni_pci-6503",
		.dio_badr	= 1,
		.n_8255		= 1,
		.has_mite	= 1,
	},
	[BOARD_NI_PCI6503B] = {
		.name		= "ni_pci-6503b",
		.dio_badr	= 1,
		.n_8255		= 1,
		.has_mite	= 1,
	},
	[BOARD_NI_PCI6503X] = {
		.name		= "ni_pci-6503x",
		.dio_badr	= 1,
		.n_8255		= 1,
		.has_mite	= 1,
	},
	[BOARD_NI_PXI_6503] = {
		.name		= "ni_pxi-6503",
		.dio_badr	= 1,
		.n_8255		= 1,
		.has_mite	= 1,
	},
};

/* ripped from mite.h and mite_setup2() to avoid mite dependency */
#define MITE_IODWBSR	0xc0	/* IO Device Window Base Size Register */
#define WENAB		BIT(7)	/* window enable */

/*
 * Point the MITE's I/O window at the board's main registers (BAR 1) so
 * the 8255 registers become accessible.  The MITE registers themselves
 * (BAR 0) are only mapped for the duration of this setup.
 */
static int pci_8255_mite_init(struct pci_dev *pcidev)
{
	void __iomem *mite_base;
	u32 main_phys_addr;

	/* ioremap the MITE registers (BAR 0) temporarily */
	mite_base = pci_ioremap_bar(pcidev, 0);
	if (!mite_base)
		return -ENOMEM;

	/* set data window to main registers (BAR 1) */
	main_phys_addr = pci_resource_start(pcidev, 1);
	writel(main_phys_addr | WENAB, mite_base + MITE_IODWBSR);

	/* finished with MITE registers */
	iounmap(mite_base);
	return 0;
}

/*
 * Auto-attach: look up the board from the PCI id table context, enable
 * the PCI device, set up the MITE if present, map (or record the iobase
 * of) the DIO BAR, and create one 8255 DIO subdevice per chip.
 */
static int pci_8255_auto_attach(struct comedi_device *dev,
				unsigned long context)
{
	struct pci_dev *pcidev = comedi_to_pci_dev(dev);
	const struct pci_8255_boardinfo *board = NULL;
	struct comedi_subdevice *s;
	int ret;
	int i;

	if (context < ARRAY_SIZE(pci_8255_boards))
		board = &pci_8255_boards[context];
	if (!board)
		return -ENODEV;
	dev->board_ptr = board;
	dev->board_name = board->name;

	ret = comedi_pci_enable(dev);
	if (ret)
		return ret;

	if (board->has_mite) {
		ret = pci_8255_mite_init(pcidev);
		if (ret)
			return ret;
	}

	/* memory-mapped BARs get ioremapped; port-I/O BARs use dev->iobase */
	if ((pci_resource_flags(pcidev, board->dio_badr) & IORESOURCE_MEM)) {
		dev->mmio = pci_ioremap_bar(pcidev, board->dio_badr);
		if (!dev->mmio)
			return -ENOMEM;
	} else {
		dev->iobase = pci_resource_start(pcidev, board->dio_badr);
	}

	/*
	 * One, two, or four subdevices are setup by this driver depending
	 * on the number of channels provided by the board. Each subdevice
	 * has 24 channels supported by the 8255 module.
	 */
	ret = comedi_alloc_subdevices(dev, board->n_8255);
	if (ret)
		return ret;

	for (i = 0; i < board->n_8255; i++) {
		s = &dev->subdevices[i];
		if (dev->mmio)
			ret = subdev_8255_mm_init(dev, s, NULL,
						  i * I8255_SIZE);
		else
			ret = subdev_8255_init(dev, s, NULL,
					       i * I8255_SIZE);
		if (ret)
			return ret;
	}

	return 0;
}

static struct comedi_driver pci_8255_driver = {
	.driver_name	= "8255_pci",
	.module		= THIS_MODULE,
	.auto_attach	= pci_8255_auto_attach,
	.detach		= comedi_pci_detach,
};

static int pci_8255_pci_probe(struct pci_dev *dev,
			      const struct pci_device_id *id)
{
	return comedi_pci_auto_config(dev, &pci_8255_driver, id->driver_data);
}

static const struct pci_device_id pci_8255_pci_table[] = {
	{ PCI_VDEVICE(ADLINK, 0x7224), BOARD_ADLINK_PCI7224 },
	{ PCI_VDEVICE(ADLINK, 0x7248), BOARD_ADLINK_PCI7248 },
	{ PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 },
	{ PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 },
	{ PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H },
	/* old/new cb_pci-dio48h revisions are told apart by subdevice id */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, 0x0000, 0x0000),
	  .driver_data = BOARD_CB_PCIDIO48H_OLD },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b),
	  .driver_data = BOARD_CB_PCIDIO48H_NEW },
	{ PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H },
	{ PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 },
	{ PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B },
	{ PCI_VDEVICE(NI, 0x13c0), BOARD_NI_PXI6508 },
	{ PCI_VDEVICE(NI, 0x0400), BOARD_NI_PCI6503 },
	{ PCI_VDEVICE(NI, 0x1250), BOARD_NI_PCI6503B },
	{ PCI_VDEVICE(NI, 0x17d0), BOARD_NI_PCI6503X },
	{ PCI_VDEVICE(NI, 0x1800), BOARD_NI_PXI_6503 },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, pci_8255_pci_table);

static struct pci_driver pci_8255_pci_driver = {
	.name		= "8255_pci",
	.id_table	= pci_8255_pci_table,
	.probe		= pci_8255_pci_probe,
	.remove		= comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(pci_8255_driver, pci_8255_pci_driver);

MODULE_DESCRIPTION("COMEDI - Generic PCI based 8255 Digital I/O boards");
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_LICENSE("GPL");
gpl-2.0
12019/Dorimanx-SG2-I9100-Kernel
drivers/thermal/imx_thermal.c
165
14172
/*
 * Copyright 2013 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/thermal.h>
#include <linux/types.h>

/* register offsets for the set/clear/toggle aliases of each anatop reg */
#define REG_SET		0x4
#define REG_CLR		0x8
#define REG_TOG		0xc

#define MISC0				0x0150
#define MISC0_REFTOP_SELBIASOFF		(1 << 3)

#define TEMPSENSE0			0x0180
#define TEMPSENSE0_ALARM_VALUE_SHIFT	20
#define TEMPSENSE0_ALARM_VALUE_MASK	(0xfff << TEMPSENSE0_ALARM_VALUE_SHIFT)
#define TEMPSENSE0_TEMP_CNT_SHIFT	8
#define TEMPSENSE0_TEMP_CNT_MASK	(0xfff << TEMPSENSE0_TEMP_CNT_SHIFT)
#define TEMPSENSE0_FINISHED		(1 << 2)
#define TEMPSENSE0_MEASURE_TEMP		(1 << 1)
#define TEMPSENSE0_POWER_DOWN		(1 << 0)

#define TEMPSENSE1			0x0190
#define TEMPSENSE1_MEASURE_FREQ		0xffff

#define OCOTP_ANA1			0x04e0

/* The driver supports 1 passive trip point and 1 critical trip point */
enum imx_thermal_trip {
	IMX_TRIP_PASSIVE,
	IMX_TRIP_CRITICAL,
	IMX_TRIP_NUM,
};

/*
 * It defines the temperature in millicelsius for passive trip point
 * that will trigger cooling action when crossed.
 */
#define IMX_TEMP_PASSIVE		85000

#define IMX_POLLING_DELAY		2000 /* millisecond */
#define IMX_PASSIVE_DELAY		1000

/*
 * Driver state.
 * @c1, @c2:      calibration constants, see formula in imx_get_sensor_data()
 * @alarm_temp:   threshold currently programmed into the hardware alarm
 * @irq_enabled:  tracks whether the alarm IRQ is currently armed; the IRQ
 *                is disabled in the hard handler and re-armed from
 *                imx_get_temp() once the temperature drops again
 */
struct imx_thermal_data {
	struct thermal_zone_device *tz;
	struct thermal_cooling_device *cdev;
	enum thermal_device_mode mode;
	struct regmap *tempmon;
	int c1, c2; /* See formula in imx_get_sensor_data() */
	unsigned long temp_passive;
	unsigned long temp_critical;
	unsigned long alarm_temp;
	unsigned long last_temp;
	bool irq_enabled;
	int irq;
};

/* Convert a millicelsius threshold to a sensor count and program it. */
static void imx_set_alarm_temp(struct imx_thermal_data *data,
			       signed long alarm_temp)
{
	struct regmap *map = data->tempmon;
	int alarm_value;

	data->alarm_temp = alarm_temp;
	/* invert the c2 + c1 * n_meas formula to get the raw count */
	alarm_value = (alarm_temp - data->c2) / data->c1;
	regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_ALARM_VALUE_MASK);
	regmap_write(map, TEMPSENSE0 + REG_SET, alarm_value <<
			TEMPSENSE0_ALARM_VALUE_SHIFT);
}

/*
 * Read the current die temperature in millicelsius.  When the zone is
 * disabled the sensor is powered up just for this one measurement and
 * powered back down afterwards.  Also walks the alarm threshold between
 * the passive and critical trip points and re-arms the alarm IRQ.
 */
static int imx_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
{
	struct imx_thermal_data *data = tz->devdata;
	struct regmap *map = data->tempmon;
	unsigned int n_meas;
	bool wait;
	u32 val;

	if (data->mode == THERMAL_DEVICE_ENABLED) {
		/* Check if a measurement is currently in progress */
		regmap_read(map, TEMPSENSE0, &val);
		wait = !(val & TEMPSENSE0_FINISHED);
	} else {
		/*
		 * Every time we measure the temperature, we will power on the
		 * temperature sensor, enable measurements, take a reading,
		 * disable measurements, power off the temperature sensor.
		 */
		regmap_write(map, TEMPSENSE0 + REG_CLR,
			     TEMPSENSE0_POWER_DOWN);
		regmap_write(map, TEMPSENSE0 + REG_SET,
			     TEMPSENSE0_MEASURE_TEMP);

		wait = true;
	}

	/*
	 * According to the temp sensor designers, it may require up to ~17us
	 * to complete a measurement.
	 */
	if (wait)
		usleep_range(20, 50);

	regmap_read(map, TEMPSENSE0, &val);

	if (data->mode != THERMAL_DEVICE_ENABLED) {
		regmap_write(map, TEMPSENSE0 + REG_CLR,
			     TEMPSENSE0_MEASURE_TEMP);
		regmap_write(map, TEMPSENSE0 + REG_SET,
			     TEMPSENSE0_POWER_DOWN);
	}

	if ((val & TEMPSENSE0_FINISHED) == 0) {
		dev_dbg(&tz->device, "temp measurement never finished\n");
		return -EAGAIN;
	}

	n_meas = (val & TEMPSENSE0_TEMP_CNT_MASK) >> TEMPSENSE0_TEMP_CNT_SHIFT;

	/* See imx_get_sensor_data() for formula derivation */
	*temp = data->c2 + data->c1 * n_meas;

	/* Update alarm value to next higher trip point */
	if (data->alarm_temp == data->temp_passive &&
	    *temp >= data->temp_passive)
		imx_set_alarm_temp(data, data->temp_critical);
	if (data->alarm_temp == data->temp_critical &&
	    *temp < data->temp_passive) {
		imx_set_alarm_temp(data, data->temp_passive);
		dev_dbg(&tz->device, "thermal alarm off: T < %lu\n",
			data->alarm_temp / 1000);
	}

	if (*temp != data->last_temp) {
		dev_dbg(&tz->device, "millicelsius: %ld\n", *temp);
		data->last_temp = *temp;
	}

	/* Reenable alarm IRQ if temperature below alarm temperature */
	if (!data->irq_enabled && *temp < data->alarm_temp) {
		data->irq_enabled = true;
		enable_irq(data->irq);
	}

	return 0;
}

static int imx_get_mode(struct thermal_zone_device *tz,
			enum thermal_device_mode *mode)
{
	struct imx_thermal_data *data = tz->devdata;

	*mode = data->mode;

	return 0;
}

/*
 * Enable/disable the zone: powers the sensor up or down, starts or stops
 * polling, and arms or disarms the alarm IRQ accordingly.
 */
static int imx_set_mode(struct thermal_zone_device *tz,
			enum thermal_device_mode mode)
{
	struct imx_thermal_data *data = tz->devdata;
	struct regmap *map = data->tempmon;

	if (mode == THERMAL_DEVICE_ENABLED) {
		tz->polling_delay = IMX_POLLING_DELAY;
		tz->passive_delay = IMX_PASSIVE_DELAY;

		regmap_write(map, TEMPSENSE0 + REG_CLR,
			     TEMPSENSE0_POWER_DOWN);
		regmap_write(map, TEMPSENSE0 + REG_SET,
			     TEMPSENSE0_MEASURE_TEMP);

		if (!data->irq_enabled) {
			data->irq_enabled = true;
			enable_irq(data->irq);
		}
	} else {
		regmap_write(map, TEMPSENSE0 + REG_CLR,
			     TEMPSENSE0_MEASURE_TEMP);
		regmap_write(map, TEMPSENSE0 + REG_SET,
			     TEMPSENSE0_POWER_DOWN);

		tz->polling_delay = 0;
		tz->passive_delay = 0;

		if (data->irq_enabled) {
			disable_irq(data->irq);
			data->irq_enabled = false;
		}
	}

	data->mode = mode;
	thermal_zone_device_update(tz);

	return 0;
}

static int imx_get_trip_type(struct thermal_zone_device *tz, int trip,
			     enum thermal_trip_type *type)
{
	*type = (trip == IMX_TRIP_PASSIVE) ? THERMAL_TRIP_PASSIVE :
					     THERMAL_TRIP_CRITICAL;
	return 0;
}

static int imx_get_crit_temp(struct thermal_zone_device *tz,
			     unsigned long *temp)
{
	struct imx_thermal_data *data = tz->devdata;

	*temp = data->temp_critical;
	return 0;
}

static int imx_get_trip_temp(struct thermal_zone_device *tz, int trip,
			     unsigned long *temp)
{
	struct imx_thermal_data *data = tz->devdata;

	*temp = (trip == IMX_TRIP_PASSIVE) ? data->temp_passive :
					     data->temp_critical;
	return 0;
}

/*
 * Only the passive trip may be changed from userspace, and never above
 * IMX_TEMP_PASSIVE; the critical trip is fixed by calibration data.
 */
static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip,
			     unsigned long temp)
{
	struct imx_thermal_data *data = tz->devdata;

	if (trip == IMX_TRIP_CRITICAL)
		return -EPERM;

	if (temp > IMX_TEMP_PASSIVE)
		return -EINVAL;

	data->temp_passive = temp;

	imx_set_alarm_temp(data, temp);

	return 0;
}

static int imx_bind(struct thermal_zone_device *tz,
		    struct thermal_cooling_device *cdev)
{
	int ret;

	ret = thermal_zone_bind_cooling_device(tz, IMX_TRIP_PASSIVE, cdev,
					       THERMAL_NO_LIMIT,
					       THERMAL_NO_LIMIT);
	if (ret) {
		dev_err(&tz->device,
			"binding zone %s with cdev %s failed:%d\n",
			tz->type, cdev->type, ret);
		return ret;
	}

	return 0;
}

static int imx_unbind(struct thermal_zone_device *tz,
		      struct thermal_cooling_device *cdev)
{
	int ret;

	ret = thermal_zone_unbind_cooling_device(tz, IMX_TRIP_PASSIVE, cdev);
	if (ret) {
		dev_err(&tz->device,
			"unbinding zone %s with cdev %s failed:%d\n",
			tz->type, cdev->type, ret);
		return ret;
	}

	return 0;
}

static const struct thermal_zone_device_ops imx_tz_ops = {
	.bind = imx_bind,
	.unbind = imx_unbind,
	.get_temp = imx_get_temp,
	.get_mode = imx_get_mode,
	.set_mode = imx_set_mode,
	.get_trip_type = imx_get_trip_type,
	.get_trip_temp = imx_get_trip_temp,
	.get_crit_temp = imx_get_crit_temp,
	.set_trip_temp = imx_set_trip_temp,
};

/*
 * Read the factory calibration fuses (OCOTP_ANA1) and derive the linear
 * conversion constants c1/c2 plus the default passive/critical trips.
 */
static int imx_get_sensor_data(struct platform_device *pdev)
{
	struct imx_thermal_data *data = platform_get_drvdata(pdev);
	struct regmap *map;
	int t1, t2, n1, n2;
	int ret;
	u32 val;

	map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
					      "fsl,tempmon-data");
	if (IS_ERR(map)) {
		ret = PTR_ERR(map);
		dev_err(&pdev->dev, "failed to get sensor regmap: %d\n", ret);
		return ret;
	}

	ret = regmap_read(map, OCOTP_ANA1, &val);
	if (ret) {
		dev_err(&pdev->dev, "failed to read sensor data: %d\n", ret);
		return ret;
	}

	/* all-zeros or all-ones means the part was never calibrated */
	if (val == 0 || val == ~0) {
		dev_err(&pdev->dev, "invalid sensor calibration data\n");
		return -EINVAL;
	}

	/*
	 * Sensor data layout:
	 *   [31:20] - sensor value @ 25C
	 *   [19:8]  - sensor value of hot
	 *   [7:0]   - hot temperature value
	 */
	n1 = val >> 20;
	n2 = (val & 0xfff00) >> 8;
	t2 = val & 0xff;
	t1 = 25; /* t1 always 25C */

	/*
	 * Derived from linear interpolation,
	 * Tmeas = T2 + (Nmeas - N2) * (T1 - T2) / (N1 - N2)
	 * We want to reduce this down to the minimum computation necessary
	 * for each temperature read.  Also, we want Tmeas in millicelsius
	 * and we don't want to lose precision from integer division. So...
	 * milli_Tmeas = 1000 * T2 + 1000 * (Nmeas - N2) * (T1 - T2) / (N1 - N2)
	 * Let constant c1 = 1000 * (T1 - T2) / (N1 - N2)
	 * milli_Tmeas = (1000 * T2) + c1 * (Nmeas - N2)
	 * milli_Tmeas = (1000 * T2) + (c1 * Nmeas) - (c1 * N2)
	 * Let constant c2 = (1000 * T2) - (c1 * N2)
	 * milli_Tmeas = c2 + (c1 * Nmeas)
	 */
	data->c1 = 1000 * (t1 - t2) / (n1 - n2);
	data->c2 = 1000 * t2 - data->c1 * n2;

	/*
	 * Set the default passive cooling trip point to 20 °C below the
	 * maximum die temperature. Can be changed from userspace.
	 */
	data->temp_passive = 1000 * (t2 - 20);

	/*
	 * The maximum die temperature is t2, let's give 5 °C cushion
	 * for noise and possible temperature rise between measurements.
	 */
	data->temp_critical = 1000 * (t2 - 5);

	return 0;
}

/*
 * Hard IRQ: just disarm the level-style alarm and defer to the thread;
 * imx_get_temp() re-arms the IRQ once the temperature drops.
 */
static irqreturn_t imx_thermal_alarm_irq(int irq, void *dev)
{
	struct imx_thermal_data *data = dev;

	disable_irq_nosync(irq);
	data->irq_enabled = false;

	return IRQ_WAKE_THREAD;
}

static irqreturn_t imx_thermal_alarm_irq_thread(int irq, void *dev)
{
	struct imx_thermal_data *data = dev;

	dev_dbg(&data->tz->device, "THERMAL ALARM: T > %lu\n",
		data->alarm_temp / 1000);

	thermal_zone_device_update(data->tz);

	return IRQ_HANDLED;
}

/*
 * Probe: acquire the tempmon regmap and IRQ, read calibration, put the
 * sensor into a known state, register the cpufreq cooling device and the
 * thermal zone, then start continuous ~10 Hz measurements.
 */
static int imx_thermal_probe(struct platform_device *pdev)
{
	struct imx_thermal_data *data;
	struct cpumask clip_cpus;
	struct regmap *map;
	int measure_freq;
	int ret;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
					      "fsl,tempmon");
	if (IS_ERR(map)) {
		ret = PTR_ERR(map);
		dev_err(&pdev->dev, "failed to get tempmon regmap: %d\n", ret);
		return ret;
	}
	data->tempmon = map;

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	ret = devm_request_threaded_irq(&pdev->dev, data->irq,
			imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread,
			0, "imx_thermal", data);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret);
		return ret;
	}

	platform_set_drvdata(pdev, data);

	ret = imx_get_sensor_data(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to get sensor data\n");
		return ret;
	}

	/* Make sure sensor is in known good state for measurements */
	regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
	regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP);
	regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
	regmap_write(map, MISC0 + REG_SET, MISC0_REFTOP_SELBIASOFF);
	regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);

	cpumask_set_cpu(0, &clip_cpus);
	data->cdev = cpufreq_cooling_register(&clip_cpus);
	if (IS_ERR(data->cdev)) {
		ret = PTR_ERR(data->cdev);
		dev_err(&pdev->dev,
			"failed to register cpufreq cooling device: %d\n",
			ret);
		return ret;
	}

	data->tz = thermal_zone_device_register("imx_thermal_zone",
						IMX_TRIP_NUM,
						BIT(IMX_TRIP_PASSIVE), data,
						&imx_tz_ops, NULL,
						IMX_PASSIVE_DELAY,
						IMX_POLLING_DELAY);
	if (IS_ERR(data->tz)) {
		ret = PTR_ERR(data->tz);
		dev_err(&pdev->dev,
			"failed to register thermal zone device %d\n", ret);
		cpufreq_cooling_unregister(data->cdev);
		return ret;
	}

	/* Enable measurements at ~ 10 Hz */
	regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
	measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */
	regmap_write(map, TEMPSENSE1 + REG_SET, measure_freq);
	imx_set_alarm_temp(data, data->temp_passive);
	regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
	regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);

	data->irq_enabled = true;
	data->mode = THERMAL_DEVICE_ENABLED;

	return 0;
}

static int imx_thermal_remove(struct platform_device *pdev)
{
	struct imx_thermal_data *data = platform_get_drvdata(pdev);
	struct regmap *map = data->tempmon;

	/* Disable measurements */
	regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);

	thermal_zone_device_unregister(data->tz);
	cpufreq_cooling_unregister(data->cdev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int imx_thermal_suspend(struct device *dev)
{
	struct imx_thermal_data *data = dev_get_drvdata(dev);
	struct regmap *map = data->tempmon;
	u32 val;

	regmap_read(map, TEMPSENSE0, &val);
	if ((val & TEMPSENSE0_POWER_DOWN) == 0) {
		/*
		 * If a measurement is taking place, wait for a long enough
		 * time for it to finish, and then check again.  If it still
		 * does not finish, something must go wrong.
		 */
		udelay(50);
		regmap_read(map, TEMPSENSE0, &val);
		if ((val & TEMPSENSE0_POWER_DOWN) == 0)
			return -ETIMEDOUT;
	}

	return 0;
}

static int imx_thermal_resume(struct device *dev)
{
	/* Nothing to do for now */
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(imx_thermal_pm_ops,
			 imx_thermal_suspend, imx_thermal_resume);

static const struct of_device_id of_imx_thermal_match[] = {
	{ .compatible = "fsl,imx6q-tempmon", },
	{ /* end */ }
};

static struct platform_driver imx_thermal = {
	.driver = {
		.name	= "imx_thermal",
		.owner  = THIS_MODULE,
		.pm	= &imx_thermal_pm_ops,
		.of_match_table = of_imx_thermal_match,
	},
	.probe		= imx_thermal_probe,
	.remove		= imx_thermal_remove,
};
module_platform_driver(imx_thermal);

MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("Thermal driver for Freescale i.MX SoCs");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:imx-thermal");
gpl-2.0
ftteam/kernel
drivers/net/wireless/mwifiex/sta_rx.c
165
7938
/*
 * Marvell Wireless LAN device driver: station RX data handling
 *
 * Copyright (C) 2011, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License").  You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
 * this warranty disclaimer.
 */

#include <uapi/linux/ipv6.h>
#include <net/ndisc.h>
#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "11n_aggr.h"
#include "11n_rxreorder.h"

/* This function checks if a frame is IPv4 ARP or IPv6 Neighbour advertisement
 * frame. If frame has both source and destination mac address as same, this
 * function drops such gratuitous frames.
 */
static bool mwifiex_discard_gratuitous_arp(struct mwifiex_private *priv,
					   struct sk_buff *skb)
{
	const struct mwifiex_arp_eth_header *arp;
	struct ethhdr *eth_hdr;
	struct ipv6hdr *ipv6;
	struct icmp6hdr *icmpv6;

	eth_hdr = (struct ethhdr *)skb->data;
	switch (ntohs(eth_hdr->h_proto)) {
	case ETH_P_ARP:
		arp = (void *)(skb->data + sizeof(struct ethhdr));
		if (arp->hdr.ar_op == htons(ARPOP_REPLY) ||
		    arp->hdr.ar_op == htons(ARPOP_REQUEST)) {
			/* sender IP == target IP -> gratuitous */
			if (!memcmp(arp->ar_sip, arp->ar_tip, 4))
				return true;
		}
		break;
	case ETH_P_IPV6:
		ipv6 = (void *)(skb->data + sizeof(struct ethhdr));
		icmpv6 = (void *)(skb->data + sizeof(struct ethhdr) +
				  sizeof(struct ipv6hdr));
		if (NDISC_NEIGHBOUR_ADVERTISEMENT == icmpv6->icmp6_type) {
			/* src IPv6 == dst IPv6 -> unsolicited NA */
			if (!memcmp(&ipv6->saddr, &ipv6->daddr,
				    sizeof(struct in6_addr)))
				return true;
		}
		break;
	default:
		break;
	}

	return false;
}

/*
 * This function processes the received packet and forwards it
 * to kernel/upper layer.
 *
 * This function parses through the received packet and determines
 * if it is a debug packet or normal packet.
 *
 * For non-debug packets, the function chops off unnecessary leading
 * header bytes, reconstructs the packet as an ethernet frame or
 * 802.2/llc/snap frame as required, and sends it to kernel/upper layer.
 *
 * The completion callback is called after processing in complete.
 */
int mwifiex_process_rx_packet(struct mwifiex_private *priv,
			      struct sk_buff *skb)
{
	int ret;
	struct rx_packet_hdr *rx_pkt_hdr;
	struct rxpd *local_rx_pd;
	int hdr_chop;
	struct ethhdr *eth_hdr;
	/* RFC 1042 LLC/SNAP header prefix used to detect SNAP frames */
	u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

	local_rx_pd = (struct rxpd *) (skb->data);

	rx_pkt_hdr = (void *)local_rx_pd +
		     le16_to_cpu(local_rx_pd->rx_pkt_offset);

	if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
		    rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
		/*
		 *  Replace the 803 header and rfc1042 header (llc/snap) with an
		 *    EthernetII header, keep the src/dst and snap_type
		 *    (ethertype).
		 *  The firmware only passes up SNAP frames converting
		 *    all RX Data from 802.11 to 802.2/LLC/SNAP frames.
		 *  To create the Ethernet II, just move the src, dst address
		 *    right before the snap_type.
		 */
		eth_hdr = (struct ethhdr *)
			((u8 *) &rx_pkt_hdr->eth803_hdr
			 + sizeof(rx_pkt_hdr->eth803_hdr) +
			 sizeof(rx_pkt_hdr->rfc1042_hdr)
			 - sizeof(rx_pkt_hdr->eth803_hdr.h_dest)
			 - sizeof(rx_pkt_hdr->eth803_hdr.h_source)
			 - sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type));

		memcpy(eth_hdr->h_source, rx_pkt_hdr->eth803_hdr.h_source,
		       sizeof(eth_hdr->h_source));
		memcpy(eth_hdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest,
		       sizeof(eth_hdr->h_dest));

		/* Chop off the rxpd + the excess memory from the 802.2/llc/snap
		   header that was removed. */
		hdr_chop = (u8 *) eth_hdr - (u8 *) local_rx_pd;
	} else {
		/* Chop off the rxpd */
		hdr_chop = (u8 *) &rx_pkt_hdr->eth803_hdr -
			(u8 *) local_rx_pd;
	}

	/* Chop off the leading header bytes so the it points to the start of
	   either the reconstructed EthII frame or the 802.2/llc/snap frame */
	skb_pull(skb, hdr_chop);

	if (priv->hs2_enabled &&
	    mwifiex_discard_gratuitous_arp(priv, skb)) {
		dev_dbg(priv->adapter->dev, "Bypassed Gratuitous ARP\n");
		dev_kfree_skb_any(skb);
		return 0;
	}

	priv->rxpd_rate = local_rx_pd->rx_rate;

	priv->rxpd_htinfo = local_rx_pd->ht_info;

	ret = mwifiex_recv_packet(priv, skb);
	if (ret == -1)
		dev_err(priv->adapter->dev, "recv packet failed\n");

	return ret;
}

/*
 * This function processes the received buffer.
 *
 * The function looks into the RxPD and performs sanity tests on the
 * received buffer to ensure its a valid packet, before processing it
 * further. If the packet is determined to be aggregated, it is
 * de-aggregated accordingly. Non-unicast packets are sent directly to
 * the kernel/upper layers. Unicast packets are handed over to the
 * Rx reordering routine if 11n is enabled.
 *
 * The completion callback is called after processing in complete.
 */
int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
				  struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	int ret = 0;
	struct rxpd *local_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	u8 ta[ETH_ALEN];
	u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num;

	local_rx_pd = (struct rxpd *) (skb->data);
	rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type);
	rx_pkt_offset = le16_to_cpu(local_rx_pd->rx_pkt_offset);
	rx_pkt_length = le16_to_cpu(local_rx_pd->rx_pkt_length);
	seq_num = le16_to_cpu(local_rx_pd->seq_num);

	rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset;

	/* Sanity check: the claimed payload must fit inside the skb */
	if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) {
		dev_err(adapter->dev,
			"wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
			skb->len, rx_pkt_offset, rx_pkt_length);
		priv->stats.rx_dropped++;

		if (adapter->if_ops.data_complete)
			adapter->if_ops.data_complete(adapter, skb);
		else
			dev_kfree_skb_any(skb);

		return ret;
	}

	if (rx_pkt_type == PKT_TYPE_AMSDU) {
		struct sk_buff_head list;
		struct sk_buff *rx_skb;

		__skb_queue_head_init(&list);

		skb_pull(skb, rx_pkt_offset);
		skb_trim(skb, rx_pkt_length);

		/* split the A-MSDU into individual 802.3 frames */
		ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
					 priv->wdev->iftype, 0, false);

		while (!skb_queue_empty(&list)) {
			rx_skb = __skb_dequeue(&list);
			ret = mwifiex_recv_packet(priv, rx_skb);
			if (ret == -1)
				dev_err(adapter->dev, "Rx of A-MSDU failed");
		}
		return 0;
	} else if (rx_pkt_type == PKT_TYPE_MGMT) {
		ret = mwifiex_process_mgmt_packet(priv, skb);
		if (ret)
			dev_err(adapter->dev, "Rx of mgmt packet failed");
		dev_kfree_skb_any(skb);
		return ret;
	}

	/*
	 * If the packet is not an unicast packet then send the packet
	 * directly to os. Don't pass thru rx reordering
	 */
	if (!IS_11N_ENABLED(priv) ||
	    memcmp(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN)) {
		mwifiex_process_rx_packet(priv, skb);
		return ret;
	}

	if (mwifiex_queuing_ra_based(priv)) {
		memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
	} else {
		if (rx_pkt_type != PKT_TYPE_BAR)
			priv->rx_seq[local_rx_pd->priority] = seq_num;
		memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address,
		       ETH_ALEN);
	}

	/* Reorder and send to OS */
	ret = mwifiex_11n_rx_reorder_pkt(priv, seq_num, local_rx_pd->priority,
					 ta, (u8) rx_pkt_type, skb);

	if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
		if (adapter->if_ops.data_complete)
			adapter->if_ops.data_complete(adapter, skb);
		else
			dev_kfree_skb_any(skb);
	}

	if (ret)
		priv->stats.rx_dropped++;

	return ret;
}
gpl-2.0
Zoxc/avery-newlib
newlib/libm/mathfp/e_j0.c
165
16123
/* @(#)e_j0.c 5.1 93/09/24 */ /* * ==================================================== * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. * * Developed at SunPro, a Sun Microsystems, Inc. business. * Permission to use, copy, modify, and distribute this * software is freely granted, provided that this notice * is preserved. * ==================================================== */ /* j0(x), y0(x) * Bessel function of the first and second kinds of order zero. * Method -- j0(x): * 1. For tiny x, we use j0(x) = 1 - x^2/4 + x^4/64 - ... * 2. Reduce x to |x| since j0(x)=j0(-x), and * for x in (0,2) * j0(x) = 1-z/4+ z^2*R0/S0, where z = x*x; * (precision: |j0-1+z/4-z^2R0/S0 |<2**-63.67 ) * for x in (2,inf) * j0(x) = sqrt(2/(pi*x))*(p0(x)*cos(x0)-q0(x)*sin(x0)) * where x0 = x-pi/4. It is better to compute sin(x0),cos(x0) * as follow: * cos(x0) = cos(x)cos(pi/4)+sin(x)sin(pi/4) * = 1/sqrt(2) * (cos(x) + sin(x)) * sin(x0) = sin(x)cos(pi/4)-cos(x)sin(pi/4) * = 1/sqrt(2) * (sin(x) - cos(x)) * (To avoid cancellation, use * sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x)) * to compute the worse one.) * * 3 Special cases * j0(nan)= nan * j0(0) = 1 * j0(inf) = 0 * * Method -- y0(x): * 1. For x<2. * Since * y0(x) = 2/pi*(j0(x)*(ln(x/2)+Euler) + x^2/4 - ...) * therefore y0(x)-2/pi*j0(x)*ln(x) is an even function. * We use the following function to approximate y0, * y0(x) = U(z)/V(z) + (2/pi)*(j0(x)*ln(x)), z= x^2 * where * U(z) = u00 + u01*z + ... + u06*z^6 * V(z) = 1 + v01*z + ... + v04*z^4 * with absolute approximation error bounded by 2**-72. * Note: For tiny x, U/V = u0 and j0(x)~1, hence * y0(tiny) = u0 + (2/pi)*ln(tiny), (choose tiny<2**-27) * 2. For x>=2. * y0(x) = sqrt(2/(pi*x))*(p0(x)*cos(x0)+q0(x)*sin(x0)) * where x0 = x-pi/4. It is better to compute sin(x0),cos(x0) * by the method mentioned above. * 3. Special cases: y0(0)=-inf, y0(x<0)=NaN, y0(inf)=0. 
 */

#include "fdlibm.h"

#ifndef _DOUBLE_IS_32BITS

#ifdef __STDC__
static double pzero(double), qzero(double);
#else
static double pzero(), qzero();
#endif

#ifdef __STDC__
static const double
#else
static double
#endif
huge	= 1e300,
one	= 1.0,
invsqrtpi = 5.64189583547756279280e-01, /* 0x3FE20DD7, 0x50429B6D */
tpi	= 6.36619772367581382433e-01,	/* 0x3FE45F30, 0x6DC9C883 */
		/* R0/S0 on [0, 2.00] */
R02  =  1.56249999999999947958e-02, /* 0x3F8FFFFF, 0xFFFFFFFD */
R03  = -1.89979294238854721751e-04, /* 0xBF28E6A5, 0xB61AC6E9 */
R04  =  1.82954049532700665670e-06, /* 0x3EBEB1D1, 0x0C503919 */
R05  = -4.61832688532103189199e-09, /* 0xBE33D5E7, 0x73D63FCE */
S01  =  1.56191029464890010492e-02, /* 0x3F8FFCE8, 0x82C8C2A4 */
S02  =  1.16926784663337450260e-04, /* 0x3F1EA6D2, 0xDD57DBF4 */
S03  =  5.13546550207318111446e-07, /* 0x3EA13B54, 0xCE84D5A9 */
S04  =  1.16614003333790000205e-09; /* 0x3E1408BC, 0xF4745D8F */

#ifdef __STDC__
static const double zero = 0.0;
#else
static double zero = 0.0;
#endif

/* j0(x): Bessel function of the first kind, order zero.
 * Branches on the IEEE-754 high word of x (see the method notes in
 * the file header): NaN/inf, |x| >= 2 (asymptotic form via pzero/qzero),
 * tiny |x| (truncated series), and the rational approximation on (0,2).
 */
#ifdef __STDC__
	double j0(double x)
#else
	double j0(x)
	double x;
#endif
{
	double z, s,c,ss,cc,r,u,v;
	__int32_t hx,ix;

	GET_HIGH_WORD(hx,x);
	ix = hx&0x7fffffff;		/* drop the sign bit: j0 is even */
	if(ix>=0x7ff00000) return one/(x*x);	/* j0(+-inf)=0, j0(nan)=nan */
	x = fabs(x);
	if(ix >= 0x40000000) {	/* |x| >= 2.0 */
		s = sin(x);
		c = cos(x);
		ss = s-c;
		cc = s+c;
		if(ix<0x7fe00000) {  /* make sure x+x not overflow */
		    z = -cos(x+x);
		    /* sin(x)+-cos(x) = -cos(2x)/(sin(x)-+cos(x)): recompute
		     * whichever of ss/cc suffers cancellation. */
		    if ((s*c)<zero) cc = z/ss;
		    else	    ss = z/cc;
		}
	/*
	 * j0(x) = 1/sqrt(pi) * (P(0,x)*cc - Q(0,x)*ss) / sqrt(x)
	 * y0(x) = 1/sqrt(pi) * (P(0,x)*ss + Q(0,x)*cc) / sqrt(x)
	 */
		if(ix>0x48000000) z = (invsqrtpi*cc)/sqrt(x);
		else {
		    u = pzero(x); v = qzero(x);
		    z = invsqrtpi*(u*cc-v*ss)/sqrt(x);
		}
		return z;
	}
	if(ix<0x3f200000) {	/* |x| < 2**-13 */
	    if(huge+x>one) {	/* raise inexact if x != 0 */
		if(ix<0x3e400000) return one;	/* |x|<2**-27 */
		else	      return one - 0.25*x*x;
	    }
	}
	z = x*x;
	r =  z*(R02+z*(R03+z*(R04+z*R05)));
	s =  one+z*(S01+z*(S02+z*(S03+z*S04)));
	if(ix < 0x3FF00000) {	/* |x| < 1.00 */
	    return one + z*(-0.25+(r/s));
	} else {
	    u = 0.5*x;
	    /* (1+u)*(1-u) = 1 - x^2/4 computed without cancellation */
	    return((one+u)*(one-u)+z*(r/s));
	}
}

/* Coefficients of U(z)/V(z), the rational part of y0 on (0,2). */
#ifdef __STDC__
static const double
#else
static double
#endif
u00  = -7.38042951086872317523e-02, /* 0xBFB2E4D6, 0x99CBD01F */
u01  =  1.76666452509181115538e-01, /* 0x3FC69D01, 0x9DE9E3FC */
u02  = -1.38185671945596898896e-02, /* 0xBF8C4CE8, 0xB16CFA97 */
u03  =  3.47453432093683650238e-04, /* 0x3F36C54D, 0x20B29B6B */
u04  = -3.81407053724364161125e-06, /* 0xBECFFEA7, 0x73D25CAD */
u05  =  1.95590137035022920206e-08, /* 0x3E550057, 0x3B4EABD4 */
u06  = -3.98205194132103398453e-11, /* 0xBDC5E43D, 0x693FB3C8 */
v01  =  1.27304834834123699328e-02, /* 0x3F8A1270, 0x91C9C71A */
v02  =  7.60068627350353253702e-05, /* 0x3F13ECBB, 0xF578C6C1 */
v03  =  2.59150851840457805467e-07, /* 0x3E91642D, 0x7FF202FD */
v04  =  4.41110311332675467403e-10; /* 0x3DFE5018, 0x3BD6D9EF */

/* y0(x): Bessel function of the second kind, order zero.
 * Domain: x > 0. y0(0) = -inf, y0(x<0) = NaN, y0(inf) = 0.
 */
#ifdef __STDC__
	double y0(double x)
#else
	double y0(x)
	double x;
#endif
{
	double z, s,c,ss,cc,u,v;
	__int32_t hx,ix,lx;

	EXTRACT_WORDS(hx,lx,x);
	ix = 0x7fffffff&hx;
    /* Y0(NaN) is NaN, y0(-inf) is NaN, y0(inf) is 0 */
	if(ix>=0x7ff00000) return  one/(x+x*x);
	if((ix|lx)==0) return -one/zero;	/* y0(0) = -inf, raise divide-by-zero */
	if(hx<0) return zero/zero;		/* y0(x<0) = NaN, raise invalid */
	if(ix >= 0x40000000) {  /* |x| >= 2.0 */
	/* y0(x) = sqrt(2/(pi*x))*(p0(x)*sin(x0)+q0(x)*cos(x0))
	 * where x0 = x-pi/4
	 *      Better formula:
	 *              cos(x0) = cos(x)cos(pi/4)+sin(x)sin(pi/4)
	 *                      =  1/sqrt(2) * (sin(x) + cos(x))
	 *              sin(x0) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
	 *                      =  1/sqrt(2) * (sin(x) - cos(x))
	 * To avoid cancellation, use
	 *              sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
	 * to compute the worse one.
	 */
		s = sin(x);
		c = cos(x);
		ss = s-c;
		cc = s+c;
	/*
	 * j0(x) = 1/sqrt(pi) * (P(0,x)*cc - Q(0,x)*ss) / sqrt(x)
	 * y0(x) = 1/sqrt(pi) * (P(0,x)*ss + Q(0,x)*cc) / sqrt(x)
	 */
		if(ix<0x7fe00000) {  /* make sure x+x not overflow */
		    z = -cos(x+x);
		    if ((s*c)<zero) cc = z/ss;
		    else            ss = z/cc;
		}
		if(ix>0x48000000) z = (invsqrtpi*ss)/sqrt(x);
		else {
		    u = pzero(x); v = qzero(x);
		    z = invsqrtpi*(u*ss+v*cc)/sqrt(x);
		}
		return z;
	}
	if(ix<=0x3e400000) {	/* x < 2**-27 */
	    return(u00 + tpi*log(x));
	}
	z = x*x;
	u = u00+z*(u01+z*(u02+z*(u03+z*(u04+z*(u05+z*u06)))));
	v = one+z*(v01+z*(v02+z*(v03+z*v04)));
	return(u/v + tpi*(j0(x)*log(x)));
}

/* The asymptotic expansions of pzero is
 *	1 - 9/128 s^2 + 11025/98304 s^4 - ...,	where s = 1/x.
 * For x >= 2, We approximate pzero by
 *	pzero(x) = 1 + (R/S)
 * where  R = pR0 + pR1*s^2 + pR2*s^4 + ... + pR5*s^10
 *	  S = 1 + pS0*s^2 + ... + pS4*s^10
 * and
 *	| pzero(x)-1-R/S | <= 2  ** ( -60.26)
 */
#ifdef __STDC__
static const double pR8[6] = { /* for x in [inf, 8]=1/[0,0.125] */
#else
static double pR8[6] = { /* for x in [inf, 8]=1/[0,0.125] */
#endif
   0.00000000000000000000e+00, /* 0x00000000, 0x00000000 */
  -7.03124999999900357484e-02, /* 0xBFB1FFFF, 0xFFFFFD32 */
  -8.08167041275349795626e+00, /* 0xC02029D0, 0xB44FA779 */
  -2.57063105679704847262e+02, /* 0xC0701102, 0x7B19E863 */
  -2.48521641009428822144e+03, /* 0xC0A36A6E, 0xCD4DCAFC */
  -5.25304380490729545272e+03, /* 0xC0B4850B, 0x36CC643D */
};
#ifdef __STDC__
static const double pS8[5] = {
#else
static double pS8[5] = {
#endif
   1.16534364619668181717e+02, /* 0x405D2233, 0x07A96751 */
   3.83374475364121826715e+03, /* 0x40ADF37D, 0x50596938 */
   4.05978572648472545552e+04, /* 0x40E3D2BB, 0x6EB6B05F */
   1.16752972564375915681e+05, /* 0x40FC810F, 0x8F9FA9BD */
   4.76277284146730962675e+04, /* 0x40E74177, 0x4F2C49DC */
};

#ifdef __STDC__
static const double pR5[6] = { /* for x in [8,4.5454]=1/[0.125,0.22001] */
#else
static double pR5[6] = { /* for x in [8,4.5454]=1/[0.125,0.22001] */
#endif
  -1.14125464691894502584e-11, /* 0xBDA918B1, 0x47E495CC */
  -7.03124940873599280078e-02, /* 0xBFB1FFFF, 0xE69AFBC6 */
  -4.15961064470587782438e+00, /* 0xC010A370, 0xF90C6BBF */
  -6.76747652265167261021e+01, /* 0xC050EB2F, 0x5A7D1783 */
  -3.31231299649172967747e+02, /* 0xC074B3B3, 0x6742CC63 */
  -3.46433388365604912451e+02, /* 0xC075A6EF, 0x28A38BD7 */
};
#ifdef __STDC__
static const double pS5[5] = {
#else
static double pS5[5] = {
#endif
   6.07539382692300335975e+01, /* 0x404E6081, 0x0C98C5DE */
   1.05125230595704579173e+03, /* 0x40906D02, 0x5C7E2864 */
   5.97897094333855784498e+03, /* 0x40B75AF8, 0x8FBE1D60 */
   9.62544514357774460223e+03, /* 0x40C2CCB8, 0xFA76FA38 */
   2.40605815922939109441e+03, /* 0x40A2CC1D, 0xC70BE864 */
};

#ifdef __STDC__
static const double pR3[6] = {/* for x in [4.547,2.8571]=1/[0.2199,0.35001] */
#else
static double pR3[6] = {/* for x in [4.547,2.8571]=1/[0.2199,0.35001] */
#endif
  -2.54704601771951915620e-09, /* 0xBE25E103, 0x6FE1AA86 */
  -7.03119616381481654654e-02, /* 0xBFB1FFF6, 0xF7C0E24B */
  -2.40903221549529611423e+00, /* 0xC00345B2, 0xAEA48074 */
  -2.19659774734883086467e+01, /* 0xC035F74A, 0x4CB94E14 */
  -5.80791704701737572236e+01, /* 0xC04D0A22, 0x420A1A45 */
  -3.14479470594888503854e+01, /* 0xC03F72AC, 0xA892D80F */
};
#ifdef __STDC__
static const double pS3[5] = {
#else
static double pS3[5] = {
#endif
   3.58560338055209726349e+01, /* 0x4041ED92, 0x84077DD3 */
   3.61513983050303863820e+02, /* 0x40769839, 0x464A7C0E */
   1.19360783792111533330e+03, /* 0x4092A66E, 0x6D1061D6 */
   1.12799679856907414432e+03, /* 0x40919FFC, 0xB8C39B7E */
   1.73580930813335754692e+02, /* 0x4065B296, 0xFC379081 */
};

#ifdef __STDC__
static const double pR2[6] = {/* for x in [2.8570,2]=1/[0.3499,0.5] */
#else
static double pR2[6] = {/* for x in [2.8570,2]=1/[0.3499,0.5] */
#endif
  -8.87534333032526411254e-08, /* 0xBE77D316, 0xE927026D */
  -7.03030995483624743247e-02, /* 0xBFB1FF62, 0x495E1E42 */
  -1.45073846780952986357e+00, /* 0xBFF73639, 0x8A24A843 */
  -7.63569613823527770791e+00, /* 0xC01E8AF3, 0xEDAFA7F3 */
  -1.11931668860356747786e+01, /* 0xC02662E6, 0xC5246303 */
  -3.23364579351335335033e+00, /* 0xC009DE81, 0xAF8FE70F */
};
#ifdef __STDC__
static const double pS2[5] = {
#else
static double pS2[5] = {
#endif
   2.22202997532088808441e+01, /* 0x40363865, 0x908B5959 */
   1.36206794218215208048e+02, /* 0x4061069E, 0x0EE8878F */
   2.70470278658083486789e+02, /* 0x4070E786, 0x42EA079B */
   1.53875394208320329881e+02, /* 0x40633C03, 0x3AB6FAFF */
   1.46576176948256193810e+01, /* 0x402D50B3, 0x44391809 */
};

/* pzero(x): P(0,x) for the asymptotic forms of j0/y0, x >= 2.
 * Selects the coefficient pair for the sub-interval containing x,
 * then evaluates the rational approximation in z = 1/x^2.
 */
#ifdef __STDC__
	static double pzero(double x)
#else
	static double pzero(x)
	double x;
#endif
{
#ifdef __STDC__
	const double *p,*q;
#else
	double *p,*q;
#endif
	double z,r,s;
	__int32_t ix;
	GET_HIGH_WORD(ix,x);
	ix &= 0x7fffffff;
	if(ix>=0x40200000)     {p = pR8; q= pS8;}	/* x >= 8 */
	else if(ix>=0x40122E8B){p = pR5; q= pS5;}	/* x >= ~4.5454 */
	else if(ix>=0x4006DB6D){p = pR3; q= pS3;}	/* x >= ~2.8571 */
	else		       {p = pR2; q= pS2;}	/* 2 <= x < ~2.8571 */
	z = one/(x*x);
	r = p[0]+z*(p[1]+z*(p[2]+z*(p[3]+z*(p[4]+z*p[5]))));
	s = one+z*(q[0]+z*(q[1]+z*(q[2]+z*(q[3]+z*q[4]))));
	return one+ r/s;
}

/* For x >= 8, the asymptotic expansions of qzero is
 *	-1/8 s + 75/1024 s^3 - ..., where s = 1/x.
 * We approximate qzero by
 *	qzero(x) = s*(-1.25 + (R/S))
 * where  R = qR0 + qR1*s^2 + qR2*s^4 + ... + qR5*s^10
 *	  S = 1 + qS0*s^2 + ... + qS5*s^12
 * and
 *	| qzero(x)/s +1.25-R/S | <= 2  ** ( -61.22)
 */
#ifdef __STDC__
static const double qR8[6] = { /* for x in [inf, 8]=1/[0,0.125] */
#else
static double qR8[6] = { /* for x in [inf, 8]=1/[0,0.125] */
#endif
   0.00000000000000000000e+00, /* 0x00000000, 0x00000000 */
   7.32421874999935051953e-02, /* 0x3FB2BFFF, 0xFFFFFE2C */
   1.17682064682252693899e+01, /* 0x40278952, 0x5BB334D6 */
   5.57673380256401856059e+02, /* 0x40816D63, 0x15301825 */
   8.85919720756468632317e+03, /* 0x40C14D99, 0x3E18F46D */
   3.70146267776887834771e+04, /* 0x40E212D4, 0x0E901566 */
};
#ifdef __STDC__
static const double qS8[6] = {
#else
static double qS8[6] = {
#endif
   1.63776026895689824414e+02, /* 0x406478D5, 0x365B39BC */
   8.09834494656449805916e+03, /* 0x40BFA258, 0x4E6B0563 */
   1.42538291419120476348e+05, /* 0x41016652, 0x54D38C3F */
   8.03309257119514397345e+05, /* 0x412883DA, 0x83A52B43 */
   8.40501579819060512818e+05, /* 0x4129A66B, 0x28DE0B3D */
  -3.43899293537866615225e+05, /* 0xC114FD6D, 0x2C9530C5 */
};

#ifdef __STDC__
static const double qR5[6] = { /* for x in [8,4.5454]=1/[0.125,0.22001] */
#else
static double qR5[6] = { /* for x in [8,4.5454]=1/[0.125,0.22001] */
#endif
   1.84085963594515531381e-11, /* 0x3DB43D8F, 0x29CC8CD9 */
   7.32421766612684765896e-02, /* 0x3FB2BFFF, 0xD172B04C */
   5.83563508962056953777e+00, /* 0x401757B0, 0xB9953DD3 */
   1.35111577286449829671e+02, /* 0x4060E392, 0x0A8788E9 */
   1.02724376596164097464e+03, /* 0x40900CF9, 0x9DC8C481 */
   1.98997785864605384631e+03, /* 0x409F17E9, 0x53C6E3A6 */
};
#ifdef __STDC__
static const double qS5[6] = {
#else
static double qS5[6] = {
#endif
   8.27766102236537761883e+01, /* 0x4054B1B3, 0xFB5E1543 */
   2.07781416421392987104e+03, /* 0x40A03BA0, 0xDA21C0CE */
   1.88472887785718085070e+04, /* 0x40D267D2, 0x7B591E6D */
   5.67511122894947329769e+04, /* 0x40EBB5E3, 0x97E02372 */
   3.59767538425114471465e+04, /* 0x40E19118, 0x1F7A54A0 */
  -5.35434275601944773371e+03, /* 0xC0B4EA57, 0xBEDBC609 */
};

#ifdef __STDC__
static const double qR3[6] = {/* for x in [4.547,2.8571]=1/[0.2199,0.35001] */
#else
static double qR3[6] = {/* for x in [4.547,2.8571]=1/[0.2199,0.35001] */
#endif
   4.37741014089738620906e-09, /* 0x3E32CD03, 0x6ADECB82 */
   7.32411180042911447163e-02, /* 0x3FB2BFEE, 0x0E8D0842 */
   3.34423137516170720929e+00, /* 0x400AC0FC, 0x61149CF5 */
   4.26218440745412650017e+01, /* 0x40454F98, 0x962DAEDD */
   1.70808091340565596283e+02, /* 0x406559DB, 0xE25EFD1F */
   1.66733948696651168575e+02, /* 0x4064D77C, 0x81FA21E0 */
};
#ifdef __STDC__
static const double qS3[6] = {
#else
static double qS3[6] = {
#endif
   4.87588729724587182091e+01, /* 0x40486122, 0xBFE343A6 */
   7.09689221056606015736e+02, /* 0x40862D83, 0x86544EB3 */
   3.70414822620111362994e+03, /* 0x40ACF04B, 0xE44DFC63 */
   6.46042516752568917582e+03, /* 0x40B93C6C, 0xD7C76A28 */
   2.51633368920368957333e+03, /* 0x40A3A8AA, 0xD94FB1C0 */
  -1.49247451836156386662e+02, /* 0xC062A7EB, 0x201CF40F */
};

#ifdef __STDC__
static const double qR2[6] = {/* for x in [2.8570,2]=1/[0.3499,0.5] */
#else
static double qR2[6] = {/* for x in [2.8570,2]=1/[0.3499,0.5] */
#endif
   1.50444444886983272379e-07, /* 0x3E84313B, 0x54F76BDB */
   7.32234265963079278272e-02, /* 0x3FB2BEC5, 0x3E883E34 */
   1.99819174093815998816e+00, /* 0x3FFFF897, 0xE727779C */
   1.44956029347885735348e+01, /* 0x402CFDBF, 0xAAF96FE5 */
   3.16662317504781540833e+01, /* 0x403FAA8E, 0x29FBDC4A */
   1.62527075710929267416e+01, /* 0x403040B1, 0x71814BB4 */
};
#ifdef __STDC__
static const double qS2[6] = {
#else
static double qS2[6] = {
#endif
   3.03655848355219184498e+01, /* 0x403E5D96, 0xF7C07AED */
   2.69348118608049844624e+02, /* 0x4070D591, 0xE4D14B40 */
   8.44783757595320139444e+02, /* 0x408A6645, 0x22B3BF22 */
   8.82935845112488550512e+02, /* 0x408B977C, 0x9C5CC214 */
   2.12666388511798828631e+02, /* 0x406A9553, 0x0E001365 */
  -5.31095493882666946917e+00, /* 0xC0153E6A, 0xF8B32931 */
};

/* qzero(x): Q(0,x) for the asymptotic forms of j0/y0, x >= 2.
 * Same interval selection as pzero, but the S polynomial is one
 * degree higher and the result is scaled by 1/x.
 */
#ifdef __STDC__
	static double qzero(double x)
#else
	static double qzero(x)
	double x;
#endif
{
#ifdef __STDC__
	const double *p,*q;
#else
	double *p,*q;
#endif
	double s,r,z;
	__int32_t ix;
	GET_HIGH_WORD(ix,x);
	ix &= 0x7fffffff;
	if(ix>=0x40200000)     {p = qR8; q= qS8;}	/* x >= 8 */
	else if(ix>=0x40122E8B){p = qR5; q= qS5;}	/* x >= ~4.5454 */
	else if(ix>=0x4006DB6D){p = qR3; q= qS3;}	/* x >= ~2.8571 */
	else		       {p = qR2; q= qS2;}	/* 2 <= x < ~2.8571 */
	z = one/(x*x);
	r = p[0]+z*(p[1]+z*(p[2]+z*(p[3]+z*(p[4]+z*p[5]))));
	s = one+z*(q[0]+z*(q[1]+z*(q[2]+z*(q[3]+z*(q[4]+z*q[5])))));
	return (-.125 + r/s)/x;
}

#endif /* defined(_DOUBLE_IS_32BITS) */
gpl-2.0
bhadram/linux
samples/bpf/tracex4_kern.c
421
1293
/* Copyright (c) 2015 PLUMgrid, http://plumgrid.com * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. */ #include <linux/ptrace.h> #include <linux/version.h> #include <uapi/linux/bpf.h> #include "bpf_helpers.h" struct pair { u64 val; u64 ip; }; struct bpf_map_def SEC("maps") my_map = { .type = BPF_MAP_TYPE_HASH, .key_size = sizeof(long), .value_size = sizeof(struct pair), .max_entries = 1000000, }; /* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe * example will no longer be meaningful */ SEC("kprobe/kmem_cache_free") int bpf_prog1(struct pt_regs *ctx) { long ptr = PT_REGS_PARM2(ctx); bpf_map_delete_elem(&my_map, &ptr); return 0; } SEC("kretprobe/kmem_cache_alloc_node") int bpf_prog2(struct pt_regs *ctx) { long ptr = PT_REGS_RC(ctx); long ip = 0; /* get ip address of kmem_cache_alloc_node() caller */ bpf_probe_read(&ip, sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip))); struct pair v = { .val = bpf_ktime_get_ns(), .ip = ip, }; bpf_map_update_elem(&my_map, &ptr, &v, BPF_ANY); return 0; } char _license[] SEC("license") = "GPL"; u32 _version SEC("version") = LINUX_VERSION_CODE;
gpl-2.0
arkas/Callisto_kernel_2.6.35
net/sched/sch_drr.c
933
11667
/*
 * net/sched/sch_drr.c         Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* One DRR class: an inner qdisc plus its round-robin quantum/deficit. */
struct drr_class {
	struct Qdisc_class_common	common;
	unsigned int			refcnt;		/* class reference count */
	unsigned int			filter_cnt;	/* filters bound to this class */

	struct gnet_stats_basic_packed		bstats;
	struct gnet_stats_queue		qstats;
	struct gnet_stats_rate_est	rate_est;
	struct list_head		alist;		/* entry in drr_sched.active */
	struct Qdisc			*qdisc;		/* inner qdisc holding the packets */

	u32				quantum;	/* bytes added per round */
	u32				deficit;	/* remaining byte budget this round */
};

/* Scheduler state: the list of backlogged classes and the class hash. */
struct drr_sched {
	struct list_head		active;
	struct tcf_proto		*filter_list;
	struct Qdisc_class_hash		clhash;
};

/* Look up a class by classid, or NULL. */
static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct drr_class, common);
}

/* Flush a class's inner qdisc and propagate the qlen change upwards. */
static void drr_purge_queue(struct drr_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};

/* Create a new class or change an existing one (tc class add/change). */
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0)
			return -EINVAL;
	} else
		/* default quantum: one MTU worth of bytes per round */
		quantum = psched_mtu(qdisc_dev(sch));

	if (cl != NULL) {
		/* change of an existing class */
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);

		return 0;
	}

	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->refcnt	= 1;
	cl->common.classid = classid;
	cl->quantum	= quantum;
	cl->qdisc	= qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					    &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
					    qdisc_root_sleeping_lock(sch),
					    tca[TCA_RATE]);
		if (err) {
			qdisc_destroy(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

/* Final teardown of a class once its refcount drops to zero. */
static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_destroy(cl->qdisc);
	kfree(cl);
}

static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	drr_purge_queue(cl);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}

/* Take a reference on the class identified by classid. */
static unsigned long drr_get_class(struct Qdisc *sch, u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

/* Drop a reference; destroy the class on the last put. */
static void drr_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (--cl->refcnt == 0)
		drr_destroy_class(sch, cl);
}

/* Filters may only be attached to the qdisc itself, not to classes. */
static struct tcf_proto **drr_tcf_chain(struct Qdisc *sch, unsigned long cl)
{
	struct drr_sched *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}

static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	cl->filter_cnt--;
}

/* Replace a class's inner qdisc (tc qdisc graft). */
static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	drr_purge_queue(cl);
	*old = cl->qdisc;
	cl->qdisc = new;
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	return cl->qdisc;
}

/* Called when the inner qdisc's backlog drained; leave the active list. */
static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->qdisc->q.qlen == 0)
		list_del(&cl->alist);
}

static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT_U32(skb, TCA_DRR_QUANTUM, cl->quantum);
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	if (cl->qdisc->q.qlen) {
		xstats.deficit = cl->deficit;
		cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;
	}

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

/* Iterate over all classes, honoring the walker's skip/count/stop. */
static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *n;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

/* Map an skb to its class via skb->priority or the filter chain. */
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through: queued/stolen also return NULL */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}

static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int len;
	int err;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}

	len = qdisc_pkt_len(skb);
	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			sch->qstats.drops++;
		}
		return err;
	}

	/* first packet of a previously empty class: join the round robin
	 * with a fresh deficit of one quantum */
	if (cl->qdisc->q.qlen == 1) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	cl->bstats.packets++;
	cl->bstats.bytes += len;
	sch->bstats.packets++;
	sch->bstats.bytes += len;

	sch->q.qlen++;
	return err;
}

static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL)
			goto out;

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			/* enough budget: dequeue from this class */
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);
			sch->q.qlen--;
			return skb;
		}

		/* budget exhausted: add a quantum and rotate to the tail */
		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}

/* Drop one packet from the first active class able to drop. */
static unsigned int drr_drop(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->active, alist) {
		if (cl->qdisc->ops->drop) {
			len = cl->qdisc->ops->drop(cl->qdisc);
			if (len > 0) {
				sch->q.qlen--;
				if (cl->qdisc->q.qlen == 0)
					list_del(&cl->alist);
				return len;
			}
		}
	}
	return 0;
}

static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct drr_sched *q = qdisc_priv(sch);
	int err;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	INIT_LIST_HEAD(&q->active);
	return 0;
}

static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
	sch->q.qlen = 0;
}

static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *n, *next;
	unsigned int i;

	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.get		= drr_get_class,
	.put		= drr_put_class,
	.tcf_chain	= drr_tcf_chain,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};

static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= drr_drop,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};

static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}

static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}

module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");
gpl-2.0
wolverine2k/htc7x30-3.0
drivers/i2c/busses/i2c-sis630.c
1701
13922
/* Copyright (c) 2002,2003 Alexander Malysh <amalysh@web.de> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Changes: 24.08.2002 Fixed the typo in sis630_access (Thanks to Mark M. Hoffman) Changed sis630_transaction.(Thanks to Mark M. Hoffman) 18.09.2002 Added SIS730 as supported. 21.09.2002 Added high_clock module option.If this option is set used Host Master Clock 56KHz (default 14KHz).For now we save old Host Master Clock and after transaction completed restore (otherwise it's confuse BIOS and hung Machine). 24.09.2002 Fixed typo in sis630_access Fixed logical error by restoring of Host Master Clock 31.07.2003 Added block data read/write support. */ /* Status: beta Supports: SIS 630 SIS 730 Note: we assume there can only be one device, with one SMBus interface. 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/acpi.h> #include <linux/io.h> /* SIS630 SMBus registers */ #define SMB_STS 0x80 /* status */ #define SMB_EN 0x81 /* status enable */ #define SMB_CNT 0x82 #define SMBHOST_CNT 0x83 #define SMB_ADDR 0x84 #define SMB_CMD 0x85 #define SMB_PCOUNT 0x86 /* processed count */ #define SMB_COUNT 0x87 #define SMB_BYTE 0x88 /* ~0x8F data byte field */ #define SMBDEV_ADDR 0x90 #define SMB_DB0 0x91 #define SMB_DB1 0x92 #define SMB_SAA 0x93 /* register count for request_region */ #define SIS630_SMB_IOREGION 20 /* PCI address constants */ /* acpi base address register */ #define SIS630_ACPI_BASE_REG 0x74 /* bios control register */ #define SIS630_BIOS_CTL_REG 0x40 /* Other settings */ #define MAX_TIMEOUT 500 /* SIS630 constants */ #define SIS630_QUICK 0x00 #define SIS630_BYTE 0x01 #define SIS630_BYTE_DATA 0x02 #define SIS630_WORD_DATA 0x03 #define SIS630_PCALL 0x04 #define SIS630_BLOCK_DATA 0x05 static struct pci_driver sis630_driver; /* insmod parameters */ static int high_clock; static int force; module_param(high_clock, bool, 0); MODULE_PARM_DESC(high_clock, "Set Host Master Clock to 56KHz (default 14KHz)."); module_param(force, bool, 0); MODULE_PARM_DESC(force, "Forcibly enable the SIS630. DANGEROUS!"); /* acpi base address */ static unsigned short acpi_base; /* supported chips */ static int supported[] = { PCI_DEVICE_ID_SI_630, PCI_DEVICE_ID_SI_730, 0 /* terminates the list */ }; static inline u8 sis630_read(u8 reg) { return inb(acpi_base + reg); } static inline void sis630_write(u8 reg, u8 data) { outb(data, acpi_base + reg); } static int sis630_transaction_start(struct i2c_adapter *adap, int size, u8 *oldclock) { int temp; /* Make sure the SMBus host is ready to start transmitting. 
*/ if ((temp = sis630_read(SMB_CNT) & 0x03) != 0x00) { dev_dbg(&adap->dev, "SMBus busy (%02x).Resetting...\n",temp); /* kill smbus transaction */ sis630_write(SMBHOST_CNT, 0x20); if ((temp = sis630_read(SMB_CNT) & 0x03) != 0x00) { dev_dbg(&adap->dev, "Failed! (%02x)\n", temp); return -EBUSY; } else { dev_dbg(&adap->dev, "Successful!\n"); } } /* save old clock, so we can prevent machine for hung */ *oldclock = sis630_read(SMB_CNT); dev_dbg(&adap->dev, "saved clock 0x%02x\n", *oldclock); /* disable timeout interrupt , set Host Master Clock to 56KHz if requested */ if (high_clock) sis630_write(SMB_CNT, 0x20); else sis630_write(SMB_CNT, (*oldclock & ~0x40)); /* clear all sticky bits */ temp = sis630_read(SMB_STS); sis630_write(SMB_STS, temp & 0x1e); /* start the transaction by setting bit 4 and size */ sis630_write(SMBHOST_CNT,0x10 | (size & 0x07)); return 0; } static int sis630_transaction_wait(struct i2c_adapter *adap, int size) { int temp, result = 0, timeout = 0; /* We will always wait for a fraction of a second! */ do { msleep(1); temp = sis630_read(SMB_STS); /* check if block transmitted */ if (size == SIS630_BLOCK_DATA && (temp & 0x10)) break; } while (!(temp & 0x0e) && (timeout++ < MAX_TIMEOUT)); /* If the SMBus is still busy, we give up */ if (timeout > MAX_TIMEOUT) { dev_dbg(&adap->dev, "SMBus Timeout!\n"); result = -ETIMEDOUT; } if (temp & 0x02) { dev_dbg(&adap->dev, "Error: Failed bus transaction\n"); result = -ENXIO; } if (temp & 0x04) { dev_err(&adap->dev, "Bus collision!\n"); result = -EIO; /* TBD: Datasheet say: the software should clear this bit and restart SMBUS operation. Should we do it or user start request again? 
*/ } return result; } static void sis630_transaction_end(struct i2c_adapter *adap, u8 oldclock) { int temp = 0; /* clear all status "sticky" bits */ sis630_write(SMB_STS, temp); dev_dbg(&adap->dev, "SMB_CNT before clock restore 0x%02x\n", sis630_read(SMB_CNT)); /* * restore old Host Master Clock if high_clock is set * and oldclock was not 56KHz */ if (high_clock && !(oldclock & 0x20)) sis630_write(SMB_CNT,(sis630_read(SMB_CNT) & ~0x20)); dev_dbg(&adap->dev, "SMB_CNT after clock restore 0x%02x\n", sis630_read(SMB_CNT)); } static int sis630_transaction(struct i2c_adapter *adap, int size) { int result = 0; u8 oldclock = 0; result = sis630_transaction_start(adap, size, &oldclock); if (!result) { result = sis630_transaction_wait(adap, size); sis630_transaction_end(adap, oldclock); } return result; } static int sis630_block_data(struct i2c_adapter *adap, union i2c_smbus_data *data, int read_write) { int i, len = 0, rc = 0; u8 oldclock = 0; if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; if (len < 0) len = 0; else if (len > 32) len = 32; sis630_write(SMB_COUNT, len); for (i=1; i <= len; i++) { dev_dbg(&adap->dev, "set data 0x%02x\n", data->block[i]); /* set data */ sis630_write(SMB_BYTE+(i-1)%8, data->block[i]); if (i==8 || (len<8 && i==len)) { dev_dbg(&adap->dev, "start trans len=%d i=%d\n",len ,i); /* first transaction */ rc = sis630_transaction_start(adap, SIS630_BLOCK_DATA, &oldclock); if (rc) return rc; } else if ((i-1)%8 == 7 || i==len) { dev_dbg(&adap->dev, "trans_wait len=%d i=%d\n",len,i); if (i>8) { dev_dbg(&adap->dev, "clear smbary_sts len=%d i=%d\n",len,i); /* If this is not first transaction, we must clear sticky bit. 
clear SMBARY_STS */ sis630_write(SMB_STS,0x10); } rc = sis630_transaction_wait(adap, SIS630_BLOCK_DATA); if (rc) { dev_dbg(&adap->dev, "trans_wait failed\n"); break; } } } } else { /* read request */ data->block[0] = len = 0; rc = sis630_transaction_start(adap, SIS630_BLOCK_DATA, &oldclock); if (rc) return rc; do { rc = sis630_transaction_wait(adap, SIS630_BLOCK_DATA); if (rc) { dev_dbg(&adap->dev, "trans_wait failed\n"); break; } /* if this first transaction then read byte count */ if (len == 0) data->block[0] = sis630_read(SMB_COUNT); /* just to be sure */ if (data->block[0] > 32) data->block[0] = 32; dev_dbg(&adap->dev, "block data read len=0x%x\n", data->block[0]); for (i=0; i < 8 && len < data->block[0]; i++,len++) { dev_dbg(&adap->dev, "read i=%d len=%d\n", i, len); data->block[len+1] = sis630_read(SMB_BYTE+i); } dev_dbg(&adap->dev, "clear smbary_sts len=%d i=%d\n",len,i); /* clear SMBARY_STS */ sis630_write(SMB_STS,0x10); } while(len < data->block[0]); } sis630_transaction_end(adap, oldclock); return rc; } /* Return negative errno on error. 
*/ static s32 sis630_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { int status; switch (size) { case I2C_SMBUS_QUICK: sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); size = SIS630_QUICK; break; case I2C_SMBUS_BYTE: sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); if (read_write == I2C_SMBUS_WRITE) sis630_write(SMB_CMD, command); size = SIS630_BYTE; break; case I2C_SMBUS_BYTE_DATA: sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); sis630_write(SMB_CMD, command); if (read_write == I2C_SMBUS_WRITE) sis630_write(SMB_BYTE, data->byte); size = SIS630_BYTE_DATA; break; case I2C_SMBUS_PROC_CALL: case I2C_SMBUS_WORD_DATA: sis630_write(SMB_ADDR,((addr & 0x7f) << 1) | (read_write & 0x01)); sis630_write(SMB_CMD, command); if (read_write == I2C_SMBUS_WRITE) { sis630_write(SMB_BYTE, data->word & 0xff); sis630_write(SMB_BYTE + 1,(data->word & 0xff00) >> 8); } size = (size == I2C_SMBUS_PROC_CALL ? 
SIS630_PCALL : SIS630_WORD_DATA); break; case I2C_SMBUS_BLOCK_DATA: sis630_write(SMB_ADDR,((addr & 0x7f) << 1) | (read_write & 0x01)); sis630_write(SMB_CMD, command); size = SIS630_BLOCK_DATA; return sis630_block_data(adap, data, read_write); default: dev_warn(&adap->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } status = sis630_transaction(adap, size); if (status) return status; if ((size != SIS630_PCALL) && ((read_write == I2C_SMBUS_WRITE) || (size == SIS630_QUICK))) { return 0; } switch(size) { case SIS630_BYTE: case SIS630_BYTE_DATA: data->byte = sis630_read(SMB_BYTE); break; case SIS630_PCALL: case SIS630_WORD_DATA: data->word = sis630_read(SMB_BYTE) + (sis630_read(SMB_BYTE + 1) << 8); break; } return 0; } static u32 sis630_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_BLOCK_DATA; } static int __devinit sis630_setup(struct pci_dev *sis630_dev) { unsigned char b; struct pci_dev *dummy = NULL; int retval, i; /* check for supported SiS devices */ for (i=0; supported[i] > 0 ; i++) { if ((dummy = pci_get_device(PCI_VENDOR_ID_SI, supported[i], dummy))) break; /* found */ } if (dummy) { pci_dev_put(dummy); } else if (force) { dev_err(&sis630_dev->dev, "WARNING: Can't detect SIS630 compatible device, but " "loading because of force option enabled\n"); } else { return -ENODEV; } /* Enable ACPI first , so we can accsess reg 74-75 in acpi io space and read acpi base addr */ if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG,&b)) { dev_err(&sis630_dev->dev, "Error: Can't read bios ctl reg\n"); retval = -ENODEV; goto exit; } /* if ACPI already enabled , do nothing */ if (!(b & 0x80) && pci_write_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, b | 0x80)) { dev_err(&sis630_dev->dev, "Error: Can't enable ACPI\n"); retval = -ENODEV; goto exit; } /* Determine the ACPI base address */ if 
(pci_read_config_word(sis630_dev,SIS630_ACPI_BASE_REG,&acpi_base)) { dev_err(&sis630_dev->dev, "Error: Can't determine ACPI base address\n"); retval = -ENODEV; goto exit; } dev_dbg(&sis630_dev->dev, "ACPI base at 0x%04x\n", acpi_base); retval = acpi_check_region(acpi_base + SMB_STS, SIS630_SMB_IOREGION, sis630_driver.name); if (retval) goto exit; /* Everything is happy, let's grab the memory and set things up. */ if (!request_region(acpi_base + SMB_STS, SIS630_SMB_IOREGION, sis630_driver.name)) { dev_err(&sis630_dev->dev, "SMBus registers 0x%04x-0x%04x already " "in use!\n", acpi_base + SMB_STS, acpi_base + SMB_SAA); retval = -EBUSY; goto exit; } retval = 0; exit: if (retval) acpi_base = 0; return retval; } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = sis630_access, .functionality = sis630_func, }; static struct i2c_adapter sis630_adapter = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo = &smbus_algorithm, }; static const struct pci_device_id sis630_ids[] __devinitconst = { { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) }, { 0, } }; MODULE_DEVICE_TABLE (pci, sis630_ids); static int __devinit sis630_probe(struct pci_dev *dev, const struct pci_device_id *id) { if (sis630_setup(dev)) { dev_err(&dev->dev, "SIS630 comp. 
bus not detected, module not inserted.\n"); return -ENODEV; } /* set up the sysfs linkage to our parent device */ sis630_adapter.dev.parent = &dev->dev; snprintf(sis630_adapter.name, sizeof(sis630_adapter.name), "SMBus SIS630 adapter at %04x", acpi_base + SMB_STS); return i2c_add_adapter(&sis630_adapter); } static void __devexit sis630_remove(struct pci_dev *dev) { if (acpi_base) { i2c_del_adapter(&sis630_adapter); release_region(acpi_base + SMB_STS, SIS630_SMB_IOREGION); acpi_base = 0; } } static struct pci_driver sis630_driver = { .name = "sis630_smbus", .id_table = sis630_ids, .probe = sis630_probe, .remove = __devexit_p(sis630_remove), }; static int __init i2c_sis630_init(void) { return pci_register_driver(&sis630_driver); } static void __exit i2c_sis630_exit(void) { pci_unregister_driver(&sis630_driver); } MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alexander Malysh <amalysh@web.de>"); MODULE_DESCRIPTION("SIS630 SMBus driver"); module_init(i2c_sis630_init); module_exit(i2c_sis630_exit);
gpl-2.0
dkthompson/MSM-8x60-ICS
drivers/i2c/busses/i2c-sis630.c
1701
13922
/* Copyright (c) 2002,2003 Alexander Malysh <amalysh@web.de> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Changes: 24.08.2002 Fixed the typo in sis630_access (Thanks to Mark M. Hoffman) Changed sis630_transaction.(Thanks to Mark M. Hoffman) 18.09.2002 Added SIS730 as supported. 21.09.2002 Added high_clock module option.If this option is set used Host Master Clock 56KHz (default 14KHz).For now we save old Host Master Clock and after transaction completed restore (otherwise it's confuse BIOS and hung Machine). 24.09.2002 Fixed typo in sis630_access Fixed logical error by restoring of Host Master Clock 31.07.2003 Added block data read/write support. */ /* Status: beta Supports: SIS 630 SIS 730 Note: we assume there can only be one device, with one SMBus interface. 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/acpi.h> #include <linux/io.h> /* SIS630 SMBus registers */ #define SMB_STS 0x80 /* status */ #define SMB_EN 0x81 /* status enable */ #define SMB_CNT 0x82 #define SMBHOST_CNT 0x83 #define SMB_ADDR 0x84 #define SMB_CMD 0x85 #define SMB_PCOUNT 0x86 /* processed count */ #define SMB_COUNT 0x87 #define SMB_BYTE 0x88 /* ~0x8F data byte field */ #define SMBDEV_ADDR 0x90 #define SMB_DB0 0x91 #define SMB_DB1 0x92 #define SMB_SAA 0x93 /* register count for request_region */ #define SIS630_SMB_IOREGION 20 /* PCI address constants */ /* acpi base address register */ #define SIS630_ACPI_BASE_REG 0x74 /* bios control register */ #define SIS630_BIOS_CTL_REG 0x40 /* Other settings */ #define MAX_TIMEOUT 500 /* SIS630 constants */ #define SIS630_QUICK 0x00 #define SIS630_BYTE 0x01 #define SIS630_BYTE_DATA 0x02 #define SIS630_WORD_DATA 0x03 #define SIS630_PCALL 0x04 #define SIS630_BLOCK_DATA 0x05 static struct pci_driver sis630_driver; /* insmod parameters */ static int high_clock; static int force; module_param(high_clock, bool, 0); MODULE_PARM_DESC(high_clock, "Set Host Master Clock to 56KHz (default 14KHz)."); module_param(force, bool, 0); MODULE_PARM_DESC(force, "Forcibly enable the SIS630. DANGEROUS!"); /* acpi base address */ static unsigned short acpi_base; /* supported chips */ static int supported[] = { PCI_DEVICE_ID_SI_630, PCI_DEVICE_ID_SI_730, 0 /* terminates the list */ }; static inline u8 sis630_read(u8 reg) { return inb(acpi_base + reg); } static inline void sis630_write(u8 reg, u8 data) { outb(data, acpi_base + reg); } static int sis630_transaction_start(struct i2c_adapter *adap, int size, u8 *oldclock) { int temp; /* Make sure the SMBus host is ready to start transmitting. 
*/ if ((temp = sis630_read(SMB_CNT) & 0x03) != 0x00) { dev_dbg(&adap->dev, "SMBus busy (%02x).Resetting...\n",temp); /* kill smbus transaction */ sis630_write(SMBHOST_CNT, 0x20); if ((temp = sis630_read(SMB_CNT) & 0x03) != 0x00) { dev_dbg(&adap->dev, "Failed! (%02x)\n", temp); return -EBUSY; } else { dev_dbg(&adap->dev, "Successful!\n"); } } /* save old clock, so we can prevent machine for hung */ *oldclock = sis630_read(SMB_CNT); dev_dbg(&adap->dev, "saved clock 0x%02x\n", *oldclock); /* disable timeout interrupt , set Host Master Clock to 56KHz if requested */ if (high_clock) sis630_write(SMB_CNT, 0x20); else sis630_write(SMB_CNT, (*oldclock & ~0x40)); /* clear all sticky bits */ temp = sis630_read(SMB_STS); sis630_write(SMB_STS, temp & 0x1e); /* start the transaction by setting bit 4 and size */ sis630_write(SMBHOST_CNT,0x10 | (size & 0x07)); return 0; } static int sis630_transaction_wait(struct i2c_adapter *adap, int size) { int temp, result = 0, timeout = 0; /* We will always wait for a fraction of a second! */ do { msleep(1); temp = sis630_read(SMB_STS); /* check if block transmitted */ if (size == SIS630_BLOCK_DATA && (temp & 0x10)) break; } while (!(temp & 0x0e) && (timeout++ < MAX_TIMEOUT)); /* If the SMBus is still busy, we give up */ if (timeout > MAX_TIMEOUT) { dev_dbg(&adap->dev, "SMBus Timeout!\n"); result = -ETIMEDOUT; } if (temp & 0x02) { dev_dbg(&adap->dev, "Error: Failed bus transaction\n"); result = -ENXIO; } if (temp & 0x04) { dev_err(&adap->dev, "Bus collision!\n"); result = -EIO; /* TBD: Datasheet say: the software should clear this bit and restart SMBUS operation. Should we do it or user start request again? 
*/ } return result; } static void sis630_transaction_end(struct i2c_adapter *adap, u8 oldclock) { int temp = 0; /* clear all status "sticky" bits */ sis630_write(SMB_STS, temp); dev_dbg(&adap->dev, "SMB_CNT before clock restore 0x%02x\n", sis630_read(SMB_CNT)); /* * restore old Host Master Clock if high_clock is set * and oldclock was not 56KHz */ if (high_clock && !(oldclock & 0x20)) sis630_write(SMB_CNT,(sis630_read(SMB_CNT) & ~0x20)); dev_dbg(&adap->dev, "SMB_CNT after clock restore 0x%02x\n", sis630_read(SMB_CNT)); } static int sis630_transaction(struct i2c_adapter *adap, int size) { int result = 0; u8 oldclock = 0; result = sis630_transaction_start(adap, size, &oldclock); if (!result) { result = sis630_transaction_wait(adap, size); sis630_transaction_end(adap, oldclock); } return result; } static int sis630_block_data(struct i2c_adapter *adap, union i2c_smbus_data *data, int read_write) { int i, len = 0, rc = 0; u8 oldclock = 0; if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; if (len < 0) len = 0; else if (len > 32) len = 32; sis630_write(SMB_COUNT, len); for (i=1; i <= len; i++) { dev_dbg(&adap->dev, "set data 0x%02x\n", data->block[i]); /* set data */ sis630_write(SMB_BYTE+(i-1)%8, data->block[i]); if (i==8 || (len<8 && i==len)) { dev_dbg(&adap->dev, "start trans len=%d i=%d\n",len ,i); /* first transaction */ rc = sis630_transaction_start(adap, SIS630_BLOCK_DATA, &oldclock); if (rc) return rc; } else if ((i-1)%8 == 7 || i==len) { dev_dbg(&adap->dev, "trans_wait len=%d i=%d\n",len,i); if (i>8) { dev_dbg(&adap->dev, "clear smbary_sts len=%d i=%d\n",len,i); /* If this is not first transaction, we must clear sticky bit. 
clear SMBARY_STS */ sis630_write(SMB_STS,0x10); } rc = sis630_transaction_wait(adap, SIS630_BLOCK_DATA); if (rc) { dev_dbg(&adap->dev, "trans_wait failed\n"); break; } } } } else { /* read request */ data->block[0] = len = 0; rc = sis630_transaction_start(adap, SIS630_BLOCK_DATA, &oldclock); if (rc) return rc; do { rc = sis630_transaction_wait(adap, SIS630_BLOCK_DATA); if (rc) { dev_dbg(&adap->dev, "trans_wait failed\n"); break; } /* if this first transaction then read byte count */ if (len == 0) data->block[0] = sis630_read(SMB_COUNT); /* just to be sure */ if (data->block[0] > 32) data->block[0] = 32; dev_dbg(&adap->dev, "block data read len=0x%x\n", data->block[0]); for (i=0; i < 8 && len < data->block[0]; i++,len++) { dev_dbg(&adap->dev, "read i=%d len=%d\n", i, len); data->block[len+1] = sis630_read(SMB_BYTE+i); } dev_dbg(&adap->dev, "clear smbary_sts len=%d i=%d\n",len,i); /* clear SMBARY_STS */ sis630_write(SMB_STS,0x10); } while(len < data->block[0]); } sis630_transaction_end(adap, oldclock); return rc; } /* Return negative errno on error. 
*/ static s32 sis630_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { int status; switch (size) { case I2C_SMBUS_QUICK: sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); size = SIS630_QUICK; break; case I2C_SMBUS_BYTE: sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); if (read_write == I2C_SMBUS_WRITE) sis630_write(SMB_CMD, command); size = SIS630_BYTE; break; case I2C_SMBUS_BYTE_DATA: sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); sis630_write(SMB_CMD, command); if (read_write == I2C_SMBUS_WRITE) sis630_write(SMB_BYTE, data->byte); size = SIS630_BYTE_DATA; break; case I2C_SMBUS_PROC_CALL: case I2C_SMBUS_WORD_DATA: sis630_write(SMB_ADDR,((addr & 0x7f) << 1) | (read_write & 0x01)); sis630_write(SMB_CMD, command); if (read_write == I2C_SMBUS_WRITE) { sis630_write(SMB_BYTE, data->word & 0xff); sis630_write(SMB_BYTE + 1,(data->word & 0xff00) >> 8); } size = (size == I2C_SMBUS_PROC_CALL ? 
SIS630_PCALL : SIS630_WORD_DATA); break; case I2C_SMBUS_BLOCK_DATA: sis630_write(SMB_ADDR,((addr & 0x7f) << 1) | (read_write & 0x01)); sis630_write(SMB_CMD, command); size = SIS630_BLOCK_DATA; return sis630_block_data(adap, data, read_write); default: dev_warn(&adap->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } status = sis630_transaction(adap, size); if (status) return status; if ((size != SIS630_PCALL) && ((read_write == I2C_SMBUS_WRITE) || (size == SIS630_QUICK))) { return 0; } switch(size) { case SIS630_BYTE: case SIS630_BYTE_DATA: data->byte = sis630_read(SMB_BYTE); break; case SIS630_PCALL: case SIS630_WORD_DATA: data->word = sis630_read(SMB_BYTE) + (sis630_read(SMB_BYTE + 1) << 8); break; } return 0; } static u32 sis630_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_BLOCK_DATA; } static int __devinit sis630_setup(struct pci_dev *sis630_dev) { unsigned char b; struct pci_dev *dummy = NULL; int retval, i; /* check for supported SiS devices */ for (i=0; supported[i] > 0 ; i++) { if ((dummy = pci_get_device(PCI_VENDOR_ID_SI, supported[i], dummy))) break; /* found */ } if (dummy) { pci_dev_put(dummy); } else if (force) { dev_err(&sis630_dev->dev, "WARNING: Can't detect SIS630 compatible device, but " "loading because of force option enabled\n"); } else { return -ENODEV; } /* Enable ACPI first , so we can accsess reg 74-75 in acpi io space and read acpi base addr */ if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG,&b)) { dev_err(&sis630_dev->dev, "Error: Can't read bios ctl reg\n"); retval = -ENODEV; goto exit; } /* if ACPI already enabled , do nothing */ if (!(b & 0x80) && pci_write_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, b | 0x80)) { dev_err(&sis630_dev->dev, "Error: Can't enable ACPI\n"); retval = -ENODEV; goto exit; } /* Determine the ACPI base address */ if 
(pci_read_config_word(sis630_dev,SIS630_ACPI_BASE_REG,&acpi_base)) { dev_err(&sis630_dev->dev, "Error: Can't determine ACPI base address\n"); retval = -ENODEV; goto exit; } dev_dbg(&sis630_dev->dev, "ACPI base at 0x%04x\n", acpi_base); retval = acpi_check_region(acpi_base + SMB_STS, SIS630_SMB_IOREGION, sis630_driver.name); if (retval) goto exit; /* Everything is happy, let's grab the memory and set things up. */ if (!request_region(acpi_base + SMB_STS, SIS630_SMB_IOREGION, sis630_driver.name)) { dev_err(&sis630_dev->dev, "SMBus registers 0x%04x-0x%04x already " "in use!\n", acpi_base + SMB_STS, acpi_base + SMB_SAA); retval = -EBUSY; goto exit; } retval = 0; exit: if (retval) acpi_base = 0; return retval; } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = sis630_access, .functionality = sis630_func, }; static struct i2c_adapter sis630_adapter = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo = &smbus_algorithm, }; static const struct pci_device_id sis630_ids[] __devinitconst = { { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) }, { 0, } }; MODULE_DEVICE_TABLE (pci, sis630_ids); static int __devinit sis630_probe(struct pci_dev *dev, const struct pci_device_id *id) { if (sis630_setup(dev)) { dev_err(&dev->dev, "SIS630 comp. 
bus not detected, module not inserted.\n"); return -ENODEV; } /* set up the sysfs linkage to our parent device */ sis630_adapter.dev.parent = &dev->dev; snprintf(sis630_adapter.name, sizeof(sis630_adapter.name), "SMBus SIS630 adapter at %04x", acpi_base + SMB_STS); return i2c_add_adapter(&sis630_adapter); } static void __devexit sis630_remove(struct pci_dev *dev) { if (acpi_base) { i2c_del_adapter(&sis630_adapter); release_region(acpi_base + SMB_STS, SIS630_SMB_IOREGION); acpi_base = 0; } } static struct pci_driver sis630_driver = { .name = "sis630_smbus", .id_table = sis630_ids, .probe = sis630_probe, .remove = __devexit_p(sis630_remove), }; static int __init i2c_sis630_init(void) { return pci_register_driver(&sis630_driver); } static void __exit i2c_sis630_exit(void) { pci_unregister_driver(&sis630_driver); } MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alexander Malysh <amalysh@web.de>"); MODULE_DESCRIPTION("SIS630 SMBus driver"); module_init(i2c_sis630_init); module_exit(i2c_sis630_exit);
gpl-2.0
Ander-Alvarez/android_kernel_motorola_msm8916
drivers/spi/spi-xcomm.c
2213
6330
/* * Analog Devices AD-FMCOMMS1-EBZ board I2C-SPI bridge driver * * Copyright 2012 Analog Devices Inc. * Author: Lars-Peter Clausen <lars@metafoo.de> * * Licensed under the GPL-2 or later. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/spi/spi.h> #include <asm/unaligned.h> #define SPI_XCOMM_SETTINGS_LEN_OFFSET 10 #define SPI_XCOMM_SETTINGS_3WIRE BIT(6) #define SPI_XCOMM_SETTINGS_CS_HIGH BIT(5) #define SPI_XCOMM_SETTINGS_SAMPLE_END BIT(4) #define SPI_XCOMM_SETTINGS_CPHA BIT(3) #define SPI_XCOMM_SETTINGS_CPOL BIT(2) #define SPI_XCOMM_SETTINGS_CLOCK_DIV_MASK 0x3 #define SPI_XCOMM_SETTINGS_CLOCK_DIV_64 0x2 #define SPI_XCOMM_SETTINGS_CLOCK_DIV_16 0x1 #define SPI_XCOMM_SETTINGS_CLOCK_DIV_4 0x0 #define SPI_XCOMM_CMD_UPDATE_CONFIG 0x03 #define SPI_XCOMM_CMD_WRITE 0x04 #define SPI_XCOMM_CLOCK 48000000 struct spi_xcomm { struct i2c_client *i2c; uint16_t settings; uint16_t chipselect; unsigned int current_speed; uint8_t buf[63]; }; static int spi_xcomm_sync_config(struct spi_xcomm *spi_xcomm, unsigned int len) { uint16_t settings; uint8_t *buf = spi_xcomm->buf; settings = spi_xcomm->settings; settings |= len << SPI_XCOMM_SETTINGS_LEN_OFFSET; buf[0] = SPI_XCOMM_CMD_UPDATE_CONFIG; put_unaligned_be16(settings, &buf[1]); put_unaligned_be16(spi_xcomm->chipselect, &buf[3]); return i2c_master_send(spi_xcomm->i2c, buf, 5); } static void spi_xcomm_chipselect(struct spi_xcomm *spi_xcomm, struct spi_device *spi, int is_active) { unsigned long cs = spi->chip_select; uint16_t chipselect = spi_xcomm->chipselect; if (is_active) chipselect |= BIT(cs); else chipselect &= ~BIT(cs); spi_xcomm->chipselect = chipselect; } static int spi_xcomm_setup_transfer(struct spi_xcomm *spi_xcomm, struct spi_device *spi, struct spi_transfer *t, unsigned int *settings) { unsigned int speed; if ((t->bits_per_word && t->bits_per_word != 8) || t->len > 62) return -EINVAL; speed = t->speed_hz ? 
t->speed_hz : spi->max_speed_hz; if (speed != spi_xcomm->current_speed) { unsigned int divider = DIV_ROUND_UP(SPI_XCOMM_CLOCK, speed); if (divider >= 64) *settings |= SPI_XCOMM_SETTINGS_CLOCK_DIV_64; else if (divider >= 16) *settings |= SPI_XCOMM_SETTINGS_CLOCK_DIV_16; else *settings |= SPI_XCOMM_SETTINGS_CLOCK_DIV_4; spi_xcomm->current_speed = speed; } if (spi->mode & SPI_CPOL) *settings |= SPI_XCOMM_SETTINGS_CPOL; else *settings &= ~SPI_XCOMM_SETTINGS_CPOL; if (spi->mode & SPI_CPHA) *settings &= ~SPI_XCOMM_SETTINGS_CPHA; else *settings |= SPI_XCOMM_SETTINGS_CPHA; if (spi->mode & SPI_3WIRE) *settings |= SPI_XCOMM_SETTINGS_3WIRE; else *settings &= ~SPI_XCOMM_SETTINGS_3WIRE; return 0; } static int spi_xcomm_txrx_bufs(struct spi_xcomm *spi_xcomm, struct spi_device *spi, struct spi_transfer *t) { int ret; if (t->tx_buf) { spi_xcomm->buf[0] = SPI_XCOMM_CMD_WRITE; memcpy(spi_xcomm->buf + 1, t->tx_buf, t->len); ret = i2c_master_send(spi_xcomm->i2c, spi_xcomm->buf, t->len + 1); if (ret < 0) return ret; else if (ret != t->len + 1) return -EIO; } else if (t->rx_buf) { ret = i2c_master_recv(spi_xcomm->i2c, t->rx_buf, t->len); if (ret < 0) return ret; else if (ret != t->len) return -EIO; } return t->len; } static int spi_xcomm_transfer_one(struct spi_master *master, struct spi_message *msg) { struct spi_xcomm *spi_xcomm = spi_master_get_devdata(master); unsigned int settings = spi_xcomm->settings; struct spi_device *spi = msg->spi; unsigned cs_change = 0; struct spi_transfer *t; bool is_first = true; int status = 0; bool is_last; is_first = true; spi_xcomm_chipselect(spi_xcomm, spi, true); list_for_each_entry(t, &msg->transfers, transfer_list) { if (!t->tx_buf && !t->rx_buf && t->len) { status = -EINVAL; break; } status = spi_xcomm_setup_transfer(spi_xcomm, spi, t, &settings); if (status < 0) break; is_last = list_is_last(&t->transfer_list, &msg->transfers); cs_change = t->cs_change; if (cs_change ^ is_last) settings |= BIT(5); else settings &= ~BIT(5); if (t->rx_buf) { 
spi_xcomm->settings = settings; status = spi_xcomm_sync_config(spi_xcomm, t->len); if (status < 0) break; } else if (settings != spi_xcomm->settings || is_first) { spi_xcomm->settings = settings; status = spi_xcomm_sync_config(spi_xcomm, 0); if (status < 0) break; } if (t->len) { status = spi_xcomm_txrx_bufs(spi_xcomm, spi, t); if (status < 0) break; if (status > 0) msg->actual_length += status; } status = 0; if (t->delay_usecs) udelay(t->delay_usecs); is_first = false; } if (status != 0 || !cs_change) spi_xcomm_chipselect(spi_xcomm, spi, false); msg->status = status; spi_finalize_current_message(master); return status; } static int spi_xcomm_setup(struct spi_device *spi) { if (spi->bits_per_word != 8) return -EINVAL; return 0; } static int spi_xcomm_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct spi_xcomm *spi_xcomm; struct spi_master *master; int ret; master = spi_alloc_master(&i2c->dev, sizeof(*spi_xcomm)); if (!master) return -ENOMEM; spi_xcomm = spi_master_get_devdata(master); spi_xcomm->i2c = i2c; master->num_chipselect = 16; master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_3WIRE; master->flags = SPI_MASTER_HALF_DUPLEX; master->setup = spi_xcomm_setup; master->transfer_one_message = spi_xcomm_transfer_one; master->dev.of_node = i2c->dev.of_node; i2c_set_clientdata(i2c, master); ret = spi_register_master(master); if (ret < 0) spi_master_put(master); return ret; } static int spi_xcomm_remove(struct i2c_client *i2c) { struct spi_master *master = i2c_get_clientdata(i2c); spi_unregister_master(master); return 0; } static const struct i2c_device_id spi_xcomm_ids[] = { { "spi-xcomm" }, { }, }; static struct i2c_driver spi_xcomm_driver = { .driver = { .name = "spi-xcomm", .owner = THIS_MODULE, }, .id_table = spi_xcomm_ids, .probe = spi_xcomm_probe, .remove = spi_xcomm_remove, }; module_i2c_driver(spi_xcomm_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); MODULE_DESCRIPTION("Analog Devices AD-FMCOMMS1-EBZ 
board I2C-SPI bridge driver");
gpl-2.0
daishi4u/J7_Afterburner
fs/udf/file.c
2213
7027
/* * file.c * * PURPOSE * File handling routines for the OSTA-UDF(tm) filesystem. * * COPYRIGHT * This file is distributed under the terms of the GNU General Public * License (GPL). Copies of the GPL can be obtained from: * ftp://prep.ai.mit.edu/pub/gnu/GPL * Each contributing author retains all rights to their own work. * * (C) 1998-1999 Dave Boynton * (C) 1998-2004 Ben Fennema * (C) 1999-2000 Stelias Computing Inc * * HISTORY * * 10/02/98 dgb Attempt to integrate into udf.o * 10/07/98 Switched to using generic_readpage, etc., like isofs * And it works! * 12/06/98 blf Added udf_file_read. uses generic_file_read for all cases but * ICBTAG_FLAG_AD_IN_ICB. * 04/06/99 64 bit file handling on 32 bit systems taken from ext2 file.c * 05/12/99 Preliminary file write support */ #include "udfdecl.h" #include <linux/fs.h> #include <asm/uaccess.h> #include <linux/kernel.h> #include <linux/string.h> /* memset */ #include <linux/capability.h> #include <linux/errno.h> #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/aio.h> #include "udf_i.h" #include "udf_sb.h" static void __udf_adinicb_readpage(struct page *page) { struct inode *inode = page->mapping->host; char *kaddr; struct udf_inode_info *iinfo = UDF_I(inode); kaddr = kmap(page); memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size); memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size); flush_dcache_page(page); SetPageUptodate(page); kunmap(page); } static int udf_adinicb_readpage(struct file *file, struct page *page) { BUG_ON(!PageLocked(page)); __udf_adinicb_readpage(page); unlock_page(page); return 0; } static int udf_adinicb_writepage(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; char *kaddr; struct udf_inode_info *iinfo = UDF_I(inode); BUG_ON(!PageLocked(page)); kaddr = kmap(page); memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr, inode->i_size); mark_inode_dirty(inode); SetPageUptodate(page); kunmap(page); 
unlock_page(page); return 0; } static int udf_adinicb_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { struct page *page; if (WARN_ON_ONCE(pos >= PAGE_CACHE_SIZE)) return -EIO; page = grab_cache_page_write_begin(mapping, 0, flags); if (!page) return -ENOMEM; *pagep = page; if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) __udf_adinicb_readpage(page); return 0; } static int udf_adinicb_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct inode *inode = mapping->host; unsigned offset = pos & (PAGE_CACHE_SIZE - 1); char *kaddr; struct udf_inode_info *iinfo = UDF_I(inode); kaddr = kmap_atomic(page); memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset, kaddr + offset, copied); kunmap_atomic(kaddr); return simple_write_end(file, mapping, pos, len, copied, page, fsdata); } static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) { /* Fallback to buffered I/O. 
*/ return 0; } const struct address_space_operations udf_adinicb_aops = { .readpage = udf_adinicb_readpage, .writepage = udf_adinicb_writepage, .write_begin = udf_adinicb_write_begin, .write_end = udf_adinicb_write_end, .direct_IO = udf_adinicb_direct_IO, }; static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t ppos) { ssize_t retval; struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); int err, pos; size_t count = iocb->ki_left; struct udf_inode_info *iinfo = UDF_I(inode); down_write(&iinfo->i_data_sem); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { if (file->f_flags & O_APPEND) pos = inode->i_size; else pos = ppos; if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) + pos + count)) { err = udf_expand_file_adinicb(inode); if (err) { udf_debug("udf_expand_adinicb: err=%d\n", err); return err; } } else { if (pos + count > inode->i_size) iinfo->i_lenAlloc = pos + count; else iinfo->i_lenAlloc = inode->i_size; up_write(&iinfo->i_data_sem); } } else up_write(&iinfo->i_data_sem); retval = generic_file_aio_write(iocb, iov, nr_segs, ppos); if (retval > 0) mark_inode_dirty(inode); return retval; } long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(filp); long old_block, new_block; int result = -EINVAL; if (inode_permission(inode, MAY_READ) != 0) { udf_debug("no permission to access inode %lu\n", inode->i_ino); result = -EPERM; goto out; } if (!arg) { udf_debug("invalid argument to udf_ioctl\n"); result = -EINVAL; goto out; } switch (cmd) { case UDF_GETVOLIDENT: if (copy_to_user((char __user *)arg, UDF_SB(inode->i_sb)->s_volume_ident, 32)) result = -EFAULT; else result = 0; goto out; case UDF_RELOCATE_BLOCKS: if (!capable(CAP_SYS_ADMIN)) { result = -EPERM; goto out; } if (get_user(old_block, (long __user *)arg)) { result = -EFAULT; goto out; } result = udf_relocate_blocks(inode->i_sb, old_block, &new_block); if (result == 
0) result = put_user(new_block, (long __user *)arg); goto out; case UDF_GETEASIZE: result = put_user(UDF_I(inode)->i_lenEAttr, (int __user *)arg); goto out; case UDF_GETEABLOCK: result = copy_to_user((char __user *)arg, UDF_I(inode)->i_ext.i_data, UDF_I(inode)->i_lenEAttr) ? -EFAULT : 0; goto out; } out: return result; } static int udf_release_file(struct inode *inode, struct file *filp) { if (filp->f_mode & FMODE_WRITE) { down_write(&UDF_I(inode)->i_data_sem); udf_discard_prealloc(inode); udf_truncate_tail_extent(inode); up_write(&UDF_I(inode)->i_data_sem); } return 0; } const struct file_operations udf_file_operations = { .read = do_sync_read, .aio_read = generic_file_aio_read, .unlocked_ioctl = udf_ioctl, .open = generic_file_open, .mmap = generic_file_mmap, .write = do_sync_write, .aio_write = udf_file_aio_write, .release = udf_release_file, .fsync = generic_file_fsync, .splice_read = generic_file_splice_read, .llseek = generic_file_llseek, }; static int udf_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; int error; error = inode_change_ok(inode, attr); if (error) return error; if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != i_size_read(inode)) { error = udf_setsize(inode, attr->ia_size); if (error) return error; } setattr_copy(inode, attr); mark_inode_dirty(inode); return 0; } const struct inode_operations udf_file_inode_operations = { .setattr = udf_setattr, };
gpl-2.0
Snuzzo/vigor_aosp_kernel
arch/ia64/hp/sim/simserial.c
2981
24227
/* * Simulated Serial Driver (fake serial) * * This driver is mostly used for bringup purposes and will go away. * It has a strong dependency on the system console. All outputs * are rerouted to the same facility as the one used by printk which, in our * case means sys_sim.c console (goes via the simulator). The code hereafter * is completely leveraged from the serial.c driver. * * Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co * Stephane Eranian <eranian@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com> * * 02/04/00 D. Mosberger Merged in serial.c bug fixes in rs_close(). * 02/25/00 D. Mosberger Synced up with 2.3.99pre-5 version of serial.c. * 07/30/02 D. Mosberger Replace sti()/cli() with explicit spinlocks & local irq masking */ #include <linux/init.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/major.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/capability.h> #include <linux/console.h> #include <linux/module.h> #include <linux/serial.h> #include <linux/serialP.h> #include <linux/sysrq.h> #include <asm/irq.h> #include <asm/hw_irq.h> #include <asm/uaccess.h> #undef SIMSERIAL_DEBUG /* define this to get some debug information */ #define KEYBOARD_INTR 3 /* must match with simulator! */ #define NR_PORTS 1 /* only one port for now */ #define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? IRQF_SHARED : IRQF_DISABLED) #define SSC_GETCHAR 21 extern long ia64_ssc (long, long, long, long, int); extern void ia64_ssc_connect_irq (long intr, long irq); static char *serial_name = "SimSerial driver"; static char *serial_version = "0.6"; /* * This has been extracted from asm/serial.h. We need one eventually but * I don't know exactly what we're going to put in it so just fake one * for now. 
*/ #define BASE_BAUD ( 1843200 / 16 ) #define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) /* * Most of the values here are meaningless to this particular driver. * However some values must be preserved for the code (leveraged from serial.c * to work correctly). * port must not be 0 * type must not be UNKNOWN * So I picked arbitrary (guess from where?) values instead */ static struct serial_state rs_table[NR_PORTS]={ /* UART CLK PORT IRQ FLAGS */ { 0, BASE_BAUD, 0x3F8, 0, STD_COM_FLAGS,0,PORT_16550 } /* ttyS0 */ }; /* * Just for the fun of it ! */ static struct serial_uart_config uart_config[] = { { "unknown", 1, 0 }, { "8250", 1, 0 }, { "16450", 1, 0 }, { "16550", 1, 0 }, { "16550A", 16, UART_CLEAR_FIFO | UART_USE_FIFO }, { "cirrus", 1, 0 }, { "ST16650", 1, UART_CLEAR_FIFO | UART_STARTECH }, { "ST16650V2", 32, UART_CLEAR_FIFO | UART_USE_FIFO | UART_STARTECH }, { "TI16750", 64, UART_CLEAR_FIFO | UART_USE_FIFO}, { NULL, 0} }; struct tty_driver *hp_simserial_driver; static struct async_struct *IRQ_ports[NR_IRQS]; static struct console *console; static unsigned char *tmp_buf; extern struct console *console_drivers; /* from kernel/printk.c */ /* * ------------------------------------------------------------ * rs_stop() and rs_start() * * This routines are called before setting or resetting tty->stopped. * They enable or disable transmitter interrupts, as necessary. 
* ------------------------------------------------------------ */ static void rs_stop(struct tty_struct *tty) { #ifdef SIMSERIAL_DEBUG printk("rs_stop: tty->stopped=%d tty->hw_stopped=%d tty->flow_stopped=%d\n", tty->stopped, tty->hw_stopped, tty->flow_stopped); #endif } static void rs_start(struct tty_struct *tty) { #ifdef SIMSERIAL_DEBUG printk("rs_start: tty->stopped=%d tty->hw_stopped=%d tty->flow_stopped=%d\n", tty->stopped, tty->hw_stopped, tty->flow_stopped); #endif } static void receive_chars(struct tty_struct *tty) { unsigned char ch; static unsigned char seen_esc = 0; while ( (ch = ia64_ssc(0, 0, 0, 0, SSC_GETCHAR)) ) { if ( ch == 27 && seen_esc == 0 ) { seen_esc = 1; continue; } else { if ( seen_esc==1 && ch == 'O' ) { seen_esc = 2; continue; } else if ( seen_esc == 2 ) { if ( ch == 'P' ) /* F1 */ show_state(); #ifdef CONFIG_MAGIC_SYSRQ if ( ch == 'S' ) { /* F4 */ do ch = ia64_ssc(0, 0, 0, 0, SSC_GETCHAR); while (!ch); handle_sysrq(ch); } #endif seen_esc = 0; continue; } } seen_esc = 0; if (tty_insert_flip_char(tty, ch, TTY_NORMAL) == 0) break; } tty_flip_buffer_push(tty); } /* * This is the serial driver's interrupt routine for a single port */ static irqreturn_t rs_interrupt_single(int irq, void *dev_id) { struct async_struct * info; /* * I don't know exactly why they don't use the dev_id opaque data * pointer instead of this extra lookup table */ info = IRQ_ports[irq]; if (!info || !info->tty) { printk(KERN_INFO "simrs_interrupt_single: info|tty=0 info=%p problem\n", info); return IRQ_NONE; } /* * pretty simple in our case, because we only get interrupts * on inbound traffic */ receive_chars(info->tty); return IRQ_HANDLED; } /* * ------------------------------------------------------------------- * Here ends the serial interrupt routines. 
* ------------------------------------------------------------------- */ static void do_softint(struct work_struct *private_) { printk(KERN_ERR "simserial: do_softint called\n"); } static int rs_put_char(struct tty_struct *tty, unsigned char ch) { struct async_struct *info = (struct async_struct *)tty->driver_data; unsigned long flags; if (!tty || !info->xmit.buf) return 0; local_irq_save(flags); if (CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) == 0) { local_irq_restore(flags); return 0; } info->xmit.buf[info->xmit.head] = ch; info->xmit.head = (info->xmit.head + 1) & (SERIAL_XMIT_SIZE-1); local_irq_restore(flags); return 1; } static void transmit_chars(struct async_struct *info, int *intr_done) { int count; unsigned long flags; local_irq_save(flags); if (info->x_char) { char c = info->x_char; console->write(console, &c, 1); info->state->icount.tx++; info->x_char = 0; goto out; } if (info->xmit.head == info->xmit.tail || info->tty->stopped || info->tty->hw_stopped) { #ifdef SIMSERIAL_DEBUG printk("transmit_chars: head=%d, tail=%d, stopped=%d\n", info->xmit.head, info->xmit.tail, info->tty->stopped); #endif goto out; } /* * We removed the loop and try to do it in to chunks. We need * 2 operations maximum because it's a ring buffer. * * First from current to tail if possible. 
* Then from the beginning of the buffer until necessary */ count = min(CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE), SERIAL_XMIT_SIZE - info->xmit.tail); console->write(console, info->xmit.buf+info->xmit.tail, count); info->xmit.tail = (info->xmit.tail+count) & (SERIAL_XMIT_SIZE-1); /* * We have more at the beginning of the buffer */ count = CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); if (count) { console->write(console, info->xmit.buf, count); info->xmit.tail += count; } out: local_irq_restore(flags); } static void rs_flush_chars(struct tty_struct *tty) { struct async_struct *info = (struct async_struct *)tty->driver_data; if (info->xmit.head == info->xmit.tail || tty->stopped || tty->hw_stopped || !info->xmit.buf) return; transmit_chars(info, NULL); } static int rs_write(struct tty_struct * tty, const unsigned char *buf, int count) { int c, ret = 0; struct async_struct *info = (struct async_struct *)tty->driver_data; unsigned long flags; if (!tty || !info->xmit.buf || !tmp_buf) return 0; local_irq_save(flags); while (1) { c = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); if (count < c) c = count; if (c <= 0) { break; } memcpy(info->xmit.buf + info->xmit.head, buf, c); info->xmit.head = ((info->xmit.head + c) & (SERIAL_XMIT_SIZE-1)); buf += c; count -= c; ret += c; } local_irq_restore(flags); /* * Hey, we transmit directly from here in our case */ if (CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) && !tty->stopped && !tty->hw_stopped) { transmit_chars(info, NULL); } return ret; } static int rs_write_room(struct tty_struct *tty) { struct async_struct *info = (struct async_struct *)tty->driver_data; return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); } static int rs_chars_in_buffer(struct tty_struct *tty) { struct async_struct *info = (struct async_struct *)tty->driver_data; return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); } static void 
rs_flush_buffer(struct tty_struct *tty) { struct async_struct *info = (struct async_struct *)tty->driver_data; unsigned long flags; local_irq_save(flags); info->xmit.head = info->xmit.tail = 0; local_irq_restore(flags); tty_wakeup(tty); } /* * This function is used to send a high-priority XON/XOFF character to * the device */ static void rs_send_xchar(struct tty_struct *tty, char ch) { struct async_struct *info = (struct async_struct *)tty->driver_data; info->x_char = ch; if (ch) { /* * I guess we could call console->write() directly but * let's do that for now. */ transmit_chars(info, NULL); } } /* * ------------------------------------------------------------ * rs_throttle() * * This routine is called by the upper-layer tty layer to signal that * incoming characters should be throttled. * ------------------------------------------------------------ */ static void rs_throttle(struct tty_struct * tty) { if (I_IXOFF(tty)) rs_send_xchar(tty, STOP_CHAR(tty)); printk(KERN_INFO "simrs_throttle called\n"); } static void rs_unthrottle(struct tty_struct * tty) { struct async_struct *info = (struct async_struct *)tty->driver_data; if (I_IXOFF(tty)) { if (info->x_char) info->x_char = 0; else rs_send_xchar(tty, START_CHAR(tty)); } printk(KERN_INFO "simrs_unthrottle called\n"); } static int rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) && (cmd != TIOCMIWAIT)) { if (tty->flags & (1 << TTY_IO_ERROR)) return -EIO; } switch (cmd) { case TIOCGSERIAL: printk(KERN_INFO "simrs_ioctl TIOCGSERIAL called\n"); return 0; case TIOCSSERIAL: printk(KERN_INFO "simrs_ioctl TIOCSSERIAL called\n"); return 0; case TIOCSERCONFIG: printk(KERN_INFO "rs_ioctl: TIOCSERCONFIG called\n"); return -EINVAL; case TIOCSERGETLSR: /* Get line status register */ printk(KERN_INFO "rs_ioctl: TIOCSERGETLSR called\n"); return -EINVAL; case TIOCSERGSTRUCT: printk(KERN_INFO "rs_ioctl: 
TIOCSERGSTRUCT called\n"); #if 0 if (copy_to_user((struct async_struct *) arg, info, sizeof(struct async_struct))) return -EFAULT; #endif return 0; /* * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change * - mask passed in arg for lines of interest * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) * Caller should use TIOCGICOUNT to see which one it was */ case TIOCMIWAIT: printk(KERN_INFO "rs_ioctl: TIOCMIWAIT: called\n"); return 0; case TIOCSERGWILD: case TIOCSERSWILD: /* "setserial -W" is called in Debian boot */ printk (KERN_INFO "TIOCSER?WILD ioctl obsolete, ignored.\n"); return 0; default: return -ENOIOCTLCMD; } return 0; } #define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios) { /* Handle turning off CRTSCTS */ if ((old_termios->c_cflag & CRTSCTS) && !(tty->termios->c_cflag & CRTSCTS)) { tty->hw_stopped = 0; rs_start(tty); } } /* * This routine will shutdown a serial port; interrupts are disabled, and * DTR is dropped if the hangup on close termio flag is on. */ static void shutdown(struct async_struct * info) { unsigned long flags; struct serial_state *state; int retval; if (!(info->flags & ASYNC_INITIALIZED)) return; state = info->state; #ifdef SIMSERIAL_DEBUG printk("Shutting down serial port %d (irq %d)....", info->line, state->irq); #endif local_irq_save(flags); { /* * First unlink the serial port from the IRQ chain... 
*/ if (info->next_port) info->next_port->prev_port = info->prev_port; if (info->prev_port) info->prev_port->next_port = info->next_port; else IRQ_ports[state->irq] = info->next_port; /* * Free the IRQ, if necessary */ if (state->irq && (!IRQ_ports[state->irq] || !IRQ_ports[state->irq]->next_port)) { if (IRQ_ports[state->irq]) { free_irq(state->irq, NULL); retval = request_irq(state->irq, rs_interrupt_single, IRQ_T(info), "serial", NULL); if (retval) printk(KERN_ERR "serial shutdown: request_irq: error %d" " Couldn't reacquire IRQ.\n", retval); } else free_irq(state->irq, NULL); } if (info->xmit.buf) { free_page((unsigned long) info->xmit.buf); info->xmit.buf = NULL; } if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags); info->flags &= ~ASYNC_INITIALIZED; } local_irq_restore(flags); } /* * ------------------------------------------------------------ * rs_close() * * This routine is called when the serial port gets closed. First, we * wait for the last remaining data to be sent. Then, we unlink its * async structure from the interrupt chain if necessary, and we free * that IRQ if nothing is left in the chain. * ------------------------------------------------------------ */ static void rs_close(struct tty_struct *tty, struct file * filp) { struct async_struct * info = (struct async_struct *)tty->driver_data; struct serial_state *state; unsigned long flags; if (!info ) return; state = info->state; local_irq_save(flags); if (tty_hung_up_p(filp)) { #ifdef SIMSERIAL_DEBUG printk("rs_close: hung_up\n"); #endif local_irq_restore(flags); return; } #ifdef SIMSERIAL_DEBUG printk("rs_close ttys%d, count = %d\n", info->line, state->count); #endif if ((tty->count == 1) && (state->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. state->count should always * be one in these conditions. If it's greater than * one, we've got real problems, since it means the * serial port won't be shutdown. 
*/ printk(KERN_ERR "rs_close: bad serial port count; tty->count is 1, " "state->count is %d\n", state->count); state->count = 1; } if (--state->count < 0) { printk(KERN_ERR "rs_close: bad serial port count for ttys%d: %d\n", info->line, state->count); state->count = 0; } if (state->count) { local_irq_restore(flags); return; } info->flags |= ASYNC_CLOSING; local_irq_restore(flags); /* * Now we wait for the transmit buffer to clear; and we notify * the line discipline to only process XON/XOFF characters. */ shutdown(info); rs_flush_buffer(tty); tty_ldisc_flush(tty); info->event = 0; info->tty = NULL; if (info->blocked_open) { if (info->close_delay) schedule_timeout_interruptible(info->close_delay); wake_up_interruptible(&info->open_wait); } info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); wake_up_interruptible(&info->close_wait); } /* * rs_wait_until_sent() --- wait until the transmitter is empty */ static void rs_wait_until_sent(struct tty_struct *tty, int timeout) { } /* * rs_hangup() --- called by tty_hangup() when a hangup is signaled. 
*/ static void rs_hangup(struct tty_struct *tty) { struct async_struct * info = (struct async_struct *)tty->driver_data; struct serial_state *state = info->state; #ifdef SIMSERIAL_DEBUG printk("rs_hangup: called\n"); #endif state = info->state; rs_flush_buffer(tty); if (info->flags & ASYNC_CLOSING) return; shutdown(info); info->event = 0; state->count = 0; info->flags &= ~ASYNC_NORMAL_ACTIVE; info->tty = NULL; wake_up_interruptible(&info->open_wait); } static int get_async_struct(int line, struct async_struct **ret_info) { struct async_struct *info; struct serial_state *sstate; sstate = rs_table + line; sstate->count++; if (sstate->info) { *ret_info = sstate->info; return 0; } info = kzalloc(sizeof(struct async_struct), GFP_KERNEL); if (!info) { sstate->count--; return -ENOMEM; } init_waitqueue_head(&info->open_wait); init_waitqueue_head(&info->close_wait); init_waitqueue_head(&info->delta_msr_wait); info->magic = SERIAL_MAGIC; info->port = sstate->port; info->flags = sstate->flags; info->xmit_fifo_size = sstate->xmit_fifo_size; info->line = line; INIT_WORK(&info->work, do_softint); info->state = sstate; if (sstate->info) { kfree(info); *ret_info = sstate->info; return 0; } *ret_info = sstate->info = info; return 0; } static int startup(struct async_struct *info) { unsigned long flags; int retval=0; irq_handler_t handler; struct serial_state *state= info->state; unsigned long page; page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; local_irq_save(flags); if (info->flags & ASYNC_INITIALIZED) { free_page(page); goto errout; } if (!state->port || !state->type) { if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags); free_page(page); goto errout; } if (info->xmit.buf) free_page(page); else info->xmit.buf = (unsigned char *) page; #ifdef SIMSERIAL_DEBUG printk("startup: ttys%d (irq %d)...", info->line, state->irq); #endif /* * Allocate the IRQ if necessary */ if (state->irq && (!IRQ_ports[state->irq] || !IRQ_ports[state->irq]->next_port)) { if 
(IRQ_ports[state->irq]) { retval = -EBUSY; goto errout; } else handler = rs_interrupt_single; retval = request_irq(state->irq, handler, IRQ_T(info), "simserial", NULL); if (retval) { if (capable(CAP_SYS_ADMIN)) { if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags); retval = 0; } goto errout; } } /* * Insert serial port into IRQ chain. */ info->prev_port = NULL; info->next_port = IRQ_ports[state->irq]; if (info->next_port) info->next_port->prev_port = info; IRQ_ports[state->irq] = info; if (info->tty) clear_bit(TTY_IO_ERROR, &info->tty->flags); info->xmit.head = info->xmit.tail = 0; #if 0 /* * Set up serial timers... */ timer_table[RS_TIMER].expires = jiffies + 2*HZ/100; timer_active |= 1 << RS_TIMER; #endif /* * Set up the tty->alt_speed kludge */ if (info->tty) { if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) info->tty->alt_speed = 57600; if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) info->tty->alt_speed = 115200; if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) info->tty->alt_speed = 230400; if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) info->tty->alt_speed = 460800; } info->flags |= ASYNC_INITIALIZED; local_irq_restore(flags); return 0; errout: local_irq_restore(flags); return retval; } /* * This routine is called whenever a serial port is opened. It * enables interrupts for a serial port, linking in its async structure into * the IRQ chain. It also performs the serial-specific * initialization for the tty structure. */ static int rs_open(struct tty_struct *tty, struct file * filp) { struct async_struct *info; int retval, line; unsigned long page; line = tty->index; if ((line < 0) || (line >= NR_PORTS)) return -ENODEV; retval = get_async_struct(line, &info); if (retval) return retval; tty->driver_data = info; info->tty = tty; #ifdef SIMSERIAL_DEBUG printk("rs_open %s, count = %d\n", tty->name, info->state->count); #endif info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 
1 : 0; if (!tmp_buf) { page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; if (tmp_buf) free_page(page); else tmp_buf = (unsigned char *) page; } /* * If the port is the middle of closing, bail out now */ if (tty_hung_up_p(filp) || (info->flags & ASYNC_CLOSING)) { if (info->flags & ASYNC_CLOSING) interruptible_sleep_on(&info->close_wait); #ifdef SERIAL_DO_RESTART return ((info->flags & ASYNC_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS); #else return -EAGAIN; #endif } /* * Start up serial port */ retval = startup(info); if (retval) { return retval; } /* * figure out which console to use (should be one already) */ console = console_drivers; while (console) { if ((console->flags & CON_ENABLED) && console->write) break; console = console->next; } #ifdef SIMSERIAL_DEBUG printk("rs_open ttys%d successful\n", info->line); #endif return 0; } /* * /proc fs routines.... */ static inline void line_info(struct seq_file *m, struct serial_state *state) { seq_printf(m, "%d: uart:%s port:%lX irq:%d\n", state->line, uart_config[state->type].name, state->port, state->irq); } static int rs_proc_show(struct seq_file *m, void *v) { int i; seq_printf(m, "simserinfo:1.0 driver:%s\n", serial_version); for (i = 0; i < NR_PORTS; i++) line_info(m, &rs_table[i]); return 0; } static int rs_proc_open(struct inode *inode, struct file *file) { return single_open(file, rs_proc_show, NULL); } static const struct file_operations rs_proc_fops = { .owner = THIS_MODULE, .open = rs_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* * --------------------------------------------------------------------- * rs_init() and friends * * rs_init() is called at boot-time to initialize the serial driver. * --------------------------------------------------------------------- */ /* * This routine prints out the appropriate serial driver version * number, and identifies which options were configured into this * driver. 
*/ static inline void show_serial_version(void) { printk(KERN_INFO "%s version %s with", serial_name, serial_version); printk(KERN_INFO " no serial options enabled\n"); } static const struct tty_operations hp_ops = { .open = rs_open, .close = rs_close, .write = rs_write, .put_char = rs_put_char, .flush_chars = rs_flush_chars, .write_room = rs_write_room, .chars_in_buffer = rs_chars_in_buffer, .flush_buffer = rs_flush_buffer, .ioctl = rs_ioctl, .throttle = rs_throttle, .unthrottle = rs_unthrottle, .send_xchar = rs_send_xchar, .set_termios = rs_set_termios, .stop = rs_stop, .start = rs_start, .hangup = rs_hangup, .wait_until_sent = rs_wait_until_sent, .proc_fops = &rs_proc_fops, }; /* * The serial driver boot-time initialization code! */ static int __init simrs_init (void) { int i, rc; struct serial_state *state; if (!ia64_platform_is("hpsim")) return -ENODEV; hp_simserial_driver = alloc_tty_driver(1); if (!hp_simserial_driver) return -ENOMEM; show_serial_version(); /* Initialize the tty_driver structure */ hp_simserial_driver->owner = THIS_MODULE; hp_simserial_driver->driver_name = "simserial"; hp_simserial_driver->name = "ttyS"; hp_simserial_driver->major = TTY_MAJOR; hp_simserial_driver->minor_start = 64; hp_simserial_driver->type = TTY_DRIVER_TYPE_SERIAL; hp_simserial_driver->subtype = SERIAL_TYPE_NORMAL; hp_simserial_driver->init_termios = tty_std_termios; hp_simserial_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; hp_simserial_driver->flags = TTY_DRIVER_REAL_RAW; tty_set_operations(hp_simserial_driver, &hp_ops); /* * Let's have a little bit of fun ! 
*/ for (i = 0, state = rs_table; i < NR_PORTS; i++,state++) { if (state->type == PORT_UNKNOWN) continue; if (!state->irq) { if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0) panic("%s: out of interrupt vectors!\n", __func__); state->irq = rc; ia64_ssc_connect_irq(KEYBOARD_INTR, state->irq); } printk(KERN_INFO "ttyS%d at 0x%04lx (irq = %d) is a %s\n", state->line, state->port, state->irq, uart_config[state->type].name); } if (tty_register_driver(hp_simserial_driver)) panic("Couldn't register simserial driver\n"); return 0; } #ifndef MODULE __initcall(simrs_init); #endif
gpl-2.0
htc-mirror/jewel-ics-crc-3.0.8-3fd0422
arch/arm/mach-ks8695/irq.c
2981
4284
/* * arch/arm/mach-ks8695/irq.c * * Copyright (C) 2006 Ben Dooks <ben@simtec.co.uk> * Copyright (C) 2006 Simtec Electronics * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/sysdev.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mach/irq.h> #include <mach/regs-irq.h> #include <mach/regs-gpio.h> static void ks8695_irq_mask(struct irq_data *d) { unsigned long inten; inten = __raw_readl(KS8695_IRQ_VA + KS8695_INTEN); inten &= ~(1 << d->irq); __raw_writel(inten, KS8695_IRQ_VA + KS8695_INTEN); } static void ks8695_irq_unmask(struct irq_data *d) { unsigned long inten; inten = __raw_readl(KS8695_IRQ_VA + KS8695_INTEN); inten |= (1 << d->irq); __raw_writel(inten, KS8695_IRQ_VA + KS8695_INTEN); } static void ks8695_irq_ack(struct irq_data *d) { __raw_writel((1 << d->irq), KS8695_IRQ_VA + KS8695_INTST); } static struct irq_chip ks8695_irq_level_chip; static struct irq_chip ks8695_irq_edge_chip; static int ks8695_irq_set_type(struct irq_data *d, unsigned int type) { unsigned long ctrl, mode; unsigned short level_triggered = 0; ctrl = __raw_readl(KS8695_GPIO_VA + KS8695_IOPC); switch (type) { case IRQ_TYPE_LEVEL_HIGH: mode = IOPC_TM_HIGH; level_triggered = 1; break; case 
IRQ_TYPE_LEVEL_LOW: mode = IOPC_TM_LOW; level_triggered = 1; break; case IRQ_TYPE_EDGE_RISING: mode = IOPC_TM_RISING; break; case IRQ_TYPE_EDGE_FALLING: mode = IOPC_TM_FALLING; break; case IRQ_TYPE_EDGE_BOTH: mode = IOPC_TM_EDGE; break; default: return -EINVAL; } switch (d->irq) { case KS8695_IRQ_EXTERN0: ctrl &= ~IOPC_IOEINT0TM; ctrl |= IOPC_IOEINT0_MODE(mode); break; case KS8695_IRQ_EXTERN1: ctrl &= ~IOPC_IOEINT1TM; ctrl |= IOPC_IOEINT1_MODE(mode); break; case KS8695_IRQ_EXTERN2: ctrl &= ~IOPC_IOEINT2TM; ctrl |= IOPC_IOEINT2_MODE(mode); break; case KS8695_IRQ_EXTERN3: ctrl &= ~IOPC_IOEINT3TM; ctrl |= IOPC_IOEINT3_MODE(mode); break; default: return -EINVAL; } if (level_triggered) { irq_set_chip_and_handler(d->irq, &ks8695_irq_level_chip, handle_level_irq); } else { irq_set_chip_and_handler(d->irq, &ks8695_irq_edge_chip, handle_edge_irq); } __raw_writel(ctrl, KS8695_GPIO_VA + KS8695_IOPC); return 0; } static struct irq_chip ks8695_irq_level_chip = { .irq_ack = ks8695_irq_mask, .irq_mask = ks8695_irq_mask, .irq_unmask = ks8695_irq_unmask, .irq_set_type = ks8695_irq_set_type, }; static struct irq_chip ks8695_irq_edge_chip = { .irq_ack = ks8695_irq_ack, .irq_mask = ks8695_irq_mask, .irq_unmask = ks8695_irq_unmask, .irq_set_type = ks8695_irq_set_type, }; void __init ks8695_init_irq(void) { unsigned int irq; /* Disable all interrupts initially */ __raw_writel(0, KS8695_IRQ_VA + KS8695_INTMC); __raw_writel(0, KS8695_IRQ_VA + KS8695_INTEN); for (irq = 0; irq < NR_IRQS; irq++) { switch (irq) { /* Level-triggered interrupts */ case KS8695_IRQ_BUS_ERROR: case KS8695_IRQ_UART_MODEM_STATUS: case KS8695_IRQ_UART_LINE_STATUS: case KS8695_IRQ_UART_RX: case KS8695_IRQ_COMM_TX: case KS8695_IRQ_COMM_RX: irq_set_chip_and_handler(irq, &ks8695_irq_level_chip, handle_level_irq); break; /* Edge-triggered interrupts */ default: /* clear pending bit */ ks8695_irq_ack(irq_get_irq_data(irq)); irq_set_chip_and_handler(irq, &ks8695_irq_edge_chip, handle_edge_irq); } set_irq_flags(irq, 
IRQF_VALID); } }
gpl-2.0
beats4x/kernel_lge_g3-v10m
arch/arm/mach-at91/at91sam926x_time.c
4773
6394
/* * at91sam926x_time.c - Periodic Interval Timer (PIT) for at91sam926x * * Copyright (C) 2005-2006 M. Amine SAYA, ATMEL Rousset, France * Revision 2005 M. Nicolas Diremdjian, ATMEL Rousset, France * Converted to ClockSource/ClockEvents by David Brownell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/clk.h> #include <linux/clockchips.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <asm/mach/time.h> #include <mach/at91_pit.h> #define PIT_CPIV(x) ((x) & AT91_PIT_CPIV) #define PIT_PICNT(x) (((x) & AT91_PIT_PICNT) >> 20) static u32 pit_cycle; /* write-once */ static u32 pit_cnt; /* access only w/system irq blocked */ static void __iomem *pit_base_addr __read_mostly; static inline unsigned int pit_read(unsigned int reg_offset) { return __raw_readl(pit_base_addr + reg_offset); } static inline void pit_write(unsigned int reg_offset, unsigned long value) { __raw_writel(value, pit_base_addr + reg_offset); } /* * Clocksource: just a monotonic counter of MCK/16 cycles. * We don't care whether or not PIT irqs are enabled. 
*/ static cycle_t read_pit_clk(struct clocksource *cs) { unsigned long flags; u32 elapsed; u32 t; raw_local_irq_save(flags); elapsed = pit_cnt; t = pit_read(AT91_PIT_PIIR); raw_local_irq_restore(flags); elapsed += PIT_PICNT(t) * pit_cycle; elapsed += PIT_CPIV(t); return elapsed; } static struct clocksource pit_clk = { .name = "pit", .rating = 175, .read = read_pit_clk, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; /* * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16) */ static void pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev) { switch (mode) { case CLOCK_EVT_MODE_PERIODIC: /* update clocksource counter */ pit_cnt += pit_cycle * PIT_PICNT(pit_read(AT91_PIT_PIVR)); pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN | AT91_PIT_PITIEN); break; case CLOCK_EVT_MODE_ONESHOT: BUG(); /* FALLTHROUGH */ case CLOCK_EVT_MODE_SHUTDOWN: case CLOCK_EVT_MODE_UNUSED: /* disable irq, leaving the clocksource active */ pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN); break; case CLOCK_EVT_MODE_RESUME: break; } } static struct clock_event_device pit_clkevt = { .name = "pit", .features = CLOCK_EVT_FEAT_PERIODIC, .shift = 32, .rating = 100, .set_mode = pit_clkevt_mode, }; /* * IRQ handler for the timer. */ static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id) { /* * irqs should be disabled here, but as the irq is shared they are only * guaranteed to be off if the timer irq is registered first. 
*/ WARN_ON_ONCE(!irqs_disabled()); /* The PIT interrupt may be disabled, and is shared */ if ((pit_clkevt.mode == CLOCK_EVT_MODE_PERIODIC) && (pit_read(AT91_PIT_SR) & AT91_PIT_PITS)) { unsigned nr_ticks; /* Get number of ticks performed before irq, and ack it */ nr_ticks = PIT_PICNT(pit_read(AT91_PIT_PIVR)); do { pit_cnt += pit_cycle; pit_clkevt.event_handler(&pit_clkevt); nr_ticks--; } while (nr_ticks); return IRQ_HANDLED; } return IRQ_NONE; } static struct irqaction at91sam926x_pit_irq = { .name = "at91_tick", .flags = IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .handler = at91sam926x_pit_interrupt, .irq = AT91_ID_SYS, }; static void at91sam926x_pit_reset(void) { /* Disable timer and irqs */ pit_write(AT91_PIT_MR, 0); /* Clear any pending interrupts, wait for PIT to stop counting */ while (PIT_CPIV(pit_read(AT91_PIT_PIVR)) != 0) cpu_relax(); /* Start PIT but don't enable IRQ */ pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN); } #ifdef CONFIG_OF static struct of_device_id pit_timer_ids[] = { { .compatible = "atmel,at91sam9260-pit" }, { /* sentinel */ } }; static int __init of_at91sam926x_pit_init(void) { struct device_node *np; int ret; np = of_find_matching_node(NULL, pit_timer_ids); if (!np) goto err; pit_base_addr = of_iomap(np, 0); if (!pit_base_addr) goto node_err; /* Get the interrupts property */ ret = irq_of_parse_and_map(np, 0); if (!ret) { pr_crit("AT91: PIT: Unable to get IRQ from DT\n"); goto ioremap_err; } at91sam926x_pit_irq.irq = ret; of_node_put(np); return 0; ioremap_err: iounmap(pit_base_addr); node_err: of_node_put(np); err: return -EINVAL; } #else static int __init of_at91sam926x_pit_init(void) { return -EINVAL; } #endif /* * Set up both clocksource and clockevent support. 
*/ static void __init at91sam926x_pit_init(void) { unsigned long pit_rate; unsigned bits; int ret; /* For device tree enabled device: initialize here */ of_at91sam926x_pit_init(); /* * Use our actual MCK to figure out how many MCK/16 ticks per * 1/HZ period (instead of a compile-time constant LATCH). */ pit_rate = clk_get_rate(clk_get(NULL, "mck")) / 16; pit_cycle = (pit_rate + HZ/2) / HZ; WARN_ON(((pit_cycle - 1) & ~AT91_PIT_PIV) != 0); /* Initialize and enable the timer */ at91sam926x_pit_reset(); /* * Register clocksource. The high order bits of PIV are unused, * so this isn't a 32-bit counter unless we get clockevent irqs. */ bits = 12 /* PICNT */ + ilog2(pit_cycle) /* PIV */; pit_clk.mask = CLOCKSOURCE_MASK(bits); clocksource_register_hz(&pit_clk, pit_rate); /* Set up irq handler */ ret = setup_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq); if (ret) pr_crit("AT91: PIT: Unable to setup IRQ\n"); /* Set up and register clockevents */ pit_clkevt.mult = div_sc(pit_rate, NSEC_PER_SEC, pit_clkevt.shift); pit_clkevt.cpumask = cpumask_of(0); clockevents_register_device(&pit_clkevt); } static void at91sam926x_pit_suspend(void) { /* Disable timer */ pit_write(AT91_PIT_MR, 0); } void __init at91sam926x_ioremap_pit(u32 addr) { #if defined(CONFIG_OF) struct device_node *np = of_find_matching_node(NULL, pit_timer_ids); if (np) { of_node_put(np); return; } #endif pit_base_addr = ioremap(addr, 16); if (!pit_base_addr) panic("Impossible to ioremap PIT\n"); } struct sys_timer at91sam926x_timer = { .init = at91sam926x_pit_init, .suspend = at91sam926x_pit_suspend, .resume = at91sam926x_pit_reset, };
gpl-2.0
huy1561998/android_kernel_lge_d722
drivers/media/dvb/siano/smssdio.c
5029
8350
/* * smssdio.c - Siano 1xxx SDIO interface driver * * Copyright 2008 Pierre Ossman * * Based on code by Siano Mobile Silicon, Inc., * Copyright (C) 2006-2008, Uri Shkolnik * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * * This hardware is a bit odd in that all transfers should be done * to/from the SMSSDIO_DATA register, yet the "increase address" bit * always needs to be set. * * Also, buffers from the card are always aligned to 128 byte * boundaries. */ /* * General cleanup notes: * * - only typedefs should be name *_t * * - use ERR_PTR and friends for smscore_register_device() * * - smscore_getbuffer should zero fields * * Fix stop command */ #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/firmware.h> #include <linux/delay.h> #include <linux/mmc/card.h> #include <linux/mmc/sdio_func.h> #include <linux/mmc/sdio_ids.h> #include <linux/module.h> #include "smscoreapi.h" #include "sms-cards.h" /* Registers */ #define SMSSDIO_DATA 0x00 #define SMSSDIO_INT 0x04 #define SMSSDIO_BLOCK_SIZE 128 static const struct sdio_device_id smssdio_ids[] __devinitconst = { {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_STELLAR), .driver_data = SMS1XXX_BOARD_SIANO_STELLAR}, {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_NOVA_A0), .driver_data = SMS1XXX_BOARD_SIANO_NOVA_A}, {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_NOVA_B0), .driver_data = SMS1XXX_BOARD_SIANO_NOVA_B}, {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_VEGA_A0), .driver_data = SMS1XXX_BOARD_SIANO_VEGA}, {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_VENICE), .driver_data = SMS1XXX_BOARD_SIANO_VEGA}, { /* end: all zeroes */ }, }; MODULE_DEVICE_TABLE(sdio, smssdio_ids); struct smssdio_device { struct sdio_func *func; struct smscore_device_t *coredev; 
struct smscore_buffer_t *split_cb; }; /*******************************************************************/ /* Siano core callbacks */ /*******************************************************************/ static int smssdio_sendrequest(void *context, void *buffer, size_t size) { int ret = 0; struct smssdio_device *smsdev; smsdev = context; sdio_claim_host(smsdev->func); while (size >= smsdev->func->cur_blksize) { ret = sdio_memcpy_toio(smsdev->func, SMSSDIO_DATA, buffer, smsdev->func->cur_blksize); if (ret) goto out; buffer += smsdev->func->cur_blksize; size -= smsdev->func->cur_blksize; } if (size) { ret = sdio_memcpy_toio(smsdev->func, SMSSDIO_DATA, buffer, size); } out: sdio_release_host(smsdev->func); return ret; } /*******************************************************************/ /* SDIO callbacks */ /*******************************************************************/ static void smssdio_interrupt(struct sdio_func *func) { int ret, isr; struct smssdio_device *smsdev; struct smscore_buffer_t *cb; struct SmsMsgHdr_ST *hdr; size_t size; smsdev = sdio_get_drvdata(func); /* * The interrupt register has no defined meaning. It is just * a way of turning of the level triggered interrupt. 
*/ isr = sdio_readb(func, SMSSDIO_INT, &ret); if (ret) { sms_err("Unable to read interrupt register!\n"); return; } if (smsdev->split_cb == NULL) { cb = smscore_getbuffer(smsdev->coredev); if (!cb) { sms_err("Unable to allocate data buffer!\n"); return; } ret = sdio_memcpy_fromio(smsdev->func, cb->p, SMSSDIO_DATA, SMSSDIO_BLOCK_SIZE); if (ret) { sms_err("Error %d reading initial block!\n", ret); return; } hdr = cb->p; if (hdr->msgFlags & MSG_HDR_FLAG_SPLIT_MSG) { smsdev->split_cb = cb; return; } if (hdr->msgLength > smsdev->func->cur_blksize) size = hdr->msgLength - smsdev->func->cur_blksize; else size = 0; } else { cb = smsdev->split_cb; hdr = cb->p; size = hdr->msgLength - sizeof(struct SmsMsgHdr_ST); smsdev->split_cb = NULL; } if (size) { void *buffer; buffer = cb->p + (hdr->msgLength - size); size = ALIGN(size, SMSSDIO_BLOCK_SIZE); BUG_ON(smsdev->func->cur_blksize != SMSSDIO_BLOCK_SIZE); /* * First attempt to transfer all of it in one go... */ ret = sdio_memcpy_fromio(smsdev->func, buffer, SMSSDIO_DATA, size); if (ret && ret != -EINVAL) { smscore_putbuffer(smsdev->coredev, cb); sms_err("Error %d reading data from card!\n", ret); return; } /* * ..then fall back to one block at a time if that is * not possible... 
* * (we have to do this manually because of the * problem with the "increase address" bit) */ if (ret == -EINVAL) { while (size) { ret = sdio_memcpy_fromio(smsdev->func, buffer, SMSSDIO_DATA, smsdev->func->cur_blksize); if (ret) { smscore_putbuffer(smsdev->coredev, cb); sms_err("Error %d reading " "data from card!\n", ret); return; } buffer += smsdev->func->cur_blksize; if (size > smsdev->func->cur_blksize) size -= smsdev->func->cur_blksize; else size = 0; } } } cb->size = hdr->msgLength; cb->offset = 0; smscore_onresponse(smsdev->coredev, cb); } static int __devinit smssdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { int ret; int board_id; struct smssdio_device *smsdev; struct smsdevice_params_t params; board_id = id->driver_data; smsdev = kzalloc(sizeof(struct smssdio_device), GFP_KERNEL); if (!smsdev) return -ENOMEM; smsdev->func = func; memset(&params, 0, sizeof(struct smsdevice_params_t)); params.device = &func->dev; params.buffer_size = 0x5000; /* ?? */ params.num_buffers = 22; /* ?? */ params.context = smsdev; snprintf(params.devpath, sizeof(params.devpath), "sdio\\%s", sdio_func_id(func)); params.sendrequest_handler = smssdio_sendrequest; params.device_type = sms_get_board(board_id)->type; if (params.device_type != SMS_STELLAR) params.flags |= SMS_DEVICE_FAMILY2; else { /* * FIXME: Stellar needs special handling... 
*/ ret = -ENODEV; goto free; } ret = smscore_register_device(&params, &smsdev->coredev); if (ret < 0) goto free; smscore_set_board_id(smsdev->coredev, board_id); sdio_claim_host(func); ret = sdio_enable_func(func); if (ret) goto release; ret = sdio_set_block_size(func, SMSSDIO_BLOCK_SIZE); if (ret) goto disable; ret = sdio_claim_irq(func, smssdio_interrupt); if (ret) goto disable; sdio_set_drvdata(func, smsdev); sdio_release_host(func); ret = smscore_start_device(smsdev->coredev); if (ret < 0) goto reclaim; return 0; reclaim: sdio_claim_host(func); sdio_release_irq(func); disable: sdio_disable_func(func); release: sdio_release_host(func); smscore_unregister_device(smsdev->coredev); free: kfree(smsdev); return ret; } static void smssdio_remove(struct sdio_func *func) { struct smssdio_device *smsdev; smsdev = sdio_get_drvdata(func); /* FIXME: racy! */ if (smsdev->split_cb) smscore_putbuffer(smsdev->coredev, smsdev->split_cb); smscore_unregister_device(smsdev->coredev); sdio_claim_host(func); sdio_release_irq(func); sdio_disable_func(func); sdio_release_host(func); kfree(smsdev); } static struct sdio_driver smssdio_driver = { .name = "smssdio", .id_table = smssdio_ids, .probe = smssdio_probe, .remove = smssdio_remove, }; /*******************************************************************/ /* Module functions */ /*******************************************************************/ static int __init smssdio_module_init(void) { int ret = 0; printk(KERN_INFO "smssdio: Siano SMS1xxx SDIO driver\n"); printk(KERN_INFO "smssdio: Copyright Pierre Ossman\n"); ret = sdio_register_driver(&smssdio_driver); return ret; } static void __exit smssdio_module_exit(void) { sdio_unregister_driver(&smssdio_driver); } module_init(smssdio_module_init); module_exit(smssdio_module_exit); MODULE_DESCRIPTION("Siano SMS1xxx SDIO driver"); MODULE_AUTHOR("Pierre Ossman"); MODULE_LICENSE("GPL");
gpl-2.0
spezi77/kernel_msm
fs/squashfs/inode.c
5029
12651
/* * Squashfs - a compressed read only filesystem for Linux * * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 * Phillip Lougher <phillip@squashfs.org.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2, * or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * inode.c */ /* * This file implements code to create and read inodes from disk. * * Inodes in Squashfs are identified by a 48-bit inode which encodes the * location of the compressed metadata block containing the inode, and the byte * offset into that block where the inode is placed (<block, offset>). * * To maximise compression there are different inodes for each file type * (regular file, directory, device, etc.), the inode contents and length * varying with the type. * * To further maximise compression, two types of regular file inode and * directory inode are defined: inodes optimised for frequently occurring * regular files and directories, and extended types where extra * information has to be stored. */ #include <linux/fs.h> #include <linux/vfs.h> #include <linux/xattr.h> #include "squashfs_fs.h" #include "squashfs_fs_sb.h" #include "squashfs_fs_i.h" #include "squashfs.h" #include "xattr.h" /* * Initialise VFS inode with the base inode information common to all * Squashfs inode types. Sqsh_ino contains the unswapped base inode * off disk. 
*/ static int squashfs_new_inode(struct super_block *sb, struct inode *inode, struct squashfs_base_inode *sqsh_ino) { int err; err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &inode->i_uid); if (err) return err; err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->guid), &inode->i_gid); if (err) return err; inode->i_ino = le32_to_cpu(sqsh_ino->inode_number); inode->i_mtime.tv_sec = le32_to_cpu(sqsh_ino->mtime); inode->i_atime.tv_sec = inode->i_mtime.tv_sec; inode->i_ctime.tv_sec = inode->i_mtime.tv_sec; inode->i_mode = le16_to_cpu(sqsh_ino->mode); inode->i_size = 0; return err; } struct inode *squashfs_iget(struct super_block *sb, long long ino, unsigned int ino_number) { struct inode *inode = iget_locked(sb, ino_number); int err; TRACE("Entered squashfs_iget\n"); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; err = squashfs_read_inode(inode, ino); if (err) { iget_failed(inode); return ERR_PTR(err); } unlock_new_inode(inode); return inode; } /* * Initialise VFS inode by reading inode from inode table (compressed * metadata). The format and amount of data read depends on type. */ int squashfs_read_inode(struct inode *inode, long long ino) { struct super_block *sb = inode->i_sb; struct squashfs_sb_info *msblk = sb->s_fs_info; u64 block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table; int err, type, offset = SQUASHFS_INODE_OFFSET(ino); union squashfs_inode squashfs_ino; struct squashfs_base_inode *sqshb_ino = &squashfs_ino.base; int xattr_id = SQUASHFS_INVALID_XATTR; TRACE("Entered squashfs_read_inode\n"); /* * Read inode base common to all inode types. 
*/ err = squashfs_read_metadata(sb, sqshb_ino, &block, &offset, sizeof(*sqshb_ino)); if (err < 0) goto failed_read; err = squashfs_new_inode(sb, inode, sqshb_ino); if (err) goto failed_read; block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table; offset = SQUASHFS_INODE_OFFSET(ino); type = le16_to_cpu(sqshb_ino->inode_type); switch (type) { case SQUASHFS_REG_TYPE: { unsigned int frag_offset, frag; int frag_size; u64 frag_blk; struct squashfs_reg_inode *sqsh_ino = &squashfs_ino.reg; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; frag = le32_to_cpu(sqsh_ino->fragment); if (frag != SQUASHFS_INVALID_FRAG) { frag_offset = le32_to_cpu(sqsh_ino->offset); frag_size = squashfs_frag_lookup(sb, frag, &frag_blk); if (frag_size < 0) { err = frag_size; goto failed_read; } } else { frag_blk = SQUASHFS_INVALID_BLK; frag_size = 0; frag_offset = 0; } set_nlink(inode, 1); inode->i_size = le32_to_cpu(sqsh_ino->file_size); inode->i_fop = &generic_ro_fops; inode->i_mode |= S_IFREG; inode->i_blocks = ((inode->i_size - 1) >> 9) + 1; squashfs_i(inode)->fragment_block = frag_blk; squashfs_i(inode)->fragment_size = frag_size; squashfs_i(inode)->fragment_offset = frag_offset; squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block); squashfs_i(inode)->block_list_start = block; squashfs_i(inode)->offset = offset; inode->i_data.a_ops = &squashfs_aops; TRACE("File inode %x:%x, start_block %llx, block_list_start " "%llx, offset %x\n", SQUASHFS_INODE_BLK(ino), offset, squashfs_i(inode)->start, block, offset); break; } case SQUASHFS_LREG_TYPE: { unsigned int frag_offset, frag; int frag_size; u64 frag_blk; struct squashfs_lreg_inode *sqsh_ino = &squashfs_ino.lreg; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; frag = le32_to_cpu(sqsh_ino->fragment); if (frag != SQUASHFS_INVALID_FRAG) { frag_offset = le32_to_cpu(sqsh_ino->offset); frag_size = squashfs_frag_lookup(sb, 
frag, &frag_blk); if (frag_size < 0) { err = frag_size; goto failed_read; } } else { frag_blk = SQUASHFS_INVALID_BLK; frag_size = 0; frag_offset = 0; } xattr_id = le32_to_cpu(sqsh_ino->xattr); set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); inode->i_size = le64_to_cpu(sqsh_ino->file_size); inode->i_op = &squashfs_inode_ops; inode->i_fop = &generic_ro_fops; inode->i_mode |= S_IFREG; inode->i_blocks = (inode->i_size - le64_to_cpu(sqsh_ino->sparse) + 511) >> 9; squashfs_i(inode)->fragment_block = frag_blk; squashfs_i(inode)->fragment_size = frag_size; squashfs_i(inode)->fragment_offset = frag_offset; squashfs_i(inode)->start = le64_to_cpu(sqsh_ino->start_block); squashfs_i(inode)->block_list_start = block; squashfs_i(inode)->offset = offset; inode->i_data.a_ops = &squashfs_aops; TRACE("File inode %x:%x, start_block %llx, block_list_start " "%llx, offset %x\n", SQUASHFS_INODE_BLK(ino), offset, squashfs_i(inode)->start, block, offset); break; } case SQUASHFS_DIR_TYPE: { struct squashfs_dir_inode *sqsh_ino = &squashfs_ino.dir; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); inode->i_size = le16_to_cpu(sqsh_ino->file_size); inode->i_op = &squashfs_dir_inode_ops; inode->i_fop = &squashfs_dir_ops; inode->i_mode |= S_IFDIR; squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block); squashfs_i(inode)->offset = le16_to_cpu(sqsh_ino->offset); squashfs_i(inode)->dir_idx_cnt = 0; squashfs_i(inode)->parent = le32_to_cpu(sqsh_ino->parent_inode); TRACE("Directory inode %x:%x, start_block %llx, offset %x\n", SQUASHFS_INODE_BLK(ino), offset, squashfs_i(inode)->start, le16_to_cpu(sqsh_ino->offset)); break; } case SQUASHFS_LDIR_TYPE: { struct squashfs_ldir_inode *sqsh_ino = &squashfs_ino.ldir; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; xattr_id = le32_to_cpu(sqsh_ino->xattr); set_nlink(inode, 
le32_to_cpu(sqsh_ino->nlink)); inode->i_size = le32_to_cpu(sqsh_ino->file_size); inode->i_op = &squashfs_dir_inode_ops; inode->i_fop = &squashfs_dir_ops; inode->i_mode |= S_IFDIR; squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block); squashfs_i(inode)->offset = le16_to_cpu(sqsh_ino->offset); squashfs_i(inode)->dir_idx_start = block; squashfs_i(inode)->dir_idx_offset = offset; squashfs_i(inode)->dir_idx_cnt = le16_to_cpu(sqsh_ino->i_count); squashfs_i(inode)->parent = le32_to_cpu(sqsh_ino->parent_inode); TRACE("Long directory inode %x:%x, start_block %llx, offset " "%x\n", SQUASHFS_INODE_BLK(ino), offset, squashfs_i(inode)->start, le16_to_cpu(sqsh_ino->offset)); break; } case SQUASHFS_SYMLINK_TYPE: case SQUASHFS_LSYMLINK_TYPE: { struct squashfs_symlink_inode *sqsh_ino = &squashfs_ino.symlink; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); inode->i_size = le32_to_cpu(sqsh_ino->symlink_size); inode->i_op = &squashfs_symlink_inode_ops; inode->i_data.a_ops = &squashfs_symlink_aops; inode->i_mode |= S_IFLNK; squashfs_i(inode)->start = block; squashfs_i(inode)->offset = offset; if (type == SQUASHFS_LSYMLINK_TYPE) { __le32 xattr; err = squashfs_read_metadata(sb, NULL, &block, &offset, inode->i_size); if (err < 0) goto failed_read; err = squashfs_read_metadata(sb, &xattr, &block, &offset, sizeof(xattr)); if (err < 0) goto failed_read; xattr_id = le32_to_cpu(xattr); } TRACE("Symbolic link inode %x:%x, start_block %llx, offset " "%x\n", SQUASHFS_INODE_BLK(ino), offset, block, offset); break; } case SQUASHFS_BLKDEV_TYPE: case SQUASHFS_CHRDEV_TYPE: { struct squashfs_dev_inode *sqsh_ino = &squashfs_ino.dev; unsigned int rdev; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; if (type == SQUASHFS_CHRDEV_TYPE) inode->i_mode |= S_IFCHR; else inode->i_mode |= S_IFBLK; set_nlink(inode, 
le32_to_cpu(sqsh_ino->nlink)); rdev = le32_to_cpu(sqsh_ino->rdev); init_special_inode(inode, inode->i_mode, new_decode_dev(rdev)); TRACE("Device inode %x:%x, rdev %x\n", SQUASHFS_INODE_BLK(ino), offset, rdev); break; } case SQUASHFS_LBLKDEV_TYPE: case SQUASHFS_LCHRDEV_TYPE: { struct squashfs_ldev_inode *sqsh_ino = &squashfs_ino.ldev; unsigned int rdev; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; if (type == SQUASHFS_LCHRDEV_TYPE) inode->i_mode |= S_IFCHR; else inode->i_mode |= S_IFBLK; xattr_id = le32_to_cpu(sqsh_ino->xattr); inode->i_op = &squashfs_inode_ops; set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); rdev = le32_to_cpu(sqsh_ino->rdev); init_special_inode(inode, inode->i_mode, new_decode_dev(rdev)); TRACE("Device inode %x:%x, rdev %x\n", SQUASHFS_INODE_BLK(ino), offset, rdev); break; } case SQUASHFS_FIFO_TYPE: case SQUASHFS_SOCKET_TYPE: { struct squashfs_ipc_inode *sqsh_ino = &squashfs_ino.ipc; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; if (type == SQUASHFS_FIFO_TYPE) inode->i_mode |= S_IFIFO; else inode->i_mode |= S_IFSOCK; set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); init_special_inode(inode, inode->i_mode, 0); break; } case SQUASHFS_LFIFO_TYPE: case SQUASHFS_LSOCKET_TYPE: { struct squashfs_lipc_inode *sqsh_ino = &squashfs_ino.lipc; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; if (type == SQUASHFS_LFIFO_TYPE) inode->i_mode |= S_IFIFO; else inode->i_mode |= S_IFSOCK; xattr_id = le32_to_cpu(sqsh_ino->xattr); inode->i_op = &squashfs_inode_ops; set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); init_special_inode(inode, inode->i_mode, 0); break; } default: ERROR("Unknown inode type %d in squashfs_iget!\n", type); return -EINVAL; } if (xattr_id != SQUASHFS_INVALID_XATTR && msblk->xattr_id_table) { err = squashfs_xattr_lookup(sb, xattr_id, 
&squashfs_i(inode)->xattr_count, &squashfs_i(inode)->xattr_size, &squashfs_i(inode)->xattr); if (err < 0) goto failed_read; inode->i_blocks += ((squashfs_i(inode)->xattr_size - 1) >> 9) + 1; } else squashfs_i(inode)->xattr_count = 0; return 0; failed_read: ERROR("Unable to read inode 0x%llx\n", ino); return err; } const struct inode_operations squashfs_inode_ops = { .getxattr = generic_getxattr, .listxattr = squashfs_listxattr };
gpl-2.0
jongwonk/s5pv210_linux_kernel
drivers/media/dvb/pt1/va1j5jf8007s.c
5029
15464
/* * ISDB-S driver for VA1J5JF8007/VA1J5JF8011 * * Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info> * * based on pt1dvr - http://pt1dvr.sourceforge.jp/ * by Tomoaki Ishikawa <tomy@users.sourceforge.jp> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/i2c.h> #include "dvb_frontend.h" #include "va1j5jf8007s.h" enum va1j5jf8007s_tune_state { VA1J5JF8007S_IDLE, VA1J5JF8007S_SET_FREQUENCY_1, VA1J5JF8007S_SET_FREQUENCY_2, VA1J5JF8007S_SET_FREQUENCY_3, VA1J5JF8007S_CHECK_FREQUENCY, VA1J5JF8007S_SET_MODULATION, VA1J5JF8007S_CHECK_MODULATION, VA1J5JF8007S_SET_TS_ID, VA1J5JF8007S_CHECK_TS_ID, VA1J5JF8007S_TRACK, }; struct va1j5jf8007s_state { const struct va1j5jf8007s_config *config; struct i2c_adapter *adap; struct dvb_frontend fe; enum va1j5jf8007s_tune_state tune_state; }; static int va1j5jf8007s_read_snr(struct dvb_frontend *fe, u16 *snr) { struct va1j5jf8007s_state *state; u8 addr; int i; u8 write_buf[1], read_buf[1]; struct i2c_msg msgs[2]; s32 word, x1, x2, x3, x4, x5, y; state = fe->demodulator_priv; addr = state->config->demod_address; word = 0; for (i = 0; i < 2; i++) { write_buf[0] = 0xbc + i; msgs[0].addr = addr; msgs[0].flags = 0; msgs[0].len = sizeof(write_buf); msgs[0].buf = write_buf; msgs[1].addr = addr; msgs[1].flags = I2C_M_RD; 
msgs[1].len = sizeof(read_buf); msgs[1].buf = read_buf; if (i2c_transfer(state->adap, msgs, 2) != 2) return -EREMOTEIO; word <<= 8; word |= read_buf[0]; } word -= 3000; if (word < 0) word = 0; x1 = int_sqrt(word << 16) * ((15625ll << 21) / 1000000); x2 = (s64)x1 * x1 >> 31; x3 = (s64)x2 * x1 >> 31; x4 = (s64)x2 * x2 >> 31; x5 = (s64)x4 * x1 >> 31; y = (58857ll << 23) / 1000; y -= (s64)x1 * ((89565ll << 24) / 1000) >> 30; y += (s64)x2 * ((88977ll << 24) / 1000) >> 28; y -= (s64)x3 * ((50259ll << 25) / 1000) >> 27; y += (s64)x4 * ((14341ll << 27) / 1000) >> 27; y -= (s64)x5 * ((16346ll << 30) / 10000) >> 28; *snr = y < 0 ? 0 : y >> 15; return 0; } static int va1j5jf8007s_get_frontend_algo(struct dvb_frontend *fe) { return DVBFE_ALGO_HW; } static int va1j5jf8007s_read_status(struct dvb_frontend *fe, fe_status_t *status) { struct va1j5jf8007s_state *state; state = fe->demodulator_priv; switch (state->tune_state) { case VA1J5JF8007S_IDLE: case VA1J5JF8007S_SET_FREQUENCY_1: case VA1J5JF8007S_SET_FREQUENCY_2: case VA1J5JF8007S_SET_FREQUENCY_3: case VA1J5JF8007S_CHECK_FREQUENCY: *status = 0; return 0; case VA1J5JF8007S_SET_MODULATION: case VA1J5JF8007S_CHECK_MODULATION: *status |= FE_HAS_SIGNAL; return 0; case VA1J5JF8007S_SET_TS_ID: case VA1J5JF8007S_CHECK_TS_ID: *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER; return 0; case VA1J5JF8007S_TRACK: *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_LOCK; return 0; } BUG(); } struct va1j5jf8007s_cb_map { u32 frequency; u8 cb; }; static const struct va1j5jf8007s_cb_map va1j5jf8007s_cb_maps[] = { { 986000, 0xb2 }, { 1072000, 0xd2 }, { 1154000, 0xe2 }, { 1291000, 0x20 }, { 1447000, 0x40 }, { 1615000, 0x60 }, { 1791000, 0x80 }, { 1972000, 0xa0 }, }; static u8 va1j5jf8007s_lookup_cb(u32 frequency) { int i; const struct va1j5jf8007s_cb_map *map; for (i = 0; i < ARRAY_SIZE(va1j5jf8007s_cb_maps); i++) { map = &va1j5jf8007s_cb_maps[i]; if (frequency < map->frequency) return map->cb; } return 0xc0; } static int 
va1j5jf8007s_set_frequency_1(struct va1j5jf8007s_state *state) { u32 frequency; u16 word; u8 buf[6]; struct i2c_msg msg; frequency = state->fe.dtv_property_cache.frequency; word = (frequency + 500) / 1000; if (frequency < 1072000) word = (word << 1 & ~0x1f) | (word & 0x0f); buf[0] = 0xfe; buf[1] = 0xc0; buf[2] = 0x40 | word >> 8; buf[3] = word; buf[4] = 0xe0; buf[5] = va1j5jf8007s_lookup_cb(frequency); msg.addr = state->config->demod_address; msg.flags = 0; msg.len = sizeof(buf); msg.buf = buf; if (i2c_transfer(state->adap, &msg, 1) != 1) return -EREMOTEIO; return 0; } static int va1j5jf8007s_set_frequency_2(struct va1j5jf8007s_state *state) { u8 buf[3]; struct i2c_msg msg; buf[0] = 0xfe; buf[1] = 0xc0; buf[2] = 0xe4; msg.addr = state->config->demod_address; msg.flags = 0; msg.len = sizeof(buf); msg.buf = buf; if (i2c_transfer(state->adap, &msg, 1) != 1) return -EREMOTEIO; return 0; } static int va1j5jf8007s_set_frequency_3(struct va1j5jf8007s_state *state) { u32 frequency; u8 buf[4]; struct i2c_msg msg; frequency = state->fe.dtv_property_cache.frequency; buf[0] = 0xfe; buf[1] = 0xc0; buf[2] = 0xf4; buf[3] = va1j5jf8007s_lookup_cb(frequency) | 0x4; msg.addr = state->config->demod_address; msg.flags = 0; msg.len = sizeof(buf); msg.buf = buf; if (i2c_transfer(state->adap, &msg, 1) != 1) return -EREMOTEIO; return 0; } static int va1j5jf8007s_check_frequency(struct va1j5jf8007s_state *state, int *lock) { u8 addr; u8 write_buf[2], read_buf[1]; struct i2c_msg msgs[2]; addr = state->config->demod_address; write_buf[0] = 0xfe; write_buf[1] = 0xc1; msgs[0].addr = addr; msgs[0].flags = 0; msgs[0].len = sizeof(write_buf); msgs[0].buf = write_buf; msgs[1].addr = addr; msgs[1].flags = I2C_M_RD; msgs[1].len = sizeof(read_buf); msgs[1].buf = read_buf; if (i2c_transfer(state->adap, msgs, 2) != 2) return -EREMOTEIO; *lock = read_buf[0] & 0x40; return 0; } static int va1j5jf8007s_set_modulation(struct va1j5jf8007s_state *state) { u8 buf[2]; struct i2c_msg msg; buf[0] = 0x03; buf[1] 
= 0x01; msg.addr = state->config->demod_address; msg.flags = 0; msg.len = sizeof(buf); msg.buf = buf; if (i2c_transfer(state->adap, &msg, 1) != 1) return -EREMOTEIO; return 0; } static int va1j5jf8007s_check_modulation(struct va1j5jf8007s_state *state, int *lock) { u8 addr; u8 write_buf[1], read_buf[1]; struct i2c_msg msgs[2]; addr = state->config->demod_address; write_buf[0] = 0xc3; msgs[0].addr = addr; msgs[0].flags = 0; msgs[0].len = sizeof(write_buf); msgs[0].buf = write_buf; msgs[1].addr = addr; msgs[1].flags = I2C_M_RD; msgs[1].len = sizeof(read_buf); msgs[1].buf = read_buf; if (i2c_transfer(state->adap, msgs, 2) != 2) return -EREMOTEIO; *lock = !(read_buf[0] & 0x10); return 0; } static int va1j5jf8007s_set_ts_id(struct va1j5jf8007s_state *state) { u32 ts_id; u8 buf[3]; struct i2c_msg msg; ts_id = state->fe.dtv_property_cache.isdbs_ts_id; if (!ts_id) return 0; buf[0] = 0x8f; buf[1] = ts_id >> 8; buf[2] = ts_id; msg.addr = state->config->demod_address; msg.flags = 0; msg.len = sizeof(buf); msg.buf = buf; if (i2c_transfer(state->adap, &msg, 1) != 1) return -EREMOTEIO; return 0; } static int va1j5jf8007s_check_ts_id(struct va1j5jf8007s_state *state, int *lock) { u8 addr; u8 write_buf[1], read_buf[2]; struct i2c_msg msgs[2]; u32 ts_id; ts_id = state->fe.dtv_property_cache.isdbs_ts_id; if (!ts_id) { *lock = 1; return 0; } addr = state->config->demod_address; write_buf[0] = 0xe6; msgs[0].addr = addr; msgs[0].flags = 0; msgs[0].len = sizeof(write_buf); msgs[0].buf = write_buf; msgs[1].addr = addr; msgs[1].flags = I2C_M_RD; msgs[1].len = sizeof(read_buf); msgs[1].buf = read_buf; if (i2c_transfer(state->adap, msgs, 2) != 2) return -EREMOTEIO; *lock = (read_buf[0] << 8 | read_buf[1]) == ts_id; return 0; } static int va1j5jf8007s_tune(struct dvb_frontend *fe, bool re_tune, unsigned int mode_flags, unsigned int *delay, fe_status_t *status) { struct va1j5jf8007s_state *state; int ret; int lock = 0; state = fe->demodulator_priv; if (re_tune) state->tune_state = 
VA1J5JF8007S_SET_FREQUENCY_1; switch (state->tune_state) { case VA1J5JF8007S_IDLE: *delay = 3 * HZ; *status = 0; return 0; case VA1J5JF8007S_SET_FREQUENCY_1: ret = va1j5jf8007s_set_frequency_1(state); if (ret < 0) return ret; state->tune_state = VA1J5JF8007S_SET_FREQUENCY_2; *delay = 0; *status = 0; return 0; case VA1J5JF8007S_SET_FREQUENCY_2: ret = va1j5jf8007s_set_frequency_2(state); if (ret < 0) return ret; state->tune_state = VA1J5JF8007S_SET_FREQUENCY_3; *delay = (HZ + 99) / 100; *status = 0; return 0; case VA1J5JF8007S_SET_FREQUENCY_3: ret = va1j5jf8007s_set_frequency_3(state); if (ret < 0) return ret; state->tune_state = VA1J5JF8007S_CHECK_FREQUENCY; *delay = 0; *status = 0; return 0; case VA1J5JF8007S_CHECK_FREQUENCY: ret = va1j5jf8007s_check_frequency(state, &lock); if (ret < 0) return ret; if (!lock) { *delay = (HZ + 999) / 1000; *status = 0; return 0; } state->tune_state = VA1J5JF8007S_SET_MODULATION; *delay = 0; *status = FE_HAS_SIGNAL; return 0; case VA1J5JF8007S_SET_MODULATION: ret = va1j5jf8007s_set_modulation(state); if (ret < 0) return ret; state->tune_state = VA1J5JF8007S_CHECK_MODULATION; *delay = 0; *status = FE_HAS_SIGNAL; return 0; case VA1J5JF8007S_CHECK_MODULATION: ret = va1j5jf8007s_check_modulation(state, &lock); if (ret < 0) return ret; if (!lock) { *delay = (HZ + 49) / 50; *status = FE_HAS_SIGNAL; return 0; } state->tune_state = VA1J5JF8007S_SET_TS_ID; *delay = 0; *status = FE_HAS_SIGNAL | FE_HAS_CARRIER; return 0; case VA1J5JF8007S_SET_TS_ID: ret = va1j5jf8007s_set_ts_id(state); if (ret < 0) return ret; state->tune_state = VA1J5JF8007S_CHECK_TS_ID; return 0; case VA1J5JF8007S_CHECK_TS_ID: ret = va1j5jf8007s_check_ts_id(state, &lock); if (ret < 0) return ret; if (!lock) { *delay = (HZ + 99) / 100; *status = FE_HAS_SIGNAL | FE_HAS_CARRIER; return 0; } state->tune_state = VA1J5JF8007S_TRACK; /* fall through */ case VA1J5JF8007S_TRACK: *delay = 3 * HZ; *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_LOCK; return 0; } BUG(); } static int 
va1j5jf8007s_init_frequency(struct va1j5jf8007s_state *state) { u8 buf[4]; struct i2c_msg msg; buf[0] = 0xfe; buf[1] = 0xc0; buf[2] = 0xf0; buf[3] = 0x04; msg.addr = state->config->demod_address; msg.flags = 0; msg.len = sizeof(buf); msg.buf = buf; if (i2c_transfer(state->adap, &msg, 1) != 1) return -EREMOTEIO; return 0; } static int va1j5jf8007s_set_sleep(struct va1j5jf8007s_state *state, int sleep) { u8 buf[2]; struct i2c_msg msg; buf[0] = 0x17; buf[1] = sleep ? 0x01 : 0x00; msg.addr = state->config->demod_address; msg.flags = 0; msg.len = sizeof(buf); msg.buf = buf; if (i2c_transfer(state->adap, &msg, 1) != 1) return -EREMOTEIO; return 0; } static int va1j5jf8007s_sleep(struct dvb_frontend *fe) { struct va1j5jf8007s_state *state; int ret; state = fe->demodulator_priv; ret = va1j5jf8007s_init_frequency(state); if (ret < 0) return ret; return va1j5jf8007s_set_sleep(state, 1); } static int va1j5jf8007s_init(struct dvb_frontend *fe) { struct va1j5jf8007s_state *state; state = fe->demodulator_priv; state->tune_state = VA1J5JF8007S_IDLE; return va1j5jf8007s_set_sleep(state, 0); } static void va1j5jf8007s_release(struct dvb_frontend *fe) { struct va1j5jf8007s_state *state; state = fe->demodulator_priv; kfree(state); } static struct dvb_frontend_ops va1j5jf8007s_ops = { .delsys = { SYS_ISDBS }, .info = { .name = "VA1J5JF8007/VA1J5JF8011 ISDB-S", .frequency_min = 950000, .frequency_max = 2150000, .frequency_stepsize = 1000, .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_AUTO | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO, }, .read_snr = va1j5jf8007s_read_snr, .get_frontend_algo = va1j5jf8007s_get_frontend_algo, .read_status = va1j5jf8007s_read_status, .tune = va1j5jf8007s_tune, .sleep = va1j5jf8007s_sleep, .init = va1j5jf8007s_init, .release = va1j5jf8007s_release, }; static int va1j5jf8007s_prepare_1(struct va1j5jf8007s_state *state) { u8 addr; u8 write_buf[1], read_buf[1]; struct i2c_msg msgs[2]; addr = 
state->config->demod_address; write_buf[0] = 0x07; msgs[0].addr = addr; msgs[0].flags = 0; msgs[0].len = sizeof(write_buf); msgs[0].buf = write_buf; msgs[1].addr = addr; msgs[1].flags = I2C_M_RD; msgs[1].len = sizeof(read_buf); msgs[1].buf = read_buf; if (i2c_transfer(state->adap, msgs, 2) != 2) return -EREMOTEIO; if (read_buf[0] != 0x41) return -EIO; return 0; } static const u8 va1j5jf8007s_20mhz_prepare_bufs[][2] = { {0x04, 0x02}, {0x0d, 0x55}, {0x11, 0x40}, {0x13, 0x80}, {0x17, 0x01}, {0x1c, 0x0a}, {0x1d, 0xaa}, {0x1e, 0x20}, {0x1f, 0x88}, {0x51, 0xb0}, {0x52, 0x89}, {0x53, 0xb3}, {0x5a, 0x2d}, {0x5b, 0xd3}, {0x85, 0x69}, {0x87, 0x04}, {0x8e, 0x02}, {0xa3, 0xf7}, {0xa5, 0xc0}, }; static const u8 va1j5jf8007s_25mhz_prepare_bufs[][2] = { {0x04, 0x02}, {0x11, 0x40}, {0x13, 0x80}, {0x17, 0x01}, {0x1c, 0x0a}, {0x1d, 0xaa}, {0x1e, 0x20}, {0x1f, 0x88}, {0x51, 0xb0}, {0x52, 0x89}, {0x53, 0xb3}, {0x5a, 0x2d}, {0x5b, 0xd3}, {0x85, 0x69}, {0x87, 0x04}, {0x8e, 0x26}, {0xa3, 0xf7}, {0xa5, 0xc0}, }; static int va1j5jf8007s_prepare_2(struct va1j5jf8007s_state *state) { const u8 (*bufs)[2]; int size; u8 addr; u8 buf[2]; struct i2c_msg msg; int i; switch (state->config->frequency) { case VA1J5JF8007S_20MHZ: bufs = va1j5jf8007s_20mhz_prepare_bufs; size = ARRAY_SIZE(va1j5jf8007s_20mhz_prepare_bufs); break; case VA1J5JF8007S_25MHZ: bufs = va1j5jf8007s_25mhz_prepare_bufs; size = ARRAY_SIZE(va1j5jf8007s_25mhz_prepare_bufs); break; default: return -EINVAL; } addr = state->config->demod_address; msg.addr = addr; msg.flags = 0; msg.len = 2; msg.buf = buf; for (i = 0; i < size; i++) { memcpy(buf, bufs[i], sizeof(buf)); if (i2c_transfer(state->adap, &msg, 1) != 1) return -EREMOTEIO; } return 0; } /* must be called after va1j5jf8007t_attach */ int va1j5jf8007s_prepare(struct dvb_frontend *fe) { struct va1j5jf8007s_state *state; int ret; state = fe->demodulator_priv; ret = va1j5jf8007s_prepare_1(state); if (ret < 0) return ret; ret = va1j5jf8007s_prepare_2(state); if (ret < 0) return ret; 
return va1j5jf8007s_init_frequency(state); } struct dvb_frontend * va1j5jf8007s_attach(const struct va1j5jf8007s_config *config, struct i2c_adapter *adap) { struct va1j5jf8007s_state *state; struct dvb_frontend *fe; u8 buf[2]; struct i2c_msg msg; state = kzalloc(sizeof(struct va1j5jf8007s_state), GFP_KERNEL); if (!state) return NULL; state->config = config; state->adap = adap; fe = &state->fe; memcpy(&fe->ops, &va1j5jf8007s_ops, sizeof(struct dvb_frontend_ops)); fe->demodulator_priv = state; buf[0] = 0x01; buf[1] = 0x80; msg.addr = state->config->demod_address; msg.flags = 0; msg.len = sizeof(buf); msg.buf = buf; if (i2c_transfer(state->adap, &msg, 1) != 1) { kfree(state); return NULL; } return fe; }
gpl-2.0
schqiushui/Kernel_Lollipop_GPE5.1_M8ACE
arch/alpha/kernel/asm-offsets.c
13733
1456
/* * Generate definitions needed by assembly language modules. * This code generates raw asm output which is post-processed to extract * and format the required data. */ #include <linux/types.h> #include <linux/stddef.h> #include <linux/sched.h> #include <linux/ptrace.h> #include <linux/kbuild.h> #include <asm/io.h> void foo(void) { DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); BLANK(); DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked)); DEFINE(TASK_CRED, offsetof(struct task_struct, cred)); DEFINE(TASK_REAL_PARENT, offsetof(struct task_struct, real_parent)); DEFINE(TASK_GROUP_LEADER, offsetof(struct task_struct, group_leader)); DEFINE(TASK_TGID, offsetof(struct task_struct, tgid)); BLANK(); DEFINE(CRED_UID, offsetof(struct cred, uid)); DEFINE(CRED_EUID, offsetof(struct cred, euid)); DEFINE(CRED_GID, offsetof(struct cred, gid)); DEFINE(CRED_EGID, offsetof(struct cred, egid)); BLANK(); DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs)); DEFINE(PT_PTRACED, PT_PTRACED); DEFINE(CLONE_VM, CLONE_VM); DEFINE(CLONE_UNTRACED, CLONE_UNTRACED); DEFINE(SIGCHLD, SIGCHLD); BLANK(); DEFINE(HAE_CACHE, offsetof(struct alpha_machine_vector, hae_cache)); DEFINE(HAE_REG, offsetof(struct alpha_machine_vector, hae_register)); }
gpl-2.0
geeknik/StupidFast
drivers/misc/sgi-gru/grufile.c
166
12510
/* * SN Platform GRU Driver * * FILE OPERATIONS & DRIVER INITIALIZATION * * This file supports the user system call for file open, close, mmap, etc. * This also incudes the driver initialization code. * * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/io.h> #include <linux/smp_lock.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/miscdevice.h> #include <linux/interrupt.h> #include <linux/proc_fs.h> #include <linux/uaccess.h> #include "gru.h" #include "grulib.h" #include "grutables.h" #if defined CONFIG_X86_64 #include <asm/genapic.h> #include <asm/irq.h> #define IS_UV() is_uv_system() #elif defined CONFIG_IA64 #include <asm/system.h> #include <asm/sn/simulator.h> /* temp support for running on hardware simulator */ #define IS_UV() IS_MEDUSA() || ia64_platform_is("uv") #else #define IS_UV() 0 #endif #include <asm/uv/uv_hub.h> #include <asm/uv/uv_mmrs.h> struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly; unsigned long gru_start_paddr, gru_end_paddr __read_mostly; struct gru_stats_s gru_stats; /* Guaranteed user available resources on each node */ static int max_user_cbrs, 
max_user_dsr_bytes; static struct file_operations gru_fops; static struct miscdevice gru_miscdev; /* * gru_vma_close * * Called when unmapping a device mapping. Frees all gru resources * and tables belonging to the vma. */ static void gru_vma_close(struct vm_area_struct *vma) { struct gru_vma_data *vdata; struct gru_thread_state *gts; struct list_head *entry, *next; if (!vma->vm_private_data) return; vdata = vma->vm_private_data; vma->vm_private_data = NULL; gru_dbg(grudev, "vma %p, file %p, vdata %p\n", vma, vma->vm_file, vdata); list_for_each_safe(entry, next, &vdata->vd_head) { gts = list_entry(entry, struct gru_thread_state, ts_next); list_del(&gts->ts_next); mutex_lock(&gts->ts_ctxlock); if (gts->ts_gru) gru_unload_context(gts, 0); mutex_unlock(&gts->ts_ctxlock); gts_drop(gts); } kfree(vdata); STAT(vdata_free); } /* * gru_file_mmap * * Called when mmaping the device. Initializes the vma with a fault handler * and private data structure necessary to allocate, track, and free the * underlying pages. 
*/ static int gru_file_mmap(struct file *file, struct vm_area_struct *vma) { if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) != (VM_SHARED | VM_WRITE)) return -EPERM; if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) || vma->vm_end & (GRU_GSEG_PAGESIZE - 1)) return -EINVAL; vma->vm_flags |= (VM_IO | VM_DONTCOPY | VM_LOCKED | VM_DONTEXPAND | VM_PFNMAP | VM_RESERVED); vma->vm_page_prot = PAGE_SHARED; vma->vm_ops = &gru_vm_ops; vma->vm_private_data = gru_alloc_vma_data(vma, 0); if (!vma->vm_private_data) return -ENOMEM; gru_dbg(grudev, "file %p, vaddr 0x%lx, vma %p, vdata %p\n", file, vma->vm_start, vma, vma->vm_private_data); return 0; } /* * Create a new GRU context */ static int gru_create_new_context(unsigned long arg) { struct gru_create_context_req req; struct vm_area_struct *vma; struct gru_vma_data *vdata; int ret = -EINVAL; if (copy_from_user(&req, (void __user *)arg, sizeof(req))) return -EFAULT; if (req.data_segment_bytes == 0 || req.data_segment_bytes > max_user_dsr_bytes) return -EINVAL; if (!req.control_blocks || !req.maximum_thread_count || req.control_blocks > max_user_cbrs) return -EINVAL; if (!(req.options & GRU_OPT_MISS_MASK)) req.options |= GRU_OPT_MISS_FMM_INTR; down_write(&current->mm->mmap_sem); vma = gru_find_vma(req.gseg); if (vma) { vdata = vma->vm_private_data; vdata->vd_user_options = req.options; vdata->vd_dsr_au_count = GRU_DS_BYTES_TO_AU(req.data_segment_bytes); vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks); ret = 0; } up_write(&current->mm->mmap_sem); return ret; } /* * Get GRU configuration info (temp - for emulator testing) */ static long gru_get_config_info(unsigned long arg) { struct gru_config_info info; int nodesperblade; if (num_online_nodes() > 1 && (uv_node_to_blade_id(1) == uv_node_to_blade_id(0))) nodesperblade = 2; else nodesperblade = 1; info.cpus = num_online_cpus(); info.nodes = num_online_nodes(); info.blades = info.nodes / nodesperblade; info.chiplets = GRU_CHIPLETS_PER_BLADE * info.blades; if 
(copy_to_user((void __user *)arg, &info, sizeof(info))) return -EFAULT; return 0; } /* * Get GRU chiplet status */ static long gru_get_chiplet_status(unsigned long arg) { struct gru_state *gru; struct gru_chiplet_info info; if (copy_from_user(&info, (void __user *)arg, sizeof(info))) return -EFAULT; if (info.node == -1) info.node = numa_node_id(); if (info.node >= num_possible_nodes() || info.chiplet >= GRU_CHIPLETS_PER_HUB || info.node < 0 || info.chiplet < 0) return -EINVAL; info.blade = uv_node_to_blade_id(info.node); gru = get_gru(info.blade, info.chiplet); info.total_dsr_bytes = GRU_NUM_DSR_BYTES; info.total_cbr = GRU_NUM_CB; info.total_user_dsr_bytes = GRU_NUM_DSR_BYTES - gru->gs_reserved_dsr_bytes; info.total_user_cbr = GRU_NUM_CB - gru->gs_reserved_cbrs; info.free_user_dsr_bytes = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES; info.free_user_cbr = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE; if (copy_to_user((void __user *)arg, &info, sizeof(info))) return -EFAULT; return 0; } /* * gru_file_unlocked_ioctl * * Called to update file attributes via IOCTL calls. */ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req, unsigned long arg) { int err = -EBADRQC; gru_dbg(grudev, "file %p\n", file); switch (req) { case GRU_CREATE_CONTEXT: err = gru_create_new_context(arg); break; case GRU_SET_TASK_SLICE: err = gru_set_task_slice(arg); break; case GRU_USER_GET_EXCEPTION_DETAIL: err = gru_get_exception_detail(arg); break; case GRU_USER_UNLOAD_CONTEXT: err = gru_user_unload_context(arg); break; case GRU_GET_CHIPLET_STATUS: err = gru_get_chiplet_status(arg); break; case GRU_USER_FLUSH_TLB: err = gru_user_flush_tlb(arg); break; case GRU_USER_CALL_OS: err = gru_handle_user_call_os(arg); break; case GRU_GET_CONFIG_INFO: err = gru_get_config_info(arg); break; } return err; } /* * Called at init time to build tables for all GRUs that are present in the * system. 
*/ static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr, void *vaddr, int nid, int bid, int grunum) { spin_lock_init(&gru->gs_lock); spin_lock_init(&gru->gs_asid_lock); gru->gs_gru_base_paddr = paddr; gru->gs_gru_base_vaddr = vaddr; gru->gs_gid = bid * GRU_CHIPLETS_PER_BLADE + grunum; gru->gs_blade = gru_base[bid]; gru->gs_blade_id = bid; gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1; gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1; gru_tgh_flush_init(gru); gru_dbg(grudev, "bid %d, nid %d, gru %x, vaddr %p (0x%lx)\n", bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr, gru->gs_gru_base_paddr); gru_kservices_init(gru); } static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr) { int pnode, nid, bid, chip; int cbrs, dsrbytes, n; int order = get_order(sizeof(struct gru_blade_state)); struct page *page; struct gru_state *gru; unsigned long paddr; void *vaddr; max_user_cbrs = GRU_NUM_CB; max_user_dsr_bytes = GRU_NUM_DSR_BYTES; for_each_online_node(nid) { bid = uv_node_to_blade_id(nid); pnode = uv_node_to_pnode(nid); if (gru_base[bid]) continue; page = alloc_pages_node(nid, GFP_KERNEL, order); if (!page) goto fail; gru_base[bid] = page_address(page); memset(gru_base[bid], 0, sizeof(struct gru_blade_state)); gru_base[bid]->bs_lru_gru = &gru_base[bid]->bs_grus[0]; spin_lock_init(&gru_base[bid]->bs_lock); dsrbytes = 0; cbrs = 0; for (gru = gru_base[bid]->bs_grus, chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++, gru++) { paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip); vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip); gru_init_chiplet(gru, paddr, vaddr, bid, nid, chip); n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE; cbrs = max(cbrs, n); n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES; dsrbytes = max(dsrbytes, n); } max_user_cbrs = min(max_user_cbrs, cbrs); max_user_dsr_bytes = min(max_user_dsr_bytes, dsrbytes); } return 0; fail: for (nid--; nid >= 0; nid--) free_pages((unsigned long)gru_base[nid], order); 
return -ENOMEM; } #ifdef CONFIG_IA64 static int get_base_irq(void) { return IRQ_GRU; } #elif defined CONFIG_X86_64 static void noop(unsigned int irq) { } static struct irq_chip gru_chip = { .name = "gru", .mask = noop, .unmask = noop, .ack = noop, }; static int get_base_irq(void) { set_irq_chip(IRQ_GRU, &gru_chip); set_irq_chip(IRQ_GRU + 1, &gru_chip); return IRQ_GRU; } #endif /* * gru_init * * Called at boot or module load time to initialize the GRUs. */ static int __init gru_init(void) { int ret, irq, chip; char id[10]; void *gru_start_vaddr; if (!IS_UV()) return 0; #if defined CONFIG_IA64 gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */ #else gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) & 0x7fffffffffffUL; #endif gru_start_vaddr = __va(gru_start_paddr); gru_end_paddr = gru_start_paddr + MAX_NUMNODES * GRU_SIZE; printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n", gru_start_paddr, gru_end_paddr); irq = get_base_irq(); for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) { ret = request_irq(irq + chip, gru_intr, 0, id, NULL); /* TODO: fix irq handling on x86. 
For now ignore failures because * interrupts are not required & not yet fully supported */ if (ret) { printk("!!!WARNING: GRU ignoring request failure!!!\n"); ret = 0; } if (ret) { printk(KERN_ERR "%s: request_irq failed\n", GRU_DRIVER_ID_STR); goto exit1; } } ret = misc_register(&gru_miscdev); if (ret) { printk(KERN_ERR "%s: misc_register failed\n", GRU_DRIVER_ID_STR); goto exit1; } ret = gru_proc_init(); if (ret) { printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR); goto exit2; } ret = gru_init_tables(gru_start_paddr, gru_start_vaddr); if (ret) { printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR); goto exit3; } printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR, GRU_DRIVER_VERSION_STR); return 0; exit3: gru_proc_exit(); exit2: misc_deregister(&gru_miscdev); exit1: for (--chip; chip >= 0; chip--) free_irq(irq + chip, NULL); return ret; } static void __exit gru_exit(void) { int i, bid; int order = get_order(sizeof(struct gru_state) * GRU_CHIPLETS_PER_BLADE); if (!IS_UV()) return; for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++) free_irq(IRQ_GRU + i, NULL); for (bid = 0; bid < GRU_MAX_BLADES; bid++) free_pages((unsigned long)gru_base[bid], order); misc_deregister(&gru_miscdev); gru_proc_exit(); } static struct file_operations gru_fops = { .owner = THIS_MODULE, .unlocked_ioctl = gru_file_unlocked_ioctl, .mmap = gru_file_mmap, }; static struct miscdevice gru_miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = "gru", .fops = &gru_fops, }; struct vm_operations_struct gru_vm_ops = { .close = gru_vma_close, .fault = gru_fault, }; fs_initcall(gru_init); module_exit(gru_exit); module_param(gru_options, ulong, 0644); MODULE_PARM_DESC(gru_options, "Various debug options"); MODULE_AUTHOR("Silicon Graphics, Inc."); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(GRU_DRIVER_ID_STR GRU_DRIVER_VERSION_STR); MODULE_VERSION(GRU_DRIVER_VERSION_STR);
gpl-2.0
igou/gcc
gcc/testsuite/gcc.dg/vshift-3.c
166
2201
/* { dg-do run } */ /* { dg-options "-O3" } */ #include <stdlib.h> #define N 64 #ifndef TYPE1 #define TYPE1 int #define TYPE2 long long #endif signed TYPE1 a[N], b, g[N]; unsigned TYPE1 c[N], h[N]; signed TYPE2 d[N], e, j[N]; unsigned TYPE2 f[N], k[N]; #ifndef S #define S(x) x #endif __attribute__((noinline)) void f1 (void) { int i; for (i = 0; i < N; i++) g[i] = a[i] << S (b); } __attribute__((noinline)) void f2 (void) { int i; for (i = 0; i < N; i++) g[i] = a[i] >> S (b); } __attribute__((noinline)) void f3 (void) { int i; for (i = 0; i < N; i++) h[i] = c[i] >> S (b); } __attribute__((noinline)) void f4 (void) { int i; for (i = 0; i < N; i++) j[i] = d[i] << S (e); } __attribute__((noinline)) void f5 (void) { int i; for (i = 0; i < N; i++) j[i] = d[i] >> S (e); } __attribute__((noinline)) void f6 (void) { int i; for (i = 0; i < N; i++) k[i] = f[i] >> S (e); } __attribute__((noinline)) void f7 (void) { int i; for (i = 0; i < N; i++) j[i] = d[i] << S (b); } __attribute__((noinline)) void f8 (void) { int i; for (i = 0; i < N; i++) j[i] = d[i] >> S (b); } __attribute__((noinline)) void f9 (void) { int i; for (i = 0; i < N; i++) k[i] = f[i] >> S (b); } int main () { int i; b = 7; e = 12; for (i = 0; i < N; i++) { asm (""); c[i] = (rand () << 1) | (rand () & 1); a[i] = c[i]; d[i] = (rand () << 1) | (rand () & 1); d[i] |= (unsigned long long) c[i] << 32; f[i] = d[i]; } f1 (); f3 (); f4 (); f6 (); for (i = 0; i < N; i++) if (g[i] != (signed TYPE1) (a[i] << S (b)) || h[i] != (unsigned TYPE1) (c[i] >> S (b)) || j[i] != (signed TYPE2) (d[i] << S (e)) || k[i] != (unsigned TYPE2) (f[i] >> S (e))) abort (); f2 (); f5 (); f9 (); for (i = 0; i < N; i++) if (g[i] != (signed TYPE1) (a[i] >> S (b)) || j[i] != (signed TYPE2) (d[i] >> S (e)) || k[i] != (unsigned TYPE2) (f[i] >> S (b))) abort (); f7 (); for (i = 0; i < N; i++) if (j[i] != (signed TYPE2) (d[i] << S (b))) abort (); f8 (); for (i = 0; i < N; i++) if (j[i] != (signed TYPE2) (d[i] >> S (b))) abort (); return 0; }
gpl-2.0
xsacha/SymbianGCC
gcc/testsuite/gcc.c-torture/compile/sync-3.c
166
7006
/* { dg-message "note: '__sync_fetch_and_nand' changed semantics in GCC 4.4" "" { target *-*-* } 0 } */ /* { dg-options "-ffat-lto-objects" } */ /* Validate that each of the __sync builtins compiles. This won't necessarily link, since the target might not support the builtin, so this may result in external library calls. */ void test_op_ignore (void) { signed char sc[2]; unsigned char uc[2]; signed short ss[2]; unsigned short us[2]; signed int si[2]; unsigned int ui[2]; signed long sl[2]; unsigned long ul[2]; signed long long sll[2]; unsigned long long ull[2]; (void) __sync_fetch_and_add (&sc[1], -1); (void) __sync_fetch_and_add (&uc[1], -1); (void) __sync_fetch_and_add (&ss[1], -1); (void) __sync_fetch_and_add (&us[1], -1); (void) __sync_fetch_and_add (&si[1], -1); (void) __sync_fetch_and_add (&ui[1], -1); (void) __sync_fetch_and_add (&sl[1], -1); (void) __sync_fetch_and_add (&ul[1], -1); (void) __sync_fetch_and_add (&sll[1], -1); (void) __sync_fetch_and_add (&ull[1], -1); (void) __sync_fetch_and_sub (&sc[1], -1); (void) __sync_fetch_and_sub (&uc[1], -1); (void) __sync_fetch_and_sub (&ss[1], -1); (void) __sync_fetch_and_sub (&us[1], -1); (void) __sync_fetch_and_sub (&si[1], -1); (void) __sync_fetch_and_sub (&ui[1], -1); (void) __sync_fetch_and_sub (&sl[1], -1); (void) __sync_fetch_and_sub (&ul[1], -1); (void) __sync_fetch_and_sub (&sll[1], -1); (void) __sync_fetch_and_sub (&ull[1], -1); (void) __sync_fetch_and_or (&sc[1], -1); (void) __sync_fetch_and_or (&uc[1], -1); (void) __sync_fetch_and_or (&ss[1], -1); (void) __sync_fetch_and_or (&us[1], -1); (void) __sync_fetch_and_or (&si[1], -1); (void) __sync_fetch_and_or (&ui[1], -1); (void) __sync_fetch_and_or (&sl[1], -1); (void) __sync_fetch_and_or (&ul[1], -1); (void) __sync_fetch_and_or (&sll[1], -1); (void) __sync_fetch_and_or (&ull[1], -1); (void) __sync_fetch_and_xor (&sc[1], -1); (void) __sync_fetch_and_xor (&uc[1], -1); (void) __sync_fetch_and_xor (&ss[1], -1); (void) __sync_fetch_and_xor (&us[1], -1); (void) 
__sync_fetch_and_xor (&si[1], -1); (void) __sync_fetch_and_xor (&ui[1], -1); (void) __sync_fetch_and_xor (&sl[1], -1); (void) __sync_fetch_and_xor (&ul[1], -1); (void) __sync_fetch_and_xor (&sll[1], -1); (void) __sync_fetch_and_xor (&ull[1], -1); (void) __sync_fetch_and_and (&sc[1], -1); (void) __sync_fetch_and_and (&uc[1], -1); (void) __sync_fetch_and_and (&ss[1], -1); (void) __sync_fetch_and_and (&us[1], -1); (void) __sync_fetch_and_and (&si[1], -1); (void) __sync_fetch_and_and (&ui[1], -1); (void) __sync_fetch_and_and (&sl[1], -1); (void) __sync_fetch_and_and (&ul[1], -1); (void) __sync_fetch_and_and (&sll[1], -1); (void) __sync_fetch_and_and (&ull[1], -1); (void) __sync_fetch_and_nand (&sc[1], -1); (void) __sync_fetch_and_nand (&uc[1], -1); (void) __sync_fetch_and_nand (&ss[1], -1); (void) __sync_fetch_and_nand (&us[1], -1); (void) __sync_fetch_and_nand (&si[1], -1); (void) __sync_fetch_and_nand (&ui[1], -1); (void) __sync_fetch_and_nand (&sl[1], -1); (void) __sync_fetch_and_nand (&ul[1], -1); (void) __sync_fetch_and_nand (&sll[1], -1); (void) __sync_fetch_and_nand (&ull[1], -1); } void test_fetch_and_op (void) { signed char sc[2]; unsigned char uc[2]; signed short ss[2]; unsigned short us[2]; signed int si[2]; unsigned int ui[2]; signed long sl[2]; unsigned long ul[2]; signed long long sll[2]; unsigned long long ull[2]; sc[1] = __sync_fetch_and_add (&sc[1], -11); uc[1] = __sync_fetch_and_add (&uc[1], -11); ss[1] = __sync_fetch_and_add (&ss[1], -11); us[1] = __sync_fetch_and_add (&us[1], -11); si[1] = __sync_fetch_and_add (&si[1], -11); ui[1] = __sync_fetch_and_add (&ui[1], -11); sl[1] = __sync_fetch_and_add (&sl[1], -11); ul[1] = __sync_fetch_and_add (&ul[1], -11); sll[1] = __sync_fetch_and_add (&sll[1], -11); ull[1] = __sync_fetch_and_add (&ull[1], -11); sc[1] = __sync_fetch_and_sub (&sc[1], -11); uc[1] = __sync_fetch_and_sub (&uc[1], -11); ss[1] = __sync_fetch_and_sub (&ss[1], -11); us[1] = __sync_fetch_and_sub (&us[1], -11); si[1] = __sync_fetch_and_sub 
(&si[1], -11); ui[1] = __sync_fetch_and_sub (&ui[1], -11); sl[1] = __sync_fetch_and_sub (&sl[1], -11); ul[1] = __sync_fetch_and_sub (&ul[1], -11); sll[1] = __sync_fetch_and_sub (&sll[1], -11); ull[1] = __sync_fetch_and_sub (&ull[1], -11); sc[1] = __sync_fetch_and_or (&sc[1], -11); uc[1] = __sync_fetch_and_or (&uc[1], -11); ss[1] = __sync_fetch_and_or (&ss[1], -11); us[1] = __sync_fetch_and_or (&us[1], -11); si[1] = __sync_fetch_and_or (&si[1], -11); ui[1] = __sync_fetch_and_or (&ui[1], -11); sl[1] = __sync_fetch_and_or (&sl[1], -11); ul[1] = __sync_fetch_and_or (&ul[1], -11); sll[1] = __sync_fetch_and_or (&sll[1], -11); ull[1] = __sync_fetch_and_or (&ull[1], -11); sc[1] = __sync_fetch_and_xor (&sc[1], -11); uc[1] = __sync_fetch_and_xor (&uc[1], -11); ss[1] = __sync_fetch_and_xor (&ss[1], -11); us[1] = __sync_fetch_and_xor (&us[1], -11); si[1] = __sync_fetch_and_xor (&si[1], -11); ui[1] = __sync_fetch_and_xor (&ui[1], -11); sl[1] = __sync_fetch_and_xor (&sl[1], -11); ul[1] = __sync_fetch_and_xor (&ul[1], -11); sll[1] = __sync_fetch_and_xor (&sll[1], -11); ull[1] = __sync_fetch_and_xor (&ull[1], -11); sc[1] = __sync_fetch_and_and (&sc[1], -11); uc[1] = __sync_fetch_and_and (&uc[1], -11); ss[1] = __sync_fetch_and_and (&ss[1], -11); us[1] = __sync_fetch_and_and (&us[1], -11); si[1] = __sync_fetch_and_and (&si[1], -11); ui[1] = __sync_fetch_and_and (&ui[1], -11); sl[1] = __sync_fetch_and_and (&sl[1], -11); ul[1] = __sync_fetch_and_and (&ul[1], -11); sll[1] = __sync_fetch_and_and (&sll[1], -11); ull[1] = __sync_fetch_and_and (&ull[1], -11); sc[1] = __sync_fetch_and_nand (&sc[1], -11); uc[1] = __sync_fetch_and_nand (&uc[1], -11); ss[1] = __sync_fetch_and_nand (&ss[1], -11); us[1] = __sync_fetch_and_nand (&us[1], -11); si[1] = __sync_fetch_and_nand (&si[1], -11); ui[1] = __sync_fetch_and_nand (&ui[1], -11); sl[1] = __sync_fetch_and_nand (&sl[1], -11); ul[1] = __sync_fetch_and_nand (&ul[1], -11); sll[1] = __sync_fetch_and_nand (&sll[1], -11); ull[1] = __sync_fetch_and_nand 
(&ull[1], -11); } void test_lock (void) { signed char sc[2]; unsigned char uc[2]; signed short ss[2]; unsigned short us[2]; signed int si[2]; unsigned int ui[2]; signed long sl[2]; unsigned long ul[2]; signed long long sll[2]; unsigned long long ull[2]; sc[1] = __sync_lock_test_and_set (&sc[1], -1); uc[1] = __sync_lock_test_and_set (&uc[1], -1); ss[1] = __sync_lock_test_and_set (&ss[1], -1); us[1] = __sync_lock_test_and_set (&us[1], -1); si[1] = __sync_lock_test_and_set (&si[1], -1); ui[1] = __sync_lock_test_and_set (&ui[1], -1); sl[1] = __sync_lock_test_and_set (&sl[1], -1); ul[1] = __sync_lock_test_and_set (&ul[1], -1); sll[1] = __sync_lock_test_and_set (&sll[1], -1); ull[1] = __sync_lock_test_and_set (&ull[1], -1); }
gpl-2.0
mdeejay/shooteru-ics-sense
arch/powerpc/sysdev/mpic_u3msi.c
934
5314
/* * Copyright 2006, Segher Boessenkool, IBM Corporation. * Copyright 2006-2007, Michael Ellerman, IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 of the * License. * */ #include <linux/irq.h> #include <linux/bootmem.h> #include <linux/msi.h> #include <asm/mpic.h> #include <asm/prom.h> #include <asm/hw_irq.h> #include <asm/ppc-pci.h> #include <asm/msi_bitmap.h> #include "mpic.h" /* A bit ugly, can we get this from the pci_dev somehow? */ static struct mpic *msi_mpic; static void mpic_u3msi_mask_irq(unsigned int irq) { mask_msi_irq(irq); mpic_mask_irq(irq); } static void mpic_u3msi_unmask_irq(unsigned int irq) { mpic_unmask_irq(irq); unmask_msi_irq(irq); } static struct irq_chip mpic_u3msi_chip = { .shutdown = mpic_u3msi_mask_irq, .mask = mpic_u3msi_mask_irq, .unmask = mpic_u3msi_unmask_irq, .eoi = mpic_end_irq, .set_type = mpic_set_irq_type, .set_affinity = mpic_set_affinity, .name = "MPIC-U3MSI", }; static u64 read_ht_magic_addr(struct pci_dev *pdev, unsigned int pos) { u8 flags; u32 tmp; u64 addr; pci_read_config_byte(pdev, pos + HT_MSI_FLAGS, &flags); if (flags & HT_MSI_FLAGS_FIXED) return HT_MSI_FIXED_ADDR; pci_read_config_dword(pdev, pos + HT_MSI_ADDR_LO, &tmp); addr = tmp & HT_MSI_ADDR_LO_MASK; pci_read_config_dword(pdev, pos + HT_MSI_ADDR_HI, &tmp); addr = addr | ((u64)tmp << 32); return addr; } static u64 find_ht_magic_addr(struct pci_dev *pdev, unsigned int hwirq) { struct pci_bus *bus; unsigned int pos; for (bus = pdev->bus; bus && bus->self; bus = bus->parent) { pos = pci_find_ht_capability(bus->self, HT_CAPTYPE_MSI_MAPPING); if (pos) return read_ht_magic_addr(bus->self, pos); } return 0; } static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq) { struct pci_controller *hose = pci_bus_to_host(pdev->bus); /* U4 PCIe MSIs need to write to the special register in * the bridge that 
generates interrupts. There should be * theorically a register at 0xf8005000 where you just write * the MSI number and that triggers the right interrupt, but * unfortunately, this is busted in HW, the bridge endian swaps * the value and hits the wrong nibble in the register. * * So instead we use another register set which is used normally * for converting HT interrupts to MPIC interrupts, which decodes * the interrupt number as part of the low address bits * * This will not work if we ever use more than one legacy MSI in * a block but we never do. For one MSI or multiple MSI-X where * each interrupt address can be specified separately, it works * just fine. */ if (of_device_is_compatible(hose->dn, "u4-pcie") || of_device_is_compatible(hose->dn, "U4-pcie")) return 0xf8004000 | (hwirq << 4); return 0; } static int u3msi_msi_check_device(struct pci_dev *pdev, int nvec, int type) { if (type == PCI_CAP_ID_MSIX) pr_debug("u3msi: MSI-X untested, trying anyway.\n"); /* If we can't find a magic address then MSI ain't gonna work */ if (find_ht_magic_addr(pdev, 0) == 0 && find_u4_magic_addr(pdev, 0) == 0) { pr_debug("u3msi: no magic address found for %s\n", pci_name(pdev)); return -ENXIO; } return 0; } static void u3msi_teardown_msi_irqs(struct pci_dev *pdev) { struct msi_desc *entry; list_for_each_entry(entry, &pdev->msi_list, list) { if (entry->irq == NO_IRQ) continue; set_irq_msi(entry->irq, NULL); msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, virq_to_hw(entry->irq), 1); irq_dispose_mapping(entry->irq); } return; } static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) { unsigned int virq; struct msi_desc *entry; struct msi_msg msg; u64 addr; int hwirq; list_for_each_entry(entry, &pdev->msi_list, list) { hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap, 1); if (hwirq < 0) { pr_debug("u3msi: failed allocating hwirq\n"); return hwirq; } addr = find_ht_magic_addr(pdev, hwirq); if (addr == 0) addr = find_u4_magic_addr(pdev, hwirq); msg.address_lo = 
addr & 0xFFFFFFFF; msg.address_hi = addr >> 32; virq = irq_create_mapping(msi_mpic->irqhost, hwirq); if (virq == NO_IRQ) { pr_debug("u3msi: failed mapping hwirq 0x%x\n", hwirq); msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1); return -ENOSPC; } set_irq_msi(virq, entry); set_irq_chip(virq, &mpic_u3msi_chip); set_irq_type(virq, IRQ_TYPE_EDGE_RISING); pr_debug("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n", virq, hwirq, (unsigned long)addr); printk("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n", virq, hwirq, (unsigned long)addr); msg.data = hwirq; write_msi_msg(virq, &msg); hwirq++; } return 0; } int mpic_u3msi_init(struct mpic *mpic) { int rc; rc = mpic_msi_init_allocator(mpic); if (rc) { pr_debug("u3msi: Error allocating bitmap!\n"); return rc; } pr_debug("u3msi: Registering MPIC U3 MSI callbacks.\n"); BUG_ON(msi_mpic); msi_mpic = mpic; WARN_ON(ppc_md.setup_msi_irqs); ppc_md.setup_msi_irqs = u3msi_setup_msi_irqs; ppc_md.teardown_msi_irqs = u3msi_teardown_msi_irqs; ppc_md.msi_check_device = u3msi_msi_check_device; return 0; }
gpl-2.0
taogb/linux
arch/arm/kernel/tcm.c
1190
8271
/*
 * Copyright (C) 2008-2009 ST-Ericsson AB
 * License terms: GNU General Public License (GPL) version 2
 * TCM memory handling for ARM systems
 *
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 * Author: Rickard Andersson <rickard.andersson@stericsson.com>
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/genalloc.h>
#include <linux/string.h> /* memcpy */
#include <asm/cputype.h>
#include <asm/mach/map.h>
#include <asm/memory.h>
#include <asm/system_info.h>

static struct gen_pool *tcm_pool;
static bool dtcm_present;
static bool itcm_present;

/* TCM section definitions from the linker */
extern char __itcm_start, __sitcm_text, __eitcm_text;
extern char __dtcm_start, __sdtcm_data, __edtcm_data;

/* These will be increased as we run */
u32 dtcm_end = DTCM_OFFSET;
u32 itcm_end = ITCM_OFFSET;

/*
 * TCM memory resources
 */
static struct resource dtcm_res = {
	.name = "DTCM RAM",
	.start = DTCM_OFFSET,
	.end = DTCM_OFFSET,
	.flags = IORESOURCE_MEM
};

static struct resource itcm_res = {
	.name = "ITCM RAM",
	.start = ITCM_OFFSET,
	.end = ITCM_OFFSET,
	.flags = IORESOURCE_MEM
};

static struct map_desc dtcm_iomap[] __initdata = {
	{
		.virtual	= DTCM_OFFSET,
		.pfn		= __phys_to_pfn(DTCM_OFFSET),
		.length		= 0,
		.type		= MT_MEMORY_RW_DTCM
	}
};

static struct map_desc itcm_iomap[] __initdata = {
	{
		.virtual	= ITCM_OFFSET,
		.pfn		= __phys_to_pfn(ITCM_OFFSET),
		.length		= 0,
		.type		= MT_MEMORY_RWX_ITCM,
	}
};

/*
 * Allocate a chunk of TCM memory
 */
void *tcm_alloc(size_t len)
{
	unsigned long vaddr;

	if (!tcm_pool)
		return NULL;

	vaddr = gen_pool_alloc(tcm_pool, len);
	if (!vaddr)
		return NULL;

	return (void *) vaddr;
}
EXPORT_SYMBOL(tcm_alloc);

/*
 * Free a chunk of TCM memory
 */
void tcm_free(void *addr, size_t len)
{
	gen_pool_free(tcm_pool, (unsigned long) addr, len);
}
EXPORT_SYMBOL(tcm_free);

bool tcm_dtcm_present(void)
{
	return dtcm_present;
}
EXPORT_SYMBOL(tcm_dtcm_present);

bool tcm_itcm_present(void)
{
	return itcm_present;
}
EXPORT_SYMBOL(tcm_itcm_present);

/*
 * Probe one TCM bank of the given type (0 = DTCM, 1 = ITCM), then move
 * it to *offset and enable it.  *offset is advanced by the bank size.
 *
 * Returns 0 on success (including a zero-sized bank, which is simply
 * skipped) or -EINVAL when the bank size read from the region register
 * is reserved or larger than the 32k this code supports.
 */
static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks,
				  u32 *offset)
{
	/* Bank size encoding from the region register, in KiB; -1 = reserved */
	const int tcm_sizes[16] = { 0, -1, -1, 4, 8, 16, 32, 64, 128,
				    256, 512, 1024, -1, -1, -1, -1 };
	u32 tcm_region;
	int tcm_size;

	/*
	 * If there are more than one TCM bank of this type,
	 * select the TCM bank to operate on in the TCM selection
	 * register.
	 */
	if (banks > 1)
		asm("mcr	p15, 0, %0, c9, c2, 0"
		    : /* No output operands */
		    : "r" (bank));

	/* Read the special TCM region register c9, 0 */
	if (!type)
		asm("mrc	p15, 0, %0, c9, c1, 0"
		    : "=r" (tcm_region));
	else
		asm("mrc	p15, 0, %0, c9, c1, 1"
		    : "=r" (tcm_region));

	tcm_size = tcm_sizes[(tcm_region >> 2) & 0x0f];
	if (tcm_size < 0) {
		pr_err("CPU: %sTCM%d of unknown size\n",
		       type ? "I" : "D", bank);
		return -EINVAL;
	} else if (tcm_size > 32) {
		pr_err("CPU: %sTCM%d larger than 32k found\n",
		       type ? "I" : "D", bank);
		return -EINVAL;
	} else {
		pr_info("CPU: found %sTCM%d %dk @ %08x, %senabled\n",
			type ? "I" : "D",
			bank,
			tcm_size,
			(tcm_region & 0xfffff000U),
			(tcm_region & 1) ? "" : "not ");
	}

	/* Not much fun you can do with a size 0 bank */
	if (tcm_size == 0)
		return 0;

	/* Force move the TCM bank to where we want it, enable */
	tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1;

	if (!type)
		asm("mcr	p15, 0, %0, c9, c1, 0"
		    : /* No output operands */
		    : "r" (tcm_region));
	else
		asm("mcr	p15, 0, %0, c9, c1, 1"
		    : /* No output operands */
		    : "r" (tcm_region));

	/* Increase offset */
	*offset += (tcm_size << 10);

	pr_info("CPU: moved %sTCM%d %dk to %08x, enabled\n",
		type ? "I" : "D",
		bank,
		tcm_size,
		(tcm_region & 0xfffff000U));
	return 0;
}

/*
 * This initializes the TCM memory.
 *
 * Format-specifier fix vs. the previous version: the *_code_sz values
 * are size_t and must be printed with %zu, and the (dtcm_end -
 * DTCM_OFFSET) / (itcm_end - ITCM_OFFSET) expressions are u32 and must
 * be printed with %u (they were %u / %lu respectively before).
 */
void __init tcm_init(void)
{
	u32 tcm_status;
	u8 dtcm_banks;
	u8 itcm_banks;
	size_t dtcm_code_sz = &__edtcm_data - &__sdtcm_data;
	size_t itcm_code_sz = &__eitcm_text - &__sitcm_text;
	char *start;
	char *end;
	char *ram;
	int ret;
	int i;

	/*
	 * Prior to ARMv5 there is no TCM, and trying to read the status
	 * register will hang the processor.
	 */
	if (cpu_architecture() < CPU_ARCH_ARMv5) {
		if (dtcm_code_sz || itcm_code_sz)
			pr_info("CPU TCM: %zu bytes of DTCM and %zu bytes of "
				"ITCM code compiled in, but no TCM present "
				"in pre-v5 CPU\n", dtcm_code_sz, itcm_code_sz);
		return;
	}

	tcm_status = read_cpuid_tcmstatus();
	dtcm_banks = (tcm_status >> 16) & 0x03;
	itcm_banks = (tcm_status & 0x03);

	/* Values greater than 2 for D/ITCM banks are "reserved" */
	if (dtcm_banks > 2)
		dtcm_banks = 0;
	if (itcm_banks > 2)
		itcm_banks = 0;

	/* Setup DTCM if present */
	if (dtcm_banks > 0) {
		for (i = 0; i < dtcm_banks; i++) {
			ret = setup_tcm_bank(0, i, dtcm_banks, &dtcm_end);
			if (ret)
				return;
		}
		/* This means you compiled more code than fits into DTCM */
		if (dtcm_code_sz > (dtcm_end - DTCM_OFFSET)) {
			pr_info("CPU DTCM: %zu bytes of code compiled to "
				"DTCM but only %u bytes of DTCM present\n",
				dtcm_code_sz, (dtcm_end - DTCM_OFFSET));
			goto no_dtcm;
		}
		dtcm_res.end = dtcm_end - 1;
		request_resource(&iomem_resource, &dtcm_res);
		dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET;
		iotable_init(dtcm_iomap, 1);
		/* Copy data from RAM to DTCM */
		start = &__sdtcm_data;
		end = &__edtcm_data;
		ram = &__dtcm_start;
		memcpy(start, ram, dtcm_code_sz);
		pr_debug("CPU DTCM: copied data from %p - %p\n",
			 start, end);
		dtcm_present = true;
	} else if (dtcm_code_sz) {
		pr_info("CPU DTCM: %zu bytes of code compiled to DTCM but no "
			"DTCM banks present in CPU\n", dtcm_code_sz);
	}

no_dtcm:
	/* Setup ITCM if present */
	if (itcm_banks > 0) {
		for (i = 0; i < itcm_banks; i++) {
			ret = setup_tcm_bank(1, i, itcm_banks, &itcm_end);
			if (ret)
				return;
		}
		/* This means you compiled more code than fits into ITCM */
		if (itcm_code_sz > (itcm_end - ITCM_OFFSET)) {
			pr_info("CPU ITCM: %zu bytes of code compiled to "
				"ITCM but only %u bytes of ITCM present\n",
				itcm_code_sz, (itcm_end - ITCM_OFFSET));
			return;
		}
		itcm_res.end = itcm_end - 1;
		request_resource(&iomem_resource, &itcm_res);
		itcm_iomap[0].length = itcm_end - ITCM_OFFSET;
		iotable_init(itcm_iomap, 1);
		/* Copy code from RAM to ITCM */
		start = &__sitcm_text;
		end = &__eitcm_text;
		ram = &__itcm_start;
		memcpy(start, ram, itcm_code_sz);
		pr_debug("CPU ITCM: copied code from %p - %p\n",
			 start, end);
		itcm_present = true;
	} else if (itcm_code_sz) {
		pr_info("CPU ITCM: %zu bytes of code compiled to ITCM but no "
			"ITCM banks present in CPU\n", itcm_code_sz);
	}
}

/*
 * This creates the TCM memory pool and has to be done later,
 * during the core_initicalls, since the allocator is not yet
 * up and running when the first initialization runs.
 */
static int __init setup_tcm_pool(void)
{
	u32 dtcm_pool_start = (u32) &__edtcm_data;
	u32 itcm_pool_start = (u32) &__eitcm_text;
	int ret;

	/*
	 * Set up malloc pool, 2^2 = 4 bytes granularity since
	 * the TCM is sometimes just 4 KiB. NB: pages and cache
	 * line alignments does not matter in TCM!
	 */
	tcm_pool = gen_pool_create(2, -1);
	if (!tcm_pool)
		return -ENOMEM;	/* tcm_alloc() keeps returning NULL then */

	pr_debug("Setting up TCM memory pool\n");

	/* Add the rest of DTCM to the TCM pool */
	if (dtcm_present) {
		if (dtcm_pool_start < dtcm_end) {
			ret = gen_pool_add(tcm_pool, dtcm_pool_start,
					   dtcm_end - dtcm_pool_start, -1);
			if (ret) {
				pr_err("CPU DTCM: could not add DTCM " \
				       "remainder to pool!\n");
				return ret;
			}
			pr_debug("CPU DTCM: Added %08x bytes @ %08x to " \
				 "the TCM memory pool\n",
				 dtcm_end - dtcm_pool_start,
				 dtcm_pool_start);
		}
	}

	/* Add the rest of ITCM to the TCM pool */
	if (itcm_present) {
		if (itcm_pool_start < itcm_end) {
			ret = gen_pool_add(tcm_pool, itcm_pool_start,
					   itcm_end - itcm_pool_start, -1);
			if (ret) {
				pr_err("CPU ITCM: could not add ITCM " \
				       "remainder to pool!\n");
				return ret;
			}
			pr_debug("CPU ITCM: Added %08x bytes @ %08x to " \
				 "the TCM memory pool\n",
				 itcm_end - itcm_pool_start,
				 itcm_pool_start);
		}
	}
	return 0;
}

core_initcall(setup_tcm_pool);
gpl-2.0
geekboxzone/mmallow_kernel
drivers/acpi/acpica/hwxfsleep.c
1958
13247
/******************************************************************************
 *
 * Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2013, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"

#define _COMPONENT          ACPI_HARDWARE
ACPI_MODULE_NAME("hwxfsleep")

/* Local prototypes */
static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);

/*
 * Dispatch table used to efficiently branch to the various sleep
 * functions.
 */
#define ACPI_SLEEP_FUNCTION_ID         0
#define ACPI_WAKE_PREP_FUNCTION_ID     1
#define ACPI_WAKE_FUNCTION_ID          2

/* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */

/*
 * Indexed by the ACPI_*_FUNCTION_ID constants above; each entry pairs the
 * (optional) legacy PM-register handler with the extended-register one.
 */
static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
	{ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
	 acpi_hw_extended_sleep},
	{ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
	 acpi_hw_extended_wake_prep},
	{ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
	 acpi_hw_extended_wake}
};

/*
 * These functions are removed for the ACPI_REDUCED_HARDWARE case:
 *      acpi_set_firmware_waking_vector
 *      acpi_set_firmware_waking_vector64
 *      acpi_enter_sleep_state_s4bios
 */

#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
 *
 * FUNCTION:    acpi_set_firmware_waking_vector
 *
 * PARAMETERS:  physical_address    - 32-bit physical address of ACPI real mode
 *                                    entry point.
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Sets the 32-bit firmware_waking_vector field of the FACS
 *
 ******************************************************************************/
acpi_status acpi_set_firmware_waking_vector(u32 physical_address)
{
	ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);

	/*
	 * According to the ACPI specification 2.0c and later, the 64-bit
	 * waking vector should be cleared and the 32-bit waking vector should
	 * be used, unless we want the wake-up code to be called by the BIOS in
	 * Protected Mode. Some systems (for example HP dv5-1004nr) are known
	 * to fail to resume if the 64-bit vector is used.
	 */

	/* Set the 32-bit vector */

	acpi_gbl_FACS->firmware_waking_vector = physical_address;

	/* Clear the 64-bit vector if it exists */
	/* (the X_ vector is only present in FACS version 1+ / length > 32) */

	if ((acpi_gbl_FACS->length > 32) && (acpi_gbl_FACS->version >= 1)) {
		acpi_gbl_FACS->xfirmware_waking_vector = 0;
	}

	return_ACPI_STATUS(AE_OK);
}

ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)

#if ACPI_MACHINE_WIDTH == 64
/*******************************************************************************
 *
 * FUNCTION:    acpi_set_firmware_waking_vector64
 *
 * PARAMETERS:  physical_address    - 64-bit physical address of ACPI protected
 *                                    mode entry point.
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Sets the 64-bit X_firmware_waking_vector field of the FACS, if
 *              it exists in the table. This function is intended for use with
 *              64-bit host operating systems.
 *
 ******************************************************************************/
acpi_status acpi_set_firmware_waking_vector64(u64 physical_address)
{
	ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector64);

	/* Determine if the 64-bit vector actually exists */

	if ((acpi_gbl_FACS->length <= 32) || (acpi_gbl_FACS->version < 1)) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	/* Clear 32-bit vector, set the 64-bit X_ vector */

	acpi_gbl_FACS->firmware_waking_vector = 0;
	acpi_gbl_FACS->xfirmware_waking_vector = physical_address;
	return_ACPI_STATUS(AE_OK);
}

ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector64)
#endif
/*******************************************************************************
 *
 * FUNCTION:    acpi_enter_sleep_state_s4bios
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Perform a S4 bios request.
 *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
 *
 ******************************************************************************/
acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
{
	u32 in_value;
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);

	/* Clear the wake status bit (PM1) */

	status =
	    acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, ACPI_CLEAR_STATUS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	status = acpi_hw_clear_acpi_status();
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * 1) Disable/Clear all GPEs
	 * 2) Enable all wakeup GPEs
	 */
	status = acpi_hw_disable_all_gpes();
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}
	acpi_gbl_system_awake_and_running = FALSE;

	status = acpi_hw_enable_all_wakeup_gpes();
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	ACPI_FLUSH_CPU_CACHE();

	/* Write the S4 request to the SMI command port, then poll for wake */

	status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
				    (u32)acpi_gbl_FADT.s4_bios_request, 8);

	do {
		acpi_os_stall(ACPI_USEC_PER_MSEC);
		status =
		    acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	} while (!in_value);

	return_ACPI_STATUS(AE_OK);
}

ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
#endif				/* !ACPI_REDUCED_HARDWARE */
/*******************************************************************************
 *
 * FUNCTION:    acpi_hw_sleep_dispatch
 *
 * PARAMETERS:  sleep_state         - Which sleep state to enter/exit
 *              function_id         - Sleep, wake_prep, or Wake
 *
 * RETURN:      Status from the invoked sleep handling function.
 *
 * DESCRIPTION: Dispatch a sleep/wake request to the appropriate handling
 *              function.
 *
 ******************************************************************************/
static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id)
{
	acpi_status status;
	struct acpi_sleep_functions *sleep_functions =
	    &acpi_sleep_dispatch[function_id];

#if (!ACPI_REDUCED_HARDWARE)
	/*
	 * If the Hardware Reduced flag is set (from the FADT), we must
	 * use the extended sleep registers (FADT). Note: As per the ACPI
	 * specification, these extended registers are to be used for HW-reduced
	 * platforms only. They are not general-purpose replacements for the
	 * legacy PM register sleep support.
	 */
	if (acpi_gbl_reduced_hardware) {
		status = sleep_functions->extended_function(sleep_state);
	} else {
		/* Legacy sleep */

		status = sleep_functions->legacy_function(sleep_state);
	}
	return (status);

#else
	/*
	 * For the case where reduced-hardware-only code is being generated,
	 * we know that only the extended sleep registers are available
	 */
	status = sleep_functions->extended_function(sleep_state);
	return (status);

#endif				/* !ACPI_REDUCED_HARDWARE */
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_enter_sleep_state_prep
 *
 * PARAMETERS:  sleep_state         - Which sleep state to enter
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Prepare to enter a system sleep state.
 *              This function must execute with interrupts enabled.
 *              We break sleeping into 2 stages so that OSPM can handle
 *              various OS-specific tasks between the two steps.
 *
 ******************************************************************************/
acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
{
	acpi_status status;
	struct acpi_object_list arg_list;
	union acpi_object arg;
	u32 sst_value;

	ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_prep);

	status = acpi_get_sleep_type_data(sleep_state,
					  &acpi_gbl_sleep_type_a,
					  &acpi_gbl_sleep_type_b);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Execute the _PTS method (Prepare To Sleep) */

	arg_list.count = 1;
	arg_list.pointer = &arg;
	arg.type = ACPI_TYPE_INTEGER;
	arg.integer.value = sleep_state;

	/* _PTS is optional: AE_NOT_FOUND is not an error */

	status =
	    acpi_evaluate_object(NULL, METHOD_PATHNAME__PTS, &arg_list, NULL);
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		return_ACPI_STATUS(status);
	}

	/* Setup the argument to the _SST method (System STatus) */

	switch (sleep_state) {
	case ACPI_STATE_S0:

		sst_value = ACPI_SST_WORKING;
		break;

	case ACPI_STATE_S1:
	case ACPI_STATE_S2:
	case ACPI_STATE_S3:

		sst_value = ACPI_SST_SLEEPING;
		break;

	case ACPI_STATE_S4:

		sst_value = ACPI_SST_SLEEP_CONTEXT;
		break;

	default:

		sst_value = ACPI_SST_INDICATOR_OFF;	/* Default is off */
		break;
	}

	/*
	 * Set the system indicators to show the desired sleep state.
	 * _SST is an optional method (return no error if not found)
	 */
	acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, sst_value);
	return_ACPI_STATUS(AE_OK);
}

ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)

/*******************************************************************************
 *
 * FUNCTION:    acpi_enter_sleep_state
 *
 * PARAMETERS:  sleep_state         - Which sleep state to enter
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enter a system sleep state
 *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
 *
 ******************************************************************************/
acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_enter_sleep_state);

	/* Sleep type values were cached by acpi_enter_sleep_state_prep() */

	if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
	    (acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
		ACPI_ERROR((AE_INFO, "Sleep values out of range: A=0x%X B=0x%X",
			    acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
		return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
	}

	status = acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)

/*******************************************************************************
 *
 * FUNCTION:    acpi_leave_sleep_state_prep
 *
 * PARAMETERS:  sleep_state         - Which sleep state we are exiting
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Perform the first state of OS-independent ACPI cleanup after a
 *              sleep. Called with interrupts DISABLED.
 *              We break wake/resume into 2 stages so that OSPM can handle
 *              various OS-specific tasks between the two steps.
 *
 ******************************************************************************/
acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);

	status =
	    acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_PREP_FUNCTION_ID);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state_prep)

/*******************************************************************************
 *
 * FUNCTION:    acpi_leave_sleep_state
 *
 * PARAMETERS:  sleep_state         - Which sleep state we are exiting
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Perform OS-independent ACPI cleanup after a sleep
 *              Called with interrupts ENABLED.
 *
 ******************************************************************************/
acpi_status acpi_leave_sleep_state(u8 sleep_state)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);

	status = acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_FUNCTION_ID);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state)
gpl-2.0
Stuxnet-Kernel/kernel_shamu
drivers/hwmon/f71882fg.c
2214
84646
/*************************************************************************** * Copyright (C) 2006 by Hans Edgington <hans@edgington.nl> * * Copyright (C) 2007-2011 Hans de Goede <hdegoede@redhat.com> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the * * Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * ***************************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/platform_device.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/io.h> #include <linux/acpi.h> #define DRVNAME "f71882fg" #define SIO_F71858FG_LD_HWM 0x02 /* Hardware monitor logical device */ #define SIO_F71882FG_LD_HWM 0x04 /* Hardware monitor logical device */ #define SIO_UNLOCK_KEY 0x87 /* Key to enable Super-I/O */ #define SIO_LOCK_KEY 0xAA /* Key to disable Super-I/O */ #define SIO_REG_LDSEL 0x07 /* Logical device select */ #define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */ #define SIO_REG_DEVREV 0x22 /* Device revision */ #define SIO_REG_MANID 0x23 /* Fintek ID (2 bytes) */ #define SIO_REG_ENABLE 0x30 /* Logical device enable */ #define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */ #define SIO_FINTEK_ID 0x1934 /* 
Manufacturers ID */ #define SIO_F71808E_ID 0x0901 /* Chipset ID */ #define SIO_F71808A_ID 0x1001 /* Chipset ID */ #define SIO_F71858_ID 0x0507 /* Chipset ID */ #define SIO_F71862_ID 0x0601 /* Chipset ID */ #define SIO_F71869_ID 0x0814 /* Chipset ID */ #define SIO_F71869A_ID 0x1007 /* Chipset ID */ #define SIO_F71882_ID 0x0541 /* Chipset ID */ #define SIO_F71889_ID 0x0723 /* Chipset ID */ #define SIO_F71889E_ID 0x0909 /* Chipset ID */ #define SIO_F71889A_ID 0x1005 /* Chipset ID */ #define SIO_F8000_ID 0x0581 /* Chipset ID */ #define SIO_F81865_ID 0x0704 /* Chipset ID */ #define REGION_LENGTH 8 #define ADDR_REG_OFFSET 5 #define DATA_REG_OFFSET 6 #define F71882FG_REG_IN_STATUS 0x12 /* f7188x only */ #define F71882FG_REG_IN_BEEP 0x13 /* f7188x only */ #define F71882FG_REG_IN(nr) (0x20 + (nr)) #define F71882FG_REG_IN1_HIGH 0x32 /* f7188x only */ #define F71882FG_REG_FAN(nr) (0xA0 + (16 * (nr))) #define F71882FG_REG_FAN_TARGET(nr) (0xA2 + (16 * (nr))) #define F71882FG_REG_FAN_FULL_SPEED(nr) (0xA4 + (16 * (nr))) #define F71882FG_REG_FAN_STATUS 0x92 #define F71882FG_REG_FAN_BEEP 0x93 #define F71882FG_REG_TEMP(nr) (0x70 + 2 * (nr)) #define F71882FG_REG_TEMP_OVT(nr) (0x80 + 2 * (nr)) #define F71882FG_REG_TEMP_HIGH(nr) (0x81 + 2 * (nr)) #define F71882FG_REG_TEMP_STATUS 0x62 #define F71882FG_REG_TEMP_BEEP 0x63 #define F71882FG_REG_TEMP_CONFIG 0x69 #define F71882FG_REG_TEMP_HYST(nr) (0x6C + (nr)) #define F71882FG_REG_TEMP_TYPE 0x6B #define F71882FG_REG_TEMP_DIODE_OPEN 0x6F #define F71882FG_REG_PWM(nr) (0xA3 + (16 * (nr))) #define F71882FG_REG_PWM_TYPE 0x94 #define F71882FG_REG_PWM_ENABLE 0x96 #define F71882FG_REG_FAN_HYST(nr) (0x98 + (nr)) #define F71882FG_REG_FAN_FAULT_T 0x9F #define F71882FG_FAN_NEG_TEMP_EN 0x20 #define F71882FG_FAN_PROG_SEL 0x80 #define F71882FG_REG_POINT_PWM(pwm, point) (0xAA + (point) + (16 * (pwm))) #define F71882FG_REG_POINT_TEMP(pwm, point) (0xA6 + (point) + (16 * (pwm))) #define F71882FG_REG_POINT_MAPPING(nr) (0xAF + 16 * (nr)) #define 
F71882FG_REG_START 0x01
#define F71882FG_MAX_INS 9

#define FAN_MIN_DETECT 366 /* Lowest detectable fanspeed */

/* Module parameter: force a specific device ID instead of the probed one */
static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");

/*
 * Supported chip models; used as the index into all per-model
 * capability tables below.
 */
enum chips { f71808e, f71808a, f71858fg, f71862fg, f71869, f71869a,
	f71882fg, f71889fg, f71889ed, f71889a, f8000, f81865f };

/* Human readable model names, indexed by enum chips */
static const char *const f71882fg_names[] = {
	"f71808e",
	"f71808a",
	"f71858fg",
	"f71862fg",
	"f71869", /* Both f71869f and f71869e, reg. compatible and same id */
	"f71869a",
	"f71882fg",
	"f71889fg", /* f81801u too, same id */
	"f71889ed",
	"f71889a",
	"f8000",
	"f81865f",
};

/* Which voltage inputs (in0..in8) each model provides */
static const char f71882fg_has_in[][F71882FG_MAX_INS] = {
	[f71808e]	= { 1, 1, 1, 1, 1, 1, 0, 1, 1 },
	[f71808a]	= { 1, 1, 1, 1, 0, 0, 0, 1, 1 },
	[f71858fg]	= { 1, 1, 1, 0, 0, 0, 0, 0, 0 },
	[f71862fg]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
	[f71869]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
	[f71869a]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
	[f71882fg]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
	[f71889fg]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
	[f71889ed]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
	[f71889a]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
	[f8000]		= { 1, 1, 1, 0, 0, 0, 0, 0, 0 },
	[f81865f]	= { 1, 1, 1, 1, 1, 1, 1, 0, 0 },
};

/* Whether the model has a max limit + alarm on the in1 input */
static const char f71882fg_has_in1_alarm[] = {
	[f71808e]	= 0,
	[f71808a]	= 0,
	[f71858fg]	= 0,
	[f71862fg]	= 0,
	[f71869]	= 0,
	[f71869a]	= 0,
	[f71882fg]	= 1,
	[f71889fg]	= 1,
	[f71889ed]	= 1,
	[f71889a]	= 1,
	[f8000]		= 0,
	[f81865f]	= 1,
};

/* Whether the model can beep on a fan alarm */
static const char f71882fg_fan_has_beep[] = {
	[f71808e]	= 0,
	[f71808a]	= 0,
	[f71858fg]	= 0,
	[f71862fg]	= 1,
	[f71869]	= 1,
	[f71869a]	= 1,
	[f71882fg]	= 1,
	[f71889fg]	= 1,
	[f71889ed]	= 1,
	[f71889a]	= 1,
	[f8000]		= 0,
	[f81865f]	= 1,
};

/* Number of pwm-controllable fans per model */
static const char f71882fg_nr_fans[] = {
	[f71808e]	= 3,
	[f71808a]	= 2, /* +1 fan which is monitor + simple pwm only */
	[f71858fg]	= 3,
	[f71862fg]	= 3,
	[f71869]	= 3,
	[f71869a]	= 3,
	[f71882fg]	= 4,
	[f71889fg]	= 3,
	[f71889ed]	= 3,
	[f71889a]	= 3,
	[f8000]		= 3, /* +1 fan which is monitor only */
	[f81865f]	= 2,
};

/* Whether the model can beep on a temperature alarm */
static const char f71882fg_temp_has_beep[] = {
	[f71808e]	= 0,
	[f71808a]	= 1,
	[f71858fg]	= 0,
	[f71862fg]	= 1,
	[f71869]	= 1,
	[f71869a]	= 1,
	[f71882fg]	= 1,
	[f71889fg]	= 1,
	[f71889ed]	= 1,
	[f71889a]	= 1,
	[f8000]		= 0,
	[f81865f]	= 1,
};

/* Number of temperature channels per model */
static const char f71882fg_nr_temps[] = {
	[f71808e]	= 2,
	[f71808a]	= 2,
	[f71858fg]	= 3,
	[f71862fg]	= 3,
	[f71869]	= 3,
	[f71869a]	= 3,
	[f71882fg]	= 3,
	[f71889fg]	= 3,
	[f71889ed]	= 3,
	[f71889a]	= 3,
	[f8000]		= 3,
	[f81865f]	= 2,
};

static struct platform_device *f71882fg_pdev;

/* Super-I/O Function prototypes */
static inline int superio_inb(int base, int reg);
static inline int superio_inw(int base, int reg);
static inline int superio_enter(int base);
static inline void superio_select(int base, int ld);
static inline void superio_exit(int base);

/* Data handed from the Super-I/O detection code to the platform driver */
struct f71882fg_sio_data {
	enum chips type;
};

/* Per-device driver state plus a cache of the chip's register values */
struct f71882fg_data {
	unsigned short addr;		/* base I/O address of the hwmon bank */
	enum chips type;
	struct device *hwmon_dev;
	struct mutex update_lock;	/* serializes register access/cache */
	int temp_start;			/* temp numbering start (0 or 1) */
	char valid;			/* !=0 if following fields are valid */
	char auto_point_temp_signed;
	unsigned long last_updated;	/* In jiffies */
	unsigned long last_limits;	/* In jiffies */

	/* Register Values */
	u8 in[F71882FG_MAX_INS];
	u8 in1_max;
	u8 in_status;
	u8 in_beep;
	u16 fan[4];
	u16 fan_target[4];
	u16 fan_full_speed[4];
	u8 fan_status;
	u8 fan_beep;
	/*
	 * Note: all models have max 3 temperature channels, but on some
	 * they are addressed as 0-2 and on others as 1-3, so for coding
	 * convenience we reserve space for 4 channels
	 */
	u16 temp[4];
	u8 temp_ovt[4];
	u8 temp_high[4];
	u8 temp_hyst[2]; /* 2 hysts stored per reg */
	u8 temp_type[4];
	u8 temp_status;
	u8 temp_beep;
	u8 temp_diode_open;
	u8 temp_config;
	u8 pwm[4];
	u8 pwm_enable;
	u8 pwm_auto_point_hyst[2];
	u8 pwm_auto_point_mapping[4];
	u8 pwm_auto_point_pwm[4][5];
	s8 pwm_auto_point_temp[4][4];
};

/* Sysfs in */
static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
	char *buf);
static ssize_t show_in_max(struct device *dev, struct device_attribute
*devattr, char *buf);
static ssize_t store_in_max(struct device *dev, struct device_attribute
	*devattr, const char *buf, size_t count);
static ssize_t show_in_beep(struct device *dev, struct device_attribute
	*devattr, char *buf);
static ssize_t store_in_beep(struct device *dev, struct device_attribute
	*devattr, const char *buf, size_t count);
static ssize_t show_in_alarm(struct device *dev, struct device_attribute
	*devattr, char *buf);
/* Sysfs Fan */
static ssize_t show_fan(struct device *dev, struct device_attribute *devattr,
	char *buf);
static ssize_t show_fan_full_speed(struct device *dev,
	struct device_attribute *devattr, char *buf);
static ssize_t store_fan_full_speed(struct device *dev,
	struct device_attribute *devattr, const char *buf, size_t count);
static ssize_t show_fan_beep(struct device *dev, struct device_attribute
	*devattr, char *buf);
static ssize_t store_fan_beep(struct device *dev, struct device_attribute
	*devattr, const char *buf, size_t count);
static ssize_t show_fan_alarm(struct device *dev, struct device_attribute
	*devattr, char *buf);
/* Sysfs Temp */
static ssize_t show_temp(struct device *dev, struct device_attribute
	*devattr, char *buf);
static ssize_t show_temp_max(struct device *dev, struct device_attribute
	*devattr, char *buf);
static ssize_t store_temp_max(struct device *dev, struct device_attribute
	*devattr, const char *buf, size_t count);
static ssize_t show_temp_max_hyst(struct device *dev,
	struct device_attribute *devattr, char *buf);
static ssize_t store_temp_max_hyst(struct device *dev,
	struct device_attribute *devattr, const char *buf, size_t count);
static ssize_t show_temp_crit(struct device *dev, struct device_attribute
	*devattr, char *buf);
static ssize_t store_temp_crit(struct device *dev, struct device_attribute
	*devattr, const char *buf, size_t count);
static ssize_t show_temp_crit_hyst(struct device *dev,
	struct device_attribute *devattr, char *buf);
static ssize_t show_temp_type(struct device *dev, struct
	device_attribute *devattr, char *buf);
static ssize_t show_temp_beep(struct device *dev, struct device_attribute
	*devattr, char *buf);
static ssize_t store_temp_beep(struct device *dev, struct device_attribute
	*devattr, const char *buf, size_t count);
static ssize_t show_temp_alarm(struct device *dev, struct device_attribute
	*devattr, char *buf);
static ssize_t show_temp_fault(struct device *dev, struct device_attribute
	*devattr, char *buf);
/* PWM and Auto point control */
static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr,
	char *buf);
static ssize_t store_pwm(struct device *dev, struct device_attribute *devattr,
	const char *buf, size_t count);
static ssize_t show_simple_pwm(struct device *dev,
	struct device_attribute *devattr, char *buf);
static ssize_t store_simple_pwm(struct device *dev,
	struct device_attribute *devattr, const char *buf, size_t count);
static ssize_t show_pwm_enable(struct device *dev,
	struct device_attribute *devattr, char *buf);
static ssize_t store_pwm_enable(struct device *dev, struct device_attribute
	*devattr, const char *buf, size_t count);
static ssize_t show_pwm_interpolate(struct device *dev,
	struct device_attribute *devattr, char *buf);
static ssize_t store_pwm_interpolate(struct device *dev,
	struct device_attribute *devattr, const char *buf, size_t count);
static ssize_t show_pwm_auto_point_channel(struct device *dev,
	struct device_attribute *devattr, char *buf);
static ssize_t store_pwm_auto_point_channel(struct device *dev,
	struct device_attribute *devattr, const char *buf, size_t count);
static ssize_t show_pwm_auto_point_temp_hyst(struct device *dev,
	struct device_attribute *devattr, char *buf);
static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
	struct device_attribute *devattr, const char *buf, size_t count);
static ssize_t show_pwm_auto_point_pwm(struct device *dev,
	struct device_attribute *devattr, char *buf);
static ssize_t store_pwm_auto_point_pwm(struct device *dev, struct
	device_attribute *devattr, const char *buf, size_t count);
static ssize_t show_pwm_auto_point_temp(struct device *dev,
	struct device_attribute *devattr, char *buf);
static ssize_t store_pwm_auto_point_temp(struct device *dev,
	struct device_attribute *devattr, const char *buf, size_t count);
/* Sysfs misc */
static ssize_t show_name(struct device *dev, struct device_attribute
	*devattr, char *buf);

static int f71882fg_probe(struct platform_device *pdev);
static int f71882fg_remove(struct platform_device *pdev);

static struct platform_driver f71882fg_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= DRVNAME,
	},
	.probe		= f71882fg_probe,
	.remove		= f71882fg_remove,
};

static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);

/*
 * Temp attr for the f71858fg, the f71858fg is special as it has its
 * temperature indexes start at 0 (the others start at 1)
 */
static struct sensor_device_attribute_2 f71858fg_temp_attr[] = {
	SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0),
	SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max,
		store_temp_max, 0, 0),
	SENSOR_ATTR_2(temp1_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst,
		store_temp_max_hyst, 0, 0),
	SENSOR_ATTR_2(temp1_max_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 0),
	SENSOR_ATTR_2(temp1_crit, S_IRUGO|S_IWUSR, show_temp_crit,
		store_temp_crit, 0, 0),
	SENSOR_ATTR_2(temp1_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
		0, 0),
	SENSOR_ATTR_2(temp1_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 4),
	SENSOR_ATTR_2(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0, 0),
	SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0, 1),
	SENSOR_ATTR_2(temp2_max, S_IRUGO|S_IWUSR, show_temp_max,
		store_temp_max, 0, 1),
	SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst,
		store_temp_max_hyst, 0, 1),
	SENSOR_ATTR_2(temp2_max_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 1),
	SENSOR_ATTR_2(temp2_crit, S_IRUGO|S_IWUSR, show_temp_crit,
		store_temp_crit, 0, 1),
	SENSOR_ATTR_2(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
		0, 1),
	SENSOR_ATTR_2(temp2_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5),
	SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 1),
	SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 2),
	SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max,
		store_temp_max, 0, 2),
	SENSOR_ATTR_2(temp3_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst,
		store_temp_max_hyst, 0, 2),
	SENSOR_ATTR_2(temp3_max_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 2),
	SENSOR_ATTR_2(temp3_crit, S_IRUGO|S_IWUSR, show_temp_crit,
		store_temp_crit, 0, 2),
	SENSOR_ATTR_2(temp3_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
		0, 2),
	SENSOR_ATTR_2(temp3_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 6),
	SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
};

/* Temp attr for the standard models (one row of 9 attrs per channel) */
static struct sensor_device_attribute_2 fxxxx_temp_attr[3][9] = { {
	SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1),
	SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max,
		store_temp_max, 0, 1),
	SENSOR_ATTR_2(temp1_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst,
		store_temp_max_hyst, 0, 1),
	/*
	 * Should really be temp1_max_alarm, but older versions did not handle
	 * the max and crit alarms separately and lm_sensors v2 depends on the
	 * presence of temp#_alarm files. The same goes for temp2/3 _alarm.
	 */
	SENSOR_ATTR_2(temp1_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 1),
	SENSOR_ATTR_2(temp1_crit, S_IRUGO|S_IWUSR, show_temp_crit,
		store_temp_crit, 0, 1),
	SENSOR_ATTR_2(temp1_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
		0, 1),
	SENSOR_ATTR_2(temp1_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5),
	SENSOR_ATTR_2(temp1_type, S_IRUGO, show_temp_type, NULL, 0, 1),
	SENSOR_ATTR_2(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0, 1),
}, {
	SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0, 2),
	SENSOR_ATTR_2(temp2_max, S_IRUGO|S_IWUSR, show_temp_max,
		store_temp_max, 0, 2),
	SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst,
		store_temp_max_hyst, 0, 2),
	/* Should be temp2_max_alarm, see temp1_alarm note */
	SENSOR_ATTR_2(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 2),
	SENSOR_ATTR_2(temp2_crit, S_IRUGO|S_IWUSR, show_temp_crit,
		store_temp_crit, 0, 2),
	SENSOR_ATTR_2(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
		0, 2),
	SENSOR_ATTR_2(temp2_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 6),
	SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 2),
	SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
}, {
	SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 3),
	SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max,
		store_temp_max, 0, 3),
	SENSOR_ATTR_2(temp3_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst,
		store_temp_max_hyst, 0, 3),
	/* Should be temp3_max_alarm, see temp1_alarm note */
	SENSOR_ATTR_2(temp3_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 3),
	SENSOR_ATTR_2(temp3_crit, S_IRUGO|S_IWUSR, show_temp_crit,
		store_temp_crit, 0, 3),
	SENSOR_ATTR_2(temp3_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
		0, 3),
	SENSOR_ATTR_2(temp3_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 7),
	SENSOR_ATTR_2(temp3_type, S_IRUGO, show_temp_type, NULL, 0, 3),
	SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 3),
} };

/* Temp attr for models which can beep on temp alarm */
static struct sensor_device_attribute_2 fxxxx_temp_beep_attr[3][2] = { {
	SENSOR_ATTR_2(temp1_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
		store_temp_beep, 0, 1),
	SENSOR_ATTR_2(temp1_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
		store_temp_beep, 0, 5),
}, {
	SENSOR_ATTR_2(temp2_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
		store_temp_beep, 0, 2),
	SENSOR_ATTR_2(temp2_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
		store_temp_beep, 0, 6),
}, {
	SENSOR_ATTR_2(temp3_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
		store_temp_beep, 0, 3),
	SENSOR_ATTR_2(temp3_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
		store_temp_beep, 0, 7),
} };

/*
 * Temp attr for the f8000
 * Note on the f8000 temp_ovt (crit) is used as max, and temp_high (max)
 * is used as hysteresis value to clear alarms
 * Also like the f71858fg its temperature indexes start at 0
 */
static struct sensor_device_attribute_2 f8000_temp_attr[] = {
	SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0),
	SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_crit,
		store_temp_crit, 0, 0),
	SENSOR_ATTR_2(temp1_max_hyst, S_IRUGO|S_IWUSR, show_temp_max,
		store_temp_max, 0, 0),
	SENSOR_ATTR_2(temp1_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 4),
	SENSOR_ATTR_2(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0, 0),
	SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0, 1),
	SENSOR_ATTR_2(temp2_max, S_IRUGO|S_IWUSR, show_temp_crit,
		store_temp_crit, 0, 1),
	SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO|S_IWUSR, show_temp_max,
		store_temp_max, 0, 1),
	SENSOR_ATTR_2(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5),
	SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 1),
	SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 2),
	SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_crit,
		store_temp_crit, 0, 2),
	SENSOR_ATTR_2(temp3_max_hyst, S_IRUGO|S_IWUSR, show_temp_max,
		store_temp_max, 0, 2),
	SENSOR_ATTR_2(temp3_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 6),
	SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
};

/* in attr for all models */
static struct sensor_device_attribute_2 fxxxx_in_attr[] = {
	SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
	SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
	SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
	SENSOR_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 0, 3),
	SENSOR_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 0, 4),
	SENSOR_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 0, 5),
	SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6),
	SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7),
	SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8),
};

/* For models with in1 alarm capability */
static struct sensor_device_attribute_2 fxxxx_in1_alarm_attr[] = {
	SENSOR_ATTR_2(in1_max, S_IRUGO|S_IWUSR, show_in_max, store_in_max,
		0, 1),
	SENSOR_ATTR_2(in1_beep, S_IRUGO|S_IWUSR, show_in_beep, store_in_beep,
		0, 1),
	SENSOR_ATTR_2(in1_alarm, S_IRUGO, show_in_alarm, NULL, 0, 1),
};

/* Fan / PWM attr common to all models (one row of 6 attrs per fan) */
static struct sensor_device_attribute_2 fxxxx_fan_attr[4][6] = { {
	SENSOR_ATTR_2(fan1_input, S_IRUGO, show_fan, NULL, 0, 0),
	SENSOR_ATTR_2(fan1_full_speed, S_IRUGO|S_IWUSR,
		show_fan_full_speed, store_fan_full_speed, 0, 0),
	SENSOR_ATTR_2(fan1_alarm, S_IRUGO, show_fan_alarm, NULL, 0, 0),
	SENSOR_ATTR_2(pwm1, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 0),
	SENSOR_ATTR_2(pwm1_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
		store_pwm_enable, 0, 0),
	SENSOR_ATTR_2(pwm1_interpolate, S_IRUGO|S_IWUSR,
		show_pwm_interpolate, store_pwm_interpolate, 0, 0),
}, {
	SENSOR_ATTR_2(fan2_input, S_IRUGO, show_fan, NULL, 0, 1),
	SENSOR_ATTR_2(fan2_full_speed, S_IRUGO|S_IWUSR,
		show_fan_full_speed, store_fan_full_speed, 0, 1),
	SENSOR_ATTR_2(fan2_alarm, S_IRUGO, show_fan_alarm, NULL, 0, 1),
	SENSOR_ATTR_2(pwm2, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 1),
	SENSOR_ATTR_2(pwm2_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
		store_pwm_enable, 0, 1),
	SENSOR_ATTR_2(pwm2_interpolate, S_IRUGO|S_IWUSR,
		show_pwm_interpolate, store_pwm_interpolate, 0, 1),
}, {
	SENSOR_ATTR_2(fan3_input, S_IRUGO, show_fan, NULL, 0, 2),
	SENSOR_ATTR_2(fan3_full_speed, S_IRUGO|S_IWUSR,
		show_fan_full_speed, store_fan_full_speed, 0, 2),
	SENSOR_ATTR_2(fan3_alarm, S_IRUGO, show_fan_alarm, NULL, 0, 2),
	SENSOR_ATTR_2(pwm3, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 2),
	SENSOR_ATTR_2(pwm3_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
		store_pwm_enable, 0, 2),
	SENSOR_ATTR_2(pwm3_interpolate, S_IRUGO|S_IWUSR,
		show_pwm_interpolate, store_pwm_interpolate, 0, 2),
}, {
	SENSOR_ATTR_2(fan4_input, S_IRUGO, show_fan, NULL, 0, 3),
	SENSOR_ATTR_2(fan4_full_speed, S_IRUGO|S_IWUSR,
		show_fan_full_speed, store_fan_full_speed, 0, 3),
	SENSOR_ATTR_2(fan4_alarm, S_IRUGO, show_fan_alarm, NULL, 0, 3),
	SENSOR_ATTR_2(pwm4, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 3),
	SENSOR_ATTR_2(pwm4_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
		store_pwm_enable, 0, 3),
	SENSOR_ATTR_2(pwm4_interpolate, S_IRUGO|S_IWUSR,
		show_pwm_interpolate, store_pwm_interpolate, 0, 3),
} };

/* Attr for the third fan of the f71808a, which only has manual pwm */
static struct sensor_device_attribute_2 f71808a_fan3_attr[] = {
	SENSOR_ATTR_2(fan3_input, S_IRUGO, show_fan, NULL, 0, 2),
	SENSOR_ATTR_2(fan3_alarm, S_IRUGO, show_fan_alarm, NULL, 0, 2),
	SENSOR_ATTR_2(pwm3, S_IRUGO|S_IWUSR, show_simple_pwm,
		store_simple_pwm, 0, 2),
};

/* Attr for models which can beep on Fan alarm */
static struct sensor_device_attribute_2 fxxxx_fan_beep_attr[] = {
	SENSOR_ATTR_2(fan1_beep, S_IRUGO|S_IWUSR, show_fan_beep,
		store_fan_beep, 0, 0),
	SENSOR_ATTR_2(fan2_beep, S_IRUGO|S_IWUSR, show_fan_beep,
		store_fan_beep, 0, 1),
	SENSOR_ATTR_2(fan3_beep, S_IRUGO|S_IWUSR, show_fan_beep,
		store_fan_beep, 0, 2),
	SENSOR_ATTR_2(fan4_beep, S_IRUGO|S_IWUSR, show_fan_beep,
		store_fan_beep, 0, 3),
};

/*
 * PWM attr for the f71862fg, fewer pwms and fewer zones per pwm than the
 * standard models
 */
static struct sensor_device_attribute_2 f71862fg_auto_pwm_attr[3][7] = { {
	SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_channel,
		store_pwm_auto_point_channel, 0, 0),
	SENSOR_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
		1, 0),
	SENSOR_ATTR_2(pwm1_auto_point2_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
		4, 0),
	SENSOR_ATTR_2(pwm1_auto_point1_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp,
		0, 0),
	SENSOR_ATTR_2(pwm1_auto_point2_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp,
		3, 0),
	SENSOR_ATTR_2(pwm1_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp_hyst,
		store_pwm_auto_point_temp_hyst, 0, 0),
	SENSOR_ATTR_2(pwm1_auto_point2_temp_hyst, S_IRUGO,
		show_pwm_auto_point_temp_hyst, NULL, 3, 0),
}, {
	SENSOR_ATTR_2(pwm2_auto_channels_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_channel,
		store_pwm_auto_point_channel, 0, 1),
	SENSOR_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
		1, 1),
	SENSOR_ATTR_2(pwm2_auto_point2_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
		4, 1),
	SENSOR_ATTR_2(pwm2_auto_point1_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp,
		0, 1),
	SENSOR_ATTR_2(pwm2_auto_point2_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp,
		3, 1),
	SENSOR_ATTR_2(pwm2_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp_hyst,
		store_pwm_auto_point_temp_hyst, 0, 1),
	SENSOR_ATTR_2(pwm2_auto_point2_temp_hyst, S_IRUGO,
		show_pwm_auto_point_temp_hyst, NULL, 3, 1),
}, {
	SENSOR_ATTR_2(pwm3_auto_channels_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_channel,
		store_pwm_auto_point_channel, 0, 2),
	SENSOR_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
		1, 2),
	SENSOR_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
		4, 2),
	SENSOR_ATTR_2(pwm3_auto_point1_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp,
		0, 2),
	SENSOR_ATTR_2(pwm3_auto_point2_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp,
		3, 2),
	SENSOR_ATTR_2(pwm3_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp_hyst,
		store_pwm_auto_point_temp_hyst, 0, 2),
	SENSOR_ATTR_2(pwm3_auto_point2_temp_hyst, S_IRUGO,
		show_pwm_auto_point_temp_hyst, NULL, 3, 2),
} };

/*
 * PWM attr for the f71808e/f71869, almost identical to the f71862fg, but the
 * pwm setting when the temperature is above the pwmX_auto_point1_temp can be
 * programmed instead of being hardcoded to 0xff
 */
static struct sensor_device_attribute_2 f71869_auto_pwm_attr[3][8] = { {
	SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_channel,
		store_pwm_auto_point_channel, 0, 0),
	SENSOR_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 0, 0),
	SENSOR_ATTR_2(pwm1_auto_point2_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 1, 0),
	SENSOR_ATTR_2(pwm1_auto_point3_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 4, 0),
	SENSOR_ATTR_2(pwm1_auto_point1_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 0, 0),
	SENSOR_ATTR_2(pwm1_auto_point2_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 3, 0),
	SENSOR_ATTR_2(pwm1_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp_hyst,
		store_pwm_auto_point_temp_hyst, 0, 0),
	SENSOR_ATTR_2(pwm1_auto_point2_temp_hyst, S_IRUGO,
		show_pwm_auto_point_temp_hyst, NULL, 3, 0),
}, {
	SENSOR_ATTR_2(pwm2_auto_channels_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_channel,
		store_pwm_auto_point_channel, 0, 1),
	SENSOR_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 0, 1),
	SENSOR_ATTR_2(pwm2_auto_point2_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 1, 1),
	SENSOR_ATTR_2(pwm2_auto_point3_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 4, 1),
	SENSOR_ATTR_2(pwm2_auto_point1_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 0, 1),
	SENSOR_ATTR_2(pwm2_auto_point2_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 3, 1),
	SENSOR_ATTR_2(pwm2_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp_hyst,
		store_pwm_auto_point_temp_hyst, 0, 1),
	SENSOR_ATTR_2(pwm2_auto_point2_temp_hyst, S_IRUGO,
		show_pwm_auto_point_temp_hyst, NULL, 3, 1),
}, {
	SENSOR_ATTR_2(pwm3_auto_channels_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_channel,
		store_pwm_auto_point_channel, 0, 2),
	SENSOR_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 0, 2),
	SENSOR_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 1, 2),
	SENSOR_ATTR_2(pwm3_auto_point3_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 4, 2),
	SENSOR_ATTR_2(pwm3_auto_point1_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 0, 2),
	SENSOR_ATTR_2(pwm3_auto_point2_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 3, 2),
	SENSOR_ATTR_2(pwm3_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp_hyst,
		store_pwm_auto_point_temp_hyst, 0, 2),
	SENSOR_ATTR_2(pwm3_auto_point2_temp_hyst, S_IRUGO,
		show_pwm_auto_point_temp_hyst, NULL, 3, 2),
} };

/* PWM attr for the standard models (5 pwm points / 4 temp points per pwm) */
static struct sensor_device_attribute_2 fxxxx_auto_pwm_attr[4][14] = { {
	SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_channel,
		store_pwm_auto_point_channel, 0, 0),
	SENSOR_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 0, 0),
	SENSOR_ATTR_2(pwm1_auto_point2_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 1, 0),
	SENSOR_ATTR_2(pwm1_auto_point3_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 2, 0),
	SENSOR_ATTR_2(pwm1_auto_point4_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 3, 0),
	SENSOR_ATTR_2(pwm1_auto_point5_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 4, 0),
	SENSOR_ATTR_2(pwm1_auto_point1_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 0, 0),
	SENSOR_ATTR_2(pwm1_auto_point2_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 1, 0),
	SENSOR_ATTR_2(pwm1_auto_point3_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 2, 0),
	SENSOR_ATTR_2(pwm1_auto_point4_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 3, 0),
	SENSOR_ATTR_2(pwm1_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp_hyst,
		store_pwm_auto_point_temp_hyst, 0, 0),
	SENSOR_ATTR_2(pwm1_auto_point2_temp_hyst, S_IRUGO,
		show_pwm_auto_point_temp_hyst, NULL, 1, 0),
	SENSOR_ATTR_2(pwm1_auto_point3_temp_hyst, S_IRUGO,
		show_pwm_auto_point_temp_hyst, NULL, 2, 0),
	SENSOR_ATTR_2(pwm1_auto_point4_temp_hyst, S_IRUGO,
		show_pwm_auto_point_temp_hyst, NULL, 3, 0),
}, {
	SENSOR_ATTR_2(pwm2_auto_channels_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_channel,
		store_pwm_auto_point_channel, 0, 1),
	SENSOR_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 0, 1),
	SENSOR_ATTR_2(pwm2_auto_point2_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 1, 1),
	SENSOR_ATTR_2(pwm2_auto_point3_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 2, 1),
	SENSOR_ATTR_2(pwm2_auto_point4_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 3, 1),
	SENSOR_ATTR_2(pwm2_auto_point5_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 4, 1),
	SENSOR_ATTR_2(pwm2_auto_point1_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 0, 1),
	SENSOR_ATTR_2(pwm2_auto_point2_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 1, 1),
	SENSOR_ATTR_2(pwm2_auto_point3_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 2, 1),
	SENSOR_ATTR_2(pwm2_auto_point4_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 3, 1),
	SENSOR_ATTR_2(pwm2_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp_hyst,
		store_pwm_auto_point_temp_hyst, 0, 1),
	SENSOR_ATTR_2(pwm2_auto_point2_temp_hyst, S_IRUGO,
		show_pwm_auto_point_temp_hyst, NULL, 1, 1),
	SENSOR_ATTR_2(pwm2_auto_point3_temp_hyst, S_IRUGO,
		show_pwm_auto_point_temp_hyst, NULL, 2, 1),
	SENSOR_ATTR_2(pwm2_auto_point4_temp_hyst, S_IRUGO,
		show_pwm_auto_point_temp_hyst, NULL, 3, 1),
}, {
	SENSOR_ATTR_2(pwm3_auto_channels_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_channel,
		store_pwm_auto_point_channel, 0, 2),
	SENSOR_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 0, 2),
	SENSOR_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 1, 2),
	SENSOR_ATTR_2(pwm3_auto_point3_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 2, 2),
	SENSOR_ATTR_2(pwm3_auto_point4_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 3, 2),
	SENSOR_ATTR_2(pwm3_auto_point5_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 4, 2),
	SENSOR_ATTR_2(pwm3_auto_point1_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 0, 2),
	SENSOR_ATTR_2(pwm3_auto_point2_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 1, 2),
	SENSOR_ATTR_2(pwm3_auto_point3_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 2, 2),
	SENSOR_ATTR_2(pwm3_auto_point4_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 3, 2),
	SENSOR_ATTR_2(pwm3_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp_hyst,
		store_pwm_auto_point_temp_hyst, 0, 2),
	SENSOR_ATTR_2(pwm3_auto_point2_temp_hyst, S_IRUGO,
		show_pwm_auto_point_temp_hyst, NULL, 1, 2),
	SENSOR_ATTR_2(pwm3_auto_point3_temp_hyst, S_IRUGO,
		show_pwm_auto_point_temp_hyst, NULL, 2, 2),
	SENSOR_ATTR_2(pwm3_auto_point4_temp_hyst, S_IRUGO,
		show_pwm_auto_point_temp_hyst, NULL, 3, 2),
}, {
	SENSOR_ATTR_2(pwm4_auto_channels_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_channel,
		store_pwm_auto_point_channel, 0, 3),
	SENSOR_ATTR_2(pwm4_auto_point1_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 0, 3),
	SENSOR_ATTR_2(pwm4_auto_point2_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 1, 3),
	SENSOR_ATTR_2(pwm4_auto_point3_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 2, 3),
	SENSOR_ATTR_2(pwm4_auto_point4_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 3, 3),
	SENSOR_ATTR_2(pwm4_auto_point5_pwm, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 4, 3),
	SENSOR_ATTR_2(pwm4_auto_point1_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 0, 3),
	SENSOR_ATTR_2(pwm4_auto_point2_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 1, 3),
	SENSOR_ATTR_2(pwm4_auto_point3_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 2, 3),
	SENSOR_ATTR_2(pwm4_auto_point4_temp, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp, store_pwm_auto_point_temp, 3, 3),
	SENSOR_ATTR_2(pwm4_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
		show_pwm_auto_point_temp_hyst,
		store_pwm_auto_point_temp_hyst, 0, 3),
	SENSOR_ATTR_2(pwm4_auto_point2_temp_hyst, S_IRUGO,
		show_pwm_auto_point_temp_hyst, NULL, 1, 3),
	SENSOR_ATTR_2(pwm4_auto_point3_temp_hyst, S_IRUGO,
		show_pwm_auto_point_temp_hyst, NULL, 2, 3),
	SENSOR_ATTR_2(pwm4_auto_point4_temp_hyst, S_IRUGO,
		show_pwm_auto_point_temp_hyst, NULL, 3, 3),
} };

/* Fan attr specific to the f8000 (4th fan input can only measure speed) */
static struct sensor_device_attribute_2 f8000_fan_attr[] = {
	SENSOR_ATTR_2(fan4_input, S_IRUGO, show_fan, NULL, 0, 3),
};

/*
 * PWM attr for the f8000, zones mapped to temp instead of to pwm!
* Also the register block at offset A0 maps to TEMP1 (so our temp2, as the * F8000 starts counting temps at 0), B0 maps the TEMP2 and C0 maps to TEMP0 */ static struct sensor_device_attribute_2 f8000_auto_pwm_attr[3][14] = { { SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_channel, store_pwm_auto_point_channel, 0, 0), SENSOR_ATTR_2(temp1_auto_point1_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 0, 2), SENSOR_ATTR_2(temp1_auto_point2_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 1, 2), SENSOR_ATTR_2(temp1_auto_point3_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 2, 2), SENSOR_ATTR_2(temp1_auto_point4_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 3, 2), SENSOR_ATTR_2(temp1_auto_point5_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 4, 2), SENSOR_ATTR_2(temp1_auto_point1_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 0, 2), SENSOR_ATTR_2(temp1_auto_point2_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 1, 2), SENSOR_ATTR_2(temp1_auto_point3_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 2, 2), SENSOR_ATTR_2(temp1_auto_point4_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 3, 2), SENSOR_ATTR_2(temp1_auto_point1_temp_hyst, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp_hyst, store_pwm_auto_point_temp_hyst, 0, 2), SENSOR_ATTR_2(temp1_auto_point2_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 1, 2), SENSOR_ATTR_2(temp1_auto_point3_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 2, 2), SENSOR_ATTR_2(temp1_auto_point4_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 3, 2), }, { SENSOR_ATTR_2(pwm2_auto_channels_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_channel, store_pwm_auto_point_channel, 0, 1), SENSOR_ATTR_2(temp2_auto_point1_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, 
store_pwm_auto_point_pwm, 0, 0), SENSOR_ATTR_2(temp2_auto_point2_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 1, 0), SENSOR_ATTR_2(temp2_auto_point3_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 2, 0), SENSOR_ATTR_2(temp2_auto_point4_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 3, 0), SENSOR_ATTR_2(temp2_auto_point5_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 4, 0), SENSOR_ATTR_2(temp2_auto_point1_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 0, 0), SENSOR_ATTR_2(temp2_auto_point2_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 1, 0), SENSOR_ATTR_2(temp2_auto_point3_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 2, 0), SENSOR_ATTR_2(temp2_auto_point4_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 3, 0), SENSOR_ATTR_2(temp2_auto_point1_temp_hyst, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp_hyst, store_pwm_auto_point_temp_hyst, 0, 0), SENSOR_ATTR_2(temp2_auto_point2_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 1, 0), SENSOR_ATTR_2(temp2_auto_point3_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 2, 0), SENSOR_ATTR_2(temp2_auto_point4_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 3, 0), }, { SENSOR_ATTR_2(pwm3_auto_channels_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_channel, store_pwm_auto_point_channel, 0, 2), SENSOR_ATTR_2(temp3_auto_point1_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 0, 1), SENSOR_ATTR_2(temp3_auto_point2_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 1, 1), SENSOR_ATTR_2(temp3_auto_point3_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 2, 1), SENSOR_ATTR_2(temp3_auto_point4_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 3, 1), SENSOR_ATTR_2(temp3_auto_point5_pwm, S_IRUGO|S_IWUSR, 
/* (tail of the f8000 auto pwm table: the pwm3 bank uses register set 1) */
	      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 4, 1),
	SENSOR_ATTR_2(temp3_auto_point1_temp, S_IRUGO|S_IWUSR,
		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
		      0, 1),
	SENSOR_ATTR_2(temp3_auto_point2_temp, S_IRUGO|S_IWUSR,
		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
		      1, 1),
	SENSOR_ATTR_2(temp3_auto_point3_temp, S_IRUGO|S_IWUSR,
		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
		      2, 1),
	SENSOR_ATTR_2(temp3_auto_point4_temp, S_IRUGO|S_IWUSR,
		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
		      3, 1),
	SENSOR_ATTR_2(temp3_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
		      show_pwm_auto_point_temp_hyst,
		      store_pwm_auto_point_temp_hyst,
		      0, 1),
	/* Only point1's hysteresis is writable; the others mirror it. */
	SENSOR_ATTR_2(temp3_auto_point2_temp_hyst, S_IRUGO,
		      show_pwm_auto_point_temp_hyst, NULL, 1, 1),
	SENSOR_ATTR_2(temp3_auto_point3_temp_hyst, S_IRUGO,
		      show_pwm_auto_point_temp_hyst, NULL, 2, 1),
	SENSOR_ATTR_2(temp3_auto_point4_temp_hyst, S_IRUGO,
		      show_pwm_auto_point_temp_hyst, NULL, 3, 1),
	}
};

/* Super I/O functions */

/* Read one byte from Super I/O configuration register 'reg'. */
static inline int superio_inb(int base, int reg)
{
	outb(reg, base);
	return inb(base + 1);
}

/* Read a big-endian 16-bit value from two consecutive config registers. */
static int superio_inw(int base, int reg)
{
	int val;
	val = superio_inb(base, reg) << 8;
	val |= superio_inb(base, reg + 1);
	return val;
}

/*
 * Unlock the Super I/O configuration space at 'base'.
 * Returns 0 on success, -EBUSY when the I/O region is already claimed.
 * Must be balanced by superio_exit().
 */
static inline int superio_enter(int base)
{
	/* Don't step on other drivers' I/O space by accident */
	if (!request_muxed_region(base, 2, DRVNAME)) {
		pr_err("I/O address 0x%04x already in use\n", base);
		return -EBUSY;
	}

	/* according to the datasheet the key must be sent twice! */
	outb(SIO_UNLOCK_KEY, base);
	outb(SIO_UNLOCK_KEY, base);
	return 0;
}

/* Select logical device 'ld' inside the Super I/O chip. */
static inline void superio_select(int base, int ld)
{
	outb(SIO_REG_LDSEL, base);
	outb(ld, base + 1);
}

/* Re-lock the configuration space and release the I/O region. */
static inline void superio_exit(int base)
{
	outb(SIO_LOCK_KEY, base);
	release_region(base, 2);
}

/* Tachometer register value -> RPM; 0 reads as a stopped fan. */
static inline int fan_from_reg(u16 reg)
{
	return reg ? (1500000 / reg) : 0;
}

/* RPM -> tachometer register value; 0 means a stopped fan. */
static inline u16 fan_to_reg(int fan)
{
	return fan ?
(1500000 / fan) : 0;
}

/*
 * Hardware-monitor register space accessors: registers are reached
 * through an address/data register pair at data->addr.
 */
static u8 f71882fg_read8(struct f71882fg_data *data, u8 reg)
{
	u8 val;

	outb(reg, data->addr + ADDR_REG_OFFSET);
	val = inb(data->addr + DATA_REG_OFFSET);

	return val;
}

/* Read a big-endian 16-bit value from two consecutive registers. */
static u16 f71882fg_read16(struct f71882fg_data *data, u8 reg)
{
	u16 val;

	val = f71882fg_read8(data, reg) << 8;
	val |= f71882fg_read8(data, reg + 1);

	return val;
}

static void f71882fg_write8(struct f71882fg_data *data, u8 reg, u8 val)
{
	outb(reg, data->addr + ADDR_REG_OFFSET);
	outb(val, data->addr + DATA_REG_OFFSET);
}

/* Write a 16-bit value big-endian into two consecutive registers. */
static void f71882fg_write16(struct f71882fg_data *data, u8 reg, u16 val)
{
	f71882fg_write8(data, reg, val >> 8);
	f71882fg_write8(data, reg + 1, val & 0xff);
}

/* The f71858fg uses 16-bit temperature registers, all others 8-bit. */
static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr)
{
	if (data->type == f71858fg)
		return f71882fg_read16(data, F71882FG_REG_TEMP(nr));
	else
		return f71882fg_read8(data, F71882FG_REG_TEMP(nr));
}

/*
 * Refresh the cached register values under data->update_lock and return
 * the data struct for the sysfs handlers.  Limit/configuration registers
 * are re-read once a minute, measurement registers once a second.
 */
static struct f71882fg_data *f71882fg_update_device(struct device *dev)
{
	struct f71882fg_data *data = dev_get_drvdata(dev);
	int nr_fans = f71882fg_nr_fans[data->type];
	int nr_temps = f71882fg_nr_temps[data->type];
	int nr, reg, point;

	mutex_lock(&data->update_lock);

	/* Update once every 60 seconds */
	if (time_after(jiffies, data->last_limits + 60 * HZ) ||
	    !data->valid) {
		if (f71882fg_has_in1_alarm[data->type]) {
			data->in1_max =
				f71882fg_read8(data, F71882FG_REG_IN1_HIGH);
			data->in_beep =
				f71882fg_read8(data, F71882FG_REG_IN_BEEP);
		}

		/* Get High & boundary temps*/
		for (nr = data->temp_start; nr < nr_temps + data->temp_start;
		     nr++) {
			data->temp_ovt[nr] = f71882fg_read8(data,
						F71882FG_REG_TEMP_OVT(nr));
			data->temp_high[nr] = f71882fg_read8(data,
						F71882FG_REG_TEMP_HIGH(nr));
		}

		if (data->type != f8000) {
			data->temp_hyst[0] = f71882fg_read8(data,
						F71882FG_REG_TEMP_HYST(0));
			data->temp_hyst[1] = f71882fg_read8(data,
						F71882FG_REG_TEMP_HYST(1));
		}

		/* All but the f71858fg / f8000 have this register */
		if ((data->type != f71858fg) && (data->type != f8000)) {
			reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
data->temp_type[1] = (reg & 0x02) ? 2 : 4; data->temp_type[2] = (reg & 0x04) ? 2 : 4; data->temp_type[3] = (reg & 0x08) ? 2 : 4; } if (f71882fg_fan_has_beep[data->type]) data->fan_beep = f71882fg_read8(data, F71882FG_REG_FAN_BEEP); if (f71882fg_temp_has_beep[data->type]) data->temp_beep = f71882fg_read8(data, F71882FG_REG_TEMP_BEEP); data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE); data->pwm_auto_point_hyst[0] = f71882fg_read8(data, F71882FG_REG_FAN_HYST(0)); data->pwm_auto_point_hyst[1] = f71882fg_read8(data, F71882FG_REG_FAN_HYST(1)); for (nr = 0; nr < nr_fans; nr++) { data->pwm_auto_point_mapping[nr] = f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(nr)); switch (data->type) { default: for (point = 0; point < 5; point++) { data->pwm_auto_point_pwm[nr][point] = f71882fg_read8(data, F71882FG_REG_POINT_PWM (nr, point)); } for (point = 0; point < 4; point++) { data->pwm_auto_point_temp[nr][point] = f71882fg_read8(data, F71882FG_REG_POINT_TEMP (nr, point)); } break; case f71808e: case f71869: data->pwm_auto_point_pwm[nr][0] = f71882fg_read8(data, F71882FG_REG_POINT_PWM(nr, 0)); /* Fall through */ case f71862fg: data->pwm_auto_point_pwm[nr][1] = f71882fg_read8(data, F71882FG_REG_POINT_PWM (nr, 1)); data->pwm_auto_point_pwm[nr][4] = f71882fg_read8(data, F71882FG_REG_POINT_PWM (nr, 4)); data->pwm_auto_point_temp[nr][0] = f71882fg_read8(data, F71882FG_REG_POINT_TEMP (nr, 0)); data->pwm_auto_point_temp[nr][3] = f71882fg_read8(data, F71882FG_REG_POINT_TEMP (nr, 3)); break; } } data->last_limits = jiffies; } /* Update every second */ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { data->temp_status = f71882fg_read8(data, F71882FG_REG_TEMP_STATUS); data->temp_diode_open = f71882fg_read8(data, F71882FG_REG_TEMP_DIODE_OPEN); for (nr = data->temp_start; nr < nr_temps + data->temp_start; nr++) data->temp[nr] = f71882fg_read_temp(data, nr); data->fan_status = f71882fg_read8(data, F71882FG_REG_FAN_STATUS); for (nr = 0; nr < nr_fans; nr++) { 
data->fan[nr] = f71882fg_read16(data, F71882FG_REG_FAN(nr)); data->fan_target[nr] = f71882fg_read16(data, F71882FG_REG_FAN_TARGET(nr)); data->fan_full_speed[nr] = f71882fg_read16(data, F71882FG_REG_FAN_FULL_SPEED(nr)); data->pwm[nr] = f71882fg_read8(data, F71882FG_REG_PWM(nr)); } /* Some models have 1 more fan with limited capabilities */ if (data->type == f71808a) { data->fan[2] = f71882fg_read16(data, F71882FG_REG_FAN(2)); data->pwm[2] = f71882fg_read8(data, F71882FG_REG_PWM(2)); } if (data->type == f8000) data->fan[3] = f71882fg_read16(data, F71882FG_REG_FAN(3)); if (f71882fg_has_in1_alarm[data->type]) data->in_status = f71882fg_read8(data, F71882FG_REG_IN_STATUS); for (nr = 0; nr < F71882FG_MAX_INS; nr++) if (f71882fg_has_in[data->type][nr]) data->in[nr] = f71882fg_read8(data, F71882FG_REG_IN(nr)); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } /* Sysfs Interface */ static ssize_t show_fan(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; int speed = fan_from_reg(data->fan[nr]); if (speed == FAN_MIN_DETECT) speed = 0; return sprintf(buf, "%d\n", speed); } static ssize_t show_fan_full_speed(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; int speed = fan_from_reg(data->fan_full_speed[nr]); return sprintf(buf, "%d\n", speed); } static ssize_t store_fan_full_speed(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; long val; err = kstrtol(buf, 10, &val); if (err) return err; val = clamp_val(val, 23, 1500000); val = fan_to_reg(val); mutex_lock(&data->update_lock); f71882fg_write16(data, F71882FG_REG_FAN_FULL_SPEED(nr), val); 
data->fan_full_speed[nr] = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_fan_beep(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; if (data->fan_beep & (1 << nr)) return sprintf(buf, "1\n"); else return sprintf(buf, "0\n"); } static ssize_t store_fan_beep(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; unsigned long val; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->fan_beep = f71882fg_read8(data, F71882FG_REG_FAN_BEEP); if (val) data->fan_beep |= 1 << nr; else data->fan_beep &= ~(1 << nr); f71882fg_write8(data, F71882FG_REG_FAN_BEEP, data->fan_beep); mutex_unlock(&data->update_lock); return count; } static ssize_t show_fan_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; if (data->fan_status & (1 << nr)) return sprintf(buf, "1\n"); else return sprintf(buf, "0\n"); } static ssize_t show_in(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; return sprintf(buf, "%d\n", data->in[nr] * 8); } static ssize_t show_in_max(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); return sprintf(buf, "%d\n", data->in1_max * 8); } static ssize_t store_in_max(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err; long val; err = kstrtol(buf, 10, &val); if (err) return err; val /= 8; val = clamp_val(val, 0, 255); mutex_lock(&data->update_lock); 
f71882fg_write8(data, F71882FG_REG_IN1_HIGH, val); data->in1_max = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_in_beep(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; if (data->in_beep & (1 << nr)) return sprintf(buf, "1\n"); else return sprintf(buf, "0\n"); } static ssize_t store_in_beep(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; unsigned long val; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP); if (val) data->in_beep |= 1 << nr; else data->in_beep &= ~(1 << nr); f71882fg_write8(data, F71882FG_REG_IN_BEEP, data->in_beep); mutex_unlock(&data->update_lock); return count; } static ssize_t show_in_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; if (data->in_status & (1 << nr)) return sprintf(buf, "1\n"); else return sprintf(buf, "0\n"); } static ssize_t show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; int sign, temp; if (data->type == f71858fg) { /* TEMP_TABLE_SEL 1 or 3 ? 
*/ if (data->temp_config & 1) { sign = data->temp[nr] & 0x0001; temp = (data->temp[nr] >> 5) & 0x7ff; } else { sign = data->temp[nr] & 0x8000; temp = (data->temp[nr] >> 5) & 0x3ff; } temp *= 125; if (sign) temp -= 128000; } else temp = data->temp[nr] * 1000; return sprintf(buf, "%d\n", temp); } static ssize_t show_temp_max(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; return sprintf(buf, "%d\n", data->temp_high[nr] * 1000); } static ssize_t store_temp_max(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; long val; err = kstrtol(buf, 10, &val); if (err) return err; val /= 1000; val = clamp_val(val, 0, 255); mutex_lock(&data->update_lock); f71882fg_write8(data, F71882FG_REG_TEMP_HIGH(nr), val); data->temp_high[nr] = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_max_hyst(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; int temp_max_hyst; mutex_lock(&data->update_lock); if (nr & 1) temp_max_hyst = data->temp_hyst[nr / 2] >> 4; else temp_max_hyst = data->temp_hyst[nr / 2] & 0x0f; temp_max_hyst = (data->temp_high[nr] - temp_max_hyst) * 1000; mutex_unlock(&data->update_lock); return sprintf(buf, "%d\n", temp_max_hyst); } static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; ssize_t ret = count; u8 reg; long val; err = kstrtol(buf, 10, &val); if (err) return err; val /= 1000; mutex_lock(&data->update_lock); /* convert abs to relative and check */ data->temp_high[nr] = f71882fg_read8(data, 
F71882FG_REG_TEMP_HIGH(nr)); val = clamp_val(val, data->temp_high[nr] - 15, data->temp_high[nr]); val = data->temp_high[nr] - val; /* convert value to register contents */ reg = f71882fg_read8(data, F71882FG_REG_TEMP_HYST(nr / 2)); if (nr & 1) reg = (reg & 0x0f) | (val << 4); else reg = (reg & 0xf0) | val; f71882fg_write8(data, F71882FG_REG_TEMP_HYST(nr / 2), reg); data->temp_hyst[nr / 2] = reg; mutex_unlock(&data->update_lock); return ret; } static ssize_t show_temp_crit(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; return sprintf(buf, "%d\n", data->temp_ovt[nr] * 1000); } static ssize_t store_temp_crit(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; long val; err = kstrtol(buf, 10, &val); if (err) return err; val /= 1000; val = clamp_val(val, 0, 255); mutex_lock(&data->update_lock); f71882fg_write8(data, F71882FG_REG_TEMP_OVT(nr), val); data->temp_ovt[nr] = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_crit_hyst(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; int temp_crit_hyst; mutex_lock(&data->update_lock); if (nr & 1) temp_crit_hyst = data->temp_hyst[nr / 2] >> 4; else temp_crit_hyst = data->temp_hyst[nr / 2] & 0x0f; temp_crit_hyst = (data->temp_ovt[nr] - temp_crit_hyst) * 1000; mutex_unlock(&data->update_lock); return sprintf(buf, "%d\n", temp_crit_hyst); } static ssize_t show_temp_type(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; return sprintf(buf, "%d\n", data->temp_type[nr]); } static ssize_t 
show_temp_beep(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; if (data->temp_beep & (1 << nr)) return sprintf(buf, "1\n"); else return sprintf(buf, "0\n"); } static ssize_t store_temp_beep(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; unsigned long val; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_beep = f71882fg_read8(data, F71882FG_REG_TEMP_BEEP); if (val) data->temp_beep |= 1 << nr; else data->temp_beep &= ~(1 << nr); f71882fg_write8(data, F71882FG_REG_TEMP_BEEP, data->temp_beep); mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; if (data->temp_status & (1 << nr)) return sprintf(buf, "1\n"); else return sprintf(buf, "0\n"); } static ssize_t show_temp_fault(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; if (data->temp_diode_open & (1 << nr)) return sprintf(buf, "1\n"); else return sprintf(buf, "0\n"); } static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int val, nr = to_sensor_dev_attr_2(devattr)->index; mutex_lock(&data->update_lock); if (data->pwm_enable & (1 << (2 * nr))) /* PWM mode */ val = data->pwm[nr]; else { /* RPM mode */ val = 255 * fan_from_reg(data->fan_target[nr]) / fan_from_reg(data->fan_full_speed[nr]); } mutex_unlock(&data->update_lock); return sprintf(buf, "%d\n", val); } static ssize_t store_pwm(struct device 
*dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; long val; err = kstrtol(buf, 10, &val); if (err) return err; val = clamp_val(val, 0, 255); mutex_lock(&data->update_lock); data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE); if ((data->type == f8000 && ((data->pwm_enable >> 2 * nr) & 3) != 2) || (data->type != f8000 && !((data->pwm_enable >> 2 * nr) & 2))) { count = -EROFS; goto leave; } if (data->pwm_enable & (1 << (2 * nr))) { /* PWM mode */ f71882fg_write8(data, F71882FG_REG_PWM(nr), val); data->pwm[nr] = val; } else { /* RPM mode */ int target, full_speed; full_speed = f71882fg_read16(data, F71882FG_REG_FAN_FULL_SPEED(nr)); target = fan_to_reg(val * fan_from_reg(full_speed) / 255); f71882fg_write16(data, F71882FG_REG_FAN_TARGET(nr), target); data->fan_target[nr] = target; data->fan_full_speed[nr] = full_speed; } leave: mutex_unlock(&data->update_lock); return count; } static ssize_t show_simple_pwm(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int val, nr = to_sensor_dev_attr_2(devattr)->index; val = data->pwm[nr]; return sprintf(buf, "%d\n", val); } static ssize_t store_simple_pwm(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; long val; err = kstrtol(buf, 10, &val); if (err) return err; val = clamp_val(val, 0, 255); mutex_lock(&data->update_lock); f71882fg_write8(data, F71882FG_REG_PWM(nr), val); data->pwm[nr] = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_pwm_enable(struct device *dev, struct device_attribute *devattr, char *buf) { int result = 0; struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; switch 
((data->pwm_enable >> 2 * nr) & 3) { case 0: case 1: result = 2; /* Normal auto mode */ break; case 2: result = 1; /* Manual mode */ break; case 3: if (data->type == f8000) result = 3; /* Thermostat mode */ else result = 1; /* Manual mode */ break; } return sprintf(buf, "%d\n", result); } static ssize_t store_pwm_enable(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; long val; err = kstrtol(buf, 10, &val); if (err) return err; /* Special case for F8000 pwm channel 3 which only does auto mode */ if (data->type == f8000 && nr == 2 && val != 2) return -EINVAL; mutex_lock(&data->update_lock); data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE); /* Special case for F8000 auto PWM mode / Thermostat mode */ if (data->type == f8000 && ((data->pwm_enable >> 2 * nr) & 1)) { switch (val) { case 2: data->pwm_enable &= ~(2 << (2 * nr)); break; /* Normal auto mode */ case 3: data->pwm_enable |= 2 << (2 * nr); break; /* Thermostat mode */ default: count = -EINVAL; goto leave; } } else { switch (val) { case 1: /* The f71858fg does not support manual RPM mode */ if (data->type == f71858fg && ((data->pwm_enable >> (2 * nr)) & 1)) { count = -EINVAL; goto leave; } data->pwm_enable |= 2 << (2 * nr); break; /* Manual */ case 2: data->pwm_enable &= ~(2 << (2 * nr)); break; /* Normal auto mode */ default: count = -EINVAL; goto leave; } } f71882fg_write8(data, F71882FG_REG_PWM_ENABLE, data->pwm_enable); leave: mutex_unlock(&data->update_lock); return count; } static ssize_t show_pwm_auto_point_pwm(struct device *dev, struct device_attribute *devattr, char *buf) { int result; struct f71882fg_data *data = f71882fg_update_device(dev); int pwm = to_sensor_dev_attr_2(devattr)->index; int point = to_sensor_dev_attr_2(devattr)->nr; mutex_lock(&data->update_lock); if (data->pwm_enable & (1 << (2 * pwm))) { /* PWM mode */ result = 
data->pwm_auto_point_pwm[pwm][point]; } else { /* RPM mode */ result = 32 * 255 / (32 + data->pwm_auto_point_pwm[pwm][point]); } mutex_unlock(&data->update_lock); return sprintf(buf, "%d\n", result); } static ssize_t store_pwm_auto_point_pwm(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, pwm = to_sensor_dev_attr_2(devattr)->index; int point = to_sensor_dev_attr_2(devattr)->nr; long val; err = kstrtol(buf, 10, &val); if (err) return err; val = clamp_val(val, 0, 255); mutex_lock(&data->update_lock); data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE); if (data->pwm_enable & (1 << (2 * pwm))) { /* PWM mode */ } else { /* RPM mode */ if (val < 29) /* Prevent negative numbers */ val = 255; else val = (255 - val) * 32 / val; } f71882fg_write8(data, F71882FG_REG_POINT_PWM(pwm, point), val); data->pwm_auto_point_pwm[pwm][point] = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_pwm_auto_point_temp_hyst(struct device *dev, struct device_attribute *devattr, char *buf) { int result = 0; struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; int point = to_sensor_dev_attr_2(devattr)->nr; mutex_lock(&data->update_lock); if (nr & 1) result = data->pwm_auto_point_hyst[nr / 2] >> 4; else result = data->pwm_auto_point_hyst[nr / 2] & 0x0f; result = 1000 * (data->pwm_auto_point_temp[nr][point] - result); mutex_unlock(&data->update_lock); return sprintf(buf, "%d\n", result); } static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; int point = to_sensor_dev_attr_2(devattr)->nr; u8 reg; long val; err = kstrtol(buf, 10, &val); if (err) return err; val /= 1000; mutex_lock(&data->update_lock); 
data->pwm_auto_point_temp[nr][point] = f71882fg_read8(data, F71882FG_REG_POINT_TEMP(nr, point)); val = clamp_val(val, data->pwm_auto_point_temp[nr][point] - 15, data->pwm_auto_point_temp[nr][point]); val = data->pwm_auto_point_temp[nr][point] - val; reg = f71882fg_read8(data, F71882FG_REG_FAN_HYST(nr / 2)); if (nr & 1) reg = (reg & 0x0f) | (val << 4); else reg = (reg & 0xf0) | val; f71882fg_write8(data, F71882FG_REG_FAN_HYST(nr / 2), reg); data->pwm_auto_point_hyst[nr / 2] = reg; mutex_unlock(&data->update_lock); return count; } static ssize_t show_pwm_interpolate(struct device *dev, struct device_attribute *devattr, char *buf) { int result; struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; result = (data->pwm_auto_point_mapping[nr] >> 4) & 1; return sprintf(buf, "%d\n", result); } static ssize_t store_pwm_interpolate(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; unsigned long val; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->pwm_auto_point_mapping[nr] = f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(nr)); if (val) val = data->pwm_auto_point_mapping[nr] | (1 << 4); else val = data->pwm_auto_point_mapping[nr] & (~(1 << 4)); f71882fg_write8(data, F71882FG_REG_POINT_MAPPING(nr), val); data->pwm_auto_point_mapping[nr] = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_pwm_auto_point_channel(struct device *dev, struct device_attribute *devattr, char *buf) { int result; struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; result = 1 << ((data->pwm_auto_point_mapping[nr] & 3) - data->temp_start); return sprintf(buf, "%d\n", result); } static ssize_t store_pwm_auto_point_channel(struct device *dev, struct device_attribute *devattr, const char *buf, size_t 
count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; long val; err = kstrtol(buf, 10, &val); if (err) return err; switch (val) { case 1: val = 0; break; case 2: val = 1; break; case 4: val = 2; break; default: return -EINVAL; } val += data->temp_start; mutex_lock(&data->update_lock); data->pwm_auto_point_mapping[nr] = f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(nr)); val = (data->pwm_auto_point_mapping[nr] & 0xfc) | val; f71882fg_write8(data, F71882FG_REG_POINT_MAPPING(nr), val); data->pwm_auto_point_mapping[nr] = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_pwm_auto_point_temp(struct device *dev, struct device_attribute *devattr, char *buf) { int result; struct f71882fg_data *data = f71882fg_update_device(dev); int pwm = to_sensor_dev_attr_2(devattr)->index; int point = to_sensor_dev_attr_2(devattr)->nr; result = data->pwm_auto_point_temp[pwm][point]; return sprintf(buf, "%d\n", 1000 * result); } static ssize_t store_pwm_auto_point_temp(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, pwm = to_sensor_dev_attr_2(devattr)->index; int point = to_sensor_dev_attr_2(devattr)->nr; long val; err = kstrtol(buf, 10, &val); if (err) return err; val /= 1000; if (data->auto_point_temp_signed) val = clamp_val(val, -128, 127); else val = clamp_val(val, 0, 127); mutex_lock(&data->update_lock); f71882fg_write8(data, F71882FG_REG_POINT_TEMP(pwm, point), val); data->pwm_auto_point_temp[pwm][point] = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = dev_get_drvdata(dev); return sprintf(buf, "%s\n", f71882fg_names[data->type]); } static int f71882fg_create_sysfs_files(struct platform_device *pdev, struct sensor_device_attribute_2 *attr, int count) { int err, i; for (i = 0; 
i < count; i++) { err = device_create_file(&pdev->dev, &attr[i].dev_attr); if (err) return err; } return 0; } static void f71882fg_remove_sysfs_files(struct platform_device *pdev, struct sensor_device_attribute_2 *attr, int count) { int i; for (i = 0; i < count; i++) device_remove_file(&pdev->dev, &attr[i].dev_attr); } static int f71882fg_create_fan_sysfs_files( struct platform_device *pdev, int idx) { struct f71882fg_data *data = platform_get_drvdata(pdev); int err; /* Sanity check the pwm setting */ err = 0; switch (data->type) { case f71858fg: if (((data->pwm_enable >> (idx * 2)) & 3) == 3) err = 1; break; case f71862fg: if (((data->pwm_enable >> (idx * 2)) & 1) != 1) err = 1; break; case f8000: if (idx == 2) err = data->pwm_enable & 0x20; break; default: break; } if (err) { dev_err(&pdev->dev, "Invalid (reserved) pwm settings: 0x%02x, " "skipping fan %d\n", (data->pwm_enable >> (idx * 2)) & 3, idx + 1); return 0; /* This is a non fatal condition */ } err = f71882fg_create_sysfs_files(pdev, &fxxxx_fan_attr[idx][0], ARRAY_SIZE(fxxxx_fan_attr[0])); if (err) return err; if (f71882fg_fan_has_beep[data->type]) { err = f71882fg_create_sysfs_files(pdev, &fxxxx_fan_beep_attr[idx], 1); if (err) return err; } dev_info(&pdev->dev, "Fan: %d is in %s mode\n", idx + 1, (data->pwm_enable & (1 << (2 * idx))) ? 
"duty-cycle" : "RPM"); /* Check for unsupported auto pwm settings */ switch (data->type) { case f71808e: case f71808a: case f71869: case f71869a: case f71889fg: case f71889ed: case f71889a: data->pwm_auto_point_mapping[idx] = f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(idx)); if ((data->pwm_auto_point_mapping[idx] & 0x80) || (data->pwm_auto_point_mapping[idx] & 3) == 0) { dev_warn(&pdev->dev, "Auto pwm controlled by raw digital " "data, disabling pwm auto_point " "sysfs attributes for fan %d\n", idx + 1); return 0; /* This is a non fatal condition */ } break; default: break; } switch (data->type) { case f71862fg: err = f71882fg_create_sysfs_files(pdev, &f71862fg_auto_pwm_attr[idx][0], ARRAY_SIZE(f71862fg_auto_pwm_attr[0])); break; case f71808e: case f71869: err = f71882fg_create_sysfs_files(pdev, &f71869_auto_pwm_attr[idx][0], ARRAY_SIZE(f71869_auto_pwm_attr[0])); break; case f8000: err = f71882fg_create_sysfs_files(pdev, &f8000_auto_pwm_attr[idx][0], ARRAY_SIZE(f8000_auto_pwm_attr[0])); break; default: err = f71882fg_create_sysfs_files(pdev, &fxxxx_auto_pwm_attr[idx][0], ARRAY_SIZE(fxxxx_auto_pwm_attr[0])); } return err; } static int f71882fg_probe(struct platform_device *pdev) { struct f71882fg_data *data; struct f71882fg_sio_data *sio_data = pdev->dev.platform_data; int nr_fans = f71882fg_nr_fans[sio_data->type]; int nr_temps = f71882fg_nr_temps[sio_data->type]; int err, i; u8 start_reg, reg; data = devm_kzalloc(&pdev->dev, sizeof(struct f71882fg_data), GFP_KERNEL); if (!data) return -ENOMEM; data->addr = platform_get_resource(pdev, IORESOURCE_IO, 0)->start; data->type = sio_data->type; data->temp_start = (data->type == f71858fg || data->type == f8000) ? 
0 : 1; mutex_init(&data->update_lock); platform_set_drvdata(pdev, data); start_reg = f71882fg_read8(data, F71882FG_REG_START); if (start_reg & 0x04) { dev_warn(&pdev->dev, "Hardware monitor is powered down\n"); return -ENODEV; } if (!(start_reg & 0x03)) { dev_warn(&pdev->dev, "Hardware monitoring not activated\n"); return -ENODEV; } /* Register sysfs interface files */ err = device_create_file(&pdev->dev, &dev_attr_name); if (err) goto exit_unregister_sysfs; if (start_reg & 0x01) { switch (data->type) { case f71858fg: data->temp_config = f71882fg_read8(data, F71882FG_REG_TEMP_CONFIG); if (data->temp_config & 0x10) /* * The f71858fg temperature alarms behave as * the f8000 alarms in this mode */ err = f71882fg_create_sysfs_files(pdev, f8000_temp_attr, ARRAY_SIZE(f8000_temp_attr)); else err = f71882fg_create_sysfs_files(pdev, f71858fg_temp_attr, ARRAY_SIZE(f71858fg_temp_attr)); break; case f8000: err = f71882fg_create_sysfs_files(pdev, f8000_temp_attr, ARRAY_SIZE(f8000_temp_attr)); break; default: err = f71882fg_create_sysfs_files(pdev, &fxxxx_temp_attr[0][0], ARRAY_SIZE(fxxxx_temp_attr[0]) * nr_temps); } if (err) goto exit_unregister_sysfs; if (f71882fg_temp_has_beep[data->type]) { err = f71882fg_create_sysfs_files(pdev, &fxxxx_temp_beep_attr[0][0], ARRAY_SIZE(fxxxx_temp_beep_attr[0]) * nr_temps); if (err) goto exit_unregister_sysfs; } for (i = 0; i < F71882FG_MAX_INS; i++) { if (f71882fg_has_in[data->type][i]) { err = device_create_file(&pdev->dev, &fxxxx_in_attr[i].dev_attr); if (err) goto exit_unregister_sysfs; } } if (f71882fg_has_in1_alarm[data->type]) { err = f71882fg_create_sysfs_files(pdev, fxxxx_in1_alarm_attr, ARRAY_SIZE(fxxxx_in1_alarm_attr)); if (err) goto exit_unregister_sysfs; } } if (start_reg & 0x02) { switch (data->type) { case f71808e: case f71808a: case f71869: case f71869a: /* These always have signed auto point temps */ data->auto_point_temp_signed = 1; /* Fall through to select correct fan/pwm reg bank! 
*/ case f71889fg: case f71889ed: case f71889a: reg = f71882fg_read8(data, F71882FG_REG_FAN_FAULT_T); if (reg & F71882FG_FAN_NEG_TEMP_EN) data->auto_point_temp_signed = 1; /* Ensure banked pwm registers point to right bank */ reg &= ~F71882FG_FAN_PROG_SEL; f71882fg_write8(data, F71882FG_REG_FAN_FAULT_T, reg); break; default: break; } data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE); for (i = 0; i < nr_fans; i++) { err = f71882fg_create_fan_sysfs_files(pdev, i); if (err) goto exit_unregister_sysfs; } /* Some types have 1 extra fan with limited functionality */ switch (data->type) { case f71808a: err = f71882fg_create_sysfs_files(pdev, f71808a_fan3_attr, ARRAY_SIZE(f71808a_fan3_attr)); break; case f8000: err = f71882fg_create_sysfs_files(pdev, f8000_fan_attr, ARRAY_SIZE(f8000_fan_attr)); break; default: break; } if (err) goto exit_unregister_sysfs; } data->hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); data->hwmon_dev = NULL; goto exit_unregister_sysfs; } return 0; exit_unregister_sysfs: f71882fg_remove(pdev); /* Will unregister the sysfs files for us */ return err; /* f71882fg_remove() also frees our data */ return err; } static int f71882fg_remove(struct platform_device *pdev) { struct f71882fg_data *data = platform_get_drvdata(pdev); int nr_fans = f71882fg_nr_fans[data->type]; int nr_temps = f71882fg_nr_temps[data->type]; int i; u8 start_reg = f71882fg_read8(data, F71882FG_REG_START); if (data->hwmon_dev) hwmon_device_unregister(data->hwmon_dev); device_remove_file(&pdev->dev, &dev_attr_name); if (start_reg & 0x01) { switch (data->type) { case f71858fg: if (data->temp_config & 0x10) f71882fg_remove_sysfs_files(pdev, f8000_temp_attr, ARRAY_SIZE(f8000_temp_attr)); else f71882fg_remove_sysfs_files(pdev, f71858fg_temp_attr, ARRAY_SIZE(f71858fg_temp_attr)); break; case f8000: f71882fg_remove_sysfs_files(pdev, f8000_temp_attr, ARRAY_SIZE(f8000_temp_attr)); break; default: 
f71882fg_remove_sysfs_files(pdev, &fxxxx_temp_attr[0][0], ARRAY_SIZE(fxxxx_temp_attr[0]) * nr_temps); } if (f71882fg_temp_has_beep[data->type]) { f71882fg_remove_sysfs_files(pdev, &fxxxx_temp_beep_attr[0][0], ARRAY_SIZE(fxxxx_temp_beep_attr[0]) * nr_temps); } for (i = 0; i < F71882FG_MAX_INS; i++) { if (f71882fg_has_in[data->type][i]) { device_remove_file(&pdev->dev, &fxxxx_in_attr[i].dev_attr); } } if (f71882fg_has_in1_alarm[data->type]) { f71882fg_remove_sysfs_files(pdev, fxxxx_in1_alarm_attr, ARRAY_SIZE(fxxxx_in1_alarm_attr)); } } if (start_reg & 0x02) { f71882fg_remove_sysfs_files(pdev, &fxxxx_fan_attr[0][0], ARRAY_SIZE(fxxxx_fan_attr[0]) * nr_fans); if (f71882fg_fan_has_beep[data->type]) { f71882fg_remove_sysfs_files(pdev, fxxxx_fan_beep_attr, nr_fans); } switch (data->type) { case f71808a: f71882fg_remove_sysfs_files(pdev, &fxxxx_auto_pwm_attr[0][0], ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans); f71882fg_remove_sysfs_files(pdev, f71808a_fan3_attr, ARRAY_SIZE(f71808a_fan3_attr)); break; case f71862fg: f71882fg_remove_sysfs_files(pdev, &f71862fg_auto_pwm_attr[0][0], ARRAY_SIZE(f71862fg_auto_pwm_attr[0]) * nr_fans); break; case f71808e: case f71869: f71882fg_remove_sysfs_files(pdev, &f71869_auto_pwm_attr[0][0], ARRAY_SIZE(f71869_auto_pwm_attr[0]) * nr_fans); break; case f8000: f71882fg_remove_sysfs_files(pdev, f8000_fan_attr, ARRAY_SIZE(f8000_fan_attr)); f71882fg_remove_sysfs_files(pdev, &f8000_auto_pwm_attr[0][0], ARRAY_SIZE(f8000_auto_pwm_attr[0]) * nr_fans); break; default: f71882fg_remove_sysfs_files(pdev, &fxxxx_auto_pwm_attr[0][0], ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans); } } return 0; } static int __init f71882fg_find(int sioaddr, struct f71882fg_sio_data *sio_data) { u16 devid; unsigned short address; int err = superio_enter(sioaddr); if (err) return err; devid = superio_inw(sioaddr, SIO_REG_MANID); if (devid != SIO_FINTEK_ID) { pr_debug("Not a Fintek device\n"); err = -ENODEV; goto exit; } devid = force_id ? 
force_id : superio_inw(sioaddr, SIO_REG_DEVID); switch (devid) { case SIO_F71808E_ID: sio_data->type = f71808e; break; case SIO_F71808A_ID: sio_data->type = f71808a; break; case SIO_F71858_ID: sio_data->type = f71858fg; break; case SIO_F71862_ID: sio_data->type = f71862fg; break; case SIO_F71869_ID: sio_data->type = f71869; break; case SIO_F71869A_ID: sio_data->type = f71869a; break; case SIO_F71882_ID: sio_data->type = f71882fg; break; case SIO_F71889_ID: sio_data->type = f71889fg; break; case SIO_F71889E_ID: sio_data->type = f71889ed; break; case SIO_F71889A_ID: sio_data->type = f71889a; break; case SIO_F8000_ID: sio_data->type = f8000; break; case SIO_F81865_ID: sio_data->type = f81865f; break; default: pr_info("Unsupported Fintek device: %04x\n", (unsigned int)devid); err = -ENODEV; goto exit; } if (sio_data->type == f71858fg) superio_select(sioaddr, SIO_F71858FG_LD_HWM); else superio_select(sioaddr, SIO_F71882FG_LD_HWM); if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) { pr_warn("Device not activated\n"); err = -ENODEV; goto exit; } address = superio_inw(sioaddr, SIO_REG_ADDR); if (address == 0) { pr_warn("Base address not set\n"); err = -ENODEV; goto exit; } address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */ err = address; pr_info("Found %s chip at %#x, revision %d\n", f71882fg_names[sio_data->type], (unsigned int)address, (int)superio_inb(sioaddr, SIO_REG_DEVREV)); exit: superio_exit(sioaddr); return err; } static int __init f71882fg_device_add(int address, const struct f71882fg_sio_data *sio_data) { struct resource res = { .start = address, .end = address + REGION_LENGTH - 1, .flags = IORESOURCE_IO, }; int err; f71882fg_pdev = platform_device_alloc(DRVNAME, address); if (!f71882fg_pdev) return -ENOMEM; res.name = f71882fg_pdev->name; err = acpi_check_resource_conflict(&res); if (err) goto exit_device_put; err = platform_device_add_resources(f71882fg_pdev, &res, 1); if (err) { pr_err("Device resource addition failed\n"); goto exit_device_put; } err = 
platform_device_add_data(f71882fg_pdev, sio_data, sizeof(struct f71882fg_sio_data)); if (err) { pr_err("Platform data allocation failed\n"); goto exit_device_put; } err = platform_device_add(f71882fg_pdev); if (err) { pr_err("Device addition failed\n"); goto exit_device_put; } return 0; exit_device_put: platform_device_put(f71882fg_pdev); return err; } static int __init f71882fg_init(void) { int err; int address; struct f71882fg_sio_data sio_data; memset(&sio_data, 0, sizeof(sio_data)); address = f71882fg_find(0x2e, &sio_data); if (address < 0) address = f71882fg_find(0x4e, &sio_data); if (address < 0) return address; err = platform_driver_register(&f71882fg_driver); if (err) return err; err = f71882fg_device_add(address, &sio_data); if (err) goto exit_driver; return 0; exit_driver: platform_driver_unregister(&f71882fg_driver); return err; } static void __exit f71882fg_exit(void) { platform_device_unregister(f71882fg_pdev); platform_driver_unregister(&f71882fg_driver); } MODULE_DESCRIPTION("F71882FG Hardware Monitoring Driver"); MODULE_AUTHOR("Hans Edgington, Hans de Goede <hdegoede@redhat.com>"); MODULE_LICENSE("GPL"); module_init(f71882fg_init); module_exit(f71882fg_exit);
gpl-2.0
jjhiza/Monarch
drivers/usb/core/urb.c
2726
28965
#include <linux/module.h> #include <linux/string.h> #include <linux/bitops.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/log2.h> #include <linux/usb.h> #include <linux/wait.h> #include <linux/usb/hcd.h> #define to_urb(d) container_of(d, struct urb, kref) static void urb_destroy(struct kref *kref) { struct urb *urb = to_urb(kref); if (urb->transfer_flags & URB_FREE_BUFFER) kfree(urb->transfer_buffer); kfree(urb); } /** * usb_init_urb - initializes a urb so that it can be used by a USB driver * @urb: pointer to the urb to initialize * * Initializes a urb so that the USB subsystem can use it properly. * * If a urb is created with a call to usb_alloc_urb() it is not * necessary to call this function. Only use this if you allocate the * space for a struct urb on your own. If you call this function, be * careful when freeing the memory for your urb that it is no longer in * use by the USB core. * * Only use this function if you _really_ understand what you are doing. */ void usb_init_urb(struct urb *urb) { if (urb) { memset(urb, 0, sizeof(*urb)); kref_init(&urb->kref); INIT_LIST_HEAD(&urb->anchor_list); } } EXPORT_SYMBOL_GPL(usb_init_urb); /** * usb_alloc_urb - creates a new urb for a USB driver to use * @iso_packets: number of iso packets for this urb * @mem_flags: the type of memory to allocate, see kmalloc() for a list of * valid options for this. * * Creates an urb for the USB driver to use, initializes a few internal * structures, incrementes the usage counter, and returns a pointer to it. * * If no memory is available, NULL is returned. * * If the driver want to use this urb for interrupt, control, or bulk * endpoints, pass '0' as the number of iso packets. * * The driver must call usb_free_urb() when it is finished with the urb. 
*/ struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags) { struct urb *urb; urb = kmalloc(sizeof(struct urb) + iso_packets * sizeof(struct usb_iso_packet_descriptor), mem_flags); if (!urb) { printk(KERN_ERR "alloc_urb: kmalloc failed\n"); return NULL; } usb_init_urb(urb); return urb; } EXPORT_SYMBOL_GPL(usb_alloc_urb); /** * usb_free_urb - frees the memory used by a urb when all users of it are finished * @urb: pointer to the urb to free, may be NULL * * Must be called when a user of a urb is finished with it. When the last user * of the urb calls this function, the memory of the urb is freed. * * Note: The transfer buffer associated with the urb is not freed unless the * URB_FREE_BUFFER transfer flag is set. */ void usb_free_urb(struct urb *urb) { if (urb) kref_put(&urb->kref, urb_destroy); } EXPORT_SYMBOL_GPL(usb_free_urb); /** * usb_get_urb - increments the reference count of the urb * @urb: pointer to the urb to modify, may be NULL * * This must be called whenever a urb is transferred from a device driver to a * host controller driver. This allows proper reference counting to happen * for urbs. * * A pointer to the urb with the incremented reference counter is returned. 
*/ struct urb *usb_get_urb(struct urb *urb) { if (urb) kref_get(&urb->kref); return urb; } EXPORT_SYMBOL_GPL(usb_get_urb); /** * usb_anchor_urb - anchors an URB while it is processed * @urb: pointer to the urb to anchor * @anchor: pointer to the anchor * * This can be called to have access to URBs which are to be executed * without bothering to track them */ void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor) { unsigned long flags; spin_lock_irqsave(&anchor->lock, flags); usb_get_urb(urb); list_add_tail(&urb->anchor_list, &anchor->urb_list); urb->anchor = anchor; if (unlikely(anchor->poisoned)) { atomic_inc(&urb->reject); } spin_unlock_irqrestore(&anchor->lock, flags); } EXPORT_SYMBOL_GPL(usb_anchor_urb); /* Callers must hold anchor->lock */ static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor) { urb->anchor = NULL; list_del(&urb->anchor_list); usb_put_urb(urb); if (list_empty(&anchor->urb_list)) wake_up(&anchor->wait); } /** * usb_unanchor_urb - unanchors an URB * @urb: pointer to the urb to anchor * * Call this to stop the system keeping track of this URB */ void usb_unanchor_urb(struct urb *urb) { unsigned long flags; struct usb_anchor *anchor; if (!urb) return; anchor = urb->anchor; if (!anchor) return; spin_lock_irqsave(&anchor->lock, flags); /* * At this point, we could be competing with another thread which * has the same intention. To protect the urb from being unanchored * twice, only the winner of the race gets the job. */ if (likely(anchor == urb->anchor)) __usb_unanchor_urb(urb, anchor); spin_unlock_irqrestore(&anchor->lock, flags); } EXPORT_SYMBOL_GPL(usb_unanchor_urb); /*-------------------------------------------------------------------*/ /** * usb_submit_urb - issue an asynchronous transfer request for an endpoint * @urb: pointer to the urb describing the request * @mem_flags: the type of memory to allocate, see kmalloc() for a list * of valid options for this. 
* * This submits a transfer request, and transfers control of the URB * describing that request to the USB subsystem. Request completion will * be indicated later, asynchronously, by calling the completion handler. * The three types of completion are success, error, and unlink * (a software-induced fault, also called "request cancellation"). * * URBs may be submitted in interrupt context. * * The caller must have correctly initialized the URB before submitting * it. Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are * available to ensure that most fields are correctly initialized, for * the particular kind of transfer, although they will not initialize * any transfer flags. * * Successful submissions return 0; otherwise this routine returns a * negative error number. If the submission is successful, the complete() * callback from the URB will be called exactly once, when the USB core and * Host Controller Driver (HCD) are finished with the URB. When the completion * function is called, control of the URB is returned to the device * driver which issued the request. The completion handler may then * immediately free or reuse that URB. * * With few exceptions, USB device drivers should never access URB fields * provided by usbcore or the HCD until its complete() is called. * The exceptions relate to periodic transfer scheduling. For both * interrupt and isochronous urbs, as part of successful URB submission * urb->interval is modified to reflect the actual transfer period used * (normally some power of two units). And for isochronous urbs, * urb->start_frame is modified to reflect when the URB's transfers were * scheduled to start. Not all isochronous transfer scheduling policies * will work, but most host controller drivers should easily handle ISO * queues going from now until 10-200 msec into the future. * * For control endpoints, the synchronous usb_control_msg() call is * often used (in non-interrupt context) instead of this call. 
* That is often used through convenience wrappers, for the requests * that are standardized in the USB 2.0 specification. For bulk * endpoints, a synchronous usb_bulk_msg() call is available. * * Request Queuing: * * URBs may be submitted to endpoints before previous ones complete, to * minimize the impact of interrupt latencies and system overhead on data * throughput. With that queuing policy, an endpoint's queue would never * be empty. This is required for continuous isochronous data streams, * and may also be required for some kinds of interrupt transfers. Such * queuing also maximizes bandwidth utilization by letting USB controllers * start work on later requests before driver software has finished the * completion processing for earlier (successful) requests. * * As of Linux 2.6, all USB endpoint transfer queues support depths greater * than one. This was previously a HCD-specific behavior, except for ISO * transfers. Non-isochronous endpoint queues are inactive during cleanup * after faults (transfer errors or cancellation). * * Reserved Bandwidth Transfers: * * Periodic transfers (interrupt or isochronous) are performed repeatedly, * using the interval specified in the urb. Submitting the first urb to * the endpoint reserves the bandwidth necessary to make those transfers. * If the USB subsystem can't allocate sufficient bandwidth to perform * the periodic request, submitting such a periodic request should fail. * * For devices under xHCI, the bandwidth is reserved at configuration time, or * when the alt setting is selected. If there is not enough bus bandwidth, the * configuration/alt setting request will fail. Therefore, submissions to * periodic endpoints on devices under xHCI should never fail due to bandwidth * constraints. * * Device drivers must explicitly request that repetition, by ensuring that * some URB is always on the endpoint's queue (except possibly for short * periods during completion callacks). 
When there is no longer an urb * queued, the endpoint's bandwidth reservation is canceled. This means * drivers can use their completion handlers to ensure they keep bandwidth * they need, by reinitializing and resubmitting the just-completed urb * until the driver longer needs that periodic bandwidth. * * Memory Flags: * * The general rules for how to decide which mem_flags to use * are the same as for kmalloc. There are four * different possible values; GFP_KERNEL, GFP_NOFS, GFP_NOIO and * GFP_ATOMIC. * * GFP_NOFS is not ever used, as it has not been implemented yet. * * GFP_ATOMIC is used when * (a) you are inside a completion handler, an interrupt, bottom half, * tasklet or timer, or * (b) you are holding a spinlock or rwlock (does not apply to * semaphores), or * (c) current->state != TASK_RUNNING, this is the case only after * you've changed it. * * GFP_NOIO is used in the block io path and error handling of storage * devices. * * All other situations use GFP_KERNEL. * * Some more specific rules for mem_flags can be inferred, such as * (1) start_xmit, timeout, and receive methods of network drivers must * use GFP_ATOMIC (they are called with a spinlock held); * (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also * called with a spinlock held); * (3) If you use a kernel thread with a network driver you must use * GFP_NOIO, unless (b) or (c) apply; * (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c) * apply or your are in a storage driver's block io path; * (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and * (6) changing firmware on a running storage or net device uses * GFP_NOIO, unless b) or c) apply * */ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) { int xfertype, max; struct usb_device *dev; struct usb_host_endpoint *ep; int is_out; if (!urb || urb->hcpriv || !urb->complete) return -EINVAL; dev = urb->dev; if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED)) return -ENODEV; /* For 
now, get the endpoint from the pipe. Eventually drivers * will be required to set urb->ep directly and we will eliminate * urb->pipe. */ ep = usb_pipe_endpoint(dev, urb->pipe); if (!ep) return -ENOENT; urb->ep = ep; urb->status = -EINPROGRESS; urb->actual_length = 0; /* Lots of sanity checks, so HCDs can rely on clean data * and don't need to duplicate tests */ xfertype = usb_endpoint_type(&ep->desc); if (xfertype == USB_ENDPOINT_XFER_CONTROL) { struct usb_ctrlrequest *setup = (struct usb_ctrlrequest *) urb->setup_packet; if (!setup) return -ENOEXEC; is_out = !(setup->bRequestType & USB_DIR_IN) || !setup->wLength; } else { is_out = usb_endpoint_dir_out(&ep->desc); } /* Clear the internal flags and cache the direction for later use */ urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE | URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL | URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL | URB_DMA_SG_COMBINED); urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN); if (xfertype != USB_ENDPOINT_XFER_CONTROL && dev->state < USB_STATE_CONFIGURED) return -ENODEV; max = usb_endpoint_maxp(&ep->desc); if (max <= 0) { dev_dbg(&dev->dev, "bogus endpoint ep%d%s in %s (bad maxpacket %d)\n", usb_endpoint_num(&ep->desc), is_out ? "out" : "in", __func__, max); return -EMSGSIZE; } /* periodic transfers limit size per frame/uframe, * but drivers only control those sizes for ISO. * while we're checking, initialize return status. */ if (xfertype == USB_ENDPOINT_XFER_ISOC) { int n, len; /* SuperSpeed isoc endpoints have up to 16 bursts of up to * 3 packets each */ if (dev->speed == USB_SPEED_SUPER) { int burst = 1 + ep->ss_ep_comp.bMaxBurst; int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes); max *= burst; max *= mult; } /* "high bandwidth" mode, 1-3 packets/uframe? 
*/ if (dev->speed == USB_SPEED_HIGH) { int mult = 1 + ((max >> 11) & 0x03); max &= 0x07ff; max *= mult; } if (urb->number_of_packets <= 0) return -EINVAL; for (n = 0; n < urb->number_of_packets; n++) { len = urb->iso_frame_desc[n].length; if (len < 0 || len > max) return -EMSGSIZE; urb->iso_frame_desc[n].status = -EXDEV; urb->iso_frame_desc[n].actual_length = 0; } } /* the I/O buffer must be mapped/unmapped, except when length=0 */ if (urb->transfer_buffer_length > INT_MAX) return -EMSGSIZE; #ifdef DEBUG /* stuff that drivers shouldn't do, but which shouldn't * cause problems in HCDs if they get it wrong. */ { unsigned int allowed; static int pipetypes[4] = { PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT }; /* Check that the pipe's type matches the endpoint's type */ if (usb_pipetype(urb->pipe) != pipetypes[xfertype]) dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n", usb_pipetype(urb->pipe), pipetypes[xfertype]); /* Check against a simple/standard policy */ allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK | URB_FREE_BUFFER); switch (xfertype) { case USB_ENDPOINT_XFER_BULK: if (is_out) allowed |= URB_ZERO_PACKET; /* FALLTHROUGH */ case USB_ENDPOINT_XFER_CONTROL: allowed |= URB_NO_FSBR; /* only affects UHCI */ /* FALLTHROUGH */ default: /* all non-iso endpoints */ if (!is_out) allowed |= URB_SHORT_NOT_OK; break; case USB_ENDPOINT_XFER_ISOC: allowed |= URB_ISO_ASAP; break; } allowed &= urb->transfer_flags; /* warn if submitter gave bogus flags */ if (allowed != urb->transfer_flags) dev_WARN(&dev->dev, "BOGUS urb flags, %x --> %x\n", urb->transfer_flags, allowed); } #endif /* * Force periodic transfer intervals to be legal values that are * a power of two (so HCDs don't need to). * * FIXME want bus->{intr,iso}_sched_horizon values here. Each HC * supports different values... this uses EHCI/UHCI defaults (and * EHCI can use smaller non-default values). 
*/ switch (xfertype) { case USB_ENDPOINT_XFER_ISOC: case USB_ENDPOINT_XFER_INT: /* too small? */ switch (dev->speed) { case USB_SPEED_WIRELESS: if (urb->interval < 6) return -EINVAL; break; default: if (urb->interval <= 0) return -EINVAL; break; } /* too big? */ switch (dev->speed) { case USB_SPEED_SUPER: /* units are 125us */ /* Handle up to 2^(16-1) microframes */ if (urb->interval > (1 << 15)) return -EINVAL; max = 1 << 15; break; case USB_SPEED_WIRELESS: if (urb->interval > 16) return -EINVAL; break; case USB_SPEED_HIGH: /* units are microframes */ /* NOTE usb handles 2^15 */ if (urb->interval > (1024 * 8)) urb->interval = 1024 * 8; max = 1024 * 8; break; case USB_SPEED_FULL: /* units are frames/msec */ case USB_SPEED_LOW: if (xfertype == USB_ENDPOINT_XFER_INT) { if (urb->interval > 255) return -EINVAL; /* NOTE ohci only handles up to 32 */ max = 128; } else { if (urb->interval > 1024) urb->interval = 1024; /* NOTE usb and ohci handle up to 2^15 */ max = 1024; } break; default: return -EINVAL; } if (dev->speed != USB_SPEED_WIRELESS) { /* Round down to a power of 2, no more than max */ urb->interval = min(max, 1 << ilog2(urb->interval)); } } return usb_hcd_submit_urb(urb, mem_flags); } EXPORT_SYMBOL_GPL(usb_submit_urb); /*-------------------------------------------------------------------*/ /** * usb_unlink_urb - abort/cancel a transfer request for an endpoint * @urb: pointer to urb describing a previously submitted request, * may be NULL * * This routine cancels an in-progress request. URBs complete only once * per submission, and may be canceled only once per submission. * Successful cancellation means termination of @urb will be expedited * and the completion handler will be called with a status code * indicating that the request has been canceled (rather than any other * code). * * Drivers should not call this routine or related routines, such as * usb_kill_urb() or usb_unlink_anchored_urbs(), after their disconnect * method has returned. 
The disconnect function should synchronize with * a driver's I/O routines to insure that all URB-related activity has * completed before it returns. * * This request is asynchronous, however the HCD might call the ->complete() * callback during unlink. Therefore when drivers call usb_unlink_urb(), they * must not hold any locks that may be taken by the completion function. * Success is indicated by returning -EINPROGRESS, at which time the URB will * probably not yet have been given back to the device driver. When it is * eventually called, the completion function will see @urb->status == * -ECONNRESET. * Failure is indicated by usb_unlink_urb() returning any other value. * Unlinking will fail when @urb is not currently "linked" (i.e., it was * never submitted, or it was unlinked before, or the hardware is already * finished with it), even if the completion handler has not yet run. * * The URB must not be deallocated while this routine is running. In * particular, when a driver calls this routine, it must insure that the * completion handler cannot deallocate the URB. * * Unlinking and Endpoint Queues: * * [The behaviors and guarantees described below do not apply to virtual * root hubs but only to endpoint queues for physical USB devices.] * * Host Controller Drivers (HCDs) place all the URBs for a particular * endpoint in a queue. Normally the queue advances as the controller * hardware processes each request. But when an URB terminates with an * error its queue generally stops (see below), at least until that URB's * completion routine returns. It is guaranteed that a stopped queue * will not restart until all its unlinked URBs have been fully retired, * with their completion routines run, even if that's not until some time * after the original completion handler returns. The same behavior and * guarantee apply when an URB terminates because it was unlinked. 
* * Bulk and interrupt endpoint queues are guaranteed to stop whenever an * URB terminates with any sort of error, including -ECONNRESET, -ENOENT, * and -EREMOTEIO. Control endpoint queues behave the same way except * that they are not guaranteed to stop for -EREMOTEIO errors. Queues * for isochronous endpoints are treated differently, because they must * advance at fixed rates. Such queues do not stop when an URB * encounters an error or is unlinked. An unlinked isochronous URB may * leave a gap in the stream of packets; it is undefined whether such * gaps can be filled in. * * Note that early termination of an URB because a short packet was * received will generate a -EREMOTEIO error if and only if the * URB_SHORT_NOT_OK flag is set. By setting this flag, USB device * drivers can build deep queues for large or complex bulk transfers * and clean them up reliably after any sort of aborted transfer by * unlinking all pending URBs at the first fault. * * When a control URB terminates with an error other than -EREMOTEIO, it * is quite likely that the status stage of the transfer will not take * place. */ int usb_unlink_urb(struct urb *urb) { if (!urb) return -EINVAL; if (!urb->dev) return -ENODEV; if (!urb->ep) return -EIDRM; return usb_hcd_unlink_urb(urb, -ECONNRESET); } EXPORT_SYMBOL_GPL(usb_unlink_urb); /** * usb_kill_urb - cancel a transfer request and wait for it to finish * @urb: pointer to URB describing a previously submitted request, * may be NULL * * This routine cancels an in-progress request. It is guaranteed that * upon return all completion handlers will have finished and the URB * will be totally idle and available for reuse. These features make * this an ideal way to stop I/O in a disconnect() callback or close() * function. If the request has not already finished or been unlinked * the completion handler will see urb->status == -ENOENT. * * While the routine is running, attempts to resubmit the URB will fail * with error -EPERM. 
Thus even if the URB's completion handler always * tries to resubmit, it will not succeed and the URB will become idle. * * The URB must not be deallocated while this routine is running. In * particular, when a driver calls this routine, it must insure that the * completion handler cannot deallocate the URB. * * This routine may not be used in an interrupt context (such as a bottom * half or a completion handler), or when holding a spinlock, or in other * situations where the caller can't schedule(). * * This routine should not be called by a driver after its disconnect * method has returned. */ void usb_kill_urb(struct urb *urb) { might_sleep(); if (!(urb && urb->dev && urb->ep)) return; atomic_inc(&urb->reject); usb_hcd_unlink_urb(urb, -ENOENT); wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0); atomic_dec(&urb->reject); } EXPORT_SYMBOL_GPL(usb_kill_urb); /** * usb_poison_urb - reliably kill a transfer and prevent further use of an URB * @urb: pointer to URB describing a previously submitted request, * may be NULL * * This routine cancels an in-progress request. It is guaranteed that * upon return all completion handlers will have finished and the URB * will be totally idle and cannot be reused. These features make * this an ideal way to stop I/O in a disconnect() callback. * If the request has not already finished or been unlinked * the completion handler will see urb->status == -ENOENT. * * After and while the routine runs, attempts to resubmit the URB will fail * with error -EPERM. Thus even if the URB's completion handler always * tries to resubmit, it will not succeed and the URB will become idle. * * The URB must not be deallocated while this routine is running. In * particular, when a driver calls this routine, it must insure that the * completion handler cannot deallocate the URB. 
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	/* the elevated reject count keeps the URB from being resubmitted */
	atomic_inc(&urb->reject);
	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}
EXPORT_SYMBOL_GPL(usb_poison_urb);

/*
 * usb_unpoison_urb - undo the effect of usb_poison_urb()
 * @urb: pointer to the urb to unpoison, may be NULL
 *
 * Drops one poison (reject) reference so the URB may be submitted again.
 */
void usb_unpoison_urb(struct urb *urb)
{
	if (!urb)
		return;
	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);

/**
 * usb_kill_anchored_urbs - cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be killed starting
 * from the back of the queue
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_kill_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);

/**
 * usb_poison_anchored_urbs - cease all traffic from an anchor
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be poisoned starting
 * from the back of the queue. Newly added URBs will also be
 * poisoned
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	/* mark the anchor so URBs anchored from now on get poisoned too */
	anchor->poisoned = 1;
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_poison_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);

/**
 * usb_unpoison_anchored_urbs - let an anchor be used successfully again
 * @anchor: anchor the requests are bound to
 *
 * Reverses the effect of usb_poison_anchored_urbs
 * the anchor can be used normally after it returns
 */
void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
{
	unsigned long flags;
	struct urb *lazarus;

	spin_lock_irqsave(&anchor->lock, flags);
	list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
		usb_unpoison_urb(lazarus);
	}
	anchor->poisoned = 0;
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);

/**
 * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be unlinked starting
 * from the back of the queue. This function is asynchronous.
 * The unlinking is just triggered. It may happen after this
 * function has returned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	while ((victim = usb_get_from_anchor(anchor)) != NULL) {
		usb_unlink_urb(victim);
		usb_put_urb(victim);
	}
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);

/**
 * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
 * @anchor: the anchor you want to become unused
 * @timeout: how long you are willing to wait in milliseconds
 *
 * Call this if you want to be sure all an anchor's
 * URBs have finished
 */
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
				  unsigned int timeout)
{
	return wait_event_timeout(anchor->wait, list_empty(&anchor->urb_list),
				  msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);

/**
 * usb_get_from_anchor - get an anchor's oldest urb
 * @anchor: the anchor whose urb you want
 *
 * this will take the oldest urb from an anchor,
 * unanchor and return it
 */
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	if (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.next, struct urb,
				    anchor_list);
		/* take a reference so the caller owns the URB */
		usb_get_urb(victim);
		__usb_unanchor_urb(victim, anchor);
	} else {
		victim = NULL;
	}
	spin_unlock_irqrestore(&anchor->lock, flags);

	return victim;
}
EXPORT_SYMBOL_GPL(usb_get_from_anchor);

/**
 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
 * @anchor: the anchor whose urbs you want to unanchor
 *
 * use this to get rid of all an anchor's urbs
 */
void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		__usb_unanchor_urb(victim, anchor);
	}
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);

/**
 * usb_anchor_empty - is an anchor empty
 * @anchor: the anchor you want to query
 *
 * returns 1 if the anchor has no urbs associated with it
 */
int usb_anchor_empty(struct usb_anchor *anchor)
{
	return list_empty(&anchor->urb_list);
}
EXPORT_SYMBOL_GPL(usb_anchor_empty);
gpl-2.0
Infusion-OS/android_kernel_lge_gee
fs/btrfs/ctree.c
4774
115160
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"

/* forward declarations for the tree-balancing helpers defined later */
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int level, int slot);

/* allocate a zeroed path from the path slab cache; returns NULL on OOM */
struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}

/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}

/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);
#endif

	/* walk top-down so spinlocks are re-taken in the order lockdep wants */
	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
#endif
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was cow'ed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		/* the root may have been cow'ed while we slept on the lock */
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&root->fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.
 The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
				     buf->start, 0, 1);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* clone the old root's contents, then stamp the new block's header */
	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0, 1);
	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared.  If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}

/*
 * Fix up the extent backrefs when 'buf' is cowed into 'cow'.  Sets
 * *last_ref when the caller should drop the last reference on 'buf'.
 * Returns 0 on success or a negative error code.
 */
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is been relocating
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       buf->len, &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			/* a shared block with zero refs means fs corruption */
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
				     root->root_key.objectid, &disk_key,
				     level, search_start, empty_size, 1);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (root->ref_cows)
		btrfs_reloc_cow_block(trans, root, buf, cow);

	if (buf == root->node) {
		/* cowing the root itself: swap it in under RCU */
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref, 1);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		/* re-point the parent's slot at the new copy */
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		btrfs_set_node_blockptr(parent, parent_slot, cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref, 1);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * Decide whether 'buf' must be cowed before it can be modified in this
 * transaction.  Returns 0 when it is already safe to modify, 1 otherwise.
 */
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !root->force_cow)
		return 0;
	return 1;
}

/*
 * cows a single block, see __btrfs_cow_block for the real work.
* This version of it has extra checks so that a block isn't cow'd more than * once per transaction, as long as it hasn't been written yet */ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, struct extent_buffer *parent, int parent_slot, struct extent_buffer **cow_ret) { u64 search_start; int ret; if (trans->transaction != root->fs_info->running_transaction) { printk(KERN_CRIT "trans %llu running %llu\n", (unsigned long long)trans->transid, (unsigned long long) root->fs_info->running_transaction->transid); WARN_ON(1); } if (trans->transid != root->fs_info->generation) { printk(KERN_CRIT "trans %llu running %llu\n", (unsigned long long)trans->transid, (unsigned long long)root->fs_info->generation); WARN_ON(1); } if (!should_cow_block(trans, root, buf)) { *cow_ret = buf; return 0; } search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1); if (parent) btrfs_set_lock_blocking(parent); btrfs_set_lock_blocking(buf); ret = __btrfs_cow_block(trans, root, buf, parent, parent_slot, cow_ret, search_start, 0); trace_btrfs_cow_block(root, buf, *cow_ret); return ret; } /* * helper function for defrag to decide if two blocks pointed to by a * node are actually close by */ static int close_blocks(u64 blocknr, u64 other, u32 blocksize) { if (blocknr < other && other - (blocknr + blocksize) < 32768) return 1; if (blocknr > other && blocknr - (other + blocksize) < 32768) return 1; return 0; } /* * compare two keys in a memcmp fashion */ static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2) { struct btrfs_key k1; btrfs_disk_key_to_cpu(&k1, disk); return btrfs_comp_cpu_keys(&k1, k2); } /* * same as comp_keys only with two btrfs_key's */ int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2) { if (k1->objectid > k2->objectid) return 1; if (k1->objectid < k2->objectid) return -1; if (k1->type > k2->type) return 1; if (k1->type < k2->type) return -1; if (k1->offset > k2->offset) return 1; 
if (k1->offset < k2->offset) return -1; return 0; } /* * this is used by the defrag code to go through all the * leaves pointed to by a node and reallocate them so that * disk order is close to key order */ int btrfs_realloc_node(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *parent, int start_slot, int cache_only, u64 *last_ret, struct btrfs_key *progress) { struct extent_buffer *cur; u64 blocknr; u64 gen; u64 search_start = *last_ret; u64 last_block = 0; u64 other; u32 parent_nritems; int end_slot; int i; int err = 0; int parent_level; int uptodate; u32 blocksize; int progress_passed = 0; struct btrfs_disk_key disk_key; parent_level = btrfs_header_level(parent); if (cache_only && parent_level != 1) return 0; if (trans->transaction != root->fs_info->running_transaction) WARN_ON(1); if (trans->transid != root->fs_info->generation) WARN_ON(1); parent_nritems = btrfs_header_nritems(parent); blocksize = btrfs_level_size(root, parent_level - 1); end_slot = parent_nritems; if (parent_nritems == 1) return 0; btrfs_set_lock_blocking(parent); for (i = start_slot; i < end_slot; i++) { int close = 1; btrfs_node_key(parent, &disk_key, i); if (!progress_passed && comp_keys(&disk_key, progress) < 0) continue; progress_passed = 1; blocknr = btrfs_node_blockptr(parent, i); gen = btrfs_node_ptr_generation(parent, i); if (last_block == 0) last_block = blocknr; if (i > 0) { other = btrfs_node_blockptr(parent, i - 1); close = close_blocks(blocknr, other, blocksize); } if (!close && i < end_slot - 2) { other = btrfs_node_blockptr(parent, i + 1); close = close_blocks(blocknr, other, blocksize); } if (close) { last_block = blocknr; continue; } cur = btrfs_find_tree_block(root, blocknr, blocksize); if (cur) uptodate = btrfs_buffer_uptodate(cur, gen, 0); else uptodate = 0; if (!cur || !uptodate) { if (cache_only) { free_extent_buffer(cur); continue; } if (!cur) { cur = read_tree_block(root, blocknr, blocksize, gen); if (!cur) return -EIO; } else if 
(!uptodate) { btrfs_read_buffer(cur, gen); } } if (search_start == 0) search_start = last_block; btrfs_tree_lock(cur); btrfs_set_lock_blocking(cur); err = __btrfs_cow_block(trans, root, cur, parent, i, &cur, search_start, min(16 * blocksize, (end_slot - i) * blocksize)); if (err) { btrfs_tree_unlock(cur); free_extent_buffer(cur); break; } search_start = cur->start; last_block = cur->start; *last_ret = search_start; btrfs_tree_unlock(cur); free_extent_buffer(cur); } return err; } /* * The leaf data grows from end-to-front in the node. * this returns the address of the start of the last item, * which is the stop of the leaf data stack */ static inline unsigned int leaf_data_end(struct btrfs_root *root, struct extent_buffer *leaf) { u32 nr = btrfs_header_nritems(leaf); if (nr == 0) return BTRFS_LEAF_DATA_SIZE(root); return btrfs_item_offset_nr(leaf, nr - 1); } /* * search for key in the extent_buffer. The items start at offset p, * and they are item_size apart. There are 'max' items in p. * * the slot in the array is returned via slot, and it points to * the place where you would insert key if it is not found in * the array. 
* * slot may point to max if the key is bigger than all of the keys */ static noinline int generic_bin_search(struct extent_buffer *eb, unsigned long p, int item_size, struct btrfs_key *key, int max, int *slot) { int low = 0; int high = max; int mid; int ret; struct btrfs_disk_key *tmp = NULL; struct btrfs_disk_key unaligned; unsigned long offset; char *kaddr = NULL; unsigned long map_start = 0; unsigned long map_len = 0; int err; while (low < high) { mid = (low + high) / 2; offset = p + mid * item_size; if (!kaddr || offset < map_start || (offset + sizeof(struct btrfs_disk_key)) > map_start + map_len) { err = map_private_extent_buffer(eb, offset, sizeof(struct btrfs_disk_key), &kaddr, &map_start, &map_len); if (!err) { tmp = (struct btrfs_disk_key *)(kaddr + offset - map_start); } else { read_extent_buffer(eb, &unaligned, offset, sizeof(unaligned)); tmp = &unaligned; } } else { tmp = (struct btrfs_disk_key *)(kaddr + offset - map_start); } ret = comp_keys(tmp, key); if (ret < 0) low = mid + 1; else if (ret > 0) high = mid; else { *slot = mid; return 0; } } *slot = low; return 1; } /* * simple bin_search frontend that does the right thing for * leaves vs nodes */ static int bin_search(struct extent_buffer *eb, struct btrfs_key *key, int level, int *slot) { if (level == 0) { return generic_bin_search(eb, offsetof(struct btrfs_leaf, items), sizeof(struct btrfs_item), key, btrfs_header_nritems(eb), slot); } else { return generic_bin_search(eb, offsetof(struct btrfs_node, ptrs), sizeof(struct btrfs_key_ptr), key, btrfs_header_nritems(eb), slot); } return -1; } int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, int level, int *slot) { return bin_search(eb, key, level, slot); } static void root_add_used(struct btrfs_root *root, u32 size) { spin_lock(&root->accounting_lock); btrfs_set_root_used(&root->root_item, btrfs_root_used(&root->root_item) + size); spin_unlock(&root->accounting_lock); } static void root_sub_used(struct btrfs_root *root, u32 size) 
{ spin_lock(&root->accounting_lock); btrfs_set_root_used(&root->root_item, btrfs_root_used(&root->root_item) - size); spin_unlock(&root->accounting_lock); } /* given a node and slot number, this reads the blocks it points to. The * extent buffer is returned with a reference taken (but unlocked). * NULL is returned on error. */ static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root, struct extent_buffer *parent, int slot) { int level = btrfs_header_level(parent); if (slot < 0) return NULL; if (slot >= btrfs_header_nritems(parent)) return NULL; BUG_ON(level == 0); return read_tree_block(root, btrfs_node_blockptr(parent, slot), btrfs_level_size(root, level - 1), btrfs_node_ptr_generation(parent, slot)); } /* * node level balancing, used to make sure nodes are in proper order for * item deletion. We balance from the top down, so we have to make sure * that a deletion won't leave an node completely empty later on. */ static noinline int balance_level(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level) { struct extent_buffer *right = NULL; struct extent_buffer *mid; struct extent_buffer *left = NULL; struct extent_buffer *parent = NULL; int ret = 0; int wret; int pslot; int orig_slot = path->slots[level]; u64 orig_ptr; if (level == 0) return 0; mid = path->nodes[level]; WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK && path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING); WARN_ON(btrfs_header_generation(mid) != trans->transid); orig_ptr = btrfs_node_blockptr(mid, orig_slot); if (level < BTRFS_MAX_LEVEL - 1) { parent = path->nodes[level + 1]; pslot = path->slots[level + 1]; } /* * deal with the case where there is only one pointer in the root * by promoting the node below to a root */ if (!parent) { struct extent_buffer *child; if (btrfs_header_nritems(mid) != 1) return 0; /* promote the child to a root */ child = read_node_slot(root, mid, 0); if (!child) { ret = -EROFS; btrfs_std_error(root->fs_info, ret); goto 
enospc; } btrfs_tree_lock(child); btrfs_set_lock_blocking(child); ret = btrfs_cow_block(trans, root, child, mid, 0, &child); if (ret) { btrfs_tree_unlock(child); free_extent_buffer(child); goto enospc; } rcu_assign_pointer(root->node, child); add_root_to_dirty_list(root); btrfs_tree_unlock(child); path->locks[level] = 0; path->nodes[level] = NULL; clean_tree_block(trans, root, mid); btrfs_tree_unlock(mid); /* once for the path */ free_extent_buffer(mid); root_sub_used(root, mid->len); btrfs_free_tree_block(trans, root, mid, 0, 1, 0); /* once for the root ptr */ free_extent_buffer_stale(mid); return 0; } if (btrfs_header_nritems(mid) > BTRFS_NODEPTRS_PER_BLOCK(root) / 4) return 0; btrfs_header_nritems(mid); left = read_node_slot(root, parent, pslot - 1); if (left) { btrfs_tree_lock(left); btrfs_set_lock_blocking(left); wret = btrfs_cow_block(trans, root, left, parent, pslot - 1, &left); if (wret) { ret = wret; goto enospc; } } right = read_node_slot(root, parent, pslot + 1); if (right) { btrfs_tree_lock(right); btrfs_set_lock_blocking(right); wret = btrfs_cow_block(trans, root, right, parent, pslot + 1, &right); if (wret) { ret = wret; goto enospc; } } /* first, try to make some room in the middle buffer */ if (left) { orig_slot += btrfs_header_nritems(left); wret = push_node_left(trans, root, left, mid, 1); if (wret < 0) ret = wret; btrfs_header_nritems(mid); } /* * then try to empty the right most buffer into the middle */ if (right) { wret = push_node_left(trans, root, mid, right, 1); if (wret < 0 && wret != -ENOSPC) ret = wret; if (btrfs_header_nritems(right) == 0) { clean_tree_block(trans, root, right); btrfs_tree_unlock(right); del_ptr(trans, root, path, level + 1, pslot + 1); root_sub_used(root, right->len); btrfs_free_tree_block(trans, root, right, 0, 1, 0); free_extent_buffer_stale(right); right = NULL; } else { struct btrfs_disk_key right_key; btrfs_node_key(right, &right_key, 0); btrfs_set_node_key(parent, &right_key, pslot + 1); 
btrfs_mark_buffer_dirty(parent); } } if (btrfs_header_nritems(mid) == 1) { /* * we're not allowed to leave a node with one item in the * tree during a delete. A deletion from lower in the tree * could try to delete the only pointer in this node. * So, pull some keys from the left. * There has to be a left pointer at this point because * otherwise we would have pulled some pointers from the * right */ if (!left) { ret = -EROFS; btrfs_std_error(root->fs_info, ret); goto enospc; } wret = balance_node_right(trans, root, mid, left); if (wret < 0) { ret = wret; goto enospc; } if (wret == 1) { wret = push_node_left(trans, root, left, mid, 1); if (wret < 0) ret = wret; } BUG_ON(wret == 1); } if (btrfs_header_nritems(mid) == 0) { clean_tree_block(trans, root, mid); btrfs_tree_unlock(mid); del_ptr(trans, root, path, level + 1, pslot); root_sub_used(root, mid->len); btrfs_free_tree_block(trans, root, mid, 0, 1, 0); free_extent_buffer_stale(mid); mid = NULL; } else { /* update the parent key to reflect our changes */ struct btrfs_disk_key mid_key; btrfs_node_key(mid, &mid_key, 0); btrfs_set_node_key(parent, &mid_key, pslot); btrfs_mark_buffer_dirty(parent); } /* update the path */ if (left) { if (btrfs_header_nritems(left) > orig_slot) { extent_buffer_get(left); /* left was locked after cow */ path->nodes[level] = left; path->slots[level + 1] -= 1; path->slots[level] = orig_slot; if (mid) { btrfs_tree_unlock(mid); free_extent_buffer(mid); } } else { orig_slot -= btrfs_header_nritems(left); path->slots[level] = orig_slot; } } /* double check we haven't messed things up */ if (orig_ptr != btrfs_node_blockptr(path->nodes[level], path->slots[level])) BUG(); enospc: if (right) { btrfs_tree_unlock(right); free_extent_buffer(right); } if (left) { if (path->nodes[level] != left) btrfs_tree_unlock(left); free_extent_buffer(left); } return ret; } /* Node balancing for insertion. Here we only split or push nodes around * when they are completely full. 
 * This is also done top down, so we
 * have to be pessimistic.
 *
 * Returns 0 if the middle node was emptied into a neighbour (path updated
 * to point at the surviving node), 1 if nothing could be pushed, < 0 on
 * error.  Caller must hold locks on path->nodes[level] and its parent.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	/* the node must already be CoW'ed in this transaction */
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/* no parent means mid is the root: nothing to push into */
	if (!parent)
		return 1;

	left = read_node_slot(root, parent, pslot - 1);

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			/* left is (nearly) full, don't bother */
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, root,
						      left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			/*
			 * items moved into left; mid's first key changed,
			 * so fix the parent's pointer to mid
			 */
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				/* our slot migrated into the left node */
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				/* our slot stayed in mid */
				orig_slot -= btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(root, parent, pslot + 1);

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, root,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			/* right's first key changed; update parent pointer */
			btrfs_node_key(right, &disk_key, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(mid) <= orig_slot) {
				/* our slot migrated into the right node */
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}

/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_root *root,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 gen;
	int direction = path->reada;
	struct extent_buffer *eb;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	/* only readahead for the level-1 node (parent of leaves) */
	if (level != 1)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	search = btrfs_node_blockptr(node, slot);
	blocksize = btrfs_level_size(root, level - 1);
	eb = btrfs_find_tree_block(root, search, blocksize);
	if (eb) {
		/* target leaf already cached, no readahead needed */
		free_extent_buffer(eb);
		return;
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		/* walk outward from 'slot' in the reada direction */
		if (direction < 0) {
			if (nr == 0)
				break;
			nr--;
		} else if (direction > 0) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada < 0 && objectid) {
			/* backwards reada stops at an objectid boundary */
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		/* only prefetch blocks within 64k of the target */
		if ((search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			gen = btrfs_node_ptr_generation(node, nr);
			readahead_tree_block(root, search, blocksize, gen);
			nread += blocksize;
		}
		nscan++;
		/* bound the total work done per call */
		if ((nread > 65536 || nscan > 32))
			break;
	}
}

/*
 * returns -EAGAIN if it had to drop the path, or zero if everything was in
 * cache
 */
static noinline int reada_for_balance(struct btrfs_root *root,
				      struct btrfs_path *path, int level)
{
	int slot;
	int nritems;
	struct extent_buffer *parent;
	struct extent_buffer *eb;
	u64 gen;
	u64 block1 = 0;
	u64 block2 = 0;
	int ret = 0;
	int blocksize;

	parent = path->nodes[level + 1];
	if (!parent)
		return 0;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];
	blocksize = btrfs_level_size(root, level);

	/* left sibling of path->nodes[level] */
	if (slot > 0) {
		block1 = btrfs_node_blockptr(parent, slot - 1);
		gen = btrfs_node_ptr_generation(parent, slot - 1);
		eb = btrfs_find_tree_block(root, block1, blocksize);
		/*
		 * if we get -eagain from btrfs_buffer_uptodate, we
		 * don't want to return eagain here.  That will loop
		 * forever
		 */
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block1 = 0;
		free_extent_buffer(eb);
	}
	/* right sibling */
	if (slot + 1 < nritems) {
		block2 = btrfs_node_blockptr(parent, slot + 1);
		gen = btrfs_node_ptr_generation(parent, slot + 1);
		eb = btrfs_find_tree_block(root, block2, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block2 = 0;
		free_extent_buffer(eb);
	}
	if (block1 || block2) {
		ret = -EAGAIN;

		/* release the whole path */
		btrfs_release_path(path);

		/* read the blocks */
		if (block1)
			readahead_tree_block(root, block1, blocksize, 0);
		if (block2)
			readahead_tree_block(root, block2, blocksize, 0);

		/* wait for the reads by doing blocking lookups */
		if (block1) {
			eb = read_tree_block(root, block1, blocksize, 0);
			free_extent_buffer(eb);
		}
		if (block2) {
			eb = read_tree_block(root, block2, blocksize, 0);
			free_extent_buffer(eb);
		}
	}
	return ret;
}

/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	int no_skips = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;
		/* slot 0: the key above us may need fixing, keep the lock */
		if (!no_skips && path->slots[i] == 0) {
			skip_level = i + 1;
			continue;
		}
		/* keep_locks: keep the lock on last-slot nodes too */
		if (!no_skips && path->keep_locks) {
			u32 nritems;
			t = path->nodes[i];
			nritems = btrfs_header_nritems(t);
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
				skip_level = i + 1;
				continue;
			}
		}
		/* once we unlock one level, stop raising skip_level */
		if (skip_level < i && i >= lowest_unlock)
			no_skips = 1;

		t = path->nodes[i];
		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
			btrfs_tree_unlock_rw(t, path->locks[i]);
			path->locks[i] = 0;
			/* lower write_lock_level as write locks are dropped */
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}

/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node.  This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
		path->locks[i] = 0;
	}
}

/*
 * helper function for btrfs_search_slot.
 * The goal is to find a block
 * in cache without setting the path to blocking.  If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada.  -EAGAIN is returned and the search must be repeated.
 */
static int
read_block_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer **eb_ret, int level, int slot,
		       struct btrfs_key *key)
{
	u64 blocknr;
	u64 gen;
	u32 blocksize;
	struct extent_buffer *b = *eb_ret;
	struct extent_buffer *tmp;
	int ret;

	blocknr = btrfs_node_blockptr(b, slot);
	gen = btrfs_node_ptr_generation(b, slot);
	blocksize = btrfs_level_size(root, level - 1);

	tmp = btrfs_find_tree_block(root, blocknr, blocksize);
	if (tmp) {
		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) {
			if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
				/*
				 * we found an up to date block without
				 * sleeping, return
				 * right away
				 */
				*eb_ret = tmp;
				return 0;
			}
			/* the pages were up to date, but we failed
			 * the generation number check.  Do a full
			 * read for the generation number that is correct.
			 * We must do this without dropping locks so
			 * we can trust our generation number
			 */
			free_extent_buffer(tmp);
			btrfs_set_path_blocking(p);

			/* now we're allowed to do a blocking uptodate check */
			tmp = read_tree_block(root, blocknr, blocksize, gen);
			if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) {
				*eb_ret = tmp;
				return 0;
			}
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EIO;
		}
	}

	/*
	 * reduce lock contention at high levels
	 * of the btree by dropping locks before
	 * we read.  Don't release the lock on the current
	 * level because we need to walk this node to figure
	 * out which blocks to read.
	 */
	btrfs_unlock_up_safe(p, level + 1);
	btrfs_set_path_blocking(p);

	free_extent_buffer(tmp);
	if (p->reada)
		reada_for_search(root, p, level, slot, key->objectid);

	btrfs_release_path(p);

	ret = -EAGAIN;
	tmp = read_tree_block(root, blocknr, blocksize, 0);
	if (tmp) {
		/*
		 * If the read above didn't mark this buffer up to date,
		 * it will never end up being up to date.  Set ret to EIO now
		 * and give up so that our caller doesn't loop forever
		 * on our EAGAINs.
		 */
		if (!btrfs_buffer_uptodate(tmp, 0, 0))
			ret = -EIO;
		free_extent_buffer(tmp);
	}
	return ret;
}

/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	int ret;
	/* node nearly full and we are inserting (or splitting): split it */
	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
		int sret;

		/* splitting may touch the parent, need a write lock there */
		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = split_node(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		BUG_ON(sret > 0);
		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
		/* node under half full and we are deleting: rebalance */
		int sret;

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = balance_level(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
		if (!b) {
			/* the level collapsed away entirely, start over */
			btrfs_release_path(p);
			goto again;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return 0;

again:
	ret = -EAGAIN;
done:
	return ret;
}

/*
 * look for key in the tree.  path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned.  If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
 * possible)
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_path *p, int
		      ins_len, int cow)
{
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	int root_lock;
	/* everything at write_lock_level or lower must be write locked */
	int write_lock_level = 0;
	u8 lowest_level = 0;
	int min_write_lock_level;

	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len > 0);
	WARN_ON(p->nodes[0] != NULL);

	if (ins_len < 0) {
		lowest_unlock = 2;

		/* when we are removing items, we might have to go up to level
		 * two as we update tree pointers  Make sure we keep write
		 * for those levels as well
		 */
		write_lock_level = 2;
	} else if (ins_len > 0) {
		/*
		 * for inserting items, make sure we have a write lock on
		 * level 1 so we can update keys
		 */
		write_lock_level = 1;
	}

	if (!cow)
		write_lock_level = -1;

	if (cow && (p->keep_locks || p->lowest_level))
		write_lock_level = BTRFS_MAX_LEVEL;

	min_write_lock_level = write_lock_level;

again:
	/*
	 * we try very hard to do read locks on the root
	 */
	root_lock = BTRFS_READ_LOCK;
	level = 0;
	if (p->search_commit_root) {
		/*
		 * the commit roots are read only
		 * so we always do read locks
		 */
		b = root->commit_root;
		extent_buffer_get(b);
		level = btrfs_header_level(b);
		if (!p->skip_locking)
			btrfs_tree_read_lock(b);
	} else {
		if (p->skip_locking) {
			b = btrfs_root_node(root);
			level = btrfs_header_level(b);
		} else {
			/* we don't know the level of the root node
			 * until we actually have it read locked
			 */
			b = btrfs_read_lock_root_node(root);
			level = btrfs_header_level(b);
			if (level <= write_lock_level) {
				/* whoops, must trade for write lock */
				btrfs_tree_read_unlock(b);
				free_extent_buffer(b);
				b = btrfs_lock_root_node(root);
				root_lock = BTRFS_WRITE_LOCK;

				/* the level might have changed, check again */
				level = btrfs_header_level(b);
			}
		}
	}
	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;

	while (b) {
		level = btrfs_header_level(b);

		/*
		 * setup the path here so we can release it under lock
		 * contention with the cow code
		 */
		if (cow) {
			/*
			 * if we don't really need to cow this block
			 * then we don't want to set the path blocking,
			 * so we test it here
			 */
			if (!should_cow_block(trans, root, b))
				goto cow_done;

			btrfs_set_path_blocking(p);

			/*
			 * must have write locks on this node and the
			 * parent
			 */
			if (level + 1 > write_lock_level) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			err = btrfs_cow_block(trans, root, b,
					      p->nodes[level + 1],
					      p->slots[level + 1], &b);
			if (err) {
				ret = err;
				goto done;
			}
		}
cow_done:
		BUG_ON(!cow && ins_len);

		p->nodes[level] = b;
		btrfs_clear_path_blocking(p, NULL, 0);

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way to for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 *
		 * If cow is true, then we might be changing slot zero,
		 * which may require changing the parent.  So, we can't
		 * drop the lock until after we know which slot we're
		 * operating on.
		 */
		if (!cow)
			btrfs_unlock_up_safe(p, level + 1);

		ret = bin_search(b, key, level, &slot);

		if (level != 0) {
			int dec = 0;
			if (ret && slot > 0) {
				/* key not found: descend via previous slot */
				dec = 1;
				slot -= 1;
			}
			p->slots[level] = slot;
			err = setup_nodes_for_search(trans, root, p, b, level,
					     ins_len, &write_lock_level);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}
			b = p->nodes[level];
			slot = p->slots[level];

			/*
			 * slot 0 is special, if we change the key
			 * we have to update the parent pointer
			 * which means we must have a write lock
			 * on the parent
			 */
			if (slot == 0 && cow &&
			    write_lock_level < level + 1) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			unlock_up(p, level, lowest_unlock,
				  min_write_lock_level, &write_lock_level);

			if (level == lowest_level) {
				if (dec)
					p->slots[level]++;
				goto done;
			}

			err = read_block_for_search(trans, root, p,
						    &b, level, slot, key);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}

			if (!p->skip_locking) {
				level = btrfs_header_level(b);
				if (level <= write_lock_level) {
					/* try spinning first, block on failure */
					err = btrfs_try_tree_write_lock(b);
					if (!err) {
						btrfs_set_path_blocking(p);
						btrfs_tree_lock(b);
						btrfs_clear_path_blocking(p, b,
								  BTRFS_WRITE_LOCK);
					}
					p->locks[level] = BTRFS_WRITE_LOCK;
				} else {
					err = btrfs_try_tree_read_lock(b);
					if (!err) {
						btrfs_set_path_blocking(p);
						btrfs_tree_read_lock(b);
						btrfs_clear_path_blocking(p, b,
								  BTRFS_READ_LOCK);
					}
					p->locks[level] = BTRFS_READ_LOCK;
				}
				p->nodes[level] = b;
			}
		} else {
			p->slots[level] = slot;
			if (ins_len > 0 &&
			    btrfs_leaf_free_space(root, b) < ins_len) {
				if (write_lock_level < 1) {
					write_lock_level = 1;
					btrfs_release_path(p);
					goto again;
				}

				btrfs_set_path_blocking(p);
				err = split_leaf(trans, root, key,
						 p, ins_len, ret == 0);
				btrfs_clear_path_blocking(p, NULL, 0);

				BUG_ON(err > 0);
				if (err) {
					ret = err;
					goto done;
				}
			}
			if (!p->search_for_split)
				unlock_up(p, level, lowest_unlock,
					  min_write_lock_level,
					  &write_lock_level);
			goto done;
		}
	}
	ret = 1;
done:
	/*
	 * we don't really know what they plan on doing with the path
	 * from here on, so for now just mark it as blocking
	 */
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
	if (ret < 0)
		btrfs_release_path(p);
	return ret;
}

/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node is points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 *
 */
static void fixup_low_keys(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_path *path,
			   struct btrfs_disk_key *key, int level)
{
	int i;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];
		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		/* only a slot-0 key change propagates further up */
		if (tslot != 0)
			break;
	}
}

/*
 * update item key.
 *
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *new_key)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;
	int slot;

	eb = path->nodes[0];
	slot = path->slots[0];
	/* new key must still sort strictly between its neighbours */
	if (slot > 0) {
		btrfs_item_key(eb, &disk_key, slot - 1);
		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
	}
	if (slot < btrfs_header_nritems(eb) - 1) {
		btrfs_item_key(eb, &disk_key, slot + 1);
		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(eb, &disk_key, slot);
	btrfs_mark_buffer_dirty(eb);
	if (slot == 0)
		fixup_low_keys(trans, root, path, &disk_key, 1);
}

/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
{
	int push_items = 0;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	/* push_items starts as the free slots available in dst */
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	/* both nodes must already be CoW'ed in this transaction */
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	if (!empty && src_nritems <= 8)
		return 1;

	if (push_items <= 0)
		return 1;

	if (empty) {
		push_items = min(src_nritems, push_items);
		if (push_items < src_nritems) {
			/* leave at least 8 pointers in the node if
			 * we aren't going to empty it
			 */
			if (src_nritems - push_items < 8) {
				if (push_items <= 8)
					return 1;
				push_items -= 8;
			}
		}
	} else
		push_items = min(src_nritems - 8, push_items);

	/* append the leading pointers of src after dst's existing ones */
	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(dst_nritems),
			   btrfs_node_key_ptr_offset(0),
			   push_items * sizeof(struct btrfs_key_ptr));

	if (push_items < src_nritems) {
		/* close the gap left at the front of src */
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}

/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
* * this will only push up to 1/2 the contents of the left node over */ static int balance_node_right(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *dst, struct extent_buffer *src) { int push_items = 0; int max_push; int src_nritems; int dst_nritems; int ret = 0; WARN_ON(btrfs_header_generation(src) != trans->transid); WARN_ON(btrfs_header_generation(dst) != trans->transid); src_nritems = btrfs_header_nritems(src); dst_nritems = btrfs_header_nritems(dst); push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems; if (push_items <= 0) return 1; if (src_nritems < 4) return 1; max_push = src_nritems / 2 + 1; /* don't try to empty the node */ if (max_push >= src_nritems) return 1; if (max_push < push_items) push_items = max_push; memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items), btrfs_node_key_ptr_offset(0), (dst_nritems) * sizeof(struct btrfs_key_ptr)); copy_extent_buffer(dst, src, btrfs_node_key_ptr_offset(0), btrfs_node_key_ptr_offset(src_nritems - push_items), push_items * sizeof(struct btrfs_key_ptr)); btrfs_set_header_nritems(src, src_nritems - push_items); btrfs_set_header_nritems(dst, dst_nritems + push_items); btrfs_mark_buffer_dirty(src); btrfs_mark_buffer_dirty(dst); return ret; } /* * helper function to insert a new root level in the tree. * A new node is allocated, and a single item is inserted to * point to the existing root * * returns zero on success or < 0 on failure. 
 */
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_path *path, int level)
{
	u64 lower_gen;
	struct extent_buffer *lower;
	struct extent_buffer *c;
	struct extent_buffer *old;
	struct btrfs_disk_key lower_key;

	/* the level must be empty and the level below must be the root */
	BUG_ON(path->nodes[level]);
	BUG_ON(path->nodes[level-1] != root->node);

	lower = path->nodes[level-1];
	if (level == 1)
		btrfs_item_key(lower, &lower_key, 0);
	else
		btrfs_node_key(lower, &lower_key, 0);

	c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
				   root->root_key.objectid, &lower_key,
				   level, root->node->start, 0, 0);
	if (IS_ERR(c))
		return PTR_ERR(c);

	root_add_used(root, root->nodesize);

	/* initialize the new root node: one pointer to the old root */
	memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_nritems(c, 1);
	btrfs_set_header_level(c, level);
	btrfs_set_header_bytenr(c, c->start);
	btrfs_set_header_generation(c, trans->transid);
	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(c, root->root_key.objectid);

	write_extent_buffer(c, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(c),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(c),
			    BTRFS_UUID_SIZE);

	btrfs_set_node_key(c, &lower_key, 0);
	btrfs_set_node_blockptr(c, 0, lower->start);
	lower_gen = btrfs_header_generation(lower);
	WARN_ON(lower_gen != trans->transid);

	btrfs_set_node_ptr_generation(c, 0, lower_gen);

	btrfs_mark_buffer_dirty(c);

	/* publish the new root; readers may still hold the old one */
	old = root->node;
	rcu_assign_pointer(root->node, c);

	/* the super has an extra ref to root->node */
	free_extent_buffer(old);

	add_root_to_dirty_list(root);
	extent_buffer_get(c);
	path->nodes[level] = c;
	path->locks[level] = BTRFS_WRITE_LOCK;
	path->slots[level] = 0;
	return 0;
}

/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
*/ static void insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_disk_key *key, u64 bytenr, int slot, int level) { struct extent_buffer *lower; int nritems; BUG_ON(!path->nodes[level]); btrfs_assert_tree_locked(path->nodes[level]); lower = path->nodes[level]; nritems = btrfs_header_nritems(lower); BUG_ON(slot > nritems); BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root)); if (slot != nritems) { memmove_extent_buffer(lower, btrfs_node_key_ptr_offset(slot + 1), btrfs_node_key_ptr_offset(slot), (nritems - slot) * sizeof(struct btrfs_key_ptr)); } btrfs_set_node_key(lower, key, slot); btrfs_set_node_blockptr(lower, slot, bytenr); WARN_ON(trans->transid == 0); btrfs_set_node_ptr_generation(lower, slot, trans->transid); btrfs_set_header_nritems(lower, nritems + 1); btrfs_mark_buffer_dirty(lower); } /* * split the node at the specified level in path in two. * The path is corrected to point to the appropriate node after the split * * Before splitting this tries to make some room in the node by pushing * left and right, if either one works, it returns right away. 
 *
 * returns 0 on success and < 0 on failure
 */
static noinline int split_node(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path, int level)
{
	struct extent_buffer *c;
	struct extent_buffer *split;
	struct btrfs_disk_key disk_key;
	int mid;
	int ret;
	u32 c_nritems;

	c = path->nodes[level];
	WARN_ON(btrfs_header_generation(c) != trans->transid);
	if (c == root->node) {
		/* trying to split the root, lets make a new one */
		ret = insert_new_root(trans, root, path, level + 1);
		if (ret)
			return ret;
	} else {
		/* try pushing into siblings first; may avoid the split */
		ret = push_nodes_for_insert(trans, root, path, level);
		c = path->nodes[level];
		if (!ret && btrfs_header_nritems(c) <
		    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
			return 0;
		if (ret < 0)
			return ret;
	}

	c_nritems = btrfs_header_nritems(c);
	mid = (c_nritems + 1) / 2;
	btrfs_node_key(c, &disk_key, mid);

	split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
					root->root_key.objectid,
					&disk_key, level, c->start, 0, 0);
	if (IS_ERR(split))
		return PTR_ERR(split);

	root_add_used(root, root->nodesize);

	memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_level(split, btrfs_header_level(c));
	btrfs_set_header_bytenr(split, split->start);
	btrfs_set_header_generation(split, trans->transid);
	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(split, root->root_key.objectid);
	write_extent_buffer(split, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(split),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(split),
			    BTRFS_UUID_SIZE);

	/* move the upper half of c's key pointers into the new node */
	copy_extent_buffer(split, c,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(mid),
			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
	btrfs_set_header_nritems(split, c_nritems - mid);
	btrfs_set_header_nritems(c, mid);
	ret = 0;

	btrfs_mark_buffer_dirty(c);
	btrfs_mark_buffer_dirty(split);

	/* link the new node into the parent */
	insert_ptr(trans, root, path, &disk_key, split->start,
		   path->slots[level + 1] + 1, level + 1);

	if (path->slots[level] >= mid) {
		/* our slot moved into the new node; fix the path */
		path->slots[level] -= mid;
		btrfs_tree_unlock(c);
		free_extent_buffer(c);
		path->nodes[level] = split;
		path->slots[level + 1] += 1;
	} else {
		btrfs_tree_unlock(split);
		free_extent_buffer(split);
	}
	return ret;
}

/*
 * how many bytes are required to store the items in a leaf.  start
 * and nr indicate which items in the leaf to check.  This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
	int data_len;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	if (!nr)
		return 0;
	/* data is laid out end-to-start, so offsets give the data span */
	data_len = btrfs_item_end_nr(l, start);
	data_len = data_len - btrfs_item_offset_nr(l, end);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
	return data_len;
}

/*
 * The space between the end of the leaf items and
 * the start of the leaf data.  IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct btrfs_root *root,
				   struct extent_buffer *leaf)
{
	int nritems = btrfs_header_nritems(leaf);
	int ret;
	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
		/* a negative result means the leaf is corrupted */
		printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
		       "used %d nritems %d\n",
		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
		       leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
}

/*
 * min slot controls the lowest index we're willing to push to the
 * right.
 * We'll push up to and including min_slot, but no lower
 */
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      int data_size, int empty,
				      struct extent_buffer *right,
				      int free_space, u32 left_nritems,
				      u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *upper = path->nodes[1];
	struct btrfs_map_token token;
	struct btrfs_disk_key disk_key;
	int slot;
	u32 i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 nr;
	u32 right_nritems;
	u32 data_end;
	u32 this_item_size;

	btrfs_init_map_token(&token);

	if (empty)
		nr = 0;
	else
		nr = max_t(u32, 1, min_slot);

	/* reserve room for the pending insert if it lands past the end */
	if (path->slots[0] >= left_nritems)
		push_space += data_size;

	slot = path->slots[1];
	i = left_nritems - 1;
	/* count, from the right edge of left, how many items fit in right */
	while (i >= nr) {
		item = btrfs_item_nr(left, i);

		if (!empty && push_items > 0) {
			if (path->slots[0] > i)
				break;
			if (path->slots[0] == i) {
				/* stop early so the insert slot keeps room */
				int space = btrfs_leaf_free_space(root, left);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(left, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
		/* i is unsigned; break before wrapping past zero */
		if (i == 0)
			break;
		i--;
	}

	if (push_items == 0)
		goto out_unlock;

	if (!empty && push_items == left_nritems)
		WARN_ON(1);

	/* push left to right */
	right_nritems = btrfs_header_nritems(right);

	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
	push_space -= leaf_data_end(root, left);

	/* make room in the right data area */
	data_end = leaf_data_end(root, right);
	memmove_extent_buffer(right,
			      btrfs_leaf_data(right) + data_end - push_space,
			      btrfs_leaf_data(right) + data_end,
			      BTRFS_LEAF_DATA_SIZE(root) - data_end);

	/* copy from the left data area */
	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
		     BTRFS_LEAF_DATA_SIZE(root) - push_space,
		     btrfs_leaf_data(left) + leaf_data_end(root, left),
		     push_space);

	/* shift right's item headers to make room at the front */
	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
			      btrfs_item_nr_offset(0),
			      right_nritems * sizeof(struct btrfs_item));

	/* copy the items from left to right */
	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
		   btrfs_item_nr_offset(left_nritems - push_items),
		   push_items * sizeof(struct btrfs_item));

	/* update the item pointers */
	right_nritems += push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);
		push_space -= btrfs_token_item_size(right, item, &token);
		btrfs_set_token_item_offset(right, item, push_space, &token);
	}

	left_nritems -= push_items;
	btrfs_set_header_nritems(left, left_nritems);

	if (left_nritems)
		btrfs_mark_buffer_dirty(left);
	else
		clean_tree_block(trans, root, left);

	btrfs_mark_buffer_dirty(right);

	/* right's first key changed; update the parent's pointer */
	btrfs_item_key(right, &disk_key, 0);
	btrfs_set_node_key(upper, &disk_key, slot + 1);
	btrfs_mark_buffer_dirty(upper);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] >= left_nritems) {
		path->slots[0] -= left_nritems;
		if (btrfs_header_nritems(path->nodes[0]) == 0)
			clean_tree_block(trans, root, path->nodes[0]);
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 0;

out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}

/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 *
 * this will push starting from min_slot to the end of the leaf.
 * It won't
 * push any slot lower than min_slot
 */
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
			   *root, struct btrfs_path *path,
			   int min_data_size, int data_size,
			   int empty, u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *right;
	struct extent_buffer *upper;
	int slot;
	int free_space;
	u32 left_nritems;
	int ret;

	if (!path->nodes[1])
		return 1;

	slot = path->slots[1];
	upper = path->nodes[1];
	/* no right sibling under this parent */
	if (slot >= btrfs_header_nritems(upper) - 1)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	right = read_node_slot(root, upper, slot + 1);
	if (right == NULL)
		return 1;

	btrfs_tree_lock(right);
	btrfs_set_lock_blocking(right);

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, right, upper,
			      slot + 1, &right);
	if (ret)
		goto out_unlock;

	/* CoW may have changed the leaf; re-check the space */
	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	left_nritems = btrfs_header_nritems(left);
	if (left_nritems == 0)
		goto out_unlock;

	/* __push_leaf_right takes ownership of right's lock and ref */
	return __push_leaf_right(trans, root, path, min_data_size, empty,
				right, free_space, left_nritems, min_slot);
out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}

/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.
 * Use (u32)-1 to make us do all the
 * items
 */
static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int data_size,
				     int empty, struct extent_buffer *left,
				     int free_space, u32 right_nritems,
				     u32 max_slot)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *right = path->nodes[0];
	int i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 old_left_nritems;
	u32 nr;
	int ret = 0;
	u32 this_item_size;
	u32 old_left_item_size;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	if (empty)
		nr = min(right_nritems, max_slot);
	else
		nr = min(right_nritems - 1, max_slot);

	/* count, from right's front, how many items fit in left */
	for (i = 0; i < nr; i++) {
		item = btrfs_item_nr(right, i);

		if (!empty && push_items > 0) {
			if (path->slots[0] < i)
				break;
			if (path->slots[0] == i) {
				/* stop early so the insert slot keeps room */
				int space = btrfs_leaf_free_space(root, right);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(right, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
	}

	if (push_items == 0) {
		ret = 1;
		goto out;
	}
	if (!empty && push_items == btrfs_header_nritems(right))
		WARN_ON(1);

	/* push data from right to left */
	copy_extent_buffer(left, right,
			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
			   btrfs_item_nr_offset(0),
			   push_items * sizeof(struct btrfs_item));

	push_space = BTRFS_LEAF_DATA_SIZE(root) -
		     btrfs_item_offset_nr(right, push_items - 1);

	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
		     leaf_data_end(root, left) - push_space,
		     btrfs_leaf_data(right) +
		     btrfs_item_offset_nr(right, push_items - 1),
		     push_space);
	old_left_nritems = btrfs_header_nritems(left);
	BUG_ON(old_left_nritems <= 0);

	/* rebase the moved items' data offsets for their new home */
	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
		u32 ioff;

		item = btrfs_item_nr(left, i);

		ioff = btrfs_token_item_offset(left, item, &token);
		btrfs_set_token_item_offset(left, item,
		      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
		      &token);
	}
	btrfs_set_header_nritems(left, old_left_nritems + push_items);

	/* fixup right node */
	if (push_items > right_nritems) {
		printk(KERN_CRIT "push items %d nr %u\n", push_items,
		       right_nritems);
		WARN_ON(1);
	}

	if (push_items < right_nritems) {
		/* compact right's remaining data back to the end */
		push_space = btrfs_item_offset_nr(right, push_items - 1) -
						  leaf_data_end(root, right);
		memmove_extent_buffer(right, btrfs_leaf_data(right) +
				      BTRFS_LEAF_DATA_SIZE(root) - push_space,
				      btrfs_leaf_data(right) +
				      leaf_data_end(root, right), push_space);

		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
			      btrfs_item_nr_offset(push_items),
			     (btrfs_header_nritems(right) - push_items) *
			     sizeof(struct btrfs_item));
	}
	right_nritems -= push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);

		push_space = push_space - btrfs_token_item_size(right,
								item, &token);
		btrfs_set_token_item_offset(right, item, push_space, &token);
	}

	btrfs_mark_buffer_dirty(left);
	if (right_nritems)
		btrfs_mark_buffer_dirty(right);
	else
		clean_tree_block(trans, root, right);

	/* right's first key changed; propagate it up the tree */
	btrfs_item_key(right, &disk_key, 0);
	fixup_low_keys(trans, root, path, &disk_key, 1);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] < push_items) {
		path->slots[0] += old_left_nritems;
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = left;
		path->slots[1] -= 1;
	} else {
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
		path->slots[0] -= push_items;
	}
	BUG_ON(path->slots[0] < 0);
	return ret;
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}

/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.
Use (u32)-1 to make us push all the * items */ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int min_data_size, int data_size, int empty, u32 max_slot) { struct extent_buffer *right = path->nodes[0]; struct extent_buffer *left; int slot; int free_space; u32 right_nritems; int ret = 0; slot = path->slots[1]; if (slot == 0) return 1; if (!path->nodes[1]) return 1; right_nritems = btrfs_header_nritems(right); if (right_nritems == 0) return 1; btrfs_assert_tree_locked(path->nodes[1]); left = read_node_slot(root, path->nodes[1], slot - 1); if (left == NULL) return 1; btrfs_tree_lock(left); btrfs_set_lock_blocking(left); free_space = btrfs_leaf_free_space(root, left); if (free_space < data_size) { ret = 1; goto out; } /* cow and double check */ ret = btrfs_cow_block(trans, root, left, path->nodes[1], slot - 1, &left); if (ret) { /* we hit -ENOSPC, but it isn't fatal here */ if (ret == -ENOSPC) ret = 1; goto out; } free_space = btrfs_leaf_free_space(root, left); if (free_space < data_size) { ret = 1; goto out; } return __push_leaf_left(trans, root, path, min_data_size, empty, left, free_space, right_nritems, max_slot); out: btrfs_tree_unlock(left); free_extent_buffer(left); return ret; } /* * split the path's leaf in two, making sure there is at least data_size * available for the resulting leaf level of the path. 
*/ static noinline void copy_for_split(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *l, struct extent_buffer *right, int slot, int mid, int nritems) { int data_copy_size; int rt_data_off; int i; struct btrfs_disk_key disk_key; struct btrfs_map_token token; btrfs_init_map_token(&token); nritems = nritems - mid; btrfs_set_header_nritems(right, nritems); data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l); copy_extent_buffer(right, l, btrfs_item_nr_offset(0), btrfs_item_nr_offset(mid), nritems * sizeof(struct btrfs_item)); copy_extent_buffer(right, l, btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) - data_copy_size, btrfs_leaf_data(l) + leaf_data_end(root, l), data_copy_size); rt_data_off = BTRFS_LEAF_DATA_SIZE(root) - btrfs_item_end_nr(l, mid); for (i = 0; i < nritems; i++) { struct btrfs_item *item = btrfs_item_nr(right, i); u32 ioff; ioff = btrfs_token_item_offset(right, item, &token); btrfs_set_token_item_offset(right, item, ioff + rt_data_off, &token); } btrfs_set_header_nritems(l, mid); btrfs_item_key(right, &disk_key, 0); insert_ptr(trans, root, path, &disk_key, right->start, path->slots[1] + 1, 1); btrfs_mark_buffer_dirty(right); btrfs_mark_buffer_dirty(l); BUG_ON(path->slots[0] != slot); if (mid <= slot) { btrfs_tree_unlock(path->nodes[0]); free_extent_buffer(path->nodes[0]); path->nodes[0] = right; path->slots[0] -= mid; path->slots[1] += 1; } else { btrfs_tree_unlock(right); free_extent_buffer(right); } BUG_ON(path->slots[0] < 0); } /* * double splits happen when we need to insert a big item in the middle * of a leaf. A double split can leave us with 3 mostly empty leaves: * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ] * A B C * * We avoid this by trying to push the items on either side of our target * into the adjacent leaves. If all goes well we can avoid the double split * completely. 
*/ static noinline int push_for_double_split(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int data_size) { int ret; int progress = 0; int slot; u32 nritems; slot = path->slots[0]; /* * try to push all the items after our slot into the * right leaf */ ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot); if (ret < 0) return ret; if (ret == 0) progress++; nritems = btrfs_header_nritems(path->nodes[0]); /* * our goal is to get our slot at the start or end of a leaf. If * we've done so we're done */ if (path->slots[0] == 0 || path->slots[0] == nritems) return 0; if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size) return 0; /* try to push all the items before our slot into the next leaf */ slot = path->slots[0]; ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot); if (ret < 0) return ret; if (ret == 0) progress++; if (progress) return 0; return 1; } /* * split the path's leaf in two, making sure there is at least data_size * available for the resulting leaf level of the path. * * returns 0 if all went well and < 0 on failure. */ static noinline int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *ins_key, struct btrfs_path *path, int data_size, int extend) { struct btrfs_disk_key disk_key; struct extent_buffer *l; u32 nritems; int mid; int slot; struct extent_buffer *right; int ret = 0; int wret; int split; int num_doubles = 0; int tried_avoid_double = 0; l = path->nodes[0]; slot = path->slots[0]; if (extend && data_size + btrfs_item_size_nr(l, slot) + sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root)) return -EOVERFLOW; /* first try to make some room by pushing left and right */ if (data_size) { wret = push_leaf_right(trans, root, path, data_size, data_size, 0, 0); if (wret < 0) return wret; if (wret) { wret = push_leaf_left(trans, root, path, data_size, data_size, 0, (u32)-1); if (wret < 0) return wret; } l = path->nodes[0]; /* did the pushes work? 
*/ if (btrfs_leaf_free_space(root, l) >= data_size) return 0; } if (!path->nodes[1]) { ret = insert_new_root(trans, root, path, 1); if (ret) return ret; } again: split = 1; l = path->nodes[0]; slot = path->slots[0]; nritems = btrfs_header_nritems(l); mid = (nritems + 1) / 2; if (mid <= slot) { if (nritems == 1 || leaf_space_used(l, mid, nritems - mid) + data_size > BTRFS_LEAF_DATA_SIZE(root)) { if (slot >= nritems) { split = 0; } else { mid = slot; if (mid != nritems && leaf_space_used(l, mid, nritems - mid) + data_size > BTRFS_LEAF_DATA_SIZE(root)) { if (data_size && !tried_avoid_double) goto push_for_double; split = 2; } } } } else { if (leaf_space_used(l, 0, mid) + data_size > BTRFS_LEAF_DATA_SIZE(root)) { if (!extend && data_size && slot == 0) { split = 0; } else if ((extend || !data_size) && slot == 0) { mid = 1; } else { mid = slot; if (mid != nritems && leaf_space_used(l, mid, nritems - mid) + data_size > BTRFS_LEAF_DATA_SIZE(root)) { if (data_size && !tried_avoid_double) goto push_for_double; split = 2 ; } } } } if (split == 0) btrfs_cpu_key_to_disk(&disk_key, ins_key); else btrfs_item_key(l, &disk_key, mid); right = btrfs_alloc_free_block(trans, root, root->leafsize, 0, root->root_key.objectid, &disk_key, 0, l->start, 0, 0); if (IS_ERR(right)) return PTR_ERR(right); root_add_used(root, root->leafsize); memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header)); btrfs_set_header_bytenr(right, right->start); btrfs_set_header_generation(right, trans->transid); btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV); btrfs_set_header_owner(right, root->root_key.objectid); btrfs_set_header_level(right, 0); write_extent_buffer(right, root->fs_info->fsid, (unsigned long)btrfs_header_fsid(right), BTRFS_FSID_SIZE); write_extent_buffer(right, root->fs_info->chunk_tree_uuid, (unsigned long)btrfs_header_chunk_tree_uuid(right), BTRFS_UUID_SIZE); if (split == 0) { if (mid <= slot) { btrfs_set_header_nritems(right, 0); insert_ptr(trans, root, path, &disk_key, 
right->start, path->slots[1] + 1, 1); btrfs_tree_unlock(path->nodes[0]); free_extent_buffer(path->nodes[0]); path->nodes[0] = right; path->slots[0] = 0; path->slots[1] += 1; } else { btrfs_set_header_nritems(right, 0); insert_ptr(trans, root, path, &disk_key, right->start, path->slots[1], 1); btrfs_tree_unlock(path->nodes[0]); free_extent_buffer(path->nodes[0]); path->nodes[0] = right; path->slots[0] = 0; if (path->slots[1] == 0) fixup_low_keys(trans, root, path, &disk_key, 1); } btrfs_mark_buffer_dirty(right); return ret; } copy_for_split(trans, root, path, l, right, slot, mid, nritems); if (split == 2) { BUG_ON(num_doubles != 0); num_doubles++; goto again; } return 0; push_for_double: push_for_double_split(trans, root, path, data_size); tried_avoid_double = 1; if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size) return 0; goto again; } static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int ins_len) { struct btrfs_key key; struct extent_buffer *leaf; struct btrfs_file_extent_item *fi; u64 extent_len = 0; u32 item_size; int ret; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY && key.type != BTRFS_EXTENT_CSUM_KEY); if (btrfs_leaf_free_space(root, leaf) >= ins_len) return 0; item_size = btrfs_item_size_nr(leaf, path->slots[0]); if (key.type == BTRFS_EXTENT_DATA_KEY) { fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); extent_len = btrfs_file_extent_num_bytes(leaf, fi); } btrfs_release_path(path); path->keep_locks = 1; path->search_for_split = 1; ret = btrfs_search_slot(trans, root, &key, path, 0, 1); path->search_for_split = 0; if (ret < 0) goto err; ret = -EAGAIN; leaf = path->nodes[0]; /* if our item isn't there or got smaller, return now */ if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0])) goto err; /* the leaf has changed, it now has room. 
return now */ if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len) goto err; if (key.type == BTRFS_EXTENT_DATA_KEY) { fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); if (extent_len != btrfs_file_extent_num_bytes(leaf, fi)) goto err; } btrfs_set_path_blocking(path); ret = split_leaf(trans, root, &key, path, ins_len, 1); if (ret) goto err; path->keep_locks = 0; btrfs_unlock_up_safe(path, 1); return 0; err: path->keep_locks = 0; return ret; } static noinline int split_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *new_key, unsigned long split_offset) { struct extent_buffer *leaf; struct btrfs_item *item; struct btrfs_item *new_item; int slot; char *buf; u32 nritems; u32 item_size; u32 orig_offset; struct btrfs_disk_key disk_key; leaf = path->nodes[0]; BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item)); btrfs_set_path_blocking(path); item = btrfs_item_nr(leaf, path->slots[0]); orig_offset = btrfs_item_offset(leaf, item); item_size = btrfs_item_size(leaf, item); buf = kmalloc(item_size, GFP_NOFS); if (!buf) return -ENOMEM; read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, path->slots[0]), item_size); slot = path->slots[0] + 1; nritems = btrfs_header_nritems(leaf); if (slot != nritems) { /* shift the items */ memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1), btrfs_item_nr_offset(slot), (nritems - slot) * sizeof(struct btrfs_item)); } btrfs_cpu_key_to_disk(&disk_key, new_key); btrfs_set_item_key(leaf, &disk_key, slot); new_item = btrfs_item_nr(leaf, slot); btrfs_set_item_offset(leaf, new_item, orig_offset); btrfs_set_item_size(leaf, new_item, item_size - split_offset); btrfs_set_item_offset(leaf, item, orig_offset + item_size - split_offset); btrfs_set_item_size(leaf, item, split_offset); btrfs_set_header_nritems(leaf, nritems + 1); /* write the data for the start of the original item */ write_extent_buffer(leaf, buf, 
btrfs_item_ptr_offset(leaf, path->slots[0]), split_offset); /* write the data for the new item */ write_extent_buffer(leaf, buf + split_offset, btrfs_item_ptr_offset(leaf, slot), item_size - split_offset); btrfs_mark_buffer_dirty(leaf); BUG_ON(btrfs_leaf_free_space(root, leaf) < 0); kfree(buf); return 0; } /* * This function splits a single item into two items, * giving 'new_key' to the new item and splitting the * old one at split_offset (from the start of the item). * * The path may be released by this operation. After * the split, the path is pointing to the old item. The * new item is going to be in the same node as the old one. * * Note, the item being split must be smaller enough to live alone on * a tree block with room for one extra struct btrfs_item * * This allows us to split the item in place, keeping a lock on the * leaf the entire time. */ int btrfs_split_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *new_key, unsigned long split_offset) { int ret; ret = setup_leaf_for_split(trans, root, path, sizeof(struct btrfs_item)); if (ret) return ret; ret = split_item(trans, root, path, new_key, split_offset); return ret; } /* * This function duplicate a item, giving 'new_key' to the new item. * It guarantees both items live in the same tree leaf and the new item * is contiguous with the original item. * * This allows us to split file extent in place, keeping a lock on the * leaf the entire time. 
 */
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 struct btrfs_key *new_key)
{
	struct extent_buffer *leaf;
	int ret;
	u32 item_size;

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	/* make sure original item + copy fit in this leaf together */
	ret = setup_leaf_for_split(trans, root, path,
				   item_size + sizeof(struct btrfs_item));
	if (ret)
		return ret;

	/* insert an empty item right after the original ... */
	path->slots[0]++;
	setup_items_for_insert(trans, root, path, new_key, &item_size,
			       item_size, item_size +
			       sizeof(struct btrfs_item), 1);
	/* ... then clone the original item's bytes into it */
	leaf = path->nodes[0];
	memcpy_extent_buffer(leaf,
			     btrfs_item_ptr_offset(leaf, path->slots[0]),
			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
			     item_size);
	return 0;
}

/*
 * make the item pointed to by the path smaller.  new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
void btrfs_truncate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 u32 new_size, int from_end)
{
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data_start;
	unsigned int old_size;
	unsigned int size_diff;
	int i;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	slot = path->slots[0];

	old_size = btrfs_item_size_nr(leaf, slot);
	if (old_size == new_size)
		return;

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	old_data_start = btrfs_item_offset_nr(leaf, slot);

	size_diff = old_size - new_size;

	BUG_ON(slot < 0);
	BUG_ON(slot >= nritems);

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(leaf, i);

		ioff = btrfs_token_item_offset(leaf, item, &token);
		btrfs_set_token_item_offset(leaf, item,
					    ioff + size_diff, &token);
	}

	/* shift the data */
	if (from_end) {
		/* chop off the tail: slide everything below us upward */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start + new_size - data_end);
	} else {
		struct btrfs_disk_key disk_key;
		u64 offset;

		btrfs_item_key(leaf, &disk_key, slot);

		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
			unsigned long ptr;
			struct btrfs_file_extent_item *fi;

			/*
			 * inline extents keep their header at the front of
			 * the item, so move the header to the new item
			 * start before the generic shift below.
			 */
			fi = btrfs_item_ptr(leaf, slot,
					    struct btrfs_file_extent_item);
			fi = (struct btrfs_file_extent_item *)(
			     (unsigned long)fi - size_diff);

			if (btrfs_file_extent_type(leaf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE) {
				ptr = btrfs_item_ptr_offset(leaf, slot);
				memmove_extent_buffer(leaf, ptr,
				      (unsigned long)fi,
				      offsetof(struct btrfs_file_extent_item,
						 disk_bytenr));
			}
		}

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start - data_end);

		/* chopping the front changes the item's logical offset */
		offset = btrfs_disk_key_offset(&disk_key);
		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
		btrfs_set_item_key(leaf, &disk_key, slot);
		if (slot == 0)
			fixup_low_keys(trans, root, path, &disk_key, 1);
	}

	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, new_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
}

/*
 * make the item pointed to by the path bigger, data_size is the new size.
*/ void btrfs_extend_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u32 data_size) { int slot; struct extent_buffer *leaf; struct btrfs_item *item; u32 nritems; unsigned int data_end; unsigned int old_data; unsigned int old_size; int i; struct btrfs_map_token token; btrfs_init_map_token(&token); leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); data_end = leaf_data_end(root, leaf); if (btrfs_leaf_free_space(root, leaf) < data_size) { btrfs_print_leaf(root, leaf); BUG(); } slot = path->slots[0]; old_data = btrfs_item_end_nr(leaf, slot); BUG_ON(slot < 0); if (slot >= nritems) { btrfs_print_leaf(root, leaf); printk(KERN_CRIT "slot %d too large, nritems %d\n", slot, nritems); BUG_ON(1); } /* * item0..itemN ... dataN.offset..dataN.size .. data0.size */ /* first correct the data pointers */ for (i = slot; i < nritems; i++) { u32 ioff; item = btrfs_item_nr(leaf, i); ioff = btrfs_token_item_offset(leaf, item, &token); btrfs_set_token_item_offset(leaf, item, ioff - data_size, &token); } /* shift the data */ memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + data_end - data_size, btrfs_leaf_data(leaf) + data_end, old_data - data_end); data_end = old_data; old_size = btrfs_item_size_nr(leaf, slot); item = btrfs_item_nr(leaf, slot); btrfs_set_item_size(leaf, item, old_size + data_size); btrfs_mark_buffer_dirty(leaf); if (btrfs_leaf_free_space(root, leaf) < 0) { btrfs_print_leaf(root, leaf); BUG(); } } /* * Given a key and some data, insert items into the tree. * This does all the path init required, making room in the tree if needed. * Returns the number of keys that were inserted. 
*/ int btrfs_insert_some_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *cpu_key, u32 *data_size, int nr) { struct extent_buffer *leaf; struct btrfs_item *item; int ret = 0; int slot; int i; u32 nritems; u32 total_data = 0; u32 total_size = 0; unsigned int data_end; struct btrfs_disk_key disk_key; struct btrfs_key found_key; struct btrfs_map_token token; btrfs_init_map_token(&token); for (i = 0; i < nr; i++) { if (total_size + data_size[i] + sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root)) { break; nr = i; } total_data += data_size[i]; total_size += data_size[i] + sizeof(struct btrfs_item); } BUG_ON(nr == 0); ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1); if (ret == 0) return -EEXIST; if (ret < 0) goto out; leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); data_end = leaf_data_end(root, leaf); if (btrfs_leaf_free_space(root, leaf) < total_size) { for (i = nr; i >= 0; i--) { total_data -= data_size[i]; total_size -= data_size[i] + sizeof(struct btrfs_item); if (total_size < btrfs_leaf_free_space(root, leaf)) break; } nr = i; } slot = path->slots[0]; BUG_ON(slot < 0); if (slot != nritems) { unsigned int old_data = btrfs_item_end_nr(leaf, slot); item = btrfs_item_nr(leaf, slot); btrfs_item_key_to_cpu(leaf, &found_key, slot); /* figure out how many keys we can insert in here */ total_data = data_size[0]; for (i = 1; i < nr; i++) { if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0) break; total_data += data_size[i]; } nr = i; if (old_data < data_end) { btrfs_print_leaf(root, leaf); printk(KERN_CRIT "slot %d old_data %d data_end %d\n", slot, old_data, data_end); BUG_ON(1); } /* * item0..itemN ... dataN.offset..dataN.size .. 
data0.size */ /* first correct the data pointers */ for (i = slot; i < nritems; i++) { u32 ioff; item = btrfs_item_nr(leaf, i); ioff = btrfs_token_item_offset(leaf, item, &token); btrfs_set_token_item_offset(leaf, item, ioff - total_data, &token); } /* shift the items */ memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr), btrfs_item_nr_offset(slot), (nritems - slot) * sizeof(struct btrfs_item)); /* shift the data */ memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + data_end - total_data, btrfs_leaf_data(leaf) + data_end, old_data - data_end); data_end = old_data; } else { /* * this sucks but it has to be done, if we are inserting at * the end of the leaf only insert 1 of the items, since we * have no way of knowing whats on the next leaf and we'd have * to drop our current locks to figure it out */ nr = 1; } /* setup the item for the new data */ for (i = 0; i < nr; i++) { btrfs_cpu_key_to_disk(&disk_key, cpu_key + i); btrfs_set_item_key(leaf, &disk_key, slot + i); item = btrfs_item_nr(leaf, slot + i); btrfs_set_token_item_offset(leaf, item, data_end - data_size[i], &token); data_end -= data_size[i]; btrfs_set_token_item_size(leaf, item, data_size[i], &token); } btrfs_set_header_nritems(leaf, nritems + nr); btrfs_mark_buffer_dirty(leaf); ret = 0; if (slot == 0) { btrfs_cpu_key_to_disk(&disk_key, cpu_key); fixup_low_keys(trans, root, path, &disk_key, 1); } if (btrfs_leaf_free_space(root, leaf) < 0) { btrfs_print_leaf(root, leaf); BUG(); } out: if (!ret) ret = nr; return ret; } /* * this is a helper for btrfs_insert_empty_items, the main goal here is * to save stack depth by doing the bulk of the work in a function * that doesn't call btrfs_search_slot */ void setup_items_for_insert(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *cpu_key, u32 *data_size, u32 total_data, u32 total_size, int nr) { struct btrfs_item *item; int i; u32 nritems; unsigned int data_end; struct btrfs_disk_key disk_key; struct 
extent_buffer *leaf; int slot; struct btrfs_map_token token; btrfs_init_map_token(&token); leaf = path->nodes[0]; slot = path->slots[0]; nritems = btrfs_header_nritems(leaf); data_end = leaf_data_end(root, leaf); if (btrfs_leaf_free_space(root, leaf) < total_size) { btrfs_print_leaf(root, leaf); printk(KERN_CRIT "not enough freespace need %u have %d\n", total_size, btrfs_leaf_free_space(root, leaf)); BUG(); } if (slot != nritems) { unsigned int old_data = btrfs_item_end_nr(leaf, slot); if (old_data < data_end) { btrfs_print_leaf(root, leaf); printk(KERN_CRIT "slot %d old_data %d data_end %d\n", slot, old_data, data_end); BUG_ON(1); } /* * item0..itemN ... dataN.offset..dataN.size .. data0.size */ /* first correct the data pointers */ for (i = slot; i < nritems; i++) { u32 ioff; item = btrfs_item_nr(leaf, i); ioff = btrfs_token_item_offset(leaf, item, &token); btrfs_set_token_item_offset(leaf, item, ioff - total_data, &token); } /* shift the items */ memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr), btrfs_item_nr_offset(slot), (nritems - slot) * sizeof(struct btrfs_item)); /* shift the data */ memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + data_end - total_data, btrfs_leaf_data(leaf) + data_end, old_data - data_end); data_end = old_data; } /* setup the item for the new data */ for (i = 0; i < nr; i++) { btrfs_cpu_key_to_disk(&disk_key, cpu_key + i); btrfs_set_item_key(leaf, &disk_key, slot + i); item = btrfs_item_nr(leaf, slot + i); btrfs_set_token_item_offset(leaf, item, data_end - data_size[i], &token); data_end -= data_size[i]; btrfs_set_token_item_size(leaf, item, data_size[i], &token); } btrfs_set_header_nritems(leaf, nritems + nr); if (slot == 0) { btrfs_cpu_key_to_disk(&disk_key, cpu_key); fixup_low_keys(trans, root, path, &disk_key, 1); } btrfs_unlock_up_safe(path, 1); btrfs_mark_buffer_dirty(leaf); if (btrfs_leaf_free_space(root, leaf) < 0) { btrfs_print_leaf(root, leaf); BUG(); } } /* * Given a key and some data, insert items into the tree. 
* This does all the path init required, making room in the tree if needed. */ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *cpu_key, u32 *data_size, int nr) { int ret = 0; int slot; int i; u32 total_size = 0; u32 total_data = 0; for (i = 0; i < nr; i++) total_data += data_size[i]; total_size = total_data + (nr * sizeof(struct btrfs_item)); ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1); if (ret == 0) return -EEXIST; if (ret < 0) return ret; slot = path->slots[0]; BUG_ON(slot < 0); setup_items_for_insert(trans, root, path, cpu_key, data_size, total_data, total_size, nr); return 0; } /* * Given a key and some data, insert an item into the tree. * This does all the path init required, making room in the tree if needed. */ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *cpu_key, void *data, u32 data_size) { int ret = 0; struct btrfs_path *path; struct extent_buffer *leaf; unsigned long ptr; path = btrfs_alloc_path(); if (!path) return -ENOMEM; ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); if (!ret) { leaf = path->nodes[0]; ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); write_extent_buffer(leaf, data, ptr, data_size); btrfs_mark_buffer_dirty(leaf); } btrfs_free_path(path); return ret; } /* * delete the pointer from a given node. * * the tree should have been previously balanced so the deletion does not * empty a node. 
*/ static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level, int slot) { struct extent_buffer *parent = path->nodes[level]; u32 nritems; nritems = btrfs_header_nritems(parent); if (slot != nritems - 1) { memmove_extent_buffer(parent, btrfs_node_key_ptr_offset(slot), btrfs_node_key_ptr_offset(slot + 1), sizeof(struct btrfs_key_ptr) * (nritems - slot - 1)); } nritems--; btrfs_set_header_nritems(parent, nritems); if (nritems == 0 && parent == root->node) { BUG_ON(btrfs_header_level(root->node) != 1); /* just turn the root into a leaf and break */ btrfs_set_header_level(root->node, 0); } else if (slot == 0) { struct btrfs_disk_key disk_key; btrfs_node_key(parent, &disk_key, 0); fixup_low_keys(trans, root, path, &disk_key, level + 1); } btrfs_mark_buffer_dirty(parent); } /* * a helper function to delete the leaf pointed to by path->slots[1] and * path->nodes[1]. * * This deletes the pointer in path->nodes[1] and frees the leaf * block extent. zero is returned if it all worked out, < 0 otherwise. * * The path must have already been setup for deleting the leaf, including * all the proper balancing. path->nodes[1] must be locked. */ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *leaf) { WARN_ON(btrfs_header_generation(leaf) != trans->transid); del_ptr(trans, root, path, 1, path->slots[1]); /* * btrfs_free_extent is expensive, we want to make sure we * aren't holding any locks when we call it */ btrfs_unlock_up_safe(path, 0); root_sub_used(root, leaf->len); extent_buffer_get(leaf); btrfs_free_tree_block(trans, root, leaf, 0, 1, 0); free_extent_buffer_stale(leaf); } /* * delete the item at the leaf level in path. 
If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int last_off;
	int dsize = 0;
	int ret = 0;
	int wret;
	int i;
	u32 nritems;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	/* total data bytes owned by the doomed items */
	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		int data_end = leaf_data_end(root, leaf);

		/* slide surviving data up over the freed bytes */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + dsize,
			      btrfs_leaf_data(leaf) + data_end,
			      last_off - data_end);

		/* and adjust the surviving items' data offsets to match */
		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff + dsize, &token);
		}

		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
			      btrfs_item_nr_offset(slot + nr),
			      sizeof(struct btrfs_item) *
			      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			btrfs_set_path_blocking(path);
			clean_tree_block(trans, root, leaf);
			btrfs_del_leaf(trans, root, path, leaf);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			/* first item changed; propagate the new low key up */
			btrfs_item_key(leaf, &disk_key, 0);
			fixup_low_keys(trans, root, path, &disk_key, 1);
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			extent_buffer_get(leaf);

			btrfs_set_path_blocking(path);
			wret = push_leaf_left(trans, root, path, 1, 1,
					      1, (u32)-1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1,
						       1, 1, 0);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				btrfs_del_leaf(trans, root, path, leaf);
				free_extent_buffer(leaf);
				ret = 0;
			} else {
				/* if we're still in the path, make sure
				 * we're dirty.  Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}

/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	/* step the key back by one; borrow across fields as needed */
	if (key.offset > 0)
		key.offset--;
	else if (key.type > 0)
		key.type--;
	else if (key.objectid > 0)
		key.objectid--;
	else
		return 1;

	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	/* a leaf starting strictly below our old key counts as "lesser" */
	if (ret < 0)
		return 0;
	return 1;
}

/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that are either in cache or have a minimum
 * transaction id.
This is used by the btree defrag code, and tree logging * * This does not cow, but it does stuff the starting key it finds back * into min_key, so you can call btrfs_search_slot with cow=1 on the * key and get a writable path. * * This does lock as it descends, and path->keep_locks should be set * to 1 by the caller. * * This honors path->lowest_level to prevent descent past a given level * of the tree. * * min_trans indicates the oldest transaction that you are interested * in walking through. Any nodes or leaves older than min_trans are * skipped over (without reading them). * * returns zero if something useful was found, < 0 on error and 1 if there * was nothing in the tree that matched the search criteria. */ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, struct btrfs_key *max_key, struct btrfs_path *path, int cache_only, u64 min_trans) { struct extent_buffer *cur; struct btrfs_key found_key; int slot; int sret; u32 nritems; int level; int ret = 1; WARN_ON(!path->keep_locks); again: cur = btrfs_read_lock_root_node(root); level = btrfs_header_level(cur); WARN_ON(path->nodes[level]); path->nodes[level] = cur; path->locks[level] = BTRFS_READ_LOCK; if (btrfs_header_generation(cur) < min_trans) { ret = 1; goto out; } while (1) { nritems = btrfs_header_nritems(cur); level = btrfs_header_level(cur); sret = bin_search(cur, min_key, level, &slot); /* at the lowest level, we're done, setup the path and exit */ if (level == path->lowest_level) { if (slot >= nritems) goto find_next_key; ret = 0; path->slots[level] = slot; btrfs_item_key_to_cpu(cur, &found_key, slot); goto out; } if (sret && slot > 0) slot--; /* * check this node pointer against the cache_only and * min_trans parameters. If it isn't in cache or is too * old, skip to the next one. 
*/ while (slot < nritems) { u64 blockptr; u64 gen; struct extent_buffer *tmp; struct btrfs_disk_key disk_key; blockptr = btrfs_node_blockptr(cur, slot); gen = btrfs_node_ptr_generation(cur, slot); if (gen < min_trans) { slot++; continue; } if (!cache_only) break; if (max_key) { btrfs_node_key(cur, &disk_key, slot); if (comp_keys(&disk_key, max_key) >= 0) { ret = 1; goto out; } } tmp = btrfs_find_tree_block(root, blockptr, btrfs_level_size(root, level - 1)); if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) { free_extent_buffer(tmp); break; } if (tmp) free_extent_buffer(tmp); slot++; } find_next_key: /* * we didn't find a candidate key in this node, walk forward * and find another one */ if (slot >= nritems) { path->slots[level] = slot; btrfs_set_path_blocking(path); sret = btrfs_find_next_key(root, path, min_key, level, cache_only, min_trans); if (sret == 0) { btrfs_release_path(path); goto again; } else { goto out; } } /* save our key for returning back */ btrfs_node_key_to_cpu(cur, &found_key, slot); path->slots[level] = slot; if (level == path->lowest_level) { ret = 0; unlock_up(path, level, 1, 0, NULL); goto out; } btrfs_set_path_blocking(path); cur = read_node_slot(root, cur, slot); BUG_ON(!cur); /* -ENOMEM */ btrfs_tree_read_lock(cur); path->locks[level - 1] = BTRFS_READ_LOCK; path->nodes[level - 1] = cur; unlock_up(path, level, 1, 0, NULL); btrfs_clear_path_blocking(path, NULL, 0); } out: if (ret == 0) memcpy(min_key, &found_key, sizeof(found_key)); btrfs_set_path_blocking(path); return ret; } /* * this is similar to btrfs_next_leaf, but does not try to preserve * and fixup the path. It looks for and returns the next key in the * tree based on the current path and the cache_only and min_trans * parameters. * * 0 is returned if another key is found, < 0 if there are any errors * and 1 is returned if there are no higher keys in the tree * * path->keep_locks should be set to 1 on the search made before * calling this function. 
*/ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *key, int level, int cache_only, u64 min_trans) { int slot; struct extent_buffer *c; WARN_ON(!path->keep_locks); while (level < BTRFS_MAX_LEVEL) { if (!path->nodes[level]) return 1; slot = path->slots[level] + 1; c = path->nodes[level]; next: if (slot >= btrfs_header_nritems(c)) { int ret; int orig_lowest; struct btrfs_key cur_key; if (level + 1 >= BTRFS_MAX_LEVEL || !path->nodes[level + 1]) return 1; if (path->locks[level + 1]) { level++; continue; } slot = btrfs_header_nritems(c) - 1; if (level == 0) btrfs_item_key_to_cpu(c, &cur_key, slot); else btrfs_node_key_to_cpu(c, &cur_key, slot); orig_lowest = path->lowest_level; btrfs_release_path(path); path->lowest_level = level; ret = btrfs_search_slot(NULL, root, &cur_key, path, 0, 0); path->lowest_level = orig_lowest; if (ret < 0) return ret; c = path->nodes[level]; slot = path->slots[level]; if (ret == 0) slot++; goto next; } if (level == 0) btrfs_item_key_to_cpu(c, key, slot); else { u64 blockptr = btrfs_node_blockptr(c, slot); u64 gen = btrfs_node_ptr_generation(c, slot); if (cache_only) { struct extent_buffer *cur; cur = btrfs_find_tree_block(root, blockptr, btrfs_level_size(root, level - 1)); if (!cur || btrfs_buffer_uptodate(cur, gen, 1) <= 0) { slot++; if (cur) free_extent_buffer(cur); goto next; } free_extent_buffer(cur); } if (gen < min_trans) { slot++; goto next; } btrfs_node_key_to_cpu(c, key, slot); } return 0; } return 1; } /* * search the tree again to find a leaf with greater keys * returns 0 if it found something or 1 if there are no greater leaves. * returns < 0 on io errors. 
*/ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) { int slot; int level; struct extent_buffer *c; struct extent_buffer *next; struct btrfs_key key; u32 nritems; int ret; int old_spinning = path->leave_spinning; int next_rw_lock = 0; nritems = btrfs_header_nritems(path->nodes[0]); if (nritems == 0) return 1; btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); again: level = 1; next = NULL; next_rw_lock = 0; btrfs_release_path(path); path->keep_locks = 1; path->leave_spinning = 1; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); path->keep_locks = 0; if (ret < 0) return ret; nritems = btrfs_header_nritems(path->nodes[0]); /* * by releasing the path above we dropped all our locks. A balance * could have added more items next to the key that used to be * at the very end of the block. So, check again here and * advance the path if there are now more items available. */ if (nritems > 0 && path->slots[0] < nritems - 1) { if (ret == 0) path->slots[0]++; ret = 0; goto done; } while (level < BTRFS_MAX_LEVEL) { if (!path->nodes[level]) { ret = 1; goto done; } slot = path->slots[level] + 1; c = path->nodes[level]; if (slot >= btrfs_header_nritems(c)) { level++; if (level == BTRFS_MAX_LEVEL) { ret = 1; goto done; } continue; } if (next) { btrfs_tree_unlock_rw(next, next_rw_lock); free_extent_buffer(next); } next = c; next_rw_lock = path->locks[level]; ret = read_block_for_search(NULL, root, path, &next, level, slot, &key); if (ret == -EAGAIN) goto again; if (ret < 0) { btrfs_release_path(path); goto done; } if (!path->skip_locking) { ret = btrfs_try_tree_read_lock(next); if (!ret) { btrfs_set_path_blocking(path); btrfs_tree_read_lock(next); btrfs_clear_path_blocking(path, next, BTRFS_READ_LOCK); } next_rw_lock = BTRFS_READ_LOCK; } break; } path->slots[level] = slot; while (1) { level--; c = path->nodes[level]; if (path->locks[level]) btrfs_tree_unlock_rw(c, path->locks[level]); free_extent_buffer(c); path->nodes[level] = next; 
path->slots[level] = 0; if (!path->skip_locking) path->locks[level] = next_rw_lock; if (!level) break; ret = read_block_for_search(NULL, root, path, &next, level, 0, &key); if (ret == -EAGAIN) goto again; if (ret < 0) { btrfs_release_path(path); goto done; } if (!path->skip_locking) { ret = btrfs_try_tree_read_lock(next); if (!ret) { btrfs_set_path_blocking(path); btrfs_tree_read_lock(next); btrfs_clear_path_blocking(path, next, BTRFS_READ_LOCK); } next_rw_lock = BTRFS_READ_LOCK; } } ret = 0; done: unlock_up(path, 0, 1, 0, NULL); path->leave_spinning = old_spinning; if (!old_spinning) btrfs_set_path_blocking(path); return ret; } /* * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps * searching until it gets past min_objectid or finds an item of 'type' * * returns 0 if something is found, 1 if nothing was found and < 0 on error */ int btrfs_previous_item(struct btrfs_root *root, struct btrfs_path *path, u64 min_objectid, int type) { struct btrfs_key found_key; struct extent_buffer *leaf; u32 nritems; int ret; while (1) { if (path->slots[0] == 0) { btrfs_set_path_blocking(path); ret = btrfs_prev_leaf(root, path); if (ret != 0) return ret; } else { path->slots[0]--; } leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); if (nritems == 0) return 1; if (path->slots[0] == nritems) path->slots[0]--; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.objectid < min_objectid) break; if (found_key.type == type) return 0; if (found_key.objectid == min_objectid && found_key.type < type) break; } return 1; }
gpl-2.0
jongwonk/s5pv210_linux_kernel
drivers/video/fb_defio.c
5030
6352
/* * linux/drivers/video/fb_defio.c * * Copyright (C) 2006 Jaya Kumar * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/list.h> /* to support deferred IO */ #include <linux/rmap.h> #include <linux/pagemap.h> struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs) { void *screen_base = (void __force *) info->screen_base; struct page *page; if (is_vmalloc_addr(screen_base + offs)) page = vmalloc_to_page(screen_base + offs); else page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT); return page; } /* this is to find and return the vmalloc-ed fb pages */ static int fb_deferred_io_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { unsigned long offset; struct page *page; struct fb_info *info = vma->vm_private_data; offset = vmf->pgoff << PAGE_SHIFT; if (offset >= info->fix.smem_len) return VM_FAULT_SIGBUS; page = fb_deferred_io_page(info, offset); if (!page) return VM_FAULT_SIGBUS; get_page(page); if (vma->vm_file) page->mapping = vma->vm_file->f_mapping; else printk(KERN_ERR "no mapping available\n"); BUG_ON(!page->mapping); page->index = vmf->pgoff; vmf->page = page; return 0; } int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct fb_info *info = file->private_data; struct inode *inode = file->f_path.dentry->d_inode; int err = filemap_write_and_wait_range(inode->i_mapping, start, end); if (err) return err; /* Skip if deferred io is compiled-in but disabled on this fbdev */ if (!info->fbdefio) return 0; mutex_lock(&inode->i_mutex); /* Kill off the delayed work */ cancel_delayed_work_sync(&info->deferred_work); /* Run it immediately */ 
err = schedule_delayed_work(&info->deferred_work, 0); mutex_unlock(&inode->i_mutex); return err; } EXPORT_SYMBOL_GPL(fb_deferred_io_fsync); /* vm_ops->page_mkwrite handler */ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page = vmf->page; struct fb_info *info = vma->vm_private_data; struct fb_deferred_io *fbdefio = info->fbdefio; struct page *cur; /* this is a callback we get when userspace first tries to write to the page. we schedule a workqueue. that workqueue will eventually mkclean the touched pages and execute the deferred framebuffer IO. then if userspace touches a page again, we repeat the same scheme */ /* protect against the workqueue changing the page list */ mutex_lock(&fbdefio->lock); /* * We want the page to remain locked from ->page_mkwrite until * the PTE is marked dirty to avoid page_mkclean() being called * before the PTE is updated, which would leave the page ignored * by defio. * Do this by locking the page here and informing the caller * about it with VM_FAULT_LOCKED. */ lock_page(page); /* we loop through the pagelist before adding in order to keep the pagelist sorted */ list_for_each_entry(cur, &fbdefio->pagelist, lru) { /* this check is to catch the case where a new process could start writing to the same page through a new pte. 
this new access can cause the mkwrite even when the original ps's pte is marked writable */ if (unlikely(cur == page)) goto page_already_added; else if (cur->index > page->index) break; } list_add_tail(&page->lru, &cur->lru); page_already_added: mutex_unlock(&fbdefio->lock); /* come back after delay to process the deferred IO */ schedule_delayed_work(&info->deferred_work, fbdefio->delay); return VM_FAULT_LOCKED; } static const struct vm_operations_struct fb_deferred_io_vm_ops = { .fault = fb_deferred_io_fault, .page_mkwrite = fb_deferred_io_mkwrite, }; static int fb_deferred_io_set_page_dirty(struct page *page) { if (!PageDirty(page)) SetPageDirty(page); return 0; } static const struct address_space_operations fb_deferred_io_aops = { .set_page_dirty = fb_deferred_io_set_page_dirty, }; static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma) { vma->vm_ops = &fb_deferred_io_vm_ops; vma->vm_flags |= ( VM_RESERVED | VM_DONTEXPAND ); if (!(info->flags & FBINFO_VIRTFB)) vma->vm_flags |= VM_IO; vma->vm_private_data = info; return 0; } /* workqueue callback */ static void fb_deferred_io_work(struct work_struct *work) { struct fb_info *info = container_of(work, struct fb_info, deferred_work.work); struct list_head *node, *next; struct page *cur; struct fb_deferred_io *fbdefio = info->fbdefio; /* here we mkclean the pages, then do all deferred IO */ mutex_lock(&fbdefio->lock); list_for_each_entry(cur, &fbdefio->pagelist, lru) { lock_page(cur); page_mkclean(cur); unlock_page(cur); } /* driver's callback with pagelist */ fbdefio->deferred_io(info, &fbdefio->pagelist); /* clear the list */ list_for_each_safe(node, next, &fbdefio->pagelist) { list_del(node); } mutex_unlock(&fbdefio->lock); } void fb_deferred_io_init(struct fb_info *info) { struct fb_deferred_io *fbdefio = info->fbdefio; BUG_ON(!fbdefio); mutex_init(&fbdefio->lock); info->fbops->fb_mmap = fb_deferred_io_mmap; INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work); 
INIT_LIST_HEAD(&fbdefio->pagelist); if (fbdefio->delay == 0) /* set a default of 1 s */ fbdefio->delay = HZ; } EXPORT_SYMBOL_GPL(fb_deferred_io_init); void fb_deferred_io_open(struct fb_info *info, struct inode *inode, struct file *file) { file->f_mapping->a_ops = &fb_deferred_io_aops; } EXPORT_SYMBOL_GPL(fb_deferred_io_open); void fb_deferred_io_cleanup(struct fb_info *info) { struct fb_deferred_io *fbdefio = info->fbdefio; struct page *page; int i; BUG_ON(!fbdefio); cancel_delayed_work_sync(&info->deferred_work); /* clear out the mapping that we setup */ for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) { page = fb_deferred_io_page(info, i); page->mapping = NULL; } info->fbops->fb_mmap = NULL; mutex_destroy(&fbdefio->lock); } EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup); MODULE_LICENSE("GPL");
gpl-2.0
Ateeq72/weekly-kernel
drivers/block/mg_disk.c
8358
26577
/* * drivers/block/mg_disk.c * * Support for the mGine m[g]flash IO mode. * Based on legacy hd.c * * (c) 2008 mGine Co.,LTD * (c) 2008 unsik Kim <donari75@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/blkdev.h> #include <linux/hdreg.h> #include <linux/ata.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/mg_disk.h> #include <linux/slab.h> #define MG_RES_SEC (CONFIG_MG_DISK_RES << 1) /* name for block device */ #define MG_DISK_NAME "mgd" #define MG_DISK_MAJ 0 #define MG_DISK_MAX_PART 16 #define MG_SECTOR_SIZE 512 #define MG_MAX_SECTS 256 /* Register offsets */ #define MG_BUFF_OFFSET 0x8000 #define MG_REG_OFFSET 0xC000 #define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */ #define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */ #define MG_REG_SECT_CNT (MG_REG_OFFSET + 4) #define MG_REG_SECT_NUM (MG_REG_OFFSET + 6) #define MG_REG_CYL_LOW (MG_REG_OFFSET + 8) #define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA) #define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC) #define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */ #define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */ #define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10) #define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12) /* handy status */ #define MG_STAT_READY (ATA_DRDY | ATA_DSC) #define MG_READY_OK(s) (((s) & (MG_STAT_READY | (ATA_BUSY | ATA_DF | \ ATA_ERR))) == MG_STAT_READY) /* error code for others */ #define MG_ERR_NONE 0 #define MG_ERR_TIMEOUT 0x100 #define MG_ERR_INIT_STAT 0x101 #define MG_ERR_TRANSLATION 0x102 #define MG_ERR_CTRL_RST 0x103 #define MG_ERR_INV_STAT 0x104 #define MG_ERR_RSTOUT 0x105 #define MG_MAX_ERRORS 6 /* Max read/write errors */ /* command */ #define MG_CMD_RD 0x20 #define 
MG_CMD_WR 0x30 #define MG_CMD_SLEEP 0x99 #define MG_CMD_WAKEUP 0xC3 #define MG_CMD_ID 0xEC #define MG_CMD_WR_CONF 0x3C #define MG_CMD_RD_CONF 0x40 /* operation mode */ #define MG_OP_CASCADE (1 << 0) #define MG_OP_CASCADE_SYNC_RD (1 << 1) #define MG_OP_CASCADE_SYNC_WR (1 << 2) #define MG_OP_INTERLEAVE (1 << 3) /* synchronous */ #define MG_BURST_LAT_4 (3 << 4) #define MG_BURST_LAT_5 (4 << 4) #define MG_BURST_LAT_6 (5 << 4) #define MG_BURST_LAT_7 (6 << 4) #define MG_BURST_LAT_8 (7 << 4) #define MG_BURST_LEN_4 (1 << 1) #define MG_BURST_LEN_8 (2 << 1) #define MG_BURST_LEN_16 (3 << 1) #define MG_BURST_LEN_32 (4 << 1) #define MG_BURST_LEN_CONT (0 << 1) /* timeout value (unit: ms) */ #define MG_TMAX_CONF_TO_CMD 1 #define MG_TMAX_WAIT_RD_DRQ 10 #define MG_TMAX_WAIT_WR_DRQ 500 #define MG_TMAX_RST_TO_BUSY 10 #define MG_TMAX_HDRST_TO_RDY 500 #define MG_TMAX_SWRST_TO_RDY 500 #define MG_TMAX_RSTOUT 3000 #define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST) /* main structure for mflash driver */ struct mg_host { struct device *dev; struct request_queue *breq; struct request *req; spinlock_t lock; struct gendisk *gd; struct timer_list timer; void (*mg_do_intr) (struct mg_host *); u16 id[ATA_ID_WORDS]; u16 cyls; u16 heads; u16 sectors; u32 n_sectors; u32 nres_sectors; void __iomem *dev_base; unsigned int irq; unsigned int rst; unsigned int rstout; u32 major; u32 error; }; /* * Debugging macro and defines */ #undef DO_MG_DEBUG #ifdef DO_MG_DEBUG # define MG_DBG(fmt, args...) \ printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args) #else /* CONFIG_MG_DEBUG */ # define MG_DBG(fmt, args...) 
do { } while (0) #endif /* CONFIG_MG_DEBUG */ static void mg_request(struct request_queue *); static bool mg_end_request(struct mg_host *host, int err, unsigned int nr_bytes) { if (__blk_end_request(host->req, err, nr_bytes)) return true; host->req = NULL; return false; } static bool mg_end_request_cur(struct mg_host *host, int err) { return mg_end_request(host, err, blk_rq_cur_bytes(host->req)); } static void mg_dump_status(const char *msg, unsigned int stat, struct mg_host *host) { char *name = MG_DISK_NAME; if (host->req) name = host->req->rq_disk->disk_name; printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff); if (stat & ATA_BUSY) printk("Busy "); if (stat & ATA_DRDY) printk("DriveReady "); if (stat & ATA_DF) printk("WriteFault "); if (stat & ATA_DSC) printk("SeekComplete "); if (stat & ATA_DRQ) printk("DataRequest "); if (stat & ATA_CORR) printk("CorrectedError "); if (stat & ATA_ERR) printk("Error "); printk("}\n"); if ((stat & ATA_ERR) == 0) { host->error = 0; } else { host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR); printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg, host->error & 0xff); if (host->error & ATA_BBK) printk("BadSector "); if (host->error & ATA_UNC) printk("UncorrectableError "); if (host->error & ATA_IDNF) printk("SectorIdNotFound "); if (host->error & ATA_ABORTED) printk("DriveStatusError "); if (host->error & ATA_AMNF) printk("AddrMarkNotFound "); printk("}"); if (host->error & (ATA_BBK | ATA_UNC | ATA_IDNF | ATA_AMNF)) { if (host->req) printk(", sector=%u", (unsigned int)blk_rq_pos(host->req)); } printk("\n"); } } static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec) { u8 status; unsigned long expire, cur_jiffies; struct mg_drv_data *prv_data = host->dev->platform_data; host->error = MG_ERR_NONE; expire = jiffies + msecs_to_jiffies(msec); /* These 2 times dummy status read prevents reading invalid * status. 
A very little time (3 times of mflash operating clk) * is required for busy bit is set. Use dummy read instead of * busy wait, because mflash's PLL is machine dependent. */ if (prv_data->use_polling) { status = inb((unsigned long)host->dev_base + MG_REG_STATUS); status = inb((unsigned long)host->dev_base + MG_REG_STATUS); } status = inb((unsigned long)host->dev_base + MG_REG_STATUS); do { cur_jiffies = jiffies; if (status & ATA_BUSY) { if (expect == ATA_BUSY) break; } else { /* Check the error condition! */ if (status & ATA_ERR) { mg_dump_status("mg_wait", status, host); break; } if (expect == MG_STAT_READY) if (MG_READY_OK(status)) break; if (expect == ATA_DRQ) if (status & ATA_DRQ) break; } if (!msec) { mg_dump_status("not ready", status, host); return MG_ERR_INV_STAT; } status = inb((unsigned long)host->dev_base + MG_REG_STATUS); } while (time_before(cur_jiffies, expire)); if (time_after_eq(cur_jiffies, expire) && msec) host->error = MG_ERR_TIMEOUT; return host->error; } static unsigned int mg_wait_rstout(u32 rstout, u32 msec) { unsigned long expire; expire = jiffies + msecs_to_jiffies(msec); while (time_before(jiffies, expire)) { if (gpio_get_value(rstout) == 1) return MG_ERR_NONE; msleep(10); } return MG_ERR_RSTOUT; } static void mg_unexpected_intr(struct mg_host *host) { u32 status = inb((unsigned long)host->dev_base + MG_REG_STATUS); mg_dump_status("mg_unexpected_intr", status, host); } static irqreturn_t mg_irq(int irq, void *dev_id) { struct mg_host *host = dev_id; void (*handler)(struct mg_host *) = host->mg_do_intr; spin_lock(&host->lock); host->mg_do_intr = NULL; del_timer(&host->timer); if (!handler) handler = mg_unexpected_intr; handler(host); spin_unlock(&host->lock); return IRQ_HANDLED; } /* local copy of ata_id_string() */ static void mg_id_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len) { unsigned int c; BUG_ON(len & 1); while (len > 0) { c = id[ofs] >> 8; *s = c; s++; c = id[ofs] & 0xff; *s = c; s++; ofs++; len -= 2; } 
} /* local copy of ata_id_c_string() */ static void mg_id_c_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len) { unsigned char *p; mg_id_string(id, s, ofs, len - 1); p = s + strnlen(s, len - 1); while (p > s && p[-1] == ' ') p--; *p = '\0'; } static int mg_get_disk_id(struct mg_host *host) { u32 i; s32 err; const u16 *id = host->id; struct mg_drv_data *prv_data = host->dev->platform_data; char fwrev[ATA_ID_FW_REV_LEN + 1]; char model[ATA_ID_PROD_LEN + 1]; char serial[ATA_ID_SERNO_LEN + 1]; if (!prv_data->use_polling) outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND); err = mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ); if (err) return err; for (i = 0; i < (MG_SECTOR_SIZE >> 1); i++) host->id[i] = le16_to_cpu(inw((unsigned long)host->dev_base + MG_BUFF_OFFSET + i * 2)); outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); err = mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD); if (err) return err; if ((id[ATA_ID_FIELD_VALID] & 1) == 0) return MG_ERR_TRANSLATION; host->n_sectors = ata_id_u32(id, ATA_ID_LBA_CAPACITY); host->cyls = id[ATA_ID_CYLS]; host->heads = id[ATA_ID_HEADS]; host->sectors = id[ATA_ID_SECTORS]; if (MG_RES_SEC && host->heads && host->sectors) { /* modify cyls, n_sectors */ host->cyls = (host->n_sectors - MG_RES_SEC) / host->heads / host->sectors; host->nres_sectors = host->n_sectors - host->cyls * host->heads * host->sectors; host->n_sectors -= host->nres_sectors; } mg_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev)); mg_id_c_string(id, model, ATA_ID_PROD, sizeof(model)); mg_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial)); printk(KERN_INFO "mg_disk: model: %s\n", model); printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev); printk(KERN_INFO "mg_disk: serial: %s\n", serial); printk(KERN_INFO "mg_disk: %d + reserved %d sectors\n", host->n_sectors, host->nres_sectors); if (!prv_data->use_polling) outb(0, (unsigned 
long)host->dev_base + MG_REG_DRV_CTRL); return err; } static int mg_disk_init(struct mg_host *host) { struct mg_drv_data *prv_data = host->dev->platform_data; s32 err; u8 init_status; /* hdd rst low */ gpio_set_value(host->rst, 0); err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY); if (err) return err; /* hdd rst high */ gpio_set_value(host->rst, 1); err = mg_wait(host, MG_STAT_READY, MG_TMAX_HDRST_TO_RDY); if (err) return err; /* soft reset on */ outb(ATA_SRST | (prv_data->use_polling ? ATA_NIEN : 0), (unsigned long)host->dev_base + MG_REG_DRV_CTRL); err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY); if (err) return err; /* soft reset off */ outb(prv_data->use_polling ? ATA_NIEN : 0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY); if (err) return err; init_status = inb((unsigned long)host->dev_base + MG_REG_STATUS) & 0xf; if (init_status == 0xf) return MG_ERR_INIT_STAT; return err; } static void mg_bad_rw_intr(struct mg_host *host) { if (host->req) if (++host->req->errors >= MG_MAX_ERRORS || host->error == MG_ERR_TIMEOUT) mg_end_request_cur(host, -EIO); } static unsigned int mg_out(struct mg_host *host, unsigned int sect_num, unsigned int sect_cnt, unsigned int cmd, void (*intr_addr)(struct mg_host *)) { struct mg_drv_data *prv_data = host->dev->platform_data; if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) return host->error; if (!prv_data->use_polling) { host->mg_do_intr = intr_addr; mod_timer(&host->timer, jiffies + 3 * HZ); } if (MG_RES_SEC) sect_num += MG_RES_SEC; outb((u8)sect_cnt, (unsigned long)host->dev_base + MG_REG_SECT_CNT); outb((u8)sect_num, (unsigned long)host->dev_base + MG_REG_SECT_NUM); outb((u8)(sect_num >> 8), (unsigned long)host->dev_base + MG_REG_CYL_LOW); outb((u8)(sect_num >> 16), (unsigned long)host->dev_base + MG_REG_CYL_HIGH); outb((u8)((sect_num >> 24) | ATA_LBA | ATA_DEVICE_OBS), (unsigned long)host->dev_base + MG_REG_DRV_HEAD); outb(cmd, (unsigned long)host->dev_base 
+ MG_REG_COMMAND); return MG_ERR_NONE; } static void mg_read_one(struct mg_host *host, struct request *req) { u16 *buff = (u16 *)req->buffer; u32 i; for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET + (i << 1)); } static void mg_read(struct request *req) { struct mg_host *host = req->rq_disk->private_data; if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req), MG_CMD_RD, NULL) != MG_ERR_NONE) mg_bad_rw_intr(host); MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", blk_rq_sectors(req), blk_rq_pos(req), req->buffer); do { if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) { mg_bad_rw_intr(host); return; } mg_read_one(host, req); outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); } while (mg_end_request(host, 0, MG_SECTOR_SIZE)); } static void mg_write_one(struct mg_host *host, struct request *req) { u16 *buff = (u16 *)req->buffer; u32 i; for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) outw(*buff++, (unsigned long)host->dev_base + MG_BUFF_OFFSET + (i << 1)); } static void mg_write(struct request *req) { struct mg_host *host = req->rq_disk->private_data; unsigned int rem = blk_rq_sectors(req); if (mg_out(host, blk_rq_pos(req), rem, MG_CMD_WR, NULL) != MG_ERR_NONE) { mg_bad_rw_intr(host); return; } MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", rem, blk_rq_pos(req), req->buffer); if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { mg_bad_rw_intr(host); return; } do { mg_write_one(host, req); outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); rem--; if (rem > 1 && mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { mg_bad_rw_intr(host); return; } else if (mg_wait(host, MG_STAT_READY, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { mg_bad_rw_intr(host); return; } } while (mg_end_request(host, 0, MG_SECTOR_SIZE)); } static void mg_read_intr(struct mg_host *host) { struct request *req = host->req; u32 i; /* check status */ do { i = inb((unsigned 
long)host->dev_base + MG_REG_STATUS); if (i & ATA_BUSY) break; if (!MG_READY_OK(i)) break; if (i & ATA_DRQ) goto ok_to_read; } while (0); mg_dump_status("mg_read_intr", i, host); mg_bad_rw_intr(host); mg_request(host->breq); return; ok_to_read: mg_read_one(host, req); MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer); /* send read confirm */ outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); if (mg_end_request(host, 0, MG_SECTOR_SIZE)) { /* set handler if read remains */ host->mg_do_intr = mg_read_intr; mod_timer(&host->timer, jiffies + 3 * HZ); } else /* goto next request */ mg_request(host->breq); } static void mg_write_intr(struct mg_host *host) { struct request *req = host->req; u32 i; bool rem; /* check status */ do { i = inb((unsigned long)host->dev_base + MG_REG_STATUS); if (i & ATA_BUSY) break; if (!MG_READY_OK(i)) break; if ((blk_rq_sectors(req) <= 1) || (i & ATA_DRQ)) goto ok_to_write; } while (0); mg_dump_status("mg_write_intr", i, host); mg_bad_rw_intr(host); mg_request(host->breq); return; ok_to_write: if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) { /* write 1 sector and set handler if remains */ mg_write_one(host, req); MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", blk_rq_pos(req), blk_rq_sectors(req), req->buffer); host->mg_do_intr = mg_write_intr; mod_timer(&host->timer, jiffies + 3 * HZ); } /* send write confirm */ outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); if (!rem) mg_request(host->breq); } void mg_times_out(unsigned long data) { struct mg_host *host = (struct mg_host *)data; char *name; spin_lock_irq(&host->lock); if (!host->req) goto out_unlock; host->mg_do_intr = NULL; name = host->req->rq_disk->disk_name; printk(KERN_DEBUG "%s: timeout\n", name); host->error = MG_ERR_TIMEOUT; mg_bad_rw_intr(host); out_unlock: mg_request(host->breq); spin_unlock_irq(&host->lock); } static void mg_request_poll(struct request_queue *q) { struct 
mg_host *host = q->queuedata; while (1) { if (!host->req) { host->req = blk_fetch_request(q); if (!host->req) break; } if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) { mg_end_request_cur(host, -EIO); continue; } if (rq_data_dir(host->req) == READ) mg_read(host->req); else mg_write(host->req); } } static unsigned int mg_issue_req(struct request *req, struct mg_host *host, unsigned int sect_num, unsigned int sect_cnt) { switch (rq_data_dir(req)) { case READ: if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr) != MG_ERR_NONE) { mg_bad_rw_intr(host); return host->error; } break; case WRITE: /* TODO : handler */ outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr) != MG_ERR_NONE) { mg_bad_rw_intr(host); return host->error; } del_timer(&host->timer); mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ); outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); if (host->error) { mg_bad_rw_intr(host); return host->error; } mg_write_one(host, req); mod_timer(&host->timer, jiffies + 3 * HZ); outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); break; } return MG_ERR_NONE; } /* This function also called from IRQ context */ static void mg_request(struct request_queue *q) { struct mg_host *host = q->queuedata; struct request *req; u32 sect_num, sect_cnt; while (1) { if (!host->req) { host->req = blk_fetch_request(q); if (!host->req) break; } req = host->req; /* check unwanted request call */ if (host->mg_do_intr) return; del_timer(&host->timer); sect_num = blk_rq_pos(req); /* deal whole segments */ sect_cnt = blk_rq_sectors(req); /* sanity check */ if (sect_num >= get_capacity(req->rq_disk) || ((sect_num + sect_cnt) > get_capacity(req->rq_disk))) { printk(KERN_WARNING "%s: bad access: sector=%d, count=%d\n", req->rq_disk->disk_name, sect_num, sect_cnt); mg_end_request_cur(host, -EIO); continue; } if (unlikely(req->cmd_type != REQ_TYPE_FS)) { mg_end_request_cur(host, -EIO); 
continue; } if (!mg_issue_req(req, host, sect_num, sect_cnt)) return; } } static int mg_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct mg_host *host = bdev->bd_disk->private_data; geo->cylinders = (unsigned short)host->cyls; geo->heads = (unsigned char)host->heads; geo->sectors = (unsigned char)host->sectors; return 0; } static const struct block_device_operations mg_disk_ops = { .getgeo = mg_getgeo }; static int mg_suspend(struct platform_device *plat_dev, pm_message_t state) { struct mg_drv_data *prv_data = plat_dev->dev.platform_data; struct mg_host *host = prv_data->host; if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) return -EIO; if (!prv_data->use_polling) outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND); /* wait until mflash deep sleep */ msleep(1); if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) { if (!prv_data->use_polling) outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); return -EIO; } return 0; } static int mg_resume(struct platform_device *plat_dev) { struct mg_drv_data *prv_data = plat_dev->dev.platform_data; struct mg_host *host = prv_data->host; if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) return -EIO; outb(MG_CMD_WAKEUP, (unsigned long)host->dev_base + MG_REG_COMMAND); /* wait until mflash wakeup */ msleep(1); if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) return -EIO; if (!prv_data->use_polling) outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); return 0; } static int mg_probe(struct platform_device *plat_dev) { struct mg_host *host; struct resource *rsc; struct mg_drv_data *prv_data = plat_dev->dev.platform_data; int err = 0; if (!prv_data) { printk(KERN_ERR "%s:%d fail (no driver_data)\n", __func__, __LINE__); err = -EINVAL; goto probe_err; } /* alloc mg_host */ host = kzalloc(sizeof(struct mg_host), GFP_KERNEL); if (!host) { printk(KERN_ERR "%s:%d fail (no memory for mg_host)\n", __func__, 
__LINE__); err = -ENOMEM; goto probe_err; } host->major = MG_DISK_MAJ; /* link each other */ prv_data->host = host; host->dev = &plat_dev->dev; /* io remap */ rsc = platform_get_resource(plat_dev, IORESOURCE_MEM, 0); if (!rsc) { printk(KERN_ERR "%s:%d platform_get_resource fail\n", __func__, __LINE__); err = -EINVAL; goto probe_err_2; } host->dev_base = ioremap(rsc->start, resource_size(rsc)); if (!host->dev_base) { printk(KERN_ERR "%s:%d ioremap fail\n", __func__, __LINE__); err = -EIO; goto probe_err_2; } MG_DBG("dev_base = 0x%x\n", (u32)host->dev_base); /* get reset pin */ rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO, MG_RST_PIN); if (!rsc) { printk(KERN_ERR "%s:%d get reset pin fail\n", __func__, __LINE__); err = -EIO; goto probe_err_3; } host->rst = rsc->start; /* init rst pin */ err = gpio_request(host->rst, MG_RST_PIN); if (err) goto probe_err_3; gpio_direction_output(host->rst, 1); /* reset out pin */ if (!(prv_data->dev_attr & MG_DEV_MASK)) goto probe_err_3a; if (prv_data->dev_attr != MG_BOOT_DEV) { rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO, MG_RSTOUT_PIN); if (!rsc) { printk(KERN_ERR "%s:%d get reset-out pin fail\n", __func__, __LINE__); err = -EIO; goto probe_err_3a; } host->rstout = rsc->start; err = gpio_request(host->rstout, MG_RSTOUT_PIN); if (err) goto probe_err_3a; gpio_direction_input(host->rstout); } /* disk reset */ if (prv_data->dev_attr == MG_STORAGE_DEV) { /* If POR seq. 
not yet finised, wait */ err = mg_wait_rstout(host->rstout, MG_TMAX_RSTOUT); if (err) goto probe_err_3b; err = mg_disk_init(host); if (err) { printk(KERN_ERR "%s:%d fail (err code : %d)\n", __func__, __LINE__, err); err = -EIO; goto probe_err_3b; } } /* get irq resource */ if (!prv_data->use_polling) { host->irq = platform_get_irq(plat_dev, 0); if (host->irq == -ENXIO) { err = host->irq; goto probe_err_3b; } err = request_irq(host->irq, mg_irq, IRQF_DISABLED | IRQF_TRIGGER_RISING, MG_DEV_NAME, host); if (err) { printk(KERN_ERR "%s:%d fail (request_irq err=%d)\n", __func__, __LINE__, err); goto probe_err_3b; } } /* get disk id */ err = mg_get_disk_id(host); if (err) { printk(KERN_ERR "%s:%d fail (err code : %d)\n", __func__, __LINE__, err); err = -EIO; goto probe_err_4; } err = register_blkdev(host->major, MG_DISK_NAME); if (err < 0) { printk(KERN_ERR "%s:%d register_blkdev fail (err code : %d)\n", __func__, __LINE__, err); goto probe_err_4; } if (!host->major) host->major = err; spin_lock_init(&host->lock); if (prv_data->use_polling) host->breq = blk_init_queue(mg_request_poll, &host->lock); else host->breq = blk_init_queue(mg_request, &host->lock); if (!host->breq) { err = -ENOMEM; printk(KERN_ERR "%s:%d (blk_init_queue) fail\n", __func__, __LINE__); goto probe_err_5; } host->breq->queuedata = host; /* mflash is random device, thanx for the noop */ err = elevator_change(host->breq, "noop"); if (err) { printk(KERN_ERR "%s:%d (elevator_init) fail\n", __func__, __LINE__); goto probe_err_6; } blk_queue_max_hw_sectors(host->breq, MG_MAX_SECTS); blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE); init_timer(&host->timer); host->timer.function = mg_times_out; host->timer.data = (unsigned long)host; host->gd = alloc_disk(MG_DISK_MAX_PART); if (!host->gd) { printk(KERN_ERR "%s:%d (alloc_disk) fail\n", __func__, __LINE__); err = -ENOMEM; goto probe_err_7; } host->gd->major = host->major; host->gd->first_minor = 0; host->gd->fops = &mg_disk_ops; host->gd->queue = 
host->breq; host->gd->private_data = host; sprintf(host->gd->disk_name, MG_DISK_NAME"a"); set_capacity(host->gd, host->n_sectors); add_disk(host->gd); return err; probe_err_7: del_timer_sync(&host->timer); probe_err_6: blk_cleanup_queue(host->breq); probe_err_5: unregister_blkdev(MG_DISK_MAJ, MG_DISK_NAME); probe_err_4: if (!prv_data->use_polling) free_irq(host->irq, host); probe_err_3b: gpio_free(host->rstout); probe_err_3a: gpio_free(host->rst); probe_err_3: iounmap(host->dev_base); probe_err_2: kfree(host); probe_err: return err; } static int mg_remove(struct platform_device *plat_dev) { struct mg_drv_data *prv_data = plat_dev->dev.platform_data; struct mg_host *host = prv_data->host; int err = 0; /* delete timer */ del_timer_sync(&host->timer); /* remove disk */ if (host->gd) { del_gendisk(host->gd); put_disk(host->gd); } /* remove queue */ if (host->breq) blk_cleanup_queue(host->breq); /* unregister blk device */ unregister_blkdev(host->major, MG_DISK_NAME); /* free irq */ if (!prv_data->use_polling) free_irq(host->irq, host); /* free reset-out pin */ if (prv_data->dev_attr != MG_BOOT_DEV) gpio_free(host->rstout); /* free rst pin */ if (host->rst) gpio_free(host->rst); /* unmap io */ if (host->dev_base) iounmap(host->dev_base); /* free mg_host */ kfree(host); return err; } static struct platform_driver mg_disk_driver = { .probe = mg_probe, .remove = mg_remove, .suspend = mg_suspend, .resume = mg_resume, .driver = { .name = MG_DEV_NAME, .owner = THIS_MODULE, } }; /**************************************************************************** * * Module stuff * ****************************************************************************/ static int __init mg_init(void) { printk(KERN_INFO "mGine mflash driver, (c) 2008 mGine Co.\n"); return platform_driver_register(&mg_disk_driver); } static void __exit mg_exit(void) { printk(KERN_INFO "mflash driver : bye bye\n"); platform_driver_unregister(&mg_disk_driver); } module_init(mg_init); module_exit(mg_exit); 
MODULE_LICENSE("GPL"); MODULE_AUTHOR("unsik Kim <donari75@gmail.com>"); MODULE_DESCRIPTION("mGine m[g]flash device driver");
gpl-2.0
HomuHomu/GT-N7000-ICS-kernel
arch/powerpc/platforms/chrp/pegasos_eth.c
9382
4625
/* * Copyright (C) 2005 Sven Luther <sl@bplan-gmbh.de> * Thanks to : * Dale Farnsworth <dale@farnsworth.org> * Mark A. Greer <mgreer@mvista.com> * Nicolas DET <nd@bplan-gmbh.de> * Benjamin Herrenschmidt <benh@kernel.crashing.org> * And anyone else who helped me on this. */ #include <linux/types.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/mv643xx.h> #include <linux/pci.h> #define PEGASOS2_MARVELL_REGBASE (0xf1000000) #define PEGASOS2_MARVELL_REGSIZE (0x00004000) #define PEGASOS2_SRAM_BASE (0xf2000000) #define PEGASOS2_SRAM_SIZE (256*1024) #define PEGASOS2_SRAM_BASE_ETH_PORT0 (PEGASOS2_SRAM_BASE) #define PEGASOS2_SRAM_BASE_ETH_PORT1 (PEGASOS2_SRAM_BASE_ETH_PORT0 + (PEGASOS2_SRAM_SIZE / 2) ) #define PEGASOS2_SRAM_RXRING_SIZE (PEGASOS2_SRAM_SIZE/4) #define PEGASOS2_SRAM_TXRING_SIZE (PEGASOS2_SRAM_SIZE/4) #undef BE_VERBOSE static struct resource mv643xx_eth_shared_resources[] = { [0] = { .name = "ethernet shared base", .start = 0xf1000000 + MV643XX_ETH_SHARED_REGS, .end = 0xf1000000 + MV643XX_ETH_SHARED_REGS + MV643XX_ETH_SHARED_REGS_SIZE - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device mv643xx_eth_shared_device = { .name = MV643XX_ETH_SHARED_NAME, .id = 0, .num_resources = ARRAY_SIZE(mv643xx_eth_shared_resources), .resource = mv643xx_eth_shared_resources, }; static struct resource mv643xx_eth_port1_resources[] = { [0] = { .name = "eth port1 irq", .start = 9, .end = 9, .flags = IORESOURCE_IRQ, }, }; static struct mv643xx_eth_platform_data eth_port1_pd = { .shared = &mv643xx_eth_shared_device, .port_number = 1, .phy_addr = MV643XX_ETH_PHY_ADDR(7), .tx_sram_addr = PEGASOS2_SRAM_BASE_ETH_PORT1, .tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE, .tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16, .rx_sram_addr = PEGASOS2_SRAM_BASE_ETH_PORT1 + PEGASOS2_SRAM_TXRING_SIZE, .rx_sram_size = PEGASOS2_SRAM_RXRING_SIZE, .rx_queue_size = PEGASOS2_SRAM_RXRING_SIZE/16, }; static struct 
platform_device eth_port1_device = { .name = MV643XX_ETH_NAME, .id = 1, .num_resources = ARRAY_SIZE(mv643xx_eth_port1_resources), .resource = mv643xx_eth_port1_resources, .dev = { .platform_data = &eth_port1_pd, }, }; static struct platform_device *mv643xx_eth_pd_devs[] __initdata = { &mv643xx_eth_shared_device, &eth_port1_device, }; /***********/ /***********/ #define MV_READ(offset,val) { val = readl(mv643xx_reg_base + offset); } #define MV_WRITE(offset,data) writel(data, mv643xx_reg_base + offset) static void __iomem *mv643xx_reg_base; static int Enable_SRAM(void) { u32 ALong; if (mv643xx_reg_base == NULL) mv643xx_reg_base = ioremap(PEGASOS2_MARVELL_REGBASE, PEGASOS2_MARVELL_REGSIZE); if (mv643xx_reg_base == NULL) return -ENOMEM; #ifdef BE_VERBOSE printk("Pegasos II/Marvell MV64361: register remapped from %p to %p\n", (void *)PEGASOS2_MARVELL_REGBASE, (void *)mv643xx_reg_base); #endif MV_WRITE(MV64340_SRAM_CONFIG, 0); MV_WRITE(MV64340_INTEGRATED_SRAM_BASE_ADDR, PEGASOS2_SRAM_BASE >> 16); MV_READ(MV64340_BASE_ADDR_ENABLE, ALong); ALong &= ~(1 << 19); MV_WRITE(MV64340_BASE_ADDR_ENABLE, ALong); ALong = 0x02; ALong |= PEGASOS2_SRAM_BASE & 0xffff0000; MV_WRITE(MV643XX_ETH_BAR_4, ALong); MV_WRITE(MV643XX_ETH_SIZE_REG_4, (PEGASOS2_SRAM_SIZE-1) & 0xffff0000); MV_READ(MV643XX_ETH_BASE_ADDR_ENABLE_REG, ALong); ALong &= ~(1 << 4); MV_WRITE(MV643XX_ETH_BASE_ADDR_ENABLE_REG, ALong); #ifdef BE_VERBOSE printk("Pegasos II/Marvell MV64361: register unmapped\n"); printk("Pegasos II/Marvell MV64361: SRAM at %p, size=%x\n", (void*) PEGASOS2_SRAM_BASE, PEGASOS2_SRAM_SIZE); #endif iounmap(mv643xx_reg_base); mv643xx_reg_base = NULL; return 1; } /***********/ /***********/ static int __init mv643xx_eth_add_pds(void) { int ret = 0; static struct pci_device_id pci_marvell_mv64360[] = { { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_MV64360) }, { } }; #ifdef BE_VERBOSE printk("Pegasos II/Marvell MV64361: init\n"); #endif if (pci_dev_present(pci_marvell_mv64360)) { ret = 
platform_add_devices(mv643xx_eth_pd_devs, ARRAY_SIZE(mv643xx_eth_pd_devs)); if ( Enable_SRAM() < 0) { eth_port1_pd.tx_sram_addr = 0; eth_port1_pd.tx_sram_size = 0; eth_port1_pd.rx_sram_addr = 0; eth_port1_pd.rx_sram_size = 0; #ifdef BE_VERBOSE printk("Pegasos II/Marvell MV64361: Can't enable the " "SRAM\n"); #endif } } #ifdef BE_VERBOSE printk("Pegasos II/Marvell MV64361: init is over\n"); #endif return ret; } device_initcall(mv643xx_eth_add_pds);
gpl-2.0
Silviumik/Silviu_Kernel_I9195_LTE_KitKat
Documentation/blackfin/gptimers-example.c
12966
1923
/* * Simple gptimers example * http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:drivers:gptimers * * Copyright 2007-2009 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/interrupt.h> #include <linux/module.h> #include <asm/gptimers.h> #include <asm/portmux.h> /* ... random driver includes ... */ #define DRIVER_NAME "gptimer_example" struct gptimer_data { uint32_t period, width; }; static struct gptimer_data data; /* ... random driver state ... */ static irqreturn_t gptimer_example_irq(int irq, void *dev_id) { struct gptimer_data *data = dev_id; /* make sure it was our timer which caused the interrupt */ if (!get_gptimer_intr(TIMER5_id)) return IRQ_NONE; /* read the width/period values that were captured for the waveform */ data->width = get_gptimer_pwidth(TIMER5_id); data->period = get_gptimer_period(TIMER5_id); /* acknowledge the interrupt */ clear_gptimer_intr(TIMER5_id); /* tell the upper layers we took care of things */ return IRQ_HANDLED; } /* ... random driver code ... */ static int __init gptimer_example_init(void) { int ret; /* grab the peripheral pins */ ret = peripheral_request(P_TMR5, DRIVER_NAME); if (ret) { printk(KERN_NOTICE DRIVER_NAME ": peripheral request failed\n"); return ret; } /* grab the IRQ for the timer */ ret = request_irq(IRQ_TIMER5, gptimer_example_irq, IRQF_SHARED, DRIVER_NAME, &data); if (ret) { printk(KERN_NOTICE DRIVER_NAME ": IRQ request failed\n"); peripheral_free(P_TMR5); return ret; } /* setup the timer and enable it */ set_gptimer_config(TIMER5_id, WDTH_CAP | PULSE_HI | PERIOD_CNT | IRQ_ENA); enable_gptimers(TIMER5bit); return 0; } module_init(gptimer_example_init); static void __exit gptimer_example_exit(void) { disable_gptimers(TIMER5bit); free_irq(IRQ_TIMER5, &data); peripheral_free(P_TMR5); } module_exit(gptimer_example_exit); MODULE_LICENSE("BSD");
gpl-2.0
tb-303/GFRG110
drivers/message/i2o/bus-osm.c
13478
4125
/* * Bus Adapter OSM * * Copyright (C) 2005 Markus Lidel <Markus.Lidel@shadowconnect.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * Fixes/additions: * Markus Lidel <Markus.Lidel@shadowconnect.com> * initial version. */ #include <linux/module.h> #include <linux/i2o.h> #define OSM_NAME "bus-osm" #define OSM_VERSION "1.317" #define OSM_DESCRIPTION "I2O Bus Adapter OSM" static struct i2o_driver i2o_bus_driver; /* Bus OSM class handling definition */ static struct i2o_class_id i2o_bus_class_id[] = { {I2O_CLASS_BUS_ADAPTER}, {I2O_CLASS_END} }; /** * i2o_bus_scan - Scan the bus for new devices * @dev: I2O device of the bus, which should be scanned * * Scans the bus dev for new / removed devices. After the scan a new LCT * will be fetched automatically. * * Returns 0 on success or negative error code on failure. */ static int i2o_bus_scan(struct i2o_device *dev) { struct i2o_message *msg; msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); if (IS_ERR(msg)) return -ETIMEDOUT; msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); msg->u.head[1] = cpu_to_le32(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data. tid); return i2o_msg_post_wait(dev->iop, msg, 60); }; /** * i2o_bus_store_scan - Scan the I2O Bus Adapter * @d: device which should be scanned * @attr: device_attribute * @buf: output buffer * @count: buffer size * * Returns count. 
*/ static ssize_t i2o_bus_store_scan(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct i2o_device *i2o_dev = to_i2o_device(d); int rc; if ((rc = i2o_bus_scan(i2o_dev))) osm_warn("bus scan failed %d\n", rc); return count; } /* Bus Adapter OSM device attributes */ static DEVICE_ATTR(scan, S_IWUSR, NULL, i2o_bus_store_scan); /** * i2o_bus_probe - verify if dev is a I2O Bus Adapter device and install it * @dev: device to verify if it is a I2O Bus Adapter device * * Because we want all Bus Adapters always return 0. * Except when we fail. Then we are sad. * * Returns 0, except when we fail to excel. */ static int i2o_bus_probe(struct device *dev) { struct i2o_device *i2o_dev = to_i2o_device(get_device(dev)); int rc; rc = device_create_file(dev, &dev_attr_scan); if (rc) goto err_out; osm_info("device added (TID: %03x)\n", i2o_dev->lct_data.tid); return 0; err_out: put_device(dev); return rc; }; /** * i2o_bus_remove - remove the I2O Bus Adapter device from the system again * @dev: I2O Bus Adapter device which should be removed * * Always returns 0. */ static int i2o_bus_remove(struct device *dev) { struct i2o_device *i2o_dev = to_i2o_device(dev); device_remove_file(dev, &dev_attr_scan); put_device(dev); osm_info("device removed (TID: %03x)\n", i2o_dev->lct_data.tid); return 0; }; /* Bus Adapter OSM driver struct */ static struct i2o_driver i2o_bus_driver = { .name = OSM_NAME, .classes = i2o_bus_class_id, .driver = { .probe = i2o_bus_probe, .remove = i2o_bus_remove, }, }; /** * i2o_bus_init - Bus Adapter OSM initialization function * * Only register the Bus Adapter OSM in the I2O core. * * Returns 0 on success or negative error code on failure. 
*/ static int __init i2o_bus_init(void) { int rc; printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); /* Register Bus Adapter OSM into I2O core */ rc = i2o_driver_register(&i2o_bus_driver); if (rc) { osm_err("Could not register Bus Adapter OSM\n"); return rc; } return 0; }; /** * i2o_bus_exit - Bus Adapter OSM exit function * * Unregisters Bus Adapter OSM from I2O core. */ static void __exit i2o_bus_exit(void) { i2o_driver_unregister(&i2o_bus_driver); }; MODULE_AUTHOR("Markus Lidel <Markus.Lidel@shadowconnect.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(OSM_DESCRIPTION); MODULE_VERSION(OSM_VERSION); module_init(i2o_bus_init); module_exit(i2o_bus_exit);
gpl-2.0
phuthinh100/Kernel-JB--sky-A830L
drivers/char/uv_mmtimer.c
13734
5647
/* * Timer device implementation for SGI UV platform. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 2009 Silicon Graphics, Inc. All rights reserved. * */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/ioctl.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/mmtimer.h> #include <linux/miscdevice.h> #include <linux/posix-timers.h> #include <linux/interrupt.h> #include <linux/time.h> #include <linux/math64.h> #include <asm/genapic.h> #include <asm/uv/uv_hub.h> #include <asm/uv/bios.h> #include <asm/uv/uv.h> MODULE_AUTHOR("Dimitri Sivanich <sivanich@sgi.com>"); MODULE_DESCRIPTION("SGI UV Memory Mapped RTC Timer"); MODULE_LICENSE("GPL"); /* name of the device, usually in /dev */ #define UV_MMTIMER_NAME "mmtimer" #define UV_MMTIMER_DESC "SGI UV Memory Mapped RTC Timer" #define UV_MMTIMER_VERSION "1.0" static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd, unsigned long arg); static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma); /* * Period in femtoseconds (10^-15 s) */ static unsigned long uv_mmtimer_femtoperiod; static const struct file_operations uv_mmtimer_fops = { .owner = THIS_MODULE, .mmap = uv_mmtimer_mmap, .unlocked_ioctl = uv_mmtimer_ioctl, .llseek = noop_llseek, }; /** * uv_mmtimer_ioctl - ioctl interface for /dev/uv_mmtimer * @file: file structure for the device * @cmd: command to execute * @arg: optional argument to command * * Executes the command specified by @cmd. Returns 0 for success, < 0 for * failure. * * Valid commands: * * %MMTIMER_GETOFFSET - Should return the offset (relative to the start * of the page where the registers are mapped) for the counter in question. 
* * %MMTIMER_GETRES - Returns the resolution of the clock in femto (10^-15) * seconds * * %MMTIMER_GETFREQ - Copies the frequency of the clock in Hz to the address * specified by @arg * * %MMTIMER_GETBITS - Returns the number of bits in the clock's counter * * %MMTIMER_MMAPAVAIL - Returns 1 if registers can be mmap'd into userspace * * %MMTIMER_GETCOUNTER - Gets the current value in the counter and places it * in the address specified by @arg. */ static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret = 0; switch (cmd) { case MMTIMER_GETOFFSET: /* offset of the counter */ /* * Starting with HUB rev 2.0, the UV RTC register is * replicated across all cachelines of it's own page. * This allows faster simultaneous reads from a given socket. * * The offset returned is in 64 bit units. */ if (uv_get_min_hub_revision_id() == 1) ret = 0; else ret = ((uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE) / 8; break; case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */ if (copy_to_user((unsigned long __user *)arg, &uv_mmtimer_femtoperiod, sizeof(unsigned long))) ret = -EFAULT; break; case MMTIMER_GETFREQ: /* frequency in Hz */ if (copy_to_user((unsigned long __user *)arg, &sn_rtc_cycles_per_second, sizeof(unsigned long))) ret = -EFAULT; break; case MMTIMER_GETBITS: /* number of bits in the clock */ ret = hweight64(UVH_RTC_REAL_TIME_CLOCK_MASK); break; case MMTIMER_MMAPAVAIL: ret = 1; break; case MMTIMER_GETCOUNTER: if (copy_to_user((unsigned long __user *)arg, (unsigned long *)uv_local_mmr_address(UVH_RTC), sizeof(unsigned long))) ret = -EFAULT; break; default: ret = -ENOTTY; break; } return ret; } /** * uv_mmtimer_mmap - maps the clock's registers into userspace * @file: file structure for the device * @vma: VMA to map the registers into * * Calls remap_pfn_range() to map the clock's registers into * the calling process' address space. 
*/ static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma) { unsigned long uv_mmtimer_addr; if (vma->vm_end - vma->vm_start != PAGE_SIZE) return -EINVAL; if (vma->vm_flags & VM_WRITE) return -EPERM; if (PAGE_SIZE > (1 << 16)) return -ENOSYS; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); uv_mmtimer_addr = UV_LOCAL_MMR_BASE | UVH_RTC; uv_mmtimer_addr &= ~(PAGE_SIZE - 1); uv_mmtimer_addr &= 0xfffffffffffffffUL; if (remap_pfn_range(vma, vma->vm_start, uv_mmtimer_addr >> PAGE_SHIFT, PAGE_SIZE, vma->vm_page_prot)) { printk(KERN_ERR "remap_pfn_range failed in uv_mmtimer_mmap\n"); return -EAGAIN; } return 0; } static struct miscdevice uv_mmtimer_miscdev = { MISC_DYNAMIC_MINOR, UV_MMTIMER_NAME, &uv_mmtimer_fops }; /** * uv_mmtimer_init - device initialization routine * * Does initial setup for the uv_mmtimer device. */ static int __init uv_mmtimer_init(void) { if (!is_uv_system()) { printk(KERN_ERR "%s: Hardware unsupported\n", UV_MMTIMER_NAME); return -1; } /* * Sanity check the cycles/sec variable */ if (sn_rtc_cycles_per_second < 100000) { printk(KERN_ERR "%s: unable to determine clock frequency\n", UV_MMTIMER_NAME); return -1; } uv_mmtimer_femtoperiod = ((unsigned long)1E15 + sn_rtc_cycles_per_second / 2) / sn_rtc_cycles_per_second; if (misc_register(&uv_mmtimer_miscdev)) { printk(KERN_ERR "%s: failed to register device\n", UV_MMTIMER_NAME); return -1; } printk(KERN_INFO "%s: v%s, %ld MHz\n", UV_MMTIMER_DESC, UV_MMTIMER_VERSION, sn_rtc_cycles_per_second/(unsigned long)1E6); return 0; } module_init(uv_mmtimer_init);
gpl-2.0
xingrz/android_kernel_nubia_msm8996
sound/soc/codecs/wsa881x-tables-analog.c
167
5157
/* * Copyright (c) 2015, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/regmap.h> #include <linux/device.h> #include "wsa881x-registers-analog.h" const u8 wsa881x_ana_reg_readable[WSA881X_CACHE_SIZE] = { [WSA881X_CHIP_ID0] = 1, [WSA881X_CHIP_ID1] = 1, [WSA881X_CHIP_ID2] = 1, [WSA881X_CHIP_ID3] = 1, [WSA881X_BUS_ID] = 1, [WSA881X_CDC_RST_CTL] = 1, [WSA881X_CDC_TOP_CLK_CTL] = 1, [WSA881X_CDC_ANA_CLK_CTL] = 1, [WSA881X_CDC_DIG_CLK_CTL] = 1, [WSA881X_CLOCK_CONFIG] = 1, [WSA881X_ANA_CTL] = 1, [WSA881X_SWR_RESET_EN] = 1, [WSA881X_RESET_CTL] = 1, [WSA881X_TADC_VALUE_CTL] = 1, [WSA881X_TEMP_DETECT_CTL] = 1, [WSA881X_TEMP_MSB] = 1, [WSA881X_TEMP_LSB] = 1, [WSA881X_TEMP_CONFIG0] = 1, [WSA881X_TEMP_CONFIG1] = 1, [WSA881X_CDC_CLIP_CTL] = 1, [WSA881X_SDM_PDM9_LSB] = 1, [WSA881X_SDM_PDM9_MSB] = 1, [WSA881X_CDC_RX_CTL] = 1, [WSA881X_DEM_BYPASS_DATA0] = 1, [WSA881X_DEM_BYPASS_DATA1] = 1, [WSA881X_DEM_BYPASS_DATA2] = 1, [WSA881X_DEM_BYPASS_DATA3] = 1, [WSA881X_OTP_CTRL0] = 1, [WSA881X_OTP_CTRL1] = 1, [WSA881X_HDRIVE_CTL_GROUP1] = 1, [WSA881X_INTR_MODE] = 1, [WSA881X_INTR_MASK] = 1, [WSA881X_INTR_STATUS] = 1, [WSA881X_INTR_CLEAR] = 1, [WSA881X_INTR_LEVEL] = 1, [WSA881X_INTR_SET] = 1, [WSA881X_INTR_TEST] = 1, [WSA881X_PDM_TEST_MODE] = 1, [WSA881X_ATE_TEST_MODE] = 1, [WSA881X_PIN_CTL_MODE] = 1, [WSA881X_PIN_CTL_OE] = 1, [WSA881X_PIN_WDATA_IOPAD] = 1, [WSA881X_PIN_STATUS] = 1, [WSA881X_DIG_DEBUG_MODE] = 1, [WSA881X_DIG_DEBUG_SEL] = 1, [WSA881X_DIG_DEBUG_EN] = 1, [WSA881X_SWR_HM_TEST1] = 1, [WSA881X_SWR_HM_TEST2] = 1, 
[WSA881X_TEMP_DETECT_DBG_CTL] = 1, [WSA881X_TEMP_DEBUG_MSB] = 1, [WSA881X_TEMP_DEBUG_LSB] = 1, [WSA881X_SAMPLE_EDGE_SEL] = 1, [WSA881X_IOPAD_CTL] = 1, [WSA881X_SPARE_0] = 1, [WSA881X_SPARE_1] = 1, [WSA881X_SPARE_2] = 1, [WSA881X_OTP_REG_0] = 1, [WSA881X_OTP_REG_1] = 1, [WSA881X_OTP_REG_2] = 1, [WSA881X_OTP_REG_3] = 1, [WSA881X_OTP_REG_4] = 1, [WSA881X_OTP_REG_5] = 1, [WSA881X_OTP_REG_6] = 1, [WSA881X_OTP_REG_7] = 1, [WSA881X_OTP_REG_8] = 1, [WSA881X_OTP_REG_9] = 1, [WSA881X_OTP_REG_10] = 1, [WSA881X_OTP_REG_11] = 1, [WSA881X_OTP_REG_12] = 1, [WSA881X_OTP_REG_13] = 1, [WSA881X_OTP_REG_14] = 1, [WSA881X_OTP_REG_15] = 1, [WSA881X_OTP_REG_16] = 1, [WSA881X_OTP_REG_17] = 1, [WSA881X_OTP_REG_18] = 1, [WSA881X_OTP_REG_19] = 1, [WSA881X_OTP_REG_20] = 1, [WSA881X_OTP_REG_21] = 1, [WSA881X_OTP_REG_22] = 1, [WSA881X_OTP_REG_23] = 1, [WSA881X_OTP_REG_24] = 1, [WSA881X_OTP_REG_25] = 1, [WSA881X_OTP_REG_26] = 1, [WSA881X_OTP_REG_27] = 1, [WSA881X_OTP_REG_28] = 1, [WSA881X_OTP_REG_29] = 1, [WSA881X_OTP_REG_30] = 1, [WSA881X_OTP_REG_31] = 1, [WSA881X_OTP_REG_63] = 1, /* Analog Registers */ [WSA881X_BIAS_REF_CTRL] = 1, [WSA881X_BIAS_TEST] = 1, [WSA881X_BIAS_BIAS] = 1, [WSA881X_TEMP_OP] = 1, [WSA881X_TEMP_IREF_CTRL] = 1, [WSA881X_TEMP_ISENS_CTRL] = 1, [WSA881X_TEMP_CLK_CTRL] = 1, [WSA881X_TEMP_TEST] = 1, [WSA881X_TEMP_BIAS] = 1, [WSA881X_TEMP_ADC_CTRL] = 1, [WSA881X_TEMP_DOUT_MSB] = 1, [WSA881X_TEMP_DOUT_LSB] = 1, [WSA881X_ADC_EN_MODU_V] = 1, [WSA881X_ADC_EN_MODU_I] = 1, [WSA881X_ADC_EN_DET_TEST_V] = 1, [WSA881X_ADC_EN_DET_TEST_I] = 1, [WSA881X_ADC_SEL_IBIAS] = 1, [WSA881X_ADC_EN_SEL_IBIAS] = 1, [WSA881X_SPKR_DRV_EN] = 1, [WSA881X_SPKR_DRV_GAIN] = 1, [WSA881X_SPKR_DAC_CTL] = 1, [WSA881X_SPKR_DRV_DBG] = 1, [WSA881X_SPKR_PWRSTG_DBG] = 1, [WSA881X_SPKR_OCP_CTL] = 1, [WSA881X_SPKR_CLIP_CTL] = 1, [WSA881X_SPKR_BBM_CTL] = 1, [WSA881X_SPKR_MISC_CTL1] = 1, [WSA881X_SPKR_MISC_CTL2] = 1, [WSA881X_SPKR_BIAS_INT] = 1, [WSA881X_SPKR_PA_INT] = 1, [WSA881X_SPKR_BIAS_CAL] = 1, 
[WSA881X_SPKR_BIAS_PSRR] = 1, [WSA881X_SPKR_STATUS1] = 1, [WSA881X_SPKR_STATUS2] = 1, [WSA881X_BOOST_EN_CTL] = 1, [WSA881X_BOOST_CURRENT_LIMIT] = 1, [WSA881X_BOOST_PS_CTL] = 1, [WSA881X_BOOST_PRESET_OUT1] = 1, [WSA881X_BOOST_PRESET_OUT2] = 1, [WSA881X_BOOST_FORCE_OUT] = 1, [WSA881X_BOOST_LDO_PROG] = 1, [WSA881X_BOOST_SLOPE_COMP_ISENSE_FB] = 1, [WSA881X_BOOST_RON_CTL] = 1, [WSA881X_BOOST_LOOP_STABILITY] = 1, [WSA881X_BOOST_ZX_CTL] = 1, [WSA881X_BOOST_START_CTL] = 1, [WSA881X_BOOST_MISC1_CTL] = 1, [WSA881X_BOOST_MISC2_CTL] = 1, [WSA881X_BOOST_MISC3_CTL] = 1, [WSA881X_BOOST_ATEST_CTL] = 1, [WSA881X_SPKR_PROT_FE_GAIN] = 1, [WSA881X_SPKR_PROT_FE_CM_LDO_SET] = 1, [WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET1] = 1, [WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET2] = 1, [WSA881X_SPKR_PROT_ATEST1] = 1, [WSA881X_SPKR_PROT_ATEST2] = 1, [WSA881X_SPKR_PROT_FE_VSENSE_VCM] = 1, [WSA881X_SPKR_PROT_FE_VSENSE_BIAS_SET1] = 1, [WSA881X_BONGO_RESRV_REG1] = 1, [WSA881X_BONGO_RESRV_REG2] = 1, [WSA881X_SPKR_PROT_SAR] = 1, [WSA881X_SPKR_STATUS3] = 1, };
gpl-2.0
WildfireDEV/android_kernel_samsung_s6
drivers/devfreq/exynos7_bus_isp.c
167
8940
/* * Copyright (c) 2013 Samsung Electronics Co., Ltd. * http://www.samsung.com * Taikyung Yu(taikyung.yu@samsung.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/slab.h> #include <linux/pm_qos.h> #include <linux/devfreq.h> #include <linux/reboot.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/exynos-ss.h> #include <mach/tmu.h> #include <mach/asv-exynos.h> #include "devfreq_exynos.h" #include "governor.h" #define DEVFREQ_INITIAL_FREQ (500000) #define DEVFREQ_POLLING_PERIOD (0) /* extern */ extern struct devfreq_opp_table devfreq_isp_opp_list[]; static struct devfreq_simple_ondemand_data exynos7_devfreq_isp_governor_data = { .pm_qos_class = PM_QOS_CAM_THROUGHPUT, .upthreshold = 95, .cal_qos_max = 540000, }; static struct exynos_devfreq_platdata exynos7420_qos_isp = { .default_qos = 500000, }; static struct pm_qos_request exynos7_isp_qos; static struct pm_qos_request boot_isp_qos; struct pm_qos_request min_isp_thermal_qos; static int exynos7_devfreq_isp_target(struct device *dev, unsigned long *target_freq, u32 flags) { int ret = 0; struct platform_device *pdev = container_of(dev, struct platform_device, dev); struct devfreq_data_isp *data = platform_get_drvdata(pdev); struct devfreq *devfreq_isp = data->devfreq; struct opp *target_opp; int target_idx, old_idx; unsigned long target_volt; unsigned long old_freq; mutex_lock(&data->lock); rcu_read_lock(); target_opp = devfreq_recommended_opp(dev, target_freq, flags); if (IS_ERR(target_opp)) { rcu_read_unlock(); mutex_unlock(&data->lock); dev_err(dev, "DEVFREQ(ISP) : Invalid OPP to find\n"); ret = PTR_ERR(target_opp); goto out; } *target_freq = opp_get_freq(target_opp); target_volt = 
opp_get_voltage(target_opp); rcu_read_unlock(); target_idx = devfreq_get_opp_idx(devfreq_isp_opp_list, data->max_state, *target_freq); old_idx = devfreq_get_opp_idx(devfreq_isp_opp_list, data->max_state, devfreq_isp->previous_freq); old_freq = devfreq_isp->previous_freq; if (target_idx < 0 || old_idx < 0) { ret = -EINVAL; goto out; } if (old_freq == *target_freq) goto out; #ifdef CONFIG_EXYNOS_THERMAL target_volt = get_limit_voltage(target_volt, data->volt_offset, data->volt_of_avail_max_freq); #endif pr_debug("ISP LV_%d(%lu) ================> LV_%d(%lu, volt: %lu)\n", old_idx, old_freq, target_idx, *target_freq, target_volt); exynos_ss_freq(ESS_FLAG_ISP, old_freq, ESS_FLAG_IN); if (old_freq < *target_freq) { if (data->isp_set_volt) data->isp_set_volt(data, target_volt, REGULATOR_MAX_MICROVOLT); if (data->isp_set_freq) data->isp_set_freq(data, target_idx, old_idx); } else { if (data->isp_set_freq) data->isp_set_freq(data, target_idx, old_idx); if (data->isp_set_volt) data->isp_set_volt(data, target_volt, REGULATOR_MAX_MICROVOLT); } exynos_ss_freq(ESS_FLAG_ISP, *target_freq, ESS_FLAG_OUT); data->cur_freq = *target_freq; out: mutex_unlock(&data->lock); return ret; } static struct devfreq_dev_profile exynos7_devfreq_isp_profile = { .initial_freq = DEVFREQ_INITIAL_FREQ, .polling_ms = DEVFREQ_POLLING_PERIOD, .target = exynos7_devfreq_isp_target, .get_dev_status = NULL, }; static int exynos7_init_isp_table(struct device *dev, struct devfreq_data_isp *data) { unsigned int i; unsigned int ret; unsigned int freq; unsigned int volt; for (i = 0; i < data->max_state; ++i) { freq = devfreq_isp_opp_list[i].freq; volt = get_match_volt(ID_ISP, freq); if (!volt) volt = devfreq_isp_opp_list[i].volt; devfreq_isp_opp_list[i].volt = volt; exynos7_devfreq_isp_profile.freq_table[i] = freq; ret = opp_add(dev, freq, volt); if (ret) { pr_err("DEVFREQ(ISP) : Failed to add opp entries %uKhz, %uuV\n", freq, volt); return ret; } else { pr_info("DEVFREQ(ISP) : %7uKhz, %7uuV\n", freq, volt); } } 
data->volt_of_avail_max_freq = get_volt_of_avail_max_freq(dev); pr_info("DEVFREQ(ISP) : voltage of available max freq : %7uuV\n", data->volt_of_avail_max_freq); return 0; } static int exynos7_devfreq_isp_reboot_notifier(struct notifier_block *nb, unsigned long val, void *v) { if (pm_qos_request_active(&boot_isp_qos)) pm_qos_update_request(&boot_isp_qos, exynos7_devfreq_isp_profile.initial_freq); return NOTIFY_DONE; } static struct notifier_block exynos7_isp_reboot_notifier = { .notifier_call = exynos7_devfreq_isp_reboot_notifier, }; static int exynos7_devfreq_isp_probe(struct platform_device *pdev) { int ret = 0; struct devfreq_data_isp *data; struct exynos_devfreq_platdata *plat_data; data = kzalloc(sizeof(struct devfreq_data_isp), GFP_KERNEL); if (data == NULL) { pr_err("DEVFREQ(ISP) : Failed to allocate private data\n"); ret = -ENOMEM; goto err_data; } ret = exynos7420_devfreq_isp_init(data); if (ret) { pr_err("DEVFREQ(ISP) : Failed to intialize data\n"); goto err_freqtable; } exynos7_devfreq_isp_profile.max_state = data->max_state; exynos7_devfreq_isp_profile.freq_table = kzalloc(sizeof(int) * data->max_state, GFP_KERNEL); if (exynos7_devfreq_isp_profile.freq_table == NULL) { pr_err("DEVFREQ(ISP) : Failed to allocate freq table\n"); ret = -ENOMEM; goto err_freqtable; } ret = exynos7_init_isp_table(&pdev->dev, data); if (ret) goto err_inittable; platform_set_drvdata(pdev, data); mutex_init(&data->lock); data->initial_freq = exynos7_devfreq_isp_profile.initial_freq; data->cur_freq = exynos7_devfreq_isp_profile.initial_freq; data->volt_offset = 0; data->dev = &pdev->dev; data->vdd_disp_cam0 = regulator_get(NULL, "vdd_disp_cam0"); if (data->vdd_disp_cam0) data->old_volt = regulator_get_voltage(data->vdd_disp_cam0); data->devfreq = devfreq_add_device(data->dev, &exynos7_devfreq_isp_profile, "simple_ondemand", &exynos7_devfreq_isp_governor_data); plat_data = data->dev->platform_data; data->devfreq->min_freq = plat_data->default_qos; data->devfreq->max_freq = 
exynos7_devfreq_isp_governor_data.cal_qos_max; register_reboot_notifier(&exynos7_isp_reboot_notifier); #ifdef CONFIG_EXYNOS_THERMAL exynos_tmu_add_notifier(&data->tmu_notifier); #endif data->use_dvfs = true; return ret; err_inittable: devfreq_remove_device(data->devfreq); kfree(exynos7_devfreq_isp_profile.freq_table); err_freqtable: kfree(data); err_data: return ret; } static int exynos7_devfreq_isp_remove(struct platform_device *pdev) { struct devfreq_data_isp *data = platform_get_drvdata(pdev); devfreq_remove_device(data->devfreq); pm_qos_remove_request(&min_isp_thermal_qos); pm_qos_remove_request(&exynos7_isp_qos); pm_qos_remove_request(&boot_isp_qos); regulator_put(data->vdd_disp_cam0); kfree(data); platform_set_drvdata(pdev, NULL); return 0; } static int exynos7_devfreq_isp_suspend(struct device *dev) { if (pm_qos_request_active(&exynos7_isp_qos)) pm_qos_update_request(&exynos7_isp_qos, exynos7_devfreq_isp_profile.initial_freq); return 0; } static int exynos7_devfreq_isp_resume(struct device *dev) { struct exynos_devfreq_platdata *pdata = dev->platform_data; if (pm_qos_request_active(&exynos7_isp_qos)) pm_qos_update_request(&exynos7_isp_qos, pdata->default_qos); return 0; } static struct dev_pm_ops exynos7_devfreq_isp_pm = { .suspend = exynos7_devfreq_isp_suspend, .resume = exynos7_devfreq_isp_resume, }; static struct platform_driver exynos7_devfreq_isp_driver = { .probe = exynos7_devfreq_isp_probe, .remove = exynos7_devfreq_isp_remove, .driver = { .name = "exynos7-devfreq-isp", .owner = THIS_MODULE, .pm = &exynos7_devfreq_isp_pm, }, }; static struct platform_device exynos7_devfreq_isp_device = { .name = "exynos7-devfreq-isp", .id = -1, }; static int exynos7_devfreq_isp_qos_init(void) { pm_qos_add_request(&exynos7_isp_qos, PM_QOS_CAM_THROUGHPUT, exynos7420_qos_isp.default_qos); pm_qos_add_request(&min_isp_thermal_qos, PM_QOS_CAM_THROUGHPUT, exynos7420_qos_isp.default_qos); pm_qos_add_request(&boot_isp_qos, PM_QOS_CAM_THROUGHPUT, 
exynos7420_qos_isp.default_qos); return 0; } device_initcall(exynos7_devfreq_isp_qos_init); static int __init exynos7_devfreq_isp_init(void) { int ret = 0; exynos7_devfreq_isp_device.dev.platform_data = &exynos7420_qos_isp; ret = platform_device_register(&exynos7_devfreq_isp_device); if (ret) return ret; return platform_driver_register(&exynos7_devfreq_isp_driver); } late_initcall(exynos7_devfreq_isp_init); static void __exit exynos7_devfreq_isp_exit(void) { platform_driver_unregister(&exynos7_devfreq_isp_driver); platform_device_unregister(&exynos7_devfreq_isp_device); } module_exit(exynos7_devfreq_isp_exit);
gpl-2.0
delanoister-Andro-ID/GT-I9300-ICS-3.0.y
arch/arm/mvp/pvtcpkm/check_kconfig.c
423
3051
/* * Linux 2.6.32 and later Kernel module for VMware MVP PVTCP Server * * Copyright (C) 2010-2012 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; see the file COPYING. If not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #line 5 /** * @file * @brief Check for required kernel configuration * * Check to make sure that the kernel options that the MVP hypervisor requires * have been enabled in the kernel that this kernel module is being built * against. 
*/ #include <linux/version.h> /* * Minimum kernel version * - network namespace support is only really functional starting in 2.6.29 * - Android Gingerbread requires 2.6.35 */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) #error "MVP requires a host kernel newer than 2.6.35" #endif /* module loading ability */ #ifndef CONFIG_MODULES #error "MVP requires kernel loadable module support be enabled (CONFIG_MODULES)" #endif #ifndef CONFIG_MODULE_UNLOAD #error "MVP requires kernel module unload support be enabled (CONFIG_MODULE_UNLOAD)" #endif /* sysfs */ #ifndef CONFIG_SYSFS #error "MVP requires sysfs support (CONFIG_SYSFS)" #endif /* network traffic isolation */ #ifndef CONFIG_NAMESPACES #error "MVP networking support requires namespace support (CONFIG_NAMESPACES)" #endif #ifndef CONFIG_NET_NS #error "MVP networking support requires Network Namespace support to be enabled (CONFIG_NET_NS)" #endif /* TCP/IP networking */ #ifndef CONFIG_INET #error "MVP networking requires IPv4 support (CONFIG_INET)" #endif #ifndef CONFIG_IPV6 #error "MVP networking requires IPv6 support (CONFIG_IPV6)" #endif /* VPN support */ #if !defined(CONFIG_TUN) && !defined(CONFIG_TUN_MODULE) #error "MVP VPN support requires TUN device support (CONFIG_TUN)" #endif #if !defined(CONFIG_NETFILTER) && !defined(PVTCP_DISABLE_NETFILTER) #error "MVP networking support requires netfilter support (CONFIG_NETFILTER)" #endif /* Force /proc/config.gz support for eng/userdebug builds */ #ifdef MVP_DEBUG #if !defined(CONFIG_IKCONFIG) || !defined(CONFIG_IKCONFIG_PROC) #error "MVP kernel /proc/config.gz support required for debuggability (CONFIG_IKCONFIG_PROC)" #endif #endif /* Sanity check we're only dealing with the memory hotplug + migrate and/or * compaction combo */ #ifdef CONFIG_MIGRATION #if defined(CONFIG_NUMA) || defined(CONFIG_CPUSETS) || defined(CONFIG_MEMORY_FAILURE) #error "MVP not tested with migration features other than CONFIG_MEMORY_HOTPLUG and CONFIG_COMPACTION" #endif #endif
gpl-2.0
ashleyjune/SM-G360T1_kernel
net/core/ethtool.c
679
40234
/* * net/core/ethtool.c - Ethtool ioctl handler * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx> * * This file is where we call all the ethtool_ops commands to get * the information ethtool needs. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/module.h> #include <linux/types.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/net_tstamp.h> #include <linux/phy.h> #include <linux/bitops.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/rtnetlink.h> #include <linux/sched.h> /* * Some useful ethtool_ops methods that're device independent. * If we find that all drivers want to do the same thing here, * we can turn these into dev_() function calls. */ u32 ethtool_op_get_link(struct net_device *dev) { return netif_carrier_ok(dev) ? 
1 : 0; } EXPORT_SYMBOL(ethtool_op_get_link); int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE; info->phc_index = -1; return 0; } EXPORT_SYMBOL(ethtool_op_get_ts_info); /* Handlers for each ethtool command */ #define ETHTOOL_DEV_FEATURE_WORDS ((NETDEV_FEATURE_COUNT + 31) / 32) static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = { [NETIF_F_SG_BIT] = "tx-scatter-gather", [NETIF_F_IP_CSUM_BIT] = "tx-checksum-ipv4", [NETIF_F_HW_CSUM_BIT] = "tx-checksum-ip-generic", [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6", [NETIF_F_HIGHDMA_BIT] = "highdma", [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist", [NETIF_F_HW_VLAN_CTAG_TX_BIT] = "tx-vlan-hw-insert", [NETIF_F_HW_VLAN_CTAG_RX_BIT] = "rx-vlan-hw-parse", [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-filter", [NETIF_F_HW_VLAN_STAG_TX_BIT] = "tx-vlan-stag-hw-insert", [NETIF_F_HW_VLAN_STAG_RX_BIT] = "rx-vlan-stag-hw-parse", [NETIF_F_HW_VLAN_STAG_FILTER_BIT] = "rx-vlan-stag-filter", [NETIF_F_VLAN_CHALLENGED_BIT] = "vlan-challenged", [NETIF_F_GSO_BIT] = "tx-generic-segmentation", [NETIF_F_LLTX_BIT] = "tx-lockless", [NETIF_F_NETNS_LOCAL_BIT] = "netns-local", [NETIF_F_GRO_BIT] = "rx-gro", [NETIF_F_LRO_BIT] = "rx-lro", [NETIF_F_TSO_BIT] = "tx-tcp-segmentation", [NETIF_F_UFO_BIT] = "tx-udp-fragmentation", [NETIF_F_GSO_ROBUST_BIT] = "tx-gso-robust", [NETIF_F_TSO_ECN_BIT] = "tx-tcp-ecn-segmentation", [NETIF_F_TSO6_BIT] = "tx-tcp6-segmentation", [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation", [NETIF_F_GSO_GRE_BIT] = "tx-gre-segmentation", [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation", [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", [NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp", [NETIF_F_FCOE_MTU_BIT] = "fcoe-mtu", [NETIF_F_NTUPLE_BIT] = "rx-ntuple-filter", [NETIF_F_RXHASH_BIT] = "rx-hashing", [NETIF_F_RXCSUM_BIT] = "rx-checksum", 
[NETIF_F_NOCACHE_COPY_BIT] = "tx-nocache-copy", [NETIF_F_LOOPBACK_BIT] = "loopback", [NETIF_F_RXFCS_BIT] = "rx-fcs", [NETIF_F_RXALL_BIT] = "rx-all", }; static int ethtool_get_features(struct net_device *dev, void __user *useraddr) { struct ethtool_gfeatures cmd = { .cmd = ETHTOOL_GFEATURES, .size = ETHTOOL_DEV_FEATURE_WORDS, }; struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS]; u32 __user *sizeaddr; u32 copy_size; int i; /* in case feature bits run out again */ BUILD_BUG_ON(ETHTOOL_DEV_FEATURE_WORDS * sizeof(u32) > sizeof(netdev_features_t)); for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) { features[i].available = (u32)(dev->hw_features >> (32 * i)); features[i].requested = (u32)(dev->wanted_features >> (32 * i)); features[i].active = (u32)(dev->features >> (32 * i)); features[i].never_changed = (u32)(NETIF_F_NEVER_CHANGE >> (32 * i)); } sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size); if (get_user(copy_size, sizeaddr)) return -EFAULT; if (copy_size > ETHTOOL_DEV_FEATURE_WORDS) copy_size = ETHTOOL_DEV_FEATURE_WORDS; if (copy_to_user(useraddr, &cmd, sizeof(cmd))) return -EFAULT; useraddr += sizeof(cmd); if (copy_to_user(useraddr, features, copy_size * sizeof(*features))) return -EFAULT; return 0; } static int ethtool_set_features(struct net_device *dev, void __user *useraddr) { struct ethtool_sfeatures cmd; struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS]; netdev_features_t wanted = 0, valid = 0; int i, ret = 0; if (copy_from_user(&cmd, useraddr, sizeof(cmd))) return -EFAULT; useraddr += sizeof(cmd); if (cmd.size != ETHTOOL_DEV_FEATURE_WORDS) return -EINVAL; if (copy_from_user(features, useraddr, sizeof(features))) return -EFAULT; for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) { valid |= (netdev_features_t)features[i].valid << (32 * i); wanted |= (netdev_features_t)features[i].requested << (32 * i); } if (valid & ~NETIF_F_ETHTOOL_BITS) return -EINVAL; if (valid & ~dev->hw_features) { valid &= 
dev->hw_features; ret |= ETHTOOL_F_UNSUPPORTED; } dev->wanted_features &= ~valid; dev->wanted_features |= wanted & valid; __netdev_update_features(dev); if ((dev->wanted_features ^ dev->features) & valid) ret |= ETHTOOL_F_WISH; return ret; } static int __ethtool_get_sset_count(struct net_device *dev, int sset) { const struct ethtool_ops *ops = dev->ethtool_ops; if (sset == ETH_SS_FEATURES) return ARRAY_SIZE(netdev_features_strings); if (ops->get_sset_count && ops->get_strings) return ops->get_sset_count(dev, sset); else return -EOPNOTSUPP; } static void __ethtool_get_strings(struct net_device *dev, u32 stringset, u8 *data) { const struct ethtool_ops *ops = dev->ethtool_ops; if (stringset == ETH_SS_FEATURES) memcpy(data, netdev_features_strings, sizeof(netdev_features_strings)); else /* ops->get_strings is valid because checked earlier */ ops->get_strings(dev, stringset, data); } static netdev_features_t ethtool_get_feature_mask(u32 eth_cmd) { /* feature masks of legacy discrete ethtool ops */ switch (eth_cmd) { case ETHTOOL_GTXCSUM: case ETHTOOL_STXCSUM: return NETIF_F_ALL_CSUM | NETIF_F_SCTP_CSUM; case ETHTOOL_GRXCSUM: case ETHTOOL_SRXCSUM: return NETIF_F_RXCSUM; case ETHTOOL_GSG: case ETHTOOL_SSG: return NETIF_F_SG; case ETHTOOL_GTSO: case ETHTOOL_STSO: return NETIF_F_ALL_TSO; case ETHTOOL_GUFO: case ETHTOOL_SUFO: return NETIF_F_UFO; case ETHTOOL_GGSO: case ETHTOOL_SGSO: return NETIF_F_GSO; case ETHTOOL_GGRO: case ETHTOOL_SGRO: return NETIF_F_GRO; default: BUG(); } } static int ethtool_get_one_feature(struct net_device *dev, char __user *useraddr, u32 ethcmd) { netdev_features_t mask = ethtool_get_feature_mask(ethcmd); struct ethtool_value edata = { .cmd = ethcmd, .data = !!(dev->features & mask), }; if (copy_to_user(useraddr, &edata, sizeof(edata))) return -EFAULT; return 0; } static int ethtool_set_one_feature(struct net_device *dev, void __user *useraddr, u32 ethcmd) { struct ethtool_value edata; netdev_features_t mask; if (copy_from_user(&edata, useraddr, 
sizeof(edata))) return -EFAULT; mask = ethtool_get_feature_mask(ethcmd); mask &= dev->hw_features; if (!mask) return -EOPNOTSUPP; if (edata.data) dev->wanted_features |= mask; else dev->wanted_features &= ~mask; __netdev_update_features(dev); return 0; } #define ETH_ALL_FLAGS (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \ ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH) #define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_CTAG_RX | \ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_NTUPLE | \ NETIF_F_RXHASH) static u32 __ethtool_get_flags(struct net_device *dev) { u32 flags = 0; if (dev->features & NETIF_F_LRO) flags |= ETH_FLAG_LRO; if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) flags |= ETH_FLAG_RXVLAN; if (dev->features & NETIF_F_HW_VLAN_CTAG_TX) flags |= ETH_FLAG_TXVLAN; if (dev->features & NETIF_F_NTUPLE) flags |= ETH_FLAG_NTUPLE; if (dev->features & NETIF_F_RXHASH) flags |= ETH_FLAG_RXHASH; return flags; } static int __ethtool_set_flags(struct net_device *dev, u32 data) { netdev_features_t features = 0, changed; if (data & ~ETH_ALL_FLAGS) return -EINVAL; if (data & ETH_FLAG_LRO) features |= NETIF_F_LRO; if (data & ETH_FLAG_RXVLAN) features |= NETIF_F_HW_VLAN_CTAG_RX; if (data & ETH_FLAG_TXVLAN) features |= NETIF_F_HW_VLAN_CTAG_TX; if (data & ETH_FLAG_NTUPLE) features |= NETIF_F_NTUPLE; if (data & ETH_FLAG_RXHASH) features |= NETIF_F_RXHASH; /* allow changing only bits set in hw_features */ changed = (features ^ dev->features) & ETH_ALL_FEATURES; if (changed & ~dev->hw_features) return (changed & dev->hw_features) ? 
-EINVAL : -EOPNOTSUPP; dev->wanted_features = (dev->wanted_features & ~changed) | (features & changed); __netdev_update_features(dev); return 0; } int __ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { ASSERT_RTNL(); if (!dev->ethtool_ops->get_settings) return -EOPNOTSUPP; memset(cmd, 0, sizeof(struct ethtool_cmd)); cmd->cmd = ETHTOOL_GSET; return dev->ethtool_ops->get_settings(dev, cmd); } EXPORT_SYMBOL(__ethtool_get_settings); static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) { int err; struct ethtool_cmd cmd; err = __ethtool_get_settings(dev, &cmd); if (err < 0) return err; if (copy_to_user(useraddr, &cmd, sizeof(cmd))) return -EFAULT; return 0; } static int ethtool_set_settings(struct net_device *dev, void __user *useraddr) { struct ethtool_cmd cmd; if (!dev->ethtool_ops->set_settings) return -EOPNOTSUPP; if (copy_from_user(&cmd, useraddr, sizeof(cmd))) return -EFAULT; return dev->ethtool_ops->set_settings(dev, &cmd); } static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr) { struct ethtool_drvinfo info; const struct ethtool_ops *ops = dev->ethtool_ops; memset(&info, 0, sizeof(info)); info.cmd = ETHTOOL_GDRVINFO; if (ops->get_drvinfo) { ops->get_drvinfo(dev, &info); } else if (dev->dev.parent && dev->dev.parent->driver) { strlcpy(info.bus_info, dev_name(dev->dev.parent), sizeof(info.bus_info)); strlcpy(info.driver, dev->dev.parent->driver->name, sizeof(info.driver)); } else { return -EOPNOTSUPP; } /* * this method of obtaining string set info is deprecated; * Use ETHTOOL_GSSET_INFO instead. 
*/ if (ops->get_sset_count) { int rc; rc = ops->get_sset_count(dev, ETH_SS_TEST); if (rc >= 0) info.testinfo_len = rc; rc = ops->get_sset_count(dev, ETH_SS_STATS); if (rc >= 0) info.n_stats = rc; rc = ops->get_sset_count(dev, ETH_SS_PRIV_FLAGS); if (rc >= 0) info.n_priv_flags = rc; } if (ops->get_regs_len) info.regdump_len = ops->get_regs_len(dev); if (ops->get_eeprom_len) info.eedump_len = ops->get_eeprom_len(dev); if (copy_to_user(useraddr, &info, sizeof(info))) return -EFAULT; return 0; } static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev, void __user *useraddr) { struct ethtool_sset_info info; u64 sset_mask; int i, idx = 0, n_bits = 0, ret, rc; u32 *info_buf = NULL; if (copy_from_user(&info, useraddr, sizeof(info))) return -EFAULT; /* store copy of mask, because we zero struct later on */ sset_mask = info.sset_mask; if (!sset_mask) return 0; /* calculate size of return buffer */ n_bits = hweight64(sset_mask); memset(&info, 0, sizeof(info)); info.cmd = ETHTOOL_GSSET_INFO; info_buf = kzalloc(n_bits * sizeof(u32), GFP_USER); if (!info_buf) return -ENOMEM; /* * fill return buffer based on input bitmask and successful * get_sset_count return */ for (i = 0; i < 64; i++) { if (!(sset_mask & (1ULL << i))) continue; rc = __ethtool_get_sset_count(dev, i); if (rc >= 0) { info.sset_mask |= (1ULL << i); info_buf[idx++] = rc; } } ret = -EFAULT; if (copy_to_user(useraddr, &info, sizeof(info))) goto out; useraddr += offsetof(struct ethtool_sset_info, data); if (copy_to_user(useraddr, info_buf, idx * sizeof(u32))) goto out; ret = 0; out: kfree(info_buf); return ret; } static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, u32 cmd, void __user *useraddr) { struct ethtool_rxnfc info; size_t info_size = sizeof(info); int rc; if (!dev->ethtool_ops->set_rxnfc) return -EOPNOTSUPP; /* struct ethtool_rxnfc was originally defined for * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data * members. 
User-space might still be using that * definition. */ if (cmd == ETHTOOL_SRXFH) info_size = (offsetof(struct ethtool_rxnfc, data) + sizeof(info.data)); if (copy_from_user(&info, useraddr, info_size)) return -EFAULT; rc = dev->ethtool_ops->set_rxnfc(dev, &info); if (rc) return rc; if (cmd == ETHTOOL_SRXCLSRLINS && copy_to_user(useraddr, &info, info_size)) return -EFAULT; return 0; } static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, u32 cmd, void __user *useraddr) { struct ethtool_rxnfc info; size_t info_size = sizeof(info); const struct ethtool_ops *ops = dev->ethtool_ops; int ret; void *rule_buf = NULL; if (!ops->get_rxnfc) return -EOPNOTSUPP; /* struct ethtool_rxnfc was originally defined for * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data * members. User-space might still be using that * definition. */ if (cmd == ETHTOOL_GRXFH) info_size = (offsetof(struct ethtool_rxnfc, data) + sizeof(info.data)); if (copy_from_user(&info, useraddr, info_size)) return -EFAULT; if (info.cmd == ETHTOOL_GRXCLSRLALL) { if (info.rule_cnt > 0) { if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32)) rule_buf = kzalloc(info.rule_cnt * sizeof(u32), GFP_USER); if (!rule_buf) return -ENOMEM; } } ret = ops->get_rxnfc(dev, &info, rule_buf); if (ret < 0) goto err_out; ret = -EFAULT; if (copy_to_user(useraddr, &info, info_size)) goto err_out; if (rule_buf) { useraddr += offsetof(struct ethtool_rxnfc, rule_locs); if (copy_to_user(useraddr, rule_buf, info.rule_cnt * sizeof(u32))) goto err_out; } ret = 0; err_out: kfree(rule_buf); return ret; } static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev, void __user *useraddr) { u32 user_size, dev_size; u32 *indir; int ret; if (!dev->ethtool_ops->get_rxfh_indir_size || !dev->ethtool_ops->get_rxfh_indir) return -EOPNOTSUPP; dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev); if (dev_size == 0) return -EOPNOTSUPP; if (copy_from_user(&user_size, useraddr + offsetof(struct ethtool_rxfh_indir, size), 
sizeof(user_size))) return -EFAULT; if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh_indir, size), &dev_size, sizeof(dev_size))) return -EFAULT; /* If the user buffer size is 0, this is just a query for the * device table size. Otherwise, if it's smaller than the * device table size it's an error. */ if (user_size < dev_size) return user_size == 0 ? 0 : -EINVAL; indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER); if (!indir) return -ENOMEM; ret = dev->ethtool_ops->get_rxfh_indir(dev, indir); if (ret) goto out; if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh_indir, ring_index[0]), indir, dev_size * sizeof(indir[0]))) ret = -EFAULT; out: kfree(indir); return ret; } static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev, void __user *useraddr) { struct ethtool_rxnfc rx_rings; u32 user_size, dev_size, i; u32 *indir; const struct ethtool_ops *ops = dev->ethtool_ops; int ret; if (!ops->get_rxfh_indir_size || !ops->set_rxfh_indir || !ops->get_rxnfc) return -EOPNOTSUPP; dev_size = ops->get_rxfh_indir_size(dev); if (dev_size == 0) return -EOPNOTSUPP; if (copy_from_user(&user_size, useraddr + offsetof(struct ethtool_rxfh_indir, size), sizeof(user_size))) return -EFAULT; if (user_size != 0 && user_size != dev_size) return -EINVAL; indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER); if (!indir) return -ENOMEM; rx_rings.cmd = ETHTOOL_GRXRINGS; ret = ops->get_rxnfc(dev, &rx_rings, NULL); if (ret) goto out; if (user_size == 0) { for (i = 0; i < dev_size; i++) indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data); } else { if (copy_from_user(indir, useraddr + offsetof(struct ethtool_rxfh_indir, ring_index[0]), dev_size * sizeof(indir[0]))) { ret = -EFAULT; goto out; } /* Validate ring indices */ for (i = 0; i < dev_size; i++) { if (indir[i] >= rx_rings.data) { ret = -EINVAL; goto out; } } } ret = ops->set_rxfh_indir(dev, indir); out: kfree(indir); return ret; } static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) 
{ struct ethtool_regs regs; const struct ethtool_ops *ops = dev->ethtool_ops; void *regbuf; int reglen, ret; if (!ops->get_regs || !ops->get_regs_len) return -EOPNOTSUPP; if (copy_from_user(&regs, useraddr, sizeof(regs))) return -EFAULT; reglen = ops->get_regs_len(dev); if (regs.len > reglen) regs.len = reglen; regbuf = vzalloc(reglen); if (reglen && !regbuf) return -ENOMEM; ops->get_regs(dev, &regs, regbuf); ret = -EFAULT; if (copy_to_user(useraddr, &regs, sizeof(regs))) goto out; useraddr += offsetof(struct ethtool_regs, data); if (regbuf && copy_to_user(useraddr, regbuf, regs.len)) goto out; ret = 0; out: vfree(regbuf); return ret; } static int ethtool_reset(struct net_device *dev, char __user *useraddr) { struct ethtool_value reset; int ret; if (!dev->ethtool_ops->reset) return -EOPNOTSUPP; if (copy_from_user(&reset, useraddr, sizeof(reset))) return -EFAULT; ret = dev->ethtool_ops->reset(dev, &reset.data); if (ret) return ret; if (copy_to_user(useraddr, &reset, sizeof(reset))) return -EFAULT; return 0; } static int ethtool_get_wol(struct net_device *dev, char __user *useraddr) { struct ethtool_wolinfo wol; if (!dev->ethtool_ops->get_wol) return -EOPNOTSUPP; memset(&wol, 0, sizeof(struct ethtool_wolinfo)); wol.cmd = ETHTOOL_GWOL; dev->ethtool_ops->get_wol(dev, &wol); if (copy_to_user(useraddr, &wol, sizeof(wol))) return -EFAULT; return 0; } static int ethtool_set_wol(struct net_device *dev, char __user *useraddr) { struct ethtool_wolinfo wol; if (!dev->ethtool_ops->set_wol) return -EOPNOTSUPP; if (copy_from_user(&wol, useraddr, sizeof(wol))) return -EFAULT; return dev->ethtool_ops->set_wol(dev, &wol); } static int ethtool_get_eee(struct net_device *dev, char __user *useraddr) { struct ethtool_eee edata; int rc; if (!dev->ethtool_ops->get_eee) return -EOPNOTSUPP; memset(&edata, 0, sizeof(struct ethtool_eee)); edata.cmd = ETHTOOL_GEEE; rc = dev->ethtool_ops->get_eee(dev, &edata); if (rc) return rc; if (copy_to_user(useraddr, &edata, sizeof(edata))) return -EFAULT; 
return 0; } static int ethtool_set_eee(struct net_device *dev, char __user *useraddr) { struct ethtool_eee edata; if (!dev->ethtool_ops->set_eee) return -EOPNOTSUPP; if (copy_from_user(&edata, useraddr, sizeof(edata))) return -EFAULT; return dev->ethtool_ops->set_eee(dev, &edata); } static int ethtool_nway_reset(struct net_device *dev) { if (!dev->ethtool_ops->nway_reset) return -EOPNOTSUPP; return dev->ethtool_ops->nway_reset(dev); } static int ethtool_get_link(struct net_device *dev, char __user *useraddr) { struct ethtool_value edata = { .cmd = ETHTOOL_GLINK }; if (!dev->ethtool_ops->get_link) return -EOPNOTSUPP; edata.data = netif_running(dev) && dev->ethtool_ops->get_link(dev); if (copy_to_user(useraddr, &edata, sizeof(edata))) return -EFAULT; return 0; } static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr, int (*getter)(struct net_device *, struct ethtool_eeprom *, u8 *), u32 total_len) { struct ethtool_eeprom eeprom; void __user *userbuf = useraddr + sizeof(eeprom); u32 bytes_remaining; u8 *data; int ret = 0; if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) return -EFAULT; /* Check for wrap and zero */ if (eeprom.offset + eeprom.len <= eeprom.offset) return -EINVAL; /* Check for exceeding total eeprom len */ if (eeprom.offset + eeprom.len > total_len) return -EINVAL; data = kmalloc(PAGE_SIZE, GFP_USER); if (!data) return -ENOMEM; bytes_remaining = eeprom.len; while (bytes_remaining > 0) { eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE); ret = getter(dev, &eeprom, data); if (ret) break; if (copy_to_user(userbuf, data, eeprom.len)) { ret = -EFAULT; break; } userbuf += eeprom.len; eeprom.offset += eeprom.len; bytes_remaining -= eeprom.len; } eeprom.len = userbuf - (useraddr + sizeof(eeprom)); eeprom.offset -= eeprom.len; if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) ret = -EFAULT; kfree(data); return ret; } static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) { const struct ethtool_ops *ops = 
dev->ethtool_ops; if (!ops->get_eeprom || !ops->get_eeprom_len) return -EOPNOTSUPP; return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom, ops->get_eeprom_len(dev)); } static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr) { struct ethtool_eeprom eeprom; const struct ethtool_ops *ops = dev->ethtool_ops; void __user *userbuf = useraddr + sizeof(eeprom); u32 bytes_remaining; u8 *data; int ret = 0; if (!ops->set_eeprom || !ops->get_eeprom_len) return -EOPNOTSUPP; if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) return -EFAULT; /* Check for wrap and zero */ if (eeprom.offset + eeprom.len <= eeprom.offset) return -EINVAL; /* Check for exceeding total eeprom len */ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) return -EINVAL; data = kmalloc(PAGE_SIZE, GFP_USER); if (!data) return -ENOMEM; bytes_remaining = eeprom.len; while (bytes_remaining > 0) { eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE); if (copy_from_user(data, userbuf, eeprom.len)) { ret = -EFAULT; break; } ret = ops->set_eeprom(dev, &eeprom, data); if (ret) break; userbuf += eeprom.len; eeprom.offset += eeprom.len; bytes_remaining -= eeprom.len; } kfree(data); return ret; } static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr) { struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; if (!dev->ethtool_ops->get_coalesce) return -EOPNOTSUPP; dev->ethtool_ops->get_coalesce(dev, &coalesce); if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) return -EFAULT; return 0; } static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr) { struct ethtool_coalesce coalesce; if (!dev->ethtool_ops->set_coalesce) return -EOPNOTSUPP; if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) return -EFAULT; return dev->ethtool_ops->set_coalesce(dev, &coalesce); } static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr) { struct ethtool_ringparam ringparam = { 
.cmd = ETHTOOL_GRINGPARAM }; if (!dev->ethtool_ops->get_ringparam) return -EOPNOTSUPP; dev->ethtool_ops->get_ringparam(dev, &ringparam); if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) return -EFAULT; return 0; } static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr) { struct ethtool_ringparam ringparam; if (!dev->ethtool_ops->set_ringparam) return -EOPNOTSUPP; if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) return -EFAULT; return dev->ethtool_ops->set_ringparam(dev, &ringparam); } static noinline_for_stack int ethtool_get_channels(struct net_device *dev, void __user *useraddr) { struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS }; if (!dev->ethtool_ops->get_channels) return -EOPNOTSUPP; dev->ethtool_ops->get_channels(dev, &channels); if (copy_to_user(useraddr, &channels, sizeof(channels))) return -EFAULT; return 0; } static noinline_for_stack int ethtool_set_channels(struct net_device *dev, void __user *useraddr) { struct ethtool_channels channels; if (!dev->ethtool_ops->set_channels) return -EOPNOTSUPP; if (copy_from_user(&channels, useraddr, sizeof(channels))) return -EFAULT; return dev->ethtool_ops->set_channels(dev, &channels); } static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr) { struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; if (!dev->ethtool_ops->get_pauseparam) return -EOPNOTSUPP; dev->ethtool_ops->get_pauseparam(dev, &pauseparam); if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) return -EFAULT; return 0; } static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr) { struct ethtool_pauseparam pauseparam; if (!dev->ethtool_ops->set_pauseparam) return -EOPNOTSUPP; if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) return -EFAULT; return dev->ethtool_ops->set_pauseparam(dev, &pauseparam); } static int ethtool_self_test(struct net_device *dev, char __user *useraddr) { struct ethtool_test test; const struct 
ethtool_ops *ops = dev->ethtool_ops; u64 *data; int ret, test_len; if (!ops->self_test || !ops->get_sset_count) return -EOPNOTSUPP; test_len = ops->get_sset_count(dev, ETH_SS_TEST); if (test_len < 0) return test_len; WARN_ON(test_len == 0); if (copy_from_user(&test, useraddr, sizeof(test))) return -EFAULT; test.len = test_len; data = kmalloc(test_len * sizeof(u64), GFP_USER); if (!data) return -ENOMEM; ops->self_test(dev, &test, data); ret = -EFAULT; if (copy_to_user(useraddr, &test, sizeof(test))) goto out; useraddr += sizeof(test); if (copy_to_user(useraddr, data, test.len * sizeof(u64))) goto out; ret = 0; out: kfree(data); return ret; } static int ethtool_get_strings(struct net_device *dev, void __user *useraddr) { struct ethtool_gstrings gstrings; u8 *data; int ret; if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) return -EFAULT; ret = __ethtool_get_sset_count(dev, gstrings.string_set); if (ret < 0) return ret; gstrings.len = ret; data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); if (!data) return -ENOMEM; __ethtool_get_strings(dev, gstrings.string_set, data); ret = -EFAULT; if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) goto out; useraddr += sizeof(gstrings); if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) goto out; ret = 0; out: kfree(data); return ret; } static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) { struct ethtool_value id; static bool busy; const struct ethtool_ops *ops = dev->ethtool_ops; int rc; if (!ops->set_phys_id) return -EOPNOTSUPP; if (busy) return -EBUSY; if (copy_from_user(&id, useraddr, sizeof(id))) return -EFAULT; rc = ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE); if (rc < 0) return rc; /* Drop the RTNL lock while waiting, but prevent reentry or * removal of the device. */ busy = true; dev_hold(dev); rtnl_unlock(); if (rc == 0) { /* Driver will handle this itself */ schedule_timeout_interruptible( id.data ? 
(id.data * HZ) : MAX_SCHEDULE_TIMEOUT); } else { /* Driver expects to be called at twice the frequency in rc */ int n = rc * 2, i, interval = HZ / n; /* Count down seconds */ do { /* Count down iterations per second */ i = n; do { rtnl_lock(); rc = ops->set_phys_id(dev, (i & 1) ? ETHTOOL_ID_OFF : ETHTOOL_ID_ON); rtnl_unlock(); if (rc) break; schedule_timeout_interruptible(interval); } while (!signal_pending(current) && --i != 0); } while (!signal_pending(current) && (id.data == 0 || --id.data != 0)); } rtnl_lock(); dev_put(dev); busy = false; (void) ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE); return rc; } static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) { struct ethtool_stats stats; const struct ethtool_ops *ops = dev->ethtool_ops; u64 *data; int ret, n_stats; if (!ops->get_ethtool_stats || !ops->get_sset_count) return -EOPNOTSUPP; n_stats = ops->get_sset_count(dev, ETH_SS_STATS); if (n_stats < 0) return n_stats; WARN_ON(n_stats == 0); if (copy_from_user(&stats, useraddr, sizeof(stats))) return -EFAULT; stats.n_stats = n_stats; data = kmalloc(n_stats * sizeof(u64), GFP_USER); if (!data) return -ENOMEM; ops->get_ethtool_stats(dev, &stats, data); ret = -EFAULT; if (copy_to_user(useraddr, &stats, sizeof(stats))) goto out; useraddr += sizeof(stats); if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64))) goto out; ret = 0; out: kfree(data); return ret; } static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr) { struct ethtool_perm_addr epaddr; if (copy_from_user(&epaddr, useraddr, sizeof(epaddr))) return -EFAULT; if (epaddr.size < dev->addr_len) return -ETOOSMALL; epaddr.size = dev->addr_len; if (copy_to_user(useraddr, &epaddr, sizeof(epaddr))) return -EFAULT; useraddr += sizeof(epaddr); if (copy_to_user(useraddr, dev->perm_addr, epaddr.size)) return -EFAULT; return 0; } static int ethtool_get_value(struct net_device *dev, char __user *useraddr, u32 cmd, u32 (*actor)(struct net_device *)) { struct ethtool_value 
edata = { .cmd = cmd }; if (!actor) return -EOPNOTSUPP; edata.data = actor(dev); if (copy_to_user(useraddr, &edata, sizeof(edata))) return -EFAULT; return 0; } static int ethtool_set_value_void(struct net_device *dev, char __user *useraddr, void (*actor)(struct net_device *, u32)) { struct ethtool_value edata; if (!actor) return -EOPNOTSUPP; if (copy_from_user(&edata, useraddr, sizeof(edata))) return -EFAULT; actor(dev, edata.data); return 0; } static int ethtool_set_value(struct net_device *dev, char __user *useraddr, int (*actor)(struct net_device *, u32)) { struct ethtool_value edata; if (!actor) return -EOPNOTSUPP; if (copy_from_user(&edata, useraddr, sizeof(edata))) return -EFAULT; return actor(dev, edata.data); } static noinline_for_stack int ethtool_flash_device(struct net_device *dev, char __user *useraddr) { struct ethtool_flash efl; if (copy_from_user(&efl, useraddr, sizeof(efl))) return -EFAULT; if (!dev->ethtool_ops->flash_device) return -EOPNOTSUPP; efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0; return dev->ethtool_ops->flash_device(dev, &efl); } static int ethtool_set_dump(struct net_device *dev, void __user *useraddr) { struct ethtool_dump dump; if (!dev->ethtool_ops->set_dump) return -EOPNOTSUPP; if (copy_from_user(&dump, useraddr, sizeof(dump))) return -EFAULT; return dev->ethtool_ops->set_dump(dev, &dump); } static int ethtool_get_dump_flag(struct net_device *dev, void __user *useraddr) { int ret; struct ethtool_dump dump; const struct ethtool_ops *ops = dev->ethtool_ops; if (!ops->get_dump_flag) return -EOPNOTSUPP; if (copy_from_user(&dump, useraddr, sizeof(dump))) return -EFAULT; ret = ops->get_dump_flag(dev, &dump); if (ret) return ret; if (copy_to_user(useraddr, &dump, sizeof(dump))) return -EFAULT; return 0; } static int ethtool_get_dump_data(struct net_device *dev, void __user *useraddr) { int ret; __u32 len; struct ethtool_dump dump, tmp; const struct ethtool_ops *ops = dev->ethtool_ops; void *data = NULL; if (!ops->get_dump_data || 
!ops->get_dump_flag) return -EOPNOTSUPP; if (copy_from_user(&dump, useraddr, sizeof(dump))) return -EFAULT; memset(&tmp, 0, sizeof(tmp)); tmp.cmd = ETHTOOL_GET_DUMP_FLAG; ret = ops->get_dump_flag(dev, &tmp); if (ret) return ret; len = (tmp.len > dump.len) ? dump.len : tmp.len; if (!len) return -EFAULT; data = vzalloc(tmp.len); if (!data) return -ENOMEM; ret = ops->get_dump_data(dev, &dump, data); if (ret) goto out; if (copy_to_user(useraddr, &dump, sizeof(dump))) { ret = -EFAULT; goto out; } useraddr += offsetof(struct ethtool_dump, data); if (copy_to_user(useraddr, data, len)) ret = -EFAULT; out: vfree(data); return ret; } static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr) { int err = 0; struct ethtool_ts_info info; const struct ethtool_ops *ops = dev->ethtool_ops; struct phy_device *phydev = dev->phydev; memset(&info, 0, sizeof(info)); info.cmd = ETHTOOL_GET_TS_INFO; if (phydev && phydev->drv && phydev->drv->ts_info) { err = phydev->drv->ts_info(phydev, &info); } else if (ops->get_ts_info) { err = ops->get_ts_info(dev, &info); } else { info.so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE; info.phc_index = -1; } if (err) return err; if (copy_to_user(useraddr, &info, sizeof(info))) err = -EFAULT; return err; } static int ethtool_get_module_info(struct net_device *dev, void __user *useraddr) { int ret; struct ethtool_modinfo modinfo; const struct ethtool_ops *ops = dev->ethtool_ops; if (!ops->get_module_info) return -EOPNOTSUPP; if (copy_from_user(&modinfo, useraddr, sizeof(modinfo))) return -EFAULT; ret = ops->get_module_info(dev, &modinfo); if (ret) return ret; if (copy_to_user(useraddr, &modinfo, sizeof(modinfo))) return -EFAULT; return 0; } static int ethtool_get_module_eeprom(struct net_device *dev, void __user *useraddr) { int ret; struct ethtool_modinfo modinfo; const struct ethtool_ops *ops = dev->ethtool_ops; if (!ops->get_module_info || !ops->get_module_eeprom) return -EOPNOTSUPP; ret = 
ops->get_module_info(dev, &modinfo); if (ret) return ret; return ethtool_get_any_eeprom(dev, useraddr, ops->get_module_eeprom, modinfo.eeprom_len); } /* The main entry point in this file. Called from net/core/dev.c */ int dev_ethtool(struct net *net, struct ifreq *ifr) { struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name); void __user *useraddr = ifr->ifr_data; u32 ethcmd; int rc; netdev_features_t old_features; if (!dev || !netif_device_present(dev)) return -ENODEV; if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd))) return -EFAULT; /* Allow some commands to be done by anyone */ switch (ethcmd) { case ETHTOOL_GSET: case ETHTOOL_GDRVINFO: case ETHTOOL_GMSGLVL: case ETHTOOL_GLINK: case ETHTOOL_GCOALESCE: case ETHTOOL_GRINGPARAM: case ETHTOOL_GPAUSEPARAM: case ETHTOOL_GRXCSUM: case ETHTOOL_GTXCSUM: case ETHTOOL_GSG: case ETHTOOL_GSSET_INFO: case ETHTOOL_GSTRINGS: case ETHTOOL_GSTATS: case ETHTOOL_GTSO: case ETHTOOL_GPERMADDR: case ETHTOOL_GUFO: case ETHTOOL_GGSO: case ETHTOOL_GGRO: case ETHTOOL_GFLAGS: case ETHTOOL_GPFLAGS: case ETHTOOL_GRXFH: case ETHTOOL_GRXRINGS: case ETHTOOL_GRXCLSRLCNT: case ETHTOOL_GRXCLSRULE: case ETHTOOL_GRXCLSRLALL: case ETHTOOL_GRXFHINDIR: case ETHTOOL_GFEATURES: case ETHTOOL_GCHANNELS: case ETHTOOL_GET_TS_INFO: case ETHTOOL_GEEE: break; default: if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; } if (dev->ethtool_ops->begin) { rc = dev->ethtool_ops->begin(dev); if (rc < 0) return rc; } old_features = dev->features; switch (ethcmd) { case ETHTOOL_GSET: rc = ethtool_get_settings(dev, useraddr); break; case ETHTOOL_SSET: rc = ethtool_set_settings(dev, useraddr); break; case ETHTOOL_GDRVINFO: rc = ethtool_get_drvinfo(dev, useraddr); break; case ETHTOOL_GREGS: rc = ethtool_get_regs(dev, useraddr); break; case ETHTOOL_GWOL: rc = ethtool_get_wol(dev, useraddr); break; case ETHTOOL_SWOL: rc = ethtool_set_wol(dev, useraddr); break; case ETHTOOL_GMSGLVL: rc = ethtool_get_value(dev, useraddr, ethcmd, 
dev->ethtool_ops->get_msglevel); break; case ETHTOOL_SMSGLVL: rc = ethtool_set_value_void(dev, useraddr, dev->ethtool_ops->set_msglevel); break; case ETHTOOL_GEEE: rc = ethtool_get_eee(dev, useraddr); break; case ETHTOOL_SEEE: rc = ethtool_set_eee(dev, useraddr); break; case ETHTOOL_NWAY_RST: rc = ethtool_nway_reset(dev); break; case ETHTOOL_GLINK: rc = ethtool_get_link(dev, useraddr); break; case ETHTOOL_GEEPROM: rc = ethtool_get_eeprom(dev, useraddr); break; case ETHTOOL_SEEPROM: rc = ethtool_set_eeprom(dev, useraddr); break; case ETHTOOL_GCOALESCE: rc = ethtool_get_coalesce(dev, useraddr); break; case ETHTOOL_SCOALESCE: rc = ethtool_set_coalesce(dev, useraddr); break; case ETHTOOL_GRINGPARAM: rc = ethtool_get_ringparam(dev, useraddr); break; case ETHTOOL_SRINGPARAM: rc = ethtool_set_ringparam(dev, useraddr); break; case ETHTOOL_GPAUSEPARAM: rc = ethtool_get_pauseparam(dev, useraddr); break; case ETHTOOL_SPAUSEPARAM: rc = ethtool_set_pauseparam(dev, useraddr); break; case ETHTOOL_TEST: rc = ethtool_self_test(dev, useraddr); break; case ETHTOOL_GSTRINGS: rc = ethtool_get_strings(dev, useraddr); break; case ETHTOOL_PHYS_ID: rc = ethtool_phys_id(dev, useraddr); break; case ETHTOOL_GSTATS: rc = ethtool_get_stats(dev, useraddr); break; case ETHTOOL_GPERMADDR: rc = ethtool_get_perm_addr(dev, useraddr); break; case ETHTOOL_GFLAGS: rc = ethtool_get_value(dev, useraddr, ethcmd, __ethtool_get_flags); break; case ETHTOOL_SFLAGS: rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags); break; case ETHTOOL_GPFLAGS: rc = ethtool_get_value(dev, useraddr, ethcmd, dev->ethtool_ops->get_priv_flags); break; case ETHTOOL_SPFLAGS: rc = ethtool_set_value(dev, useraddr, dev->ethtool_ops->set_priv_flags); break; case ETHTOOL_GRXFH: case ETHTOOL_GRXRINGS: case ETHTOOL_GRXCLSRLCNT: case ETHTOOL_GRXCLSRULE: case ETHTOOL_GRXCLSRLALL: rc = ethtool_get_rxnfc(dev, ethcmd, useraddr); break; case ETHTOOL_SRXFH: case ETHTOOL_SRXCLSRLDEL: case ETHTOOL_SRXCLSRLINS: rc = ethtool_set_rxnfc(dev, 
ethcmd, useraddr); break; case ETHTOOL_FLASHDEV: rc = ethtool_flash_device(dev, useraddr); break; case ETHTOOL_RESET: rc = ethtool_reset(dev, useraddr); break; case ETHTOOL_GSSET_INFO: rc = ethtool_get_sset_info(dev, useraddr); break; case ETHTOOL_GRXFHINDIR: rc = ethtool_get_rxfh_indir(dev, useraddr); break; case ETHTOOL_SRXFHINDIR: rc = ethtool_set_rxfh_indir(dev, useraddr); break; case ETHTOOL_GFEATURES: rc = ethtool_get_features(dev, useraddr); break; case ETHTOOL_SFEATURES: rc = ethtool_set_features(dev, useraddr); break; case ETHTOOL_GTXCSUM: case ETHTOOL_GRXCSUM: case ETHTOOL_GSG: case ETHTOOL_GTSO: case ETHTOOL_GUFO: case ETHTOOL_GGSO: case ETHTOOL_GGRO: rc = ethtool_get_one_feature(dev, useraddr, ethcmd); break; case ETHTOOL_STXCSUM: case ETHTOOL_SRXCSUM: case ETHTOOL_SSG: case ETHTOOL_STSO: case ETHTOOL_SUFO: case ETHTOOL_SGSO: case ETHTOOL_SGRO: rc = ethtool_set_one_feature(dev, useraddr, ethcmd); break; case ETHTOOL_GCHANNELS: rc = ethtool_get_channels(dev, useraddr); break; case ETHTOOL_SCHANNELS: rc = ethtool_set_channels(dev, useraddr); break; case ETHTOOL_SET_DUMP: rc = ethtool_set_dump(dev, useraddr); break; case ETHTOOL_GET_DUMP_FLAG: rc = ethtool_get_dump_flag(dev, useraddr); break; case ETHTOOL_GET_DUMP_DATA: rc = ethtool_get_dump_data(dev, useraddr); break; case ETHTOOL_GET_TS_INFO: rc = ethtool_get_ts_info(dev, useraddr); break; case ETHTOOL_GMODULEINFO: rc = ethtool_get_module_info(dev, useraddr); break; case ETHTOOL_GMODULEEEPROM: rc = ethtool_get_module_eeprom(dev, useraddr); break; default: rc = -EOPNOTSUPP; } if (dev->ethtool_ops->complete) dev->ethtool_ops->complete(dev); if (old_features != dev->features) netdev_features_change(dev); return rc; }
gpl-2.0
botioni/aml_linux_kernel
drivers/scsi/cxgb3i/cxgb3i_iscsi.c
935
27850
/* cxgb3i_iscsi.c: Chelsio S3xx iSCSI driver. * * Copyright (c) 2008 Chelsio Communications, Inc. * Copyright (c) 2008 Mike Christie * Copyright (c) 2008 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Written by: Karen Xie (kxie@chelsio.com) */ #include <linux/inet.h> #include <linux/slab.h> #include <linux/crypto.h> #include <linux/if_vlan.h> #include <net/dst.h> #include <net/tcp.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi.h> #include <scsi/iscsi_proto.h> #include <scsi/libiscsi.h> #include <scsi/scsi_transport_iscsi.h> #include "cxgb3i.h" #include "cxgb3i_pdu.h" #ifdef __DEBUG_CXGB3I_TAG__ #define cxgb3i_tag_debug cxgb3i_log_debug #else #define cxgb3i_tag_debug(fmt...) #endif #ifdef __DEBUG_CXGB3I_API__ #define cxgb3i_api_debug cxgb3i_log_debug #else #define cxgb3i_api_debug(fmt...) 
#endif /* * align pdu size to multiple of 512 for better performance */ #define align_pdu_size(n) do { n = (n) & (~511); } while (0) static struct scsi_transport_template *cxgb3i_scsi_transport; static struct scsi_host_template cxgb3i_host_template; static struct iscsi_transport cxgb3i_iscsi_transport; static unsigned char sw_tag_idx_bits; static unsigned char sw_tag_age_bits; static LIST_HEAD(cxgb3i_snic_list); static DEFINE_RWLOCK(cxgb3i_snic_rwlock); /** * cxgb3i_adpater_find_by_tdev - find the cxgb3i_adapter structure via t3cdev * @tdev: t3cdev pointer */ struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *tdev) { struct cxgb3i_adapter *snic; read_lock(&cxgb3i_snic_rwlock); list_for_each_entry(snic, &cxgb3i_snic_list, list_head) { if (snic->tdev == tdev) { read_unlock(&cxgb3i_snic_rwlock); return snic; } } read_unlock(&cxgb3i_snic_rwlock); return NULL; } static inline int adapter_update(struct cxgb3i_adapter *snic) { cxgb3i_log_info("snic 0x%p, t3dev 0x%p, updating.\n", snic, snic->tdev); return cxgb3i_adapter_ddp_info(snic->tdev, &snic->tag_format, &snic->tx_max_size, &snic->rx_max_size); } static int adapter_add(struct cxgb3i_adapter *snic) { struct t3cdev *t3dev = snic->tdev; struct adapter *adapter = tdev2adap(t3dev); int i, err; snic->pdev = adapter->pdev; snic->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits; err = cxgb3i_adapter_ddp_info(t3dev, &snic->tag_format, &snic->tx_max_size, &snic->rx_max_size); if (err < 0) return err; for_each_port(adapter, i) { snic->hba[i] = cxgb3i_hba_host_add(snic, adapter->port[i]); if (!snic->hba[i]) return -EINVAL; } snic->hba_cnt = adapter->params.nports; /* add to the list */ write_lock(&cxgb3i_snic_rwlock); list_add_tail(&snic->list_head, &cxgb3i_snic_list); write_unlock(&cxgb3i_snic_rwlock); cxgb3i_log_info("t3dev 0x%p open, snic 0x%p, %u scsi hosts added.\n", t3dev, snic, snic->hba_cnt); return 0; } /** * cxgb3i_adapter_open - init a s3 adapter structure and any h/w settings * @t3dev: t3cdev 
adapter */ void cxgb3i_adapter_open(struct t3cdev *t3dev) { struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(t3dev); int err; if (snic) err = adapter_update(snic); else { snic = kzalloc(sizeof(*snic), GFP_KERNEL); if (snic) { spin_lock_init(&snic->lock); snic->tdev = t3dev; err = adapter_add(snic); } else err = -ENOMEM; } if (err < 0) { cxgb3i_log_info("snic 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n", snic, snic ? snic->flags : 0, t3dev, err); if (snic) { snic->flags &= ~CXGB3I_ADAPTER_FLAG_RESET; cxgb3i_adapter_close(t3dev); } } } /** * cxgb3i_adapter_close - release the resources held and cleanup h/w settings * @t3dev: t3cdev adapter */ void cxgb3i_adapter_close(struct t3cdev *t3dev) { struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(t3dev); int i; if (!snic || snic->flags & CXGB3I_ADAPTER_FLAG_RESET) { cxgb3i_log_info("t3dev 0x%p close, snic 0x%p, f 0x%x.\n", t3dev, snic, snic ? snic->flags : 0); return; } /* remove from the list */ write_lock(&cxgb3i_snic_rwlock); list_del(&snic->list_head); write_unlock(&cxgb3i_snic_rwlock); for (i = 0; i < snic->hba_cnt; i++) { if (snic->hba[i]) { cxgb3i_hba_host_remove(snic->hba[i]); snic->hba[i] = NULL; } } cxgb3i_log_info("t3dev 0x%p close, snic 0x%p, %u scsi hosts removed.\n", t3dev, snic, snic->hba_cnt); kfree(snic); } /** * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device * @t3dev: t3cdev adapter */ static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev) { struct cxgb3i_adapter *snic; int i; if (ndev->priv_flags & IFF_802_1Q_VLAN) ndev = vlan_dev_real_dev(ndev); read_lock(&cxgb3i_snic_rwlock); list_for_each_entry(snic, &cxgb3i_snic_list, list_head) { for (i = 0; i < snic->hba_cnt; i++) { if (snic->hba[i]->ndev == ndev) { read_unlock(&cxgb3i_snic_rwlock); return snic->hba[i]; } } } read_unlock(&cxgb3i_snic_rwlock); return NULL; } /** * cxgb3i_hba_host_add - register a new host with scsi/iscsi * @snic: the cxgb3i adapter * @ndev: associated net_device */ struct 
cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *snic, struct net_device *ndev) { struct cxgb3i_hba *hba; struct Scsi_Host *shost; int err; shost = iscsi_host_alloc(&cxgb3i_host_template, sizeof(struct cxgb3i_hba), 1); if (!shost) { cxgb3i_log_info("snic 0x%p, ndev 0x%p, host_alloc failed.\n", snic, ndev); return NULL; } shost->transportt = cxgb3i_scsi_transport; shost->max_lun = CXGB3I_MAX_LUN; shost->max_id = CXGB3I_MAX_TARGET; shost->max_channel = 0; shost->max_cmd_len = 16; hba = iscsi_host_priv(shost); hba->snic = snic; hba->ndev = ndev; hba->shost = shost; pci_dev_get(snic->pdev); err = iscsi_host_add(shost, &snic->pdev->dev); if (err) { cxgb3i_log_info("snic 0x%p, ndev 0x%p, host_add failed.\n", snic, ndev); goto pci_dev_put; } cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n", shost, hba, shost->host_no); return hba; pci_dev_put: pci_dev_put(snic->pdev); scsi_host_put(shost); return NULL; } /** * cxgb3i_hba_host_remove - de-register the host with scsi/iscsi * @hba: the cxgb3i hba */ void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba) { cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n", hba->shost, hba, hba->shost->host_no); iscsi_host_remove(hba->shost); pci_dev_put(hba->snic->pdev); iscsi_host_free(hba->shost); } /** * cxgb3i_ep_connect - establish TCP connection to target portal * @shost: scsi host to use * @dst_addr: target IP address * @non_blocking: blocking or non-blocking call * * Initiates a TCP/IP connection to the dst_addr */ static struct iscsi_endpoint *cxgb3i_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, int non_blocking) { struct iscsi_endpoint *ep; struct cxgb3i_endpoint *cep; struct cxgb3i_hba *hba = NULL; struct s3_conn *c3cn = NULL; int err = 0; if (shost) hba = iscsi_host_priv(shost); cxgb3i_api_debug("shost 0x%p, hba 0x%p.\n", shost, hba); c3cn = cxgb3i_c3cn_create(); if (!c3cn) { cxgb3i_log_info("ep connect OOM.\n"); err = -ENOMEM; goto release_conn; } err = cxgb3i_c3cn_connect(hba ? 
hba->ndev : NULL, c3cn, (struct sockaddr_in *)dst_addr); if (err < 0) { cxgb3i_log_info("ep connect failed.\n"); goto release_conn; } hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev); if (!hba) { err = -ENOSPC; cxgb3i_log_info("NOT going through cxgbi device.\n"); goto release_conn; } if (shost && hba != iscsi_host_priv(shost)) { err = -ENOSPC; cxgb3i_log_info("Could not connect through request host%u\n", shost->host_no); goto release_conn; } if (c3cn_is_closing(c3cn)) { err = -ENOSPC; cxgb3i_log_info("ep connect unable to connect.\n"); goto release_conn; } ep = iscsi_create_endpoint(sizeof(*cep)); if (!ep) { err = -ENOMEM; cxgb3i_log_info("iscsi alloc ep, OOM.\n"); goto release_conn; } cep = ep->dd_data; cep->c3cn = c3cn; cep->hba = hba; cxgb3i_api_debug("ep 0x%p, 0x%p, c3cn 0x%p, hba 0x%p.\n", ep, cep, c3cn, hba); return ep; release_conn: cxgb3i_api_debug("conn 0x%p failed, release.\n", c3cn); if (c3cn) cxgb3i_c3cn_release(c3cn); return ERR_PTR(err); } /** * cxgb3i_ep_poll - polls for TCP connection establishement * @ep: TCP connection (endpoint) handle * @timeout_ms: timeout value in milli secs * * polls for TCP connect request to complete */ static int cxgb3i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) { struct cxgb3i_endpoint *cep = ep->dd_data; struct s3_conn *c3cn = cep->c3cn; if (!c3cn_is_established(c3cn)) return 0; cxgb3i_api_debug("ep 0x%p, c3cn 0x%p established.\n", ep, c3cn); return 1; } /** * cxgb3i_ep_disconnect - teardown TCP connection * @ep: TCP connection (endpoint) handle * * teardown TCP connection */ static void cxgb3i_ep_disconnect(struct iscsi_endpoint *ep) { struct cxgb3i_endpoint *cep = ep->dd_data; struct cxgb3i_conn *cconn = cep->cconn; cxgb3i_api_debug("ep 0x%p, cep 0x%p.\n", ep, cep); if (cconn && cconn->conn) { /* * stop the xmit path so the xmit_pdu function is * not being called */ iscsi_suspend_tx(cconn->conn); write_lock_bh(&cep->c3cn->callback_lock); cep->c3cn->user_data = NULL; cconn->cep = NULL; 
write_unlock_bh(&cep->c3cn->callback_lock); } cxgb3i_api_debug("ep 0x%p, cep 0x%p, release c3cn 0x%p.\n", ep, cep, cep->c3cn); cxgb3i_c3cn_release(cep->c3cn); iscsi_destroy_endpoint(ep); } /** * cxgb3i_session_create - create a new iscsi session * @cmds_max: max # of commands * @qdepth: scsi queue depth * @initial_cmdsn: initial iscsi CMDSN for this session * * Creates a new iSCSI session */ static struct iscsi_cls_session * cxgb3i_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth, u32 initial_cmdsn) { struct cxgb3i_endpoint *cep; struct cxgb3i_hba *hba; struct Scsi_Host *shost; struct iscsi_cls_session *cls_session; struct iscsi_session *session; if (!ep) { cxgb3i_log_error("%s, missing endpoint.\n", __func__); return NULL; } cep = ep->dd_data; hba = cep->hba; shost = hba->shost; cxgb3i_api_debug("ep 0x%p, cep 0x%p, hba 0x%p.\n", ep, cep, hba); BUG_ON(hba != iscsi_host_priv(shost)); cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost, cmds_max, 0, sizeof(struct iscsi_tcp_task) + sizeof(struct cxgb3i_task_data), initial_cmdsn, ISCSI_MAX_TARGET); if (!cls_session) return NULL; session = cls_session->dd_data; if (iscsi_tcp_r2tpool_alloc(session)) goto remove_session; return cls_session; remove_session: iscsi_session_teardown(cls_session); return NULL; } /** * cxgb3i_session_destroy - destroys iscsi session * @cls_session: pointer to iscsi cls session * * Destroys an iSCSI session instance and releases its all resources held */ static void cxgb3i_session_destroy(struct iscsi_cls_session *cls_session) { cxgb3i_api_debug("sess 0x%p.\n", cls_session); iscsi_tcp_r2tpool_free(cls_session->dd_data); iscsi_session_teardown(cls_session); } /** * cxgb3i_conn_max_xmit_dlength -- calc the max. xmit pdu segment size * @conn: iscsi connection * check the max. 
xmit pdu payload, reduce it if needed */ static inline int cxgb3i_conn_max_xmit_dlength(struct iscsi_conn *conn) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct cxgb3i_conn *cconn = tcp_conn->dd_data; unsigned int max = max(512 * MAX_SKB_FRAGS, SKB_TX_HEADROOM); max = min(cconn->hba->snic->tx_max_size, max); if (conn->max_xmit_dlength) conn->max_xmit_dlength = min(conn->max_xmit_dlength, max); else conn->max_xmit_dlength = max; align_pdu_size(conn->max_xmit_dlength); cxgb3i_api_debug("conn 0x%p, max xmit %u.\n", conn, conn->max_xmit_dlength); return 0; } /** * cxgb3i_conn_max_recv_dlength -- check the max. recv pdu segment size * @conn: iscsi connection * return 0 if the value is valid, < 0 otherwise. */ static inline int cxgb3i_conn_max_recv_dlength(struct iscsi_conn *conn) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct cxgb3i_conn *cconn = tcp_conn->dd_data; unsigned int max = cconn->hba->snic->rx_max_size; align_pdu_size(max); if (conn->max_recv_dlength) { if (conn->max_recv_dlength > max) { cxgb3i_log_error("MaxRecvDataSegmentLength %u too big." 
" Need to be <= %u.\n", conn->max_recv_dlength, max); return -EINVAL; } conn->max_recv_dlength = min(conn->max_recv_dlength, max); align_pdu_size(conn->max_recv_dlength); } else conn->max_recv_dlength = max; cxgb3i_api_debug("conn 0x%p, max recv %u.\n", conn, conn->max_recv_dlength); return 0; } /** * cxgb3i_conn_create - create iscsi connection instance * @cls_session: pointer to iscsi cls session * @cid: iscsi cid * * Creates a new iSCSI connection instance for a given session */ static struct iscsi_cls_conn *cxgb3i_conn_create(struct iscsi_cls_session *cls_session, u32 cid) { struct iscsi_cls_conn *cls_conn; struct iscsi_conn *conn; struct iscsi_tcp_conn *tcp_conn; struct cxgb3i_conn *cconn; cxgb3i_api_debug("sess 0x%p, cid %u.\n", cls_session, cid); cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid); if (!cls_conn) return NULL; conn = cls_conn->dd_data; tcp_conn = conn->dd_data; cconn = tcp_conn->dd_data; cconn->conn = conn; return cls_conn; } /** * cxgb3i_conn_bind - binds iscsi sess, conn and endpoint together * @cls_session: pointer to iscsi cls session * @cls_conn: pointer to iscsi cls conn * @transport_eph: 64-bit EP handle * @is_leading: leading connection on this session? * * Binds together an iSCSI session, an iSCSI connection and a * TCP connection. 
This routine returns error code if the TCP * connection does not belong on the device iSCSI sess/conn is bound */ static int cxgb3i_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, u64 transport_eph, int is_leading) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct cxgb3i_conn *cconn = tcp_conn->dd_data; struct cxgb3i_adapter *snic; struct iscsi_endpoint *ep; struct cxgb3i_endpoint *cep; struct s3_conn *c3cn; int err; ep = iscsi_lookup_endpoint(transport_eph); if (!ep) return -EINVAL; /* setup ddp pagesize */ cep = ep->dd_data; c3cn = cep->c3cn; snic = cep->hba->snic; err = cxgb3i_setup_conn_host_pagesize(snic->tdev, c3cn->tid, 0); if (err < 0) return err; cxgb3i_api_debug("ep 0x%p, cls sess 0x%p, cls conn 0x%p.\n", ep, cls_session, cls_conn); err = iscsi_conn_bind(cls_session, cls_conn, is_leading); if (err) return -EINVAL; /* calculate the tag idx bits needed for this conn based on cmds_max */ cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1; cxgb3i_api_debug("session cmds_max 0x%x, bits %u.\n", conn->session->cmds_max, cconn->task_idx_bits); read_lock(&c3cn->callback_lock); c3cn->user_data = conn; cconn->hba = cep->hba; cconn->cep = cep; cep->cconn = cconn; read_unlock(&c3cn->callback_lock); cxgb3i_conn_max_xmit_dlength(conn); cxgb3i_conn_max_recv_dlength(conn); spin_lock_bh(&conn->session->lock); sprintf(conn->portal_address, "%pI4", &c3cn->daddr.sin_addr.s_addr); conn->portal_port = ntohs(c3cn->daddr.sin_port); spin_unlock_bh(&conn->session->lock); /* init recv engine */ iscsi_tcp_hdr_recv_prep(tcp_conn); return 0; } /** * cxgb3i_conn_get_param - return iscsi connection parameter to caller * @cls_conn: pointer to iscsi cls conn * @param: parameter type identifier * @buf: buffer pointer * * returns iSCSI connection parameters */ static int cxgb3i_conn_get_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf) { struct iscsi_conn *conn = 
cls_conn->dd_data; int len; cxgb3i_api_debug("cls_conn 0x%p, param %d.\n", cls_conn, param); switch (param) { case ISCSI_PARAM_CONN_PORT: spin_lock_bh(&conn->session->lock); len = sprintf(buf, "%hu\n", conn->portal_port); spin_unlock_bh(&conn->session->lock); break; case ISCSI_PARAM_CONN_ADDRESS: spin_lock_bh(&conn->session->lock); len = sprintf(buf, "%s\n", conn->portal_address); spin_unlock_bh(&conn->session->lock); break; default: return iscsi_conn_get_param(cls_conn, param, buf); } return len; } /** * cxgb3i_conn_set_param - set iscsi connection parameter * @cls_conn: pointer to iscsi cls conn * @param: parameter type identifier * @buf: buffer pointer * @buflen: buffer length * * set iSCSI connection parameters */ static int cxgb3i_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf, int buflen) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = conn->session; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct cxgb3i_conn *cconn = tcp_conn->dd_data; struct cxgb3i_adapter *snic = cconn->hba->snic; struct s3_conn *c3cn = cconn->cep->c3cn; int value, err = 0; switch (param) { case ISCSI_PARAM_HDRDGST_EN: err = iscsi_set_param(cls_conn, param, buf, buflen); if (!err && conn->hdrdgst_en) err = cxgb3i_setup_conn_digest(snic->tdev, c3cn->tid, conn->hdrdgst_en, conn->datadgst_en, 0); break; case ISCSI_PARAM_DATADGST_EN: err = iscsi_set_param(cls_conn, param, buf, buflen); if (!err && conn->datadgst_en) err = cxgb3i_setup_conn_digest(snic->tdev, c3cn->tid, conn->hdrdgst_en, conn->datadgst_en, 0); break; case ISCSI_PARAM_MAX_R2T: sscanf(buf, "%d", &value); if (value <= 0 || !is_power_of_2(value)) return -EINVAL; if (session->max_r2t == value) break; iscsi_tcp_r2tpool_free(session); err = iscsi_set_param(cls_conn, param, buf, buflen); if (!err && iscsi_tcp_r2tpool_alloc(session)) return -ENOMEM; case ISCSI_PARAM_MAX_RECV_DLENGTH: err = iscsi_set_param(cls_conn, param, buf, buflen); if (!err) err = 
cxgb3i_conn_max_recv_dlength(conn); break; case ISCSI_PARAM_MAX_XMIT_DLENGTH: err = iscsi_set_param(cls_conn, param, buf, buflen); if (!err) err = cxgb3i_conn_max_xmit_dlength(conn); break; default: return iscsi_set_param(cls_conn, param, buf, buflen); } return err; } /** * cxgb3i_host_set_param - configure host (adapter) related parameters * @shost: scsi host pointer * @param: parameter type identifier * @buf: buffer pointer */ static int cxgb3i_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf, int buflen) { struct cxgb3i_hba *hba = iscsi_host_priv(shost); if (!hba->ndev) { shost_printk(KERN_ERR, shost, "Could not set host param. " "Netdev for host not set.\n"); return -ENODEV; } cxgb3i_api_debug("param %d, buf %s.\n", param, buf); switch (param) { case ISCSI_HOST_PARAM_IPADDRESS: { __be32 addr = in_aton(buf); cxgb3i_set_private_ipv4addr(hba->ndev, addr); return 0; } case ISCSI_HOST_PARAM_HWADDRESS: case ISCSI_HOST_PARAM_NETDEV_NAME: /* ignore */ return 0; default: return iscsi_host_set_param(shost, param, buf, buflen); } } /** * cxgb3i_host_get_param - returns host (adapter) related parameters * @shost: scsi host pointer * @param: parameter type identifier * @buf: buffer pointer */ static int cxgb3i_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf) { struct cxgb3i_hba *hba = iscsi_host_priv(shost); int len = 0; if (!hba->ndev) { shost_printk(KERN_ERR, shost, "Could not set host param. 
" "Netdev for host not set.\n"); return -ENODEV; } cxgb3i_api_debug("hba %s, param %d.\n", hba->ndev->name, param); switch (param) { case ISCSI_HOST_PARAM_HWADDRESS: len = sysfs_format_mac(buf, hba->ndev->dev_addr, 6); break; case ISCSI_HOST_PARAM_NETDEV_NAME: len = sprintf(buf, "%s\n", hba->ndev->name); break; case ISCSI_HOST_PARAM_IPADDRESS: { __be32 addr; addr = cxgb3i_get_private_ipv4addr(hba->ndev); len = sprintf(buf, "%pI4", &addr); break; } default: return iscsi_host_get_param(shost, param, buf); } return len; } /** * cxgb3i_conn_get_stats - returns iSCSI stats * @cls_conn: pointer to iscsi cls conn * @stats: pointer to iscsi statistic struct */ static void cxgb3i_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) { struct iscsi_conn *conn = cls_conn->dd_data; stats->txdata_octets = conn->txdata_octets; stats->rxdata_octets = conn->rxdata_octets; stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; stats->dataout_pdus = conn->dataout_pdus_cnt; stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; stats->datain_pdus = conn->datain_pdus_cnt; stats->r2t_pdus = conn->r2t_pdus_cnt; stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; stats->digest_err = 0; stats->timeout_err = 0; stats->custom_length = 1; strcpy(stats->custom[0].desc, "eh_abort_cnt"); stats->custom[0].value = conn->eh_abort_cnt; } /** * cxgb3i_parse_itt - get the idx and age bits from a given tag * @conn: iscsi connection * @itt: itt tag * @idx: task index, filled in by this function * @age: session age, filled in by this function */ static void cxgb3i_parse_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct cxgb3i_conn *cconn = tcp_conn->dd_data; struct cxgb3i_adapter *snic = cconn->hba->snic; u32 tag = ntohl((__force u32) itt); u32 sw_bits; sw_bits = cxgb3i_tag_nonrsvd_bits(&snic->tag_format, tag); if (idx) *idx = sw_bits & ((1 << cconn->task_idx_bits) - 1); if (age) *age = (sw_bits >> 
cconn->task_idx_bits) & ISCSI_AGE_MASK; cxgb3i_tag_debug("parse tag 0x%x/0x%x, sw 0x%x, itt 0x%x, age 0x%x.\n", tag, itt, sw_bits, idx ? *idx : 0xFFFFF, age ? *age : 0xFF); } /** * cxgb3i_reserve_itt - generate tag for a give task * @task: iscsi task * @hdr_itt: tag, filled in by this function * Set up ddp for scsi read tasks if possible. */ int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt) { struct scsi_cmnd *sc = task->sc; struct iscsi_conn *conn = task->conn; struct iscsi_session *sess = conn->session; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct cxgb3i_conn *cconn = tcp_conn->dd_data; struct cxgb3i_adapter *snic = cconn->hba->snic; struct cxgb3i_tag_format *tformat = &snic->tag_format; u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt; u32 tag; int err = -EINVAL; if (sc && (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) && cxgb3i_sw_tag_usable(tformat, sw_tag)) { struct s3_conn *c3cn = cconn->cep->c3cn; struct cxgb3i_gather_list *gl; gl = cxgb3i_ddp_make_gl(scsi_in(sc)->length, scsi_in(sc)->table.sgl, scsi_in(sc)->table.nents, snic->pdev, GFP_ATOMIC); if (gl) { tag = sw_tag; err = cxgb3i_ddp_tag_reserve(snic->tdev, c3cn->tid, tformat, &tag, gl, GFP_ATOMIC); if (err < 0) cxgb3i_ddp_release_gl(gl, snic->pdev); } } if (err < 0) tag = cxgb3i_set_non_ddp_tag(tformat, sw_tag); /* the itt need to sent in big-endian order */ *hdr_itt = (__force itt_t)htonl(tag); cxgb3i_tag_debug("new tag 0x%x/0x%x (itt 0x%x, age 0x%x).\n", tag, *hdr_itt, task->itt, sess->age); return 0; } /** * cxgb3i_release_itt - release the tag for a given task * @task: iscsi task * @hdr_itt: tag * If the tag is a ddp tag, release the ddp setup */ void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt) { struct scsi_cmnd *sc = task->sc; struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; struct cxgb3i_conn *cconn = tcp_conn->dd_data; struct cxgb3i_adapter *snic = cconn->hba->snic; struct cxgb3i_tag_format *tformat = &snic->tag_format; 
u32 tag = ntohl((__force u32)hdr_itt); cxgb3i_tag_debug("release tag 0x%x.\n", tag); if (sc && (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) && cxgb3i_is_ddp_tag(tformat, tag)) cxgb3i_ddp_tag_release(snic->tdev, tag); } /** * cxgb3i_host_template -- Scsi_Host_Template structure * used when registering with the scsi mid layer */ static struct scsi_host_template cxgb3i_host_template = { .module = THIS_MODULE, .name = "Chelsio S3xx iSCSI Initiator", .proc_name = "cxgb3i", .queuecommand = iscsi_queuecommand, .change_queue_depth = iscsi_change_queue_depth, .can_queue = CXGB3I_SCSI_HOST_QDEPTH, .sg_tablesize = SG_ALL, .max_sectors = 0xFFFF, .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler = iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, .target_alloc = iscsi_target_alloc, .use_clustering = DISABLE_CLUSTERING, .this_id = -1, }; static struct iscsi_transport cxgb3i_iscsi_transport = { .owner = THIS_MODULE, .name = "cxgb3i", .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST | CAP_DATADGST | CAP_DIGEST_OFFLOAD | CAP_PADDING_OFFLOAD, .param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH | ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN | ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T | ISCSI_IMM_DATA_EN | ISCSI_FIRST_BURST | ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN | ISCSI_DATASEQ_INORDER_EN | ISCSI_ERL | ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS | ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT | ISCSI_PERSISTENT_ADDRESS | ISCSI_TARGET_NAME | ISCSI_TPGT | ISCSI_USERNAME | ISCSI_PASSWORD | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | ISCSI_PING_TMO | ISCSI_RECV_TMO | ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | ISCSI_HOST_INITIATOR_NAME | ISCSI_HOST_NETDEV_NAME, .get_host_param = cxgb3i_host_get_param, .set_host_param = cxgb3i_host_set_param, /* session management */ 
.create_session = cxgb3i_session_create, .destroy_session = cxgb3i_session_destroy, .get_session_param = iscsi_session_get_param, /* connection management */ .create_conn = cxgb3i_conn_create, .bind_conn = cxgb3i_conn_bind, .destroy_conn = iscsi_tcp_conn_teardown, .start_conn = iscsi_conn_start, .stop_conn = iscsi_conn_stop, .get_conn_param = cxgb3i_conn_get_param, .set_param = cxgb3i_conn_set_param, .get_stats = cxgb3i_conn_get_stats, /* pdu xmit req. from user space */ .send_pdu = iscsi_conn_send_pdu, /* task */ .init_task = iscsi_tcp_task_init, .xmit_task = iscsi_tcp_task_xmit, .cleanup_task = cxgb3i_conn_cleanup_task, /* pdu */ .alloc_pdu = cxgb3i_conn_alloc_pdu, .init_pdu = cxgb3i_conn_init_pdu, .xmit_pdu = cxgb3i_conn_xmit_pdu, .parse_pdu_itt = cxgb3i_parse_itt, /* TCP connect/disconnect */ .ep_connect = cxgb3i_ep_connect, .ep_poll = cxgb3i_ep_poll, .ep_disconnect = cxgb3i_ep_disconnect, /* Error recovery timeout call */ .session_recovery_timedout = iscsi_session_recovery_timedout, }; int cxgb3i_iscsi_init(void) { sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1; sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1; cxgb3i_log_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n", ISCSI_ITT_MASK, sw_tag_idx_bits, ISCSI_AGE_MASK, sw_tag_age_bits); cxgb3i_scsi_transport = iscsi_register_transport(&cxgb3i_iscsi_transport); if (!cxgb3i_scsi_transport) { cxgb3i_log_error("Could not register cxgb3i transport.\n"); return -ENODEV; } cxgb3i_api_debug("cxgb3i transport 0x%p.\n", cxgb3i_scsi_transport); return 0; } void cxgb3i_iscsi_cleanup(void) { if (cxgb3i_scsi_transport) { cxgb3i_api_debug("cxgb3i transport 0x%p.\n", cxgb3i_scsi_transport); iscsi_unregister_transport(&cxgb3i_iscsi_transport); } }
gpl-2.0
marioaugustorama/linux
arch/arm/mach-imx/mach-vpr200.c
935
8900
/* * Copyright 2009 Freescale Semiconductor, Inc. All Rights Reserved. * Copyright (C) 2009 Marc Kleine-Budde, Pengutronix * Copyright 2010 Creative Product Design * * Derived from mx35 3stack. * Original author: Fabio Estevam <fabio.estevam@freescale.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/types.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mtd/physmap.h> #include <linux/memory.h> #include <linux/gpio.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <linux/i2c.h> #include <linux/platform_data/at24.h> #include <linux/mfd/mc13xxx.h> #include "common.h" #include "devices-imx35.h" #include "ehci.h" #include "hardware.h" #include "iomux-mx35.h" #define GPIO_LCDPWR IMX_GPIO_NR(1, 2) #define GPIO_PMIC_INT IMX_GPIO_NR(2, 0) #define GPIO_BUTTON1 IMX_GPIO_NR(1, 4) #define GPIO_BUTTON2 IMX_GPIO_NR(1, 5) #define GPIO_BUTTON3 IMX_GPIO_NR(1, 7) #define GPIO_BUTTON4 IMX_GPIO_NR(1, 8) #define GPIO_BUTTON5 IMX_GPIO_NR(1, 9) #define GPIO_BUTTON6 IMX_GPIO_NR(1, 10) #define GPIO_BUTTON7 IMX_GPIO_NR(1, 11) #define GPIO_BUTTON8 IMX_GPIO_NR(1, 12) static const struct fb_videomode fb_modedb[] = { { /* 800x480 @ 60 Hz */ .name = "PT0708048", .refresh = 60, .xres = 800, .yres = 480, .pixclock = KHZ2PICOS(33260), .left_margin = 50, .right_margin = 156, .upper_margin = 10, .lower_margin = 10, .hsync_len = 1, /* note: DE only display */ .vsync_len = 1, /* note: DE only display */ .sync = FB_SYNC_CLK_IDLE_EN | FB_SYNC_OE_ACT_HIGH, .vmode = 
FB_VMODE_NONINTERLACED, .flag = 0, }, { /* 800x480 @ 60 Hz */ .name = "CTP-CLAA070LC0ACW", .refresh = 60, .xres = 800, .yres = 480, .pixclock = KHZ2PICOS(27000), .left_margin = 50, .right_margin = 50, /* whole line should have 900 clocks */ .upper_margin = 10, .lower_margin = 10, /* whole frame should have 500 lines */ .hsync_len = 1, /* note: DE only display */ .vsync_len = 1, /* note: DE only display */ .sync = FB_SYNC_CLK_IDLE_EN | FB_SYNC_OE_ACT_HIGH, .vmode = FB_VMODE_NONINTERLACED, .flag = 0, } }; static struct mx3fb_platform_data mx3fb_pdata __initdata = { .name = "PT0708048", .mode = fb_modedb, .num_modes = ARRAY_SIZE(fb_modedb), }; static struct physmap_flash_data vpr200_flash_data = { .width = 2, }; static struct resource vpr200_flash_resource = { .start = MX35_CS0_BASE_ADDR, .end = MX35_CS0_BASE_ADDR + SZ_64M - 1, .flags = IORESOURCE_MEM, }; static struct platform_device vpr200_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &vpr200_flash_data, }, .resource = &vpr200_flash_resource, .num_resources = 1, }; static const struct mxc_nand_platform_data vpr200_nand_board_info __initconst = { .width = 1, .hw_ecc = 1, .flash_bbt = 1, }; #define VPR_KEY_DEBOUNCE 500 static struct gpio_keys_button vpr200_gpio_keys_table[] = { {KEY_F2, GPIO_BUTTON1, 1, "vpr-keys: F2", 0, VPR_KEY_DEBOUNCE}, {KEY_F3, GPIO_BUTTON2, 1, "vpr-keys: F3", 0, VPR_KEY_DEBOUNCE}, {KEY_F4, GPIO_BUTTON3, 1, "vpr-keys: F4", 0, VPR_KEY_DEBOUNCE}, {KEY_F5, GPIO_BUTTON4, 1, "vpr-keys: F5", 0, VPR_KEY_DEBOUNCE}, {KEY_F6, GPIO_BUTTON5, 1, "vpr-keys: F6", 0, VPR_KEY_DEBOUNCE}, {KEY_F7, GPIO_BUTTON6, 1, "vpr-keys: F7", 0, VPR_KEY_DEBOUNCE}, {KEY_F8, GPIO_BUTTON7, 1, "vpr-keys: F8", 1, VPR_KEY_DEBOUNCE}, {KEY_F9, GPIO_BUTTON8, 1, "vpr-keys: F9", 1, VPR_KEY_DEBOUNCE}, }; static const struct gpio_keys_platform_data vpr200_gpio_keys_data __initconst = { .buttons = vpr200_gpio_keys_table, .nbuttons = ARRAY_SIZE(vpr200_gpio_keys_table), }; static struct mc13xxx_platform_data vpr200_pmic 
= { .flags = MC13XXX_USE_ADC | MC13XXX_USE_TOUCHSCREEN, }; static const struct imxi2c_platform_data vpr200_i2c0_data __initconst = { .bitrate = 50000, }; static struct at24_platform_data vpr200_eeprom = { .byte_len = 2048 / 8, .page_size = 1, }; static struct i2c_board_info vpr200_i2c_devices[] = { { I2C_BOARD_INFO("at24", 0x50), /* E0=0, E1=0, E2=0 */ .platform_data = &vpr200_eeprom, }, { I2C_BOARD_INFO("mc13892", 0x08), .platform_data = &vpr200_pmic, /* irq number is run-time assigned */ } }; static const iomux_v3_cfg_t vpr200_pads[] __initconst = { /* UART1 */ MX35_PAD_TXD1__UART1_TXD_MUX, MX35_PAD_RXD1__UART1_RXD_MUX, /* UART3 */ MX35_PAD_ATA_DATA10__UART3_RXD_MUX, MX35_PAD_ATA_DATA11__UART3_TXD_MUX, /* FEC */ MX35_PAD_FEC_TX_CLK__FEC_TX_CLK, MX35_PAD_FEC_RX_CLK__FEC_RX_CLK, MX35_PAD_FEC_RX_DV__FEC_RX_DV, MX35_PAD_FEC_COL__FEC_COL, MX35_PAD_FEC_RDATA0__FEC_RDATA_0, MX35_PAD_FEC_TDATA0__FEC_TDATA_0, MX35_PAD_FEC_TX_EN__FEC_TX_EN, MX35_PAD_FEC_MDC__FEC_MDC, MX35_PAD_FEC_MDIO__FEC_MDIO, MX35_PAD_FEC_TX_ERR__FEC_TX_ERR, MX35_PAD_FEC_RX_ERR__FEC_RX_ERR, MX35_PAD_FEC_CRS__FEC_CRS, MX35_PAD_FEC_RDATA1__FEC_RDATA_1, MX35_PAD_FEC_TDATA1__FEC_TDATA_1, MX35_PAD_FEC_RDATA2__FEC_RDATA_2, MX35_PAD_FEC_TDATA2__FEC_TDATA_2, MX35_PAD_FEC_RDATA3__FEC_RDATA_3, MX35_PAD_FEC_TDATA3__FEC_TDATA_3, /* Display */ MX35_PAD_LD0__IPU_DISPB_DAT_0, MX35_PAD_LD1__IPU_DISPB_DAT_1, MX35_PAD_LD2__IPU_DISPB_DAT_2, MX35_PAD_LD3__IPU_DISPB_DAT_3, MX35_PAD_LD4__IPU_DISPB_DAT_4, MX35_PAD_LD5__IPU_DISPB_DAT_5, MX35_PAD_LD6__IPU_DISPB_DAT_6, MX35_PAD_LD7__IPU_DISPB_DAT_7, MX35_PAD_LD8__IPU_DISPB_DAT_8, MX35_PAD_LD9__IPU_DISPB_DAT_9, MX35_PAD_LD10__IPU_DISPB_DAT_10, MX35_PAD_LD11__IPU_DISPB_DAT_11, MX35_PAD_LD12__IPU_DISPB_DAT_12, MX35_PAD_LD13__IPU_DISPB_DAT_13, MX35_PAD_LD14__IPU_DISPB_DAT_14, MX35_PAD_LD15__IPU_DISPB_DAT_15, MX35_PAD_LD16__IPU_DISPB_DAT_16, MX35_PAD_LD17__IPU_DISPB_DAT_17, MX35_PAD_D3_FPSHIFT__IPU_DISPB_D3_CLK, MX35_PAD_D3_DRDY__IPU_DISPB_D3_DRDY, MX35_PAD_CONTRAST__IPU_DISPB_CONTR, 
/* LCD Enable */ MX35_PAD_D3_VSYNC__GPIO1_2, /* USBOTG */ MX35_PAD_USBOTG_PWR__USB_TOP_USBOTG_PWR, MX35_PAD_USBOTG_OC__USB_TOP_USBOTG_OC, /* SDCARD */ MX35_PAD_SD1_CMD__ESDHC1_CMD, MX35_PAD_SD1_CLK__ESDHC1_CLK, MX35_PAD_SD1_DATA0__ESDHC1_DAT0, MX35_PAD_SD1_DATA1__ESDHC1_DAT1, MX35_PAD_SD1_DATA2__ESDHC1_DAT2, MX35_PAD_SD1_DATA3__ESDHC1_DAT3, /* PMIC */ MX35_PAD_GPIO2_0__GPIO2_0, /* GPIO keys */ MX35_PAD_SCKR__GPIO1_4, MX35_PAD_COMPARE__GPIO1_5, MX35_PAD_SCKT__GPIO1_7, MX35_PAD_FST__GPIO1_8, MX35_PAD_HCKT__GPIO1_9, MX35_PAD_TX5_RX0__GPIO1_10, MX35_PAD_TX4_RX1__GPIO1_11, MX35_PAD_TX3_RX2__GPIO1_12, }; /* USB Device config */ static const struct fsl_usb2_platform_data otg_device_pdata __initconst = { .operating_mode = FSL_USB2_DR_DEVICE, .phy_mode = FSL_USB2_PHY_UTMI, .workaround = FLS_USB2_WORKAROUND_ENGCM09152, }; static int vpr200_usbh_init(struct platform_device *pdev) { return mx35_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_SINGLE_UNI | MXC_EHCI_INTERNAL_PHY); } /* USB HOST config */ static const struct mxc_usbh_platform_data usb_host_pdata __initconst = { .init = vpr200_usbh_init, .portsc = MXC_EHCI_MODE_SERIAL, }; static struct platform_device *devices[] __initdata = { &vpr200_flash, }; /* * Board specific initialization. 
*/ static void __init vpr200_board_init(void) { imx35_soc_init(); mxc_iomux_v3_setup_multiple_pads(vpr200_pads, ARRAY_SIZE(vpr200_pads)); imx35_add_fec(NULL); imx35_add_imx2_wdt(); imx_add_gpio_keys(&vpr200_gpio_keys_data); platform_add_devices(devices, ARRAY_SIZE(devices)); if (0 != gpio_request(GPIO_LCDPWR, "LCDPWR")) printk(KERN_WARNING "vpr200: Couldn't get LCDPWR gpio\n"); else gpio_direction_output(GPIO_LCDPWR, 0); if (0 != gpio_request(GPIO_PMIC_INT, "PMIC_INT")) printk(KERN_WARNING "vpr200: Couldn't get PMIC_INT gpio\n"); else gpio_direction_input(GPIO_PMIC_INT); imx35_add_imx_uart0(NULL); imx35_add_imx_uart2(NULL); imx35_add_ipu_core(); imx35_add_mx3_sdc_fb(&mx3fb_pdata); imx35_add_fsl_usb2_udc(&otg_device_pdata); imx35_add_mxc_ehci_hs(&usb_host_pdata); imx35_add_mxc_nand(&vpr200_nand_board_info); imx35_add_sdhci_esdhc_imx(0, NULL); vpr200_i2c_devices[1].irq = gpio_to_irq(GPIO_PMIC_INT); i2c_register_board_info(0, vpr200_i2c_devices, ARRAY_SIZE(vpr200_i2c_devices)); imx35_add_imx_i2c0(&vpr200_i2c0_data); } static void __init vpr200_timer_init(void) { mx35_clocks_init(); } MACHINE_START(VPR200, "VPR200") /* Maintainer: Creative Product Design */ .map_io = mx35_map_io, .init_early = imx35_init_early, .init_irq = mx35_init_irq, .init_time = vpr200_timer_init, .init_machine = vpr200_board_init, .restart = mxc_restart, MACHINE_END
gpl-2.0
hroark13/Warp_Kernel-Jellybean
drivers/scsi/aha1740.c
935
19631
/* $Id$ * 1993/03/31 * linux/kernel/aha1740.c * * Based loosely on aha1542.c which is * Copyright (C) 1992 Tommy Thorn and * Modified by Eric Youngdale * * This file is aha1740.c, written and * Copyright (C) 1992,1993 Brad McLean * brad@saturn.gaylord.com or brad@bradpc.gaylord.com. * * Modifications to makecode and queuecommand * for proper handling of multiple devices courteously * provided by Michael Weller, March, 1993 * * Multiple adapter support, extended translation detection, * update to current scsi subsystem changes, proc fs support, * working (!) module support based on patches from Andreas Arens, * by Andreas Degert <ad@papyrus.hamburg.com>, 2/1997 * * aha1740_makecode may still need even more work * if it doesn't work for your devices, take a look. * * Reworked for new_eh and new locking by Alan Cox <alan@lxorguk.ukuu.org.uk> * * Converted to EISA and generic DMA APIs by Marc Zyngier * <maz@wild-wind.fr.eu.org>, 4/2003. * * Shared interrupt support added by Rask Ingemann Lambertsen * <rask@sygehus.dk>, 10/2003 * * For the avoidance of doubt the "preferred form" of this code is one which * is in an open non patent encumbered format. Where cryptographic key signing * forms part of the process of creating an executable the information * including keys needed to generate an equivalently functional executable * are deemed to be part of the source code. 
*/ #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/init.h> #include <linux/device.h> #include <linux/eisa.h> #include <linux/dma-mapping.h> #include <linux/gfp.h> #include <asm/dma.h> #include <asm/system.h> #include <asm/io.h> #include "scsi.h" #include <scsi/scsi_host.h> #include "aha1740.h" /* IF YOU ARE HAVING PROBLEMS WITH THIS DRIVER, AND WANT TO WATCH IT WORK, THEN: #define DEBUG */ #ifdef DEBUG #define DEB(x) x #else #define DEB(x) #endif struct aha1740_hostdata { struct eisa_device *edev; unsigned int translation; unsigned int last_ecb_used; dma_addr_t ecb_dma_addr; struct ecb ecb[AHA1740_ECBS]; }; struct aha1740_sg { struct aha1740_chain sg_chain[AHA1740_SCATTER]; dma_addr_t sg_dma_addr; dma_addr_t buf_dma_addr; }; #define HOSTDATA(host) ((struct aha1740_hostdata *) &host->hostdata) static inline struct ecb *ecb_dma_to_cpu (struct Scsi_Host *host, dma_addr_t dma) { struct aha1740_hostdata *hdata = HOSTDATA (host); dma_addr_t offset; offset = dma - hdata->ecb_dma_addr; return (struct ecb *)(((char *) hdata->ecb) + (unsigned int) offset); } static inline dma_addr_t ecb_cpu_to_dma (struct Scsi_Host *host, void *cpu) { struct aha1740_hostdata *hdata = HOSTDATA (host); dma_addr_t offset; offset = (char *) cpu - (char *) hdata->ecb; return hdata->ecb_dma_addr + offset; } static int aha1740_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start, off_t offset, int length, int inout) { int len; struct aha1740_hostdata *host; if (inout) return-ENOSYS; host = HOSTDATA(shpnt); len = sprintf(buffer, "aha174x at IO:%lx, IRQ %d, SLOT %d.\n" "Extended translation %sabled.\n", shpnt->io_port, shpnt->irq, host->edev->slot, host->translation ? 
"en" : "dis"); if (offset > len) { *start = buffer; return 0; } *start = buffer + offset; len -= offset; if (len > length) len = length; return len; } static int aha1740_makecode(unchar *sense, unchar *status) { struct statusword { ushort don:1, /* Command Done - No Error */ du:1, /* Data underrun */ :1, qf:1, /* Queue full */ sc:1, /* Specification Check */ dor:1, /* Data overrun */ ch:1, /* Chaining Halted */ intr:1, /* Interrupt issued */ asa:1, /* Additional Status Available */ sns:1, /* Sense information Stored */ :1, ini:1, /* Initialization Required */ me:1, /* Major error or exception */ :1, eca:1, /* Extended Contingent alliance */ :1; } status_word; int retval = DID_OK; status_word = * (struct statusword *) status; #ifdef DEBUG printk("makecode from %x,%x,%x,%x %x,%x,%x,%x", status[0], status[1], status[2], status[3], sense[0], sense[1], sense[2], sense[3]); #endif if (!status_word.don) { /* Anything abnormal was detected */ if ( (status[1]&0x18) || status_word.sc ) { /*Additional info available*/ /* Use the supplied info for further diagnostics */ switch ( status[2] ) { case 0x12: if ( status_word.dor ) retval=DID_ERROR; /* It's an Overrun */ /* If not overrun, assume underrun and * ignore it! 
*/ case 0x00: /* No info, assume no error, should * not occur */ break; case 0x11: case 0x21: retval=DID_TIME_OUT; break; case 0x0a: retval=DID_BAD_TARGET; break; case 0x04: case 0x05: retval=DID_ABORT; /* Either by this driver or the * AHA1740 itself */ break; default: retval=DID_ERROR; /* No further * diagnostics * possible */ } } else { /* Michael suggests, and Brad concurs: */ if ( status_word.qf ) { retval = DID_TIME_OUT; /* forces a redo */ /* I think this specific one should * not happen -Brad */ printk("aha1740.c: WARNING: AHA1740 queue overflow!\n"); } else if ( status[0]&0x60 ) { /* Didn't find a better error */ retval = DID_ERROR; } /* In any other case return DID_OK so for example CONDITION_CHECKS make it through to the appropriate device driver */ } } /* Under all circumstances supply the target status -Michael */ return status[3] | retval << 16; } static int aha1740_test_port(unsigned int base) { if ( inb(PORTADR(base)) & PORTADDR_ENH ) return 1; /* Okay, we're all set */ printk("aha174x: Board detected, but not in enhanced mode, so disabled it.\n"); return 0; } /* A "high" level interrupt handler */ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id) { struct Scsi_Host *host = (struct Scsi_Host *) dev_id; void (*my_done)(Scsi_Cmnd *); int errstatus, adapstat; int number_serviced; struct ecb *ecbptr; Scsi_Cmnd *SCtmp; unsigned int base; unsigned long flags; int handled = 0; struct aha1740_sg *sgptr; struct eisa_device *edev; if (!host) panic("aha1740.c: Irq from unknown host!\n"); spin_lock_irqsave(host->host_lock, flags); base = host->io_port; number_serviced = 0; edev = HOSTDATA(host)->edev; while(inb(G2STAT(base)) & G2STAT_INTPEND) { handled = 1; DEB(printk("aha1740_intr top of loop.\n")); adapstat = inb(G2INTST(base)); ecbptr = ecb_dma_to_cpu (host, inl(MBOXIN0(base))); outb(G2CNTRL_IRST,G2CNTRL(base)); /* interrupt reset */ switch ( adapstat & G2INTST_MASK ) { case G2INTST_CCBRETRY: case G2INTST_CCBERROR: case G2INTST_CCBGOOD: /* Host 
Ready -> Mailbox in complete */ outb(G2CNTRL_HRDY,G2CNTRL(base)); if (!ecbptr) { printk("Aha1740 null ecbptr in interrupt (%x,%x,%x,%d)\n", inb(G2STAT(base)),adapstat, inb(G2INTST(base)), number_serviced++); continue; } SCtmp = ecbptr->SCpnt; if (!SCtmp) { printk("Aha1740 null SCtmp in interrupt (%x,%x,%x,%d)\n", inb(G2STAT(base)),adapstat, inb(G2INTST(base)), number_serviced++); continue; } sgptr = (struct aha1740_sg *) SCtmp->host_scribble; scsi_dma_unmap(SCtmp); /* Free the sg block */ dma_free_coherent (&edev->dev, sizeof (struct aha1740_sg), SCtmp->host_scribble, sgptr->sg_dma_addr); /* Fetch the sense data, and tuck it away, in the required slot. The Adaptec automatically fetches it, and there is no guarantee that we will still have it in the cdb when we come back */ if ( (adapstat & G2INTST_MASK) == G2INTST_CCBERROR ) { memcpy(SCtmp->sense_buffer, ecbptr->sense, SCSI_SENSE_BUFFERSIZE); errstatus = aha1740_makecode(ecbptr->sense,ecbptr->status); } else errstatus = 0; DEB(if (errstatus) printk("aha1740_intr_handle: returning %6x\n", errstatus)); SCtmp->result = errstatus; my_done = ecbptr->done; memset(ecbptr,0,sizeof(struct ecb)); if ( my_done ) my_done(SCtmp); break; case G2INTST_HARDFAIL: printk(KERN_ALERT "aha1740 hardware failure!\n"); panic("aha1740.c"); /* Goodbye */ case G2INTST_ASNEVENT: printk("aha1740 asynchronous event: %02x %02x %02x %02x %02x\n", adapstat, inb(MBOXIN0(base)), inb(MBOXIN1(base)), inb(MBOXIN2(base)), inb(MBOXIN3(base))); /* Say What? 
*/ /* Host Ready -> Mailbox in complete */ outb(G2CNTRL_HRDY,G2CNTRL(base)); break; case G2INTST_CMDGOOD: /* set immediate command success flag here: */ break; case G2INTST_CMDERROR: /* Set immediate command failure flag here: */ break; } number_serviced++; } spin_unlock_irqrestore(host->host_lock, flags); return IRQ_RETVAL(handled); } static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *)) { unchar direction; unchar *cmd = (unchar *) SCpnt->cmnd; unchar target = scmd_id(SCpnt); struct aha1740_hostdata *host = HOSTDATA(SCpnt->device->host); unsigned long flags; dma_addr_t sg_dma; struct aha1740_sg *sgptr; int ecbno, nseg; DEB(int i); if(*cmd == REQUEST_SENSE) { SCpnt->result = 0; done(SCpnt); return 0; } #ifdef DEBUG if (*cmd == READ_10 || *cmd == WRITE_10) i = xscsi2int(cmd+2); else if (*cmd == READ_6 || *cmd == WRITE_6) i = scsi2int(cmd+2); else i = -1; printk("aha1740_queuecommand: dev %d cmd %02x pos %d len %d ", target, *cmd, i, bufflen); printk("scsi cmd:"); for (i = 0; i < SCpnt->cmd_len; i++) printk("%02x ", cmd[i]); printk("\n"); #endif /* locate an available ecb */ spin_lock_irqsave(SCpnt->device->host->host_lock, flags); ecbno = host->last_ecb_used + 1; /* An optimization */ if (ecbno >= AHA1740_ECBS) ecbno = 0; do { if (!host->ecb[ecbno].cmdw) break; ecbno++; if (ecbno >= AHA1740_ECBS) ecbno = 0; } while (ecbno != host->last_ecb_used); if (host->ecb[ecbno].cmdw) panic("Unable to find empty ecb for aha1740.\n"); host->ecb[ecbno].cmdw = AHA1740CMD_INIT; /* SCSI Initiator Command doubles as reserved flag */ host->last_ecb_used = ecbno; spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags); #ifdef DEBUG printk("Sending command (%d %x)...", ecbno, done); #endif host->ecb[ecbno].cdblen = SCpnt->cmd_len; /* SCSI Command * Descriptor Block * Length */ direction = 0; if (*cmd == READ_10 || *cmd == READ_6) direction = 1; else if (*cmd == WRITE_10 || *cmd == WRITE_6) direction = 0; memcpy(host->ecb[ecbno].cdb, cmd, SCpnt->cmd_len); 
SCpnt->host_scribble = dma_alloc_coherent (&host->edev->dev, sizeof (struct aha1740_sg), &sg_dma, GFP_ATOMIC); if(SCpnt->host_scribble == NULL) { printk(KERN_WARNING "aha1740: out of memory in queuecommand!\n"); return 1; } sgptr = (struct aha1740_sg *) SCpnt->host_scribble; sgptr->sg_dma_addr = sg_dma; nseg = scsi_dma_map(SCpnt); BUG_ON(nseg < 0); if (nseg) { struct scatterlist *sg; struct aha1740_chain * cptr; int i; DEB(unsigned char * ptr); host->ecb[ecbno].sg = 1; /* SCSI Initiator Command * w/scatter-gather*/ cptr = sgptr->sg_chain; scsi_for_each_sg(SCpnt, sg, nseg, i) { cptr[i].datalen = sg_dma_len (sg); cptr[i].dataptr = sg_dma_address (sg); } host->ecb[ecbno].datalen = nseg * sizeof(struct aha1740_chain); host->ecb[ecbno].dataptr = sg_dma; #ifdef DEBUG printk("cptr %x: ",cptr); ptr = (unsigned char *) cptr; for(i=0;i<24;i++) printk("%02x ", ptr[i]); #endif } else { host->ecb[ecbno].datalen = 0; host->ecb[ecbno].dataptr = 0; } host->ecb[ecbno].lun = SCpnt->device->lun; host->ecb[ecbno].ses = 1; /* Suppress underrun errors */ host->ecb[ecbno].dir = direction; host->ecb[ecbno].ars = 1; /* Yes, get the sense on an error */ host->ecb[ecbno].senselen = 12; host->ecb[ecbno].senseptr = ecb_cpu_to_dma (SCpnt->device->host, host->ecb[ecbno].sense); host->ecb[ecbno].statusptr = ecb_cpu_to_dma (SCpnt->device->host, host->ecb[ecbno].status); host->ecb[ecbno].done = done; host->ecb[ecbno].SCpnt = SCpnt; #ifdef DEBUG { int i; printk("aha1740_command: sending.. "); for (i = 0; i < sizeof(host->ecb[ecbno]) - 10; i++) printk("%02x ", ((unchar *)&host->ecb[ecbno])[i]); } printk("\n"); #endif if (done) { /* The Adaptec Spec says the card is so fast that the loops will only be executed once in the code below. Even if this was true with the fastest processors when the spec was written, it doesn't seem to be true with todays fast processors. We print a warning if the code is executed more often than LOOPCNT_WARN. If this happens, it should be investigated. 
If the count reaches LOOPCNT_MAX, we assume something is broken; since there is no way to return an error (the return value is ignored by the mid-level scsi layer) we have to panic (and maybe that's the best thing we can do then anyhow). */ #define LOOPCNT_WARN 10 /* excessive mbxout wait -> syslog-msg */ #define LOOPCNT_MAX 1000000 /* mbxout deadlock -> panic() after ~ 2 sec. */ int loopcnt; unsigned int base = SCpnt->device->host->io_port; DEB(printk("aha1740[%d] critical section\n",ecbno)); spin_lock_irqsave(SCpnt->device->host->host_lock, flags); for (loopcnt = 0; ; loopcnt++) { if (inb(G2STAT(base)) & G2STAT_MBXOUT) break; if (loopcnt == LOOPCNT_WARN) { printk("aha1740[%d]_mbxout wait!\n",ecbno); } if (loopcnt == LOOPCNT_MAX) panic("aha1740.c: mbxout busy!\n"); } outl (ecb_cpu_to_dma (SCpnt->device->host, host->ecb + ecbno), MBOXOUT0(base)); for (loopcnt = 0; ; loopcnt++) { if (! (inb(G2STAT(base)) & G2STAT_BUSY)) break; if (loopcnt == LOOPCNT_WARN) { printk("aha1740[%d]_attn wait!\n",ecbno); } if (loopcnt == LOOPCNT_MAX) panic("aha1740.c: attn wait failed!\n"); } outb(ATTN_START | (target & 7), ATTN(base)); /* Start it up */ spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags); DEB(printk("aha1740[%d] request queued.\n",ecbno)); } else printk(KERN_ALERT "aha1740_queuecommand: done can't be NULL\n"); return 0; } /* Query the board for its irq_level and irq_type. Nothing else matters in enhanced mode on an EISA bus. 
*/ static void aha1740_getconfig(unsigned int base, unsigned int *irq_level, unsigned int *irq_type, unsigned int *translation) { static int intab[] = { 9, 10, 11, 12, 0, 14, 15, 0 }; *irq_level = intab[inb(INTDEF(base)) & 0x7]; *irq_type = (inb(INTDEF(base)) & 0x8) >> 3; *translation = inb(RESV1(base)) & 0x1; outb(inb(INTDEF(base)) | 0x10, INTDEF(base)); } static int aha1740_biosparam(struct scsi_device *sdev, struct block_device *dev, sector_t capacity, int* ip) { int size = capacity; int extended = HOSTDATA(sdev->host)->translation; DEB(printk("aha1740_biosparam\n")); if (extended && (ip[2] > 1024)) { ip[0] = 255; ip[1] = 63; ip[2] = size / (255 * 63); } else { ip[0] = 64; ip[1] = 32; ip[2] = size >> 11; } return 0; } static int aha1740_eh_abort_handler (Scsi_Cmnd *dummy) { /* * From Alan Cox : * The AHA1740 has firmware handled abort/reset handling. The "head in * sand" kernel code is correct for once 8) * * So we define a dummy handler just to keep the kernel SCSI code as * quiet as possible... 
*/ return 0;
}

/*
 * SCSI host template for Adaptec 174x EISA boards.  All handler
 * functions referenced here (queuecommand, proc_info, biosparam,
 * eh_abort_handler) are defined earlier in this file.
 */
static struct scsi_host_template aha1740_template = {
	.module           = THIS_MODULE,
	.proc_name        = "aha1740",
	.proc_info        = aha1740_proc_info,
	.name             = "Adaptec 174x (EISA)",
	.queuecommand     = aha1740_queuecommand,
	.bios_param       = aha1740_biosparam,
	.can_queue        = AHA1740_ECBS,
	.this_id          = 7,
	.sg_tablesize     = AHA1740_SCATTER,
	.cmd_per_lun      = AHA1740_CMDLUN,
	.use_clustering   = ENABLE_CLUSTERING,
	.eh_abort_handler = aha1740_eh_abort_handler,
};

/*
 * EISA bus probe callback.  Claims the slot's I/O region, verifies the
 * board responds, reads its configuration, allocates and registers a
 * SCSI host, maps the ECB array for DMA, and installs the interrupt
 * handler.  Returns 0 on success, -EBUSY if the I/O region is taken,
 * -ENODEV on any other failure (resources are unwound via the goto
 * ladder at the bottom).
 */
static int aha1740_probe (struct device *dev)
{
	int slotbase, rc;
	unsigned int irq_level, irq_type, translation;
	struct Scsi_Host *shpnt;
	struct aha1740_hostdata *host;
	struct eisa_device *edev = to_eisa_device (dev);

	DEB(printk("aha1740_probe: \n"));

	slotbase = edev->base_addr + EISA_VENDOR_ID_OFFSET;
	if (!request_region(slotbase, SLOTSIZE, "aha1740")) /* See if in use */
		return -EBUSY;
	if (!aha1740_test_port(slotbase))
		goto err_release_region;
	aha1740_getconfig(slotbase,&irq_level,&irq_type,&translation);
	if ((inb(G2STAT(slotbase)) & (G2STAT_MBXOUT|G2STAT_BUSY)) != G2STAT_MBXOUT) {
		/* If the card isn't ready, hard reset it */
		outb(G2CNTRL_HRST, G2CNTRL(slotbase));
		outb(0, G2CNTRL(slotbase));
	}
	printk(KERN_INFO "Configuring slot %d at IO:%x, IRQ %u (%s)\n",
	       edev->slot, slotbase, irq_level, irq_type ? "edge" : "level");
	printk(KERN_INFO "aha174x: Extended translation %sabled.\n",
	       translation ? "en" : "dis");

	shpnt = scsi_host_alloc(&aha1740_template, sizeof(struct aha1740_hostdata));
	if(shpnt == NULL)
		goto err_release_region;

	shpnt->base = 0;
	shpnt->io_port = slotbase;
	shpnt->n_io_port = SLOTSIZE;
	shpnt->irq = irq_level;
	shpnt->dma_channel = 0xff;
	host = HOSTDATA(shpnt);
	host->edev = edev;
	host->translation = translation;
	/* Map the ECB array once for the lifetime of the host. */
	host->ecb_dma_addr = dma_map_single (&edev->dev, host->ecb,
					     sizeof (host->ecb), DMA_BIDIRECTIONAL);
	if (!host->ecb_dma_addr) {
		printk (KERN_ERR "aha1740_probe: Couldn't map ECB, giving up\n");
		/* NOTE(review): scsi_unregister() here followed by the
		 * scsi_host_put() at err_host_put looks like it may release
		 * the host twice — confirm against the scsi_host_alloc()
		 * contract for this kernel version. */
		scsi_unregister (shpnt);
		goto err_host_put;
	}

	DEB(printk("aha1740_probe: enable interrupt channel %d\n",irq_level));
	/* Edge-triggered boards cannot share the IRQ line. */
	if (request_irq(irq_level,aha1740_intr_handle,irq_type ? 0 : IRQF_SHARED,
			"aha1740",shpnt)) {
		printk(KERN_ERR "aha1740_probe: Unable to allocate IRQ %d.\n",
		       irq_level);
		goto err_unmap;
	}
	eisa_set_drvdata (edev, shpnt);

	rc = scsi_add_host (shpnt, dev);
	if (rc)
		goto err_irq;

	scsi_scan_host (shpnt);
	return 0;

 err_irq:
	free_irq(irq_level, shpnt);
 err_unmap:
	dma_unmap_single (&edev->dev, host->ecb_dma_addr,
			  sizeof (host->ecb), DMA_BIDIRECTIONAL);
 err_host_put:
	scsi_host_put (shpnt);
 err_release_region:
	release_region(slotbase, SLOTSIZE);
	return -ENODEV;
}

/*
 * EISA bus remove callback: tear down in reverse order of probe —
 * unregister the SCSI host, free the IRQ, unmap the ECB DMA mapping,
 * release the I/O region, and drop the final host reference.
 */
static __devexit int aha1740_remove (struct device *dev)
{
	struct Scsi_Host *shpnt = dev_get_drvdata(dev);
	struct aha1740_hostdata *host = HOSTDATA (shpnt);

	scsi_remove_host(shpnt);
	free_irq (shpnt->irq, shpnt);
	dma_unmap_single (dev, host->ecb_dma_addr,
			  sizeof (host->ecb), DMA_BIDIRECTIONAL);
	release_region (shpnt->io_port, SLOTSIZE);
	scsi_host_put (shpnt);
	return 0;
}

/* EISA IDs this driver binds to; the empty string terminates the table. */
static struct eisa_device_id aha1740_ids[] = {
	{ "ADP0000" },		/* 1740 */
	{ "ADP0001" },		/* 1740A */
	{ "ADP0002" },		/* 1742A */
	{ "ADP0400" },		/* 1744 */
	{ "" }
};
MODULE_DEVICE_TABLE(eisa, aha1740_ids);

static struct eisa_driver aha1740_driver = {
	.id_table = aha1740_ids,
	.driver   = {
		.name   = "aha1740",
		.probe  = aha1740_probe,
		.remove = __devexit_p (aha1740_remove),
	},
};

/* Module entry/exit: just register/unregister with the EISA core. */
static __init int aha1740_init (void)
{
	return eisa_driver_register (&aha1740_driver);
}

static __exit void aha1740_exit (void)
{
	eisa_driver_unregister (&aha1740_driver);
}

module_init (aha1740_init);
module_exit (aha1740_exit);

MODULE_LICENSE("GPL");
gpl-2.0
sduc/linux
drivers/net/ethernet/amd/mvme147.c
1703
5786
/* mvme147.c : the Linux/mvme147/lance ethernet driver
 *
 * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
 * Based on the Sun Lance driver and the NetBSD HP Lance driver
 * Uses the generic 7990.c LANCE code.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/gfp.h>
/* Used for the temporal inet entries and routing */
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/mvme147hw.h>

/* We have 32K of RAM for the init block and buffers. This places
 * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
 * buffers and 2 Tx buffers, it takes (8 + 2) * 1544 bytes.
 */
#define LANCE_LOG_TX_BUFFERS 1	/* 2^1 = 2 Tx buffers */
#define LANCE_LOG_RX_BUFFERS 3	/* 2^3 = 8 Rx buffers */

#include "7990.h" /* use generic LANCE code */

/* Our private data structure: the generic LANCE state plus the
 * address of the 32K DMA buffer area allocated in the probe routine. */
struct m147lance_private {
	struct lance_private lance;
	unsigned long ram;
};

/* function prototypes... This is easy because all the grot is in the
 * generic LANCE support. All we have to support is probing for boards,
 * plus board-specific init, open and close actions.
 * Oh, and we need to tell the generic code how to read and write LANCE registers...
 */
static int m147lance_open(struct net_device *dev);
static int m147lance_close(struct net_device *dev);
static void m147lance_writerap(struct lance_private *lp, unsigned short value);
static void m147lance_writerdp(struct lance_private *lp, unsigned short value);
static unsigned short m147lance_readrdp(struct lance_private *lp);

/* Cast targets matching the register-access hooks in struct lance_private. */
typedef void (*writerap_t)(void *, unsigned short);
typedef void (*writerdp_t)(void *, unsigned short);
typedef unsigned short (*readrdp_t)(void *);

static const struct net_device_ops lance_netdev_ops = {
	.ndo_open		= m147lance_open,
	.ndo_stop		= m147lance_close,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_set_rx_mode	= lance_set_multicast,
	.ndo_tx_timeout		= lance_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

/* Initialise the one and only on-board 7990.
 *
 * Allocates the net_device, derives the MAC address from the on-board
 * serial number, grabs 32K of DMA-capable RAM for the init block and
 * ring buffers, fills in the generic 7990 state, and registers the
 * device.  Returns the net_device on success or an ERR_PTR on failure.
 * The 'called' static guard ensures only one instance is ever probed.
 */
struct net_device * __init mvme147lance_probe(int unit)
{
	struct net_device *dev;
	static int called;
	static const char name[] = "MVME147 LANCE";
	struct m147lance_private *lp;
	u_long *addr;
	u_long address;
	int err;

	if (!MACH_IS_MVME147 || called)
		return ERR_PTR(-ENODEV);
	called++;

	dev = alloc_etherdev(sizeof(struct m147lance_private));
	if (!dev)
		return ERR_PTR(-ENOMEM);

	if (unit >= 0)
		sprintf(dev->name, "eth%d", unit);

	/* Fill the dev fields */
	dev->base_addr = (unsigned long)MVME147_LANCE_BASE;
	dev->netdev_ops = &lance_netdev_ops;
	dev->dma = 0;

	/* MAC address: fixed 08:00:3e vendor prefix, low three bytes
	 * taken from the board's serial-number word at ETHERNET_ADDRESS
	 * (extracted byte-by-byte, lowest byte -> dev_addr[5]). */
	addr = (u_long *)ETHERNET_ADDRESS;
	address = *addr;
	dev->dev_addr[0] = 0x08;
	dev->dev_addr[1] = 0x00;
	dev->dev_addr[2] = 0x3e;
	address = address >> 8;
	dev->dev_addr[5] = address&0xff;
	address = address >> 8;
	dev->dev_addr[4] = address&0xff;
	address = address >> 8;
	dev->dev_addr[3] = address&0xff;

	printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n",
	       dev->name, dev->base_addr, MVME147_LANCE_IRQ, dev->dev_addr);

	lp = netdev_priv(dev);
	lp->ram = __get_dma_pages(GFP_ATOMIC, 3);	/* 32K */
	if (!lp->ram) {
		printk("%s: No memory for LANCE buffers\n", dev->name);
		free_netdev(dev);
		return ERR_PTR(-ENOMEM);
	}

	lp->lance.name = name;
	lp->lance.base = dev->base_addr;
	lp->lance.init_block = (struct lance_init_block *)(lp->ram); /* CPU addr */
	lp->lance.lance_init_block = (struct lance_init_block *)(lp->ram); /* LANCE addr of same RAM */
	lp->lance.busmaster_regval = LE_C3_BSWP;	/* we're bigendian */
	lp->lance.irq = MVME147_LANCE_IRQ;
	/* NOTE(review): casting struct lance_private* handlers to void*
	 * hook types — works here but is formally an incompatible
	 * function-pointer conversion; matches the 7990.h interface. */
	lp->lance.writerap = (writerap_t)m147lance_writerap;
	lp->lance.writerdp = (writerdp_t)m147lance_writerdp;
	lp->lance.readrdp = (readrdp_t)m147lance_readrdp;
	lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
	lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
	lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
	lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;

	err = register_netdev(dev);
	if (err) {
		free_pages(lp->ram, 3);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	return dev;
}

/* Write the LANCE register-address-port (big-endian MMIO). */
static void m147lance_writerap(struct lance_private *lp, unsigned short value)
{
	out_be16(lp->base + LANCE_RAP, value);
}

/* Write the LANCE register-data-port (big-endian MMIO). */
static void m147lance_writerdp(struct lance_private *lp, unsigned short value)
{
	out_be16(lp->base + LANCE_RDP, value);
}

/* Read the LANCE register-data-port (big-endian MMIO). */
static unsigned short m147lance_readrdp(struct lance_private *lp)
{
	return in_be16(lp->base + LANCE_RDP);
}

static int m147lance_open(struct net_device *dev)
{
	int status;

	status = lance_open(dev); /* call generic lance open code */
	if (status)
		return status;
	/* enable interrupts at board level. */
	m147_pcc->lan_cntrl = 0;		/* clear the interrupts (if any) */
	m147_pcc->lan_cntrl = 0x08 | 0x04;	/* Enable irq 4 */

	return 0;
}

static int m147lance_close(struct net_device *dev)
{
	/* disable interrupts at boardlevel */
	m147_pcc->lan_cntrl = 0x0; /* disable interrupts */
	lance_close(dev);
	return 0;
}

#ifdef MODULE
MODULE_LICENSE("GPL");

static struct net_device *dev_mvme147_lance;

int __init init_module(void)
{
	dev_mvme147_lance = mvme147lance_probe(-1);
	return PTR_ERR_OR_ZERO(dev_mvme147_lance);
}

void __exit cleanup_module(void)
{
	struct m147lance_private *lp = netdev_priv(dev_mvme147_lance);
	unregister_netdev(dev_mvme147_lance);
	free_pages(lp->ram, 3);
	free_netdev(dev_mvme147_lance);
}

#endif /* MODULE */
gpl-2.0
liqiang199105/linux
arch/arm/mach-tegra/cpuidle.c
1959
1541
/* * arch/arm/mach-tegra/cpuidle.c * * CPU idle driver for Tegra CPUs * * Copyright (c) 2010-2012, NVIDIA Corporation. * Copyright (c) 2011 Google, Inc. * Author: Colin Cross <ccross@android.com> * Gary King <gking@nvidia.com> * * Rework for 3.3 by Peter De Schrijver <pdeschrijver@nvidia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <soc/tegra/fuse.h> #include "cpuidle.h" void __init tegra_cpuidle_init(void) { switch (tegra_get_chip_id()) { case TEGRA20: if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC)) tegra20_cpuidle_init(); break; case TEGRA30: if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC)) tegra30_cpuidle_init(); break; case TEGRA114: case TEGRA124: if (IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) || IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC)) tegra114_cpuidle_init(); break; } } void tegra_cpuidle_pcie_irqs_in_use(void) { switch (tegra_get_chip_id()) { case TEGRA20: if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC)) tegra20_cpuidle_pcie_irqs_in_use(); break; } }
gpl-2.0
civato/Note8.0-StormCharger-Android-4.2.2
fs/gfs2/export.c
1959
4970
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>

#include "gfs2.h"
#include "incore.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "super.h"
#include "rgrp.h"
#include "util.h"

/* File handle sizes in 32-bit words: 4 words encode one inode
 * (formal ino + block address, 64 bits each), 8 words add the parent,
 * 10 is the legacy on-the-wire size still accepted for decode. */
#define GFS2_SMALL_FH_SIZE 4
#define GFS2_LARGE_FH_SIZE 8
#define GFS2_OLD_FH_SIZE 10

/*
 * Encode an NFS file handle for @dentry into @p.  The small form holds
 * the inode's no_formal_ino and no_addr as big-endian 32-bit halves;
 * when @connectable is set (and the dentry is not the root), the parent
 * directory's pair is appended.  On insufficient buffer space, *len is
 * set to the required size and 255 is returned; otherwise returns the
 * handle length written.
 */
static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len,
			  int connectable)
{
	__be32 *fh = (__force __be32 *)p;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (connectable && (*len < GFS2_LARGE_FH_SIZE)) {
		*len = GFS2_LARGE_FH_SIZE;
		return 255;
	} else if (*len < GFS2_SMALL_FH_SIZE) {
		*len = GFS2_SMALL_FH_SIZE;
		return 255;
	}

	fh[0] = cpu_to_be32(ip->i_no_formal_ino >> 32);
	fh[1] = cpu_to_be32(ip->i_no_formal_ino & 0xFFFFFFFF);
	fh[2] = cpu_to_be32(ip->i_no_addr >> 32);
	fh[3] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF);
	*len = GFS2_SMALL_FH_SIZE;

	if (!connectable || inode == sb->s_root->d_inode)
		return *len;

	/* d_lock pins d_parent while we grab a reference to the parent
	 * inode; the reference is dropped after its fields are copied. */
	spin_lock(&dentry->d_lock);
	inode = dentry->d_parent->d_inode;
	ip = GFS2_I(inode);
	igrab(inode);
	spin_unlock(&dentry->d_lock);

	fh[4] = cpu_to_be32(ip->i_no_formal_ino >> 32);
	fh[5] = cpu_to_be32(ip->i_no_formal_ino & 0xFFFFFFFF);
	fh[6] = cpu_to_be32(ip->i_no_addr >> 32);
	fh[7] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF);
	*len = GFS2_LARGE_FH_SIZE;

	iput(inode);

	return *len;
}

/* Context passed to the directory-read callback below: the inode
 * number being searched for and the output buffer for its name. */
struct get_name_filldir {
	struct gfs2_inum_host inum;
	char *name;
};

/*
 * Directory iteration callback for gfs2_get_name(): copies out the
 * entry's name (NUL-terminated) when its inode address matches, and
 * returns 1 to stop the scan; returns 0 to keep iterating.
 */
static int get_name_filldir(void *opaque, const char *name, int length,
			    loff_t offset, u64 inum, unsigned int type)
{
	struct get_name_filldir *gnfd = opaque;

	if (inum != gnfd->inum.no_addr)
		return 0;

	memcpy(gnfd->name, name, length);
	gnfd->name[length] = 0;

	return 1;
}

/*
 * export_operations->get_name: find @child's name within directory
 * @parent by scanning the directory under a shared glock.  Fills
 * @name and returns 0, or -ENOENT if no entry matched, or another
 * negative errno on lock/read failure.
 */
static int gfs2_get_name(struct dentry *parent, char *name,
			 struct dentry *child)
{
	struct inode *dir = parent->d_inode;
	struct inode *inode = child->d_inode;
	struct gfs2_inode *dip, *ip;
	struct get_name_filldir gnfd;
	struct gfs2_holder gh;
	u64 offset = 0;
	int error;

	if (!dir)
		return -EINVAL;

	if (!S_ISDIR(dir->i_mode) || !inode)
		return -EINVAL;

	dip = GFS2_I(dir);
	ip = GFS2_I(inode);

	*name = 0;
	gnfd.inum.no_addr = ip->i_no_addr;
	gnfd.inum.no_formal_ino = ip->i_no_formal_ino;
	gnfd.name = name;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &gh);
	if (error)
		return error;

	error = gfs2_dir_read(dir, &offset, &gnfd, get_name_filldir);

	gfs2_glock_dq_uninit(&gh);

	/* Scan completed without finding the child: report ENOENT. */
	if (!error && !*name)
		error = -ENOENT;

	return error;
}

/* export_operations->get_parent: look up ".." and return its dentry. */
static struct dentry *gfs2_get_parent(struct dentry *child)
{
	return d_obtain_alias(gfs2_lookupi(child->d_inode, &gfs2_qdotdot, 1));
}

/*
 * Turn a decoded (no_addr, no_formal_ino) pair into a dentry.  Tries
 * the inode cache first; a cached inode whose formal ino does not
 * match means the block was reused, so the handle is stale.  Falls
 * back to an on-disk lookup by inode number.
 */
static struct dentry *gfs2_get_dentry(struct super_block *sb,
				      struct gfs2_inum_host *inum)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct inode *inode;

	inode = gfs2_ilookup(sb, inum->no_addr, 0);
	if (inode) {
		if (GFS2_I(inode)->i_no_formal_ino != inum->no_formal_ino) {
			iput(inode);
			return ERR_PTR(-ESTALE);
		}
		goto out_inode;
	}

	inode = gfs2_lookup_by_inum(sdp, inum->no_addr, &inum->no_formal_ino,
				    GFS2_BLKST_DINODE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

out_inode:
	return d_obtain_alias(inode);
}

/*
 * export_operations->fh_to_dentry: decode the target inode from words
 * 0-3 of the file handle.  All three handle sizes carry the target in
 * the same position; unknown types yield NULL (invalid handle).
 */
static struct dentry *gfs2_fh_to_dentry(struct super_block *sb,
					struct fid *fid,
					int fh_len, int fh_type)
{
	struct gfs2_inum_host this;
	__be32 *fh = (__force __be32 *)fid->raw;

	switch (fh_type) {
	case GFS2_SMALL_FH_SIZE:
	case GFS2_LARGE_FH_SIZE:
	case GFS2_OLD_FH_SIZE:
		this.no_formal_ino = ((u64)be32_to_cpu(fh[0])) << 32;
		this.no_formal_ino |= be32_to_cpu(fh[1]);
		this.no_addr = ((u64)be32_to_cpu(fh[2])) << 32;
		this.no_addr |= be32_to_cpu(fh[3]);
		return gfs2_get_dentry(sb, &this);
	default:
		return NULL;
	}
}

/*
 * export_operations->fh_to_parent: decode the parent directory from
 * words 4-7; only the large and old handle forms carry a parent.
 */
static struct dentry *gfs2_fh_to_parent(struct super_block *sb,
					struct fid *fid,
					int fh_len, int fh_type)
{
	struct gfs2_inum_host parent;
	__be32 *fh = (__force __be32 *)fid->raw;

	switch (fh_type) {
	case GFS2_LARGE_FH_SIZE:
	case GFS2_OLD_FH_SIZE:
		parent.no_formal_ino = ((u64)be32_to_cpu(fh[4])) << 32;
		parent.no_formal_ino |= be32_to_cpu(fh[5]);
		parent.no_addr = ((u64)be32_to_cpu(fh[6])) << 32;
		parent.no_addr |= be32_to_cpu(fh[7]);
		return gfs2_get_dentry(sb, &parent);
	default:
		return NULL;
	}
}

const struct export_operations gfs2_export_ops = {
	.encode_fh = gfs2_encode_fh,
	.fh_to_dentry = gfs2_fh_to_dentry,
	.fh_to_parent = gfs2_fh_to_parent,
	.get_name = gfs2_get_name,
	.get_parent = gfs2_get_parent,
};
gpl-2.0